From 24ab0dca5c5df4173963863220a6777d35efbbbf Mon Sep 17 00:00:00 2001
From: Zachary James Williamson
Date: Sat, 18 Mar 2023 17:38:49 +0000
Subject: [PATCH] Added dynamic array abstraction into standard library (#112)

Implements RAM/ROM memory abstractions and dynamic arrays, and separates all
fixed_base operations in standard plonk into a separate file so that they are
no longer part of the composer
---
 cpp/.gitignore                                |   3 +
 .../standard_circuit_constructor.cpp          | 240 -------------
 .../standard_circuit_constructor.test.cpp     | 168 ----------
 .../honk/composer/standard_honk_composer.hpp  |  12 -
 .../honk/composer/standard_plonk_composer.hpp |  12 -
 .../composer/standard_plonk_composer.test.cpp | 179 ----------
 .../plonk/composer/standard_composer.cpp      | 240 -------------
 .../plonk/composer/standard_composer.hpp      |   5 -
 .../plonk/composer/standard_composer.test.cpp | 181 +---------
 .../plonk/composer/ultra_composer.cpp         | 110 +-----
 .../plonk/composer/ultra_composer.hpp         | 119 ++++---
 .../plonk/composer/ultra_composer.test.cpp    |   1 +
 .../proof_system/types/program_settings.hpp   |   7 -
 .../plookup_arithmetic_widget.hpp             |   2 +-
 .../types/polynomial_manifest.hpp             |  63 ++--
 .../proof_system/verification_key/sol_gen.hpp | 110 +++++-
 .../stdlib/hash/pedersen/pedersen.cpp         |  20 +-
 .../stdlib/hash/pedersen/pedersen_gates.hpp   | 316 ++++++++++++++++++
 .../stdlib/primitives/group/group.hpp         |   6 +-
 .../primitives/memory/dynamic_array.cpp       | 279 ++++++++++++++++
 .../primitives/memory/dynamic_array.hpp       |  54 +++
 .../primitives/memory/dynamic_array.test.cpp  |  72 ++++
 .../stdlib/primitives/memory/ram_table.cpp    | 257 ++++++++++++++
 .../stdlib/primitives/memory/ram_table.hpp    |  63 ++++
 .../primitives/memory/ram_table.test.cpp      | 111 ++++++
 .../recursion/verifier/program_settings.hpp   |   8 -
 cpp/src/barretenberg/stdlib/types/types.hpp   |   4 +
 27 files changed, 1392 insertions(+), 1250 deletions(-)
 create mode 100644 cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_gates.hpp
 create mode 100644 cpp/src/barretenberg/stdlib/primitives/memory/dynamic_array.cpp
 create mode 100644 cpp/src/barretenberg/stdlib/primitives/memory/dynamic_array.hpp
 create mode 100644 cpp/src/barretenberg/stdlib/primitives/memory/dynamic_array.test.cpp
 create mode 100644 cpp/src/barretenberg/stdlib/primitives/memory/ram_table.cpp
 create mode 100644 cpp/src/barretenberg/stdlib/primitives/memory/ram_table.hpp
 create mode 100644 cpp/src/barretenberg/stdlib/primitives/memory/ram_table.test.cpp

diff --git a/cpp/.gitignore b/cpp/.gitignore
index 6a33a19de1..c6a6fba41b 100644
--- a/cpp/.gitignore
+++ b/cpp/.gitignore
@@ -5,3 +5,6 @@ src/barretenberg/proof_system/proving_key/fixtures
 src/barretenberg/rollup/proofs/*/fixtures
 srs_db/*/*/transcript*
 CMakeUserPresets.json
+.vscode/settings.json
+# to be unignored when we agree on clang-tidy rules
+.clangd
diff --git a/cpp/src/barretenberg/honk/circuit_constructors/standard_circuit_constructor.cpp b/cpp/src/barretenberg/honk/circuit_constructors/standard_circuit_constructor.cpp
index ca31a8d5e3..08d8af9885 100644
--- a/cpp/src/barretenberg/honk/circuit_constructors/standard_circuit_constructor.cpp
+++ b/cpp/src/barretenberg/honk/circuit_constructors/standard_circuit_constructor.cpp
@@ -250,246 +250,6 @@ void StandardCircuitConstructor::create_poly_gate(const poly_triple& in)
     ++num_gates;
 }

-void StandardCircuitConstructor::create_fixed_group_add_gate_with_init(const fixed_group_add_quad& in,
-                                                                       const fixed_group_init_quad& init)
-{
-    uint32_t x_0_idx = in.a;
-    uint32_t y_0_idx = in.b;
-    uint32_t x_alpha_idx = in.c;
-    uint32_t a_0_idx =
in.d; - - fr x_alpha = get_variable(x_alpha_idx); - fr a_0 = get_variable(a_0_idx); - - // weird names here follow the Turbo notation - fr q_4 = init.q_x_1; - fr q_5 = init.q_x_2; - fr q_m = init.q_y_1; - fr q_c = init.q_y_2; - - // We will think of s = 1-a_0 as an auxiliary "switch" which is equal to either -x_alpha or 0 - // during the initialization step, but we will not add this variable to the composer for reasons of efficiency. - - // (ɑ^4 identity) impose 1-a_0 = 0 or -x_alpha - // // first check formula for sx_alpha - fr sx_alpha = (fr(1) - a_0) * x_alpha; - uint32_t sx_alpha_idx = add_variable(sx_alpha); - create_poly_gate( - { .a = a_0_idx, .b = x_alpha_idx, .c = sx_alpha_idx, .q_m = 1, .q_l = 0, .q_r = -1, .q_o = 1, .q_c = 0 }); - - // // now add the desired constraint on sx_alpha - // // s(s + x_alpha) = s*s + s*x_alpha = 0 - create_poly_gate( - { .a = a_0_idx, .b = a_0_idx, .c = sx_alpha_idx, .q_m = 1, .q_l = -2, .q_r = 0, .q_o = 1, .q_c = 1 }); - - // (ɑ^5 identity) - create_poly_gate( - { .a = x_0_idx, .b = x_alpha_idx, .c = a_0_idx, .q_m = -1, .q_l = 0, .q_r = q_4, .q_o = -q_5, .q_c = q_5 }); - - // (ɑ^6 identity) - create_poly_gate( - { .a = y_0_idx, .b = x_alpha_idx, .c = a_0_idx, .q_m = -1, .q_l = 0, .q_r = q_m, .q_o = -q_c, .q_c = q_c }); - - // There is no previous add quad. - previous_add_quad = in; -} - -void StandardCircuitConstructor::create_fixed_group_add_gate(const fixed_group_add_quad& in) -{ - assert_valid_variables({ in.a, in.b, in.c, in.d }); - - auto row_1 = previous_add_quad; - auto row_2 = in; - previous_add_quad = in; - - fr a_1 = get_variable(row_1.d); - fr a_2 = get_variable(row_2.d); - fr x_1 = get_variable(row_1.a); - fr y_1 = get_variable(row_1.b); - fr x_2 = get_variable(row_2.a); - fr y_2 = get_variable(row_2.b); - fr x_alpha = get_variable(row_2.c); - - fr q_x_alpha_1 = row_1.q_x_1; - fr q_x_alpha_2 = row_1.q_x_2; - fr q_y_alpha_1 = row_1.q_y_1; - fr q_y_alpha_2 = row_1.q_y_2; - - uint32_t a_1_idx = row_1.d; - uint32_t a_2_idx = row_2.d; - uint32_t x_1_idx = row_1.a; - uint32_t y_1_idx = row_1.b; - uint32_t x_2_idx = row_2.a; - uint32_t y_2_idx = row_2.b; - uint32_t x_alpha_idx = row_2.c; - - // add variable δ = a_2 - 4a_1 - fr delta = a_2 - (a_1 + a_1 + a_1 + a_1); - uint32_t delta_idx = add_variable(delta); - create_add_gate({ .a = a_2_idx, - .b = a_1_idx, - .c = delta_idx, - .a_scaling = 1, - .b_scaling = -4, - .c_scaling = -1, - .const_scaling = 0 }); - - // constraint: (δ + 3)(δ + 1)(δ - 1)(δ - 3) - // (δ + 3)(δ + 1)(δ - 1)(δ - 3) = (δ^2 - 9)(δ^2 - 1)=0 - // // first: (δ^2 - δ_sqr = 0) - fr delta_sqr = delta * delta; - uint32_t delta_sqr_idx = add_variable(delta_sqr); - create_mul_gate( - { .a = delta_idx, .b = delta_idx, .c = delta_sqr_idx, .mul_scaling = 1, .c_scaling = -1, .const_scaling = 0 }); - // // next (δ^2 - 9)( δ^2 - 1) = δ^2*δ^2 - 10 * δ^2 + 9 = 0 - create_mul_gate({ .a = delta_sqr_idx, - .b = delta_sqr_idx, - .c = delta_sqr_idx, - .mul_scaling = 1, - .c_scaling = -10, - .const_scaling = 9 }); - - // validate correctness of x_ɑ - // constraint: (δ^2) * q_x_ɑ,1 + q_x_ɑ,2 - x,ɑ = 0 - create_add_gate({ .a = delta_sqr_idx, - .b = x_alpha_idx, - .c = zero_idx, - .a_scaling = q_x_alpha_1, - .b_scaling = -1, - .c_scaling = 0, - .const_scaling = q_x_alpha_2 }); - - // compute y_alpha using lookup formula, instantiate as witness and validate - fr y_alpha = (x_alpha * q_y_alpha_1 + q_y_alpha_2) * delta; - uint32_t y_alpha_idx = add_variable(y_alpha); - create_poly_gate({ .a = delta_idx, - .b = x_alpha_idx, - .c = y_alpha_idx, - .q_m = 
q_y_alpha_1, - .q_l = q_y_alpha_2, - .q_r = 0, - .q_o = -1, - .q_c = 0 }); - - // show that (x_1, y_1) + (x_ɑ, y_ɑ) = (x_2, y_2) in 11 gates - // // 4 gates to compute commonly used expressions - // // // 2 differences: - fr diff_x_alpha_x_1 = x_alpha - x_1; - uint32_t diff_x_alpha_x_1_idx = add_variable(diff_x_alpha_x_1); - create_add_gate({ .a = diff_x_alpha_x_1_idx, - .b = x_1_idx, - .c = x_alpha_idx, - .a_scaling = 1, - .b_scaling = 1, - .c_scaling = -1, - .const_scaling = 0 }); - - fr diff_y_alpha_y_1 = y_alpha - y_1; - uint32_t diff_y_alpha_y_1_idx = add_variable(diff_y_alpha_y_1); - create_add_gate({ .a = diff_y_alpha_y_1_idx, - .b = y_1_idx, - .c = y_alpha_idx, - .a_scaling = 1, - .b_scaling = 1, - .c_scaling = -1, - .const_scaling = 0 }); - - // // // now the squares of these 2 differences - fr diff_x_alpha_x_1_sqr = diff_x_alpha_x_1 * diff_x_alpha_x_1; - uint32_t diff_x_alpha_x_1_sqr_idx = add_variable(diff_x_alpha_x_1_sqr); - create_mul_gate({ .a = diff_x_alpha_x_1_idx, - .b = diff_x_alpha_x_1_idx, - .c = diff_x_alpha_x_1_sqr_idx, - .mul_scaling = 1, - .c_scaling = -1, - .const_scaling = 0 }); - - fr diff_y_alpha_y_1_sqr = diff_y_alpha_y_1 * diff_y_alpha_y_1; - uint32_t diff_y_alpha_y_1_sqr_idx = add_variable(diff_y_alpha_y_1_sqr); - create_mul_gate({ .a = diff_y_alpha_y_1_idx, - .b = diff_y_alpha_y_1_idx, - .c = diff_y_alpha_y_1_sqr_idx, - .mul_scaling = 1, - .c_scaling = -1, - .const_scaling = 0 }); - - // // 3 gates to build identity for x_2 - // // // compute x_2 + x_ɑ + x_1 using 2 poly_gates via create_big_add_gate - fr sum_x_1_2_alpha = x_2 + x_alpha + x_1; - uint32_t sum_x_1_2_alpha_idx = add_variable(sum_x_1_2_alpha); - create_big_add_gate({ .a = x_2_idx, - .b = x_alpha_idx, - .c = x_1_idx, - .d = sum_x_1_2_alpha_idx, - .a_scaling = 1, - .b_scaling = 1, - .c_scaling = 1, - .d_scaling = -1, - .const_scaling = 0 }); - - // // // constraint: identity for x_2 - create_poly_gate({ .a = sum_x_1_2_alpha_idx, - .b = diff_x_alpha_x_1_sqr_idx, - .c = diff_y_alpha_y_1_sqr_idx, - .q_m = 1, - .q_l = 0, - .q_r = 0, - .q_o = -1, - .q_c = 0 }); - - // // 4 gates to build identity for y_2: - // // // 3 auxiliary - fr sum_y_1_y_2 = y_1 + y_2; - uint32_t sum_y_1_y_2_idx = add_variable(sum_y_1_y_2); - create_add_gate({ .a = y_1_idx, - .b = y_2_idx, - .c = sum_y_1_y_2_idx, - .a_scaling = 1, - .b_scaling = 1, - .c_scaling = -1, - .const_scaling = 0 }); - - fr diff_x_1_x_2 = x_1 - x_2; - uint32_t diff_x_1_x_2_idx = add_variable(diff_x_1_x_2); - create_add_gate({ .a = diff_x_1_x_2_idx, - .b = x_2_idx, - .c = x_1_idx, - .a_scaling = 1, - .b_scaling = 1, - .c_scaling = -1, - .const_scaling = 0 }); - - fr prod_y_diff_x_diff = diff_y_alpha_y_1 * diff_x_1_x_2; - uint32_t prod_y_diff_x_diff_idx = add_variable(prod_y_diff_x_diff); - create_mul_gate({ .a = diff_y_alpha_y_1_idx, - .b = diff_x_1_x_2_idx, - .c = prod_y_diff_x_diff_idx, - .mul_scaling = 1, - .c_scaling = -1, - .const_scaling = 0 }); - - // // // identity for y_2 - create_mul_gate({ .a = sum_y_1_y_2_idx, - .b = diff_x_alpha_x_1_idx, - .c = prod_y_diff_x_diff_idx, - .mul_scaling = 1, - .c_scaling = -1, - .const_scaling = 0 }); -} - -void StandardCircuitConstructor::create_fixed_group_add_gate_final(const add_quad& in) -{ - fixed_group_add_quad final_round_quad{ .a = in.a, - .b = in.b, - .c = in.c, - .d = in.d, - .q_x_1 = fr::zero(), - .q_x_2 = fr::zero(), - .q_y_1 = fr::zero(), - .q_y_2 = fr::zero() }; - create_fixed_group_add_gate(final_round_quad); -} - std::vector StandardCircuitConstructor::decompose_into_base4_accumulators(const 
uint32_t witness_index, const size_t num_bits, std::string const& msg) diff --git a/cpp/src/barretenberg/honk/circuit_constructors/standard_circuit_constructor.test.cpp b/cpp/src/barretenberg/honk/circuit_constructors/standard_circuit_constructor.test.cpp index e584c0c4df..3822ad63fe 100644 --- a/cpp/src/barretenberg/honk/circuit_constructors/standard_circuit_constructor.test.cpp +++ b/cpp/src/barretenberg/honk/circuit_constructors/standard_circuit_constructor.test.cpp @@ -417,172 +417,4 @@ TEST(standard_circuit_constructor, test_check_circuit_broken) EXPECT_EQ(result, false); } -TEST(standard_circuit_constructor, test_fixed_group_add_gate_with_init) -{ - StandardCircuitConstructor composer = StandardCircuitConstructor(); - auto gen_data = crypto::pedersen::get_generator_data({ 0, 0 }); - - // 1. generate two origin points P, Q - // 2. derive gate constant values from P, Q - // 3. instantiate P as accumulator - // 4. generate accumulator initial value 1 and instantiate as circuit variable - // 5. use the above to call `create_fixed_group_add_gate_with_init` - // 6. validate proof passes - constexpr size_t num_bits = 63; - constexpr size_t initial_exponent = ((num_bits & 1) == 1) ? num_bits - 1 : num_bits; - - const crypto::pedersen::fixed_base_ladder* ladder = gen_data.get_ladder(num_bits); - grumpkin::g1::affine_element generator = gen_data.aux_generator; - - grumpkin::g1::element origin_points[2]; - origin_points[0] = grumpkin::g1::element(ladder[0].one); // this is P - origin_points[1] = origin_points[0] + generator; - origin_points[1] = origin_points[1].normalize(); // this is Q - - fr accumulator_offset = (fr::one() + fr::one()).pow(static_cast(initial_exponent)).invert(); - fr origin_accumulators[2]{ fr::one(), accumulator_offset + fr::one() }; - - for (size_t i = 0; i < 2; ++i) { - fr starting_accumulator = origin_accumulators[i]; // skew = 0 - - fixed_group_init_quad init_quad{ origin_points[0].x, - (origin_points[0].x - origin_points[1].x), - origin_points[0].y, - (origin_points[0].y - origin_points[1].y) }; - - fixed_group_add_quad round_quad{ - .a = composer.add_variable(origin_points[i].x), - .b = composer.add_variable(origin_points[i].y), - .c = composer.add_variable(accumulator_offset), - .d = composer.add_variable(starting_accumulator), - .q_x_1 = 0, - .q_x_2 = 0, - .q_y_1 = 0, - .q_y_2 = 0, - }; - composer.create_fixed_group_add_gate_with_init(round_quad, init_quad); - } - - bool result = composer.check_circuit(); - - EXPECT_EQ(result, true); -} - -TEST(standard_circuit_constructor, test_fixed_group_add_gate) -{ - auto composer = StandardCircuitConstructor(); - auto gen_data = crypto::pedersen::get_generator_data({ 0, 0 }); - - constexpr size_t num_bits = 63; - constexpr size_t num_quads_base = (num_bits - 1) >> 1; - constexpr size_t num_quads = ((num_quads_base << 1) + 1 < num_bits) ? num_quads_base + 1 : num_quads_base; - constexpr size_t num_wnaf_bits = (num_quads << 1) + 1; - constexpr size_t initial_exponent = ((num_bits & 1) == 1) ? num_bits - 1 : num_bits; - constexpr uint64_t bit_mask = (1ULL << num_bits) - 1UL; - const crypto::pedersen::fixed_base_ladder* ladder = gen_data.get_hash_ladder(num_bits); - grumpkin::g1::affine_element generator = gen_data.aux_generator; // also passes with aux_generator? 
- - grumpkin::g1::element origin_points[2]; - origin_points[0] = grumpkin::g1::element(ladder[0].one); - origin_points[1] = origin_points[0] + generator; - origin_points[1] = origin_points[1].normalize(); - - grumpkin::fr scalar_multiplier_entropy = grumpkin::fr::random_element(); - grumpkin::fr scalar_multiplier_base{ scalar_multiplier_entropy.data[0] & bit_mask, 0, 0, 0 }; - scalar_multiplier_base.data[0] = scalar_multiplier_base.data[0] & (~1ULL); - - uint64_t wnaf_entries[num_quads + 1] = { 0 }; - if ((scalar_multiplier_base.data[0] & 1) == 0) { - scalar_multiplier_base.data[0] -= 2; - } - bool skew = false; - barretenberg::wnaf::fixed_wnaf(&scalar_multiplier_base.data[0], &wnaf_entries[0], skew, 0); - - fr accumulator_offset = (fr::one() + fr::one()).pow(static_cast(initial_exponent)).invert(); - fr origin_accumulators[2]{ fr::one(), accumulator_offset + fr::one() }; - - grumpkin::g1::element* multiplication_transcript = - static_cast(aligned_alloc(64, sizeof(grumpkin::g1::element) * (num_quads + 1))); - fr* accumulator_transcript = static_cast(aligned_alloc(64, sizeof(fr) * (num_quads + 1))); - - if (skew) { - multiplication_transcript[0] = origin_points[1]; - accumulator_transcript[0] = origin_accumulators[1]; - } else { - multiplication_transcript[0] = origin_points[0]; - accumulator_transcript[0] = origin_accumulators[0]; - } - - fr one = fr::one(); - fr three = ((one + one) + one); - for (size_t i = 0; i < num_quads; ++i) { - uint64_t entry = wnaf_entries[i + 1] & crypto::pedersen::WNAF_MASK; - fr prev_accumulator = accumulator_transcript[i] + accumulator_transcript[i]; - prev_accumulator = prev_accumulator + prev_accumulator; - - grumpkin::g1::affine_element point_to_add = (entry == 1) ? ladder[i + 1].three : ladder[i + 1].one; - fr scalar_to_add = (entry == 1) ? three : one; - uint64_t predicate = (wnaf_entries[i + 1] >> 31U) & 1U; - if (predicate) { - point_to_add = -point_to_add; - scalar_to_add.self_neg(); - } - accumulator_transcript[i + 1] = prev_accumulator + scalar_to_add; - multiplication_transcript[i + 1] = multiplication_transcript[i] + point_to_add; - } - grumpkin::g1::element::batch_normalize(&multiplication_transcript[0], num_quads + 1); - - fixed_group_init_quad init_quad{ origin_points[0].x, - (origin_points[0].x - origin_points[1].x), - origin_points[0].y, - (origin_points[0].y - origin_points[1].y) }; - - fr x_alpha = accumulator_offset; - for (size_t i = 0; i < 2; ++i) { - fixed_group_add_quad round_quad; - round_quad.d = composer.add_variable(accumulator_transcript[i]); - round_quad.a = composer.add_variable(multiplication_transcript[i].x); - round_quad.b = composer.add_variable(multiplication_transcript[i].y); - - if (i == 0) { - // we need to ensure that the first value of x_alpha is a defined constant. - // However, repeated applications of the pedersen hash will use the same constant value. 
- // `put_constant_variable` will create a gate that fixes the value of x_alpha, but only once - round_quad.c = composer.put_constant_variable(x_alpha); - } else { - round_quad.c = composer.add_variable(x_alpha); - } - - if ((wnaf_entries[i + 1] & 0xffffffU) == 0) { - x_alpha = ladder[i + 1].one.x; - } else { - x_alpha = ladder[i + 1].three.x; - } - round_quad.q_x_1 = ladder[i + 1].q_x_1; - round_quad.q_x_2 = ladder[i + 1].q_x_2; - round_quad.q_y_1 = ladder[i + 1].q_y_1; - round_quad.q_y_2 = ladder[i + 1].q_y_2; - - if (i > 0) { - composer.create_fixed_group_add_gate(round_quad); - } else { - composer.create_fixed_group_add_gate_with_init(round_quad, init_quad); - } - } - - add_quad add_quad{ composer.add_variable(multiplication_transcript[2].x), - composer.add_variable(multiplication_transcript[2].y), - composer.add_variable(x_alpha), - composer.add_variable(accumulator_transcript[2]), - fr::zero(), - fr::zero(), - fr::zero(), - fr::zero(), - fr::zero() }; - composer.create_fixed_group_add_gate_final(add_quad); - - bool result = composer.check_circuit(); - - EXPECT_EQ(result, true); -} } // namespace standard_circuit_constructor_tests diff --git a/cpp/src/barretenberg/honk/composer/standard_honk_composer.hpp b/cpp/src/barretenberg/honk/composer/standard_honk_composer.hpp index 0f6265ca13..7b69ef834c 100644 --- a/cpp/src/barretenberg/honk/composer/standard_honk_composer.hpp +++ b/cpp/src/barretenberg/honk/composer/standard_honk_composer.hpp @@ -101,18 +101,6 @@ class StandardHonkComposer { } void create_big_mul_gate(const mul_quad& in) { circuit_constructor.create_big_mul_gate(in); } void create_balanced_add_gate(const add_quad& in) { circuit_constructor.create_balanced_add_gate(in); } - void create_fixed_group_add_gate(const fixed_group_add_quad& in) - { - circuit_constructor.create_fixed_group_add_gate(in); - } - void create_fixed_group_add_gate_with_init(const fixed_group_add_quad& in, const fixed_group_init_quad& init) - { - circuit_constructor.create_fixed_group_add_gate_with_init(in, init); - } - void create_fixed_group_add_gate_final(const add_quad& in) - { - circuit_constructor.create_fixed_group_add_gate_final(in); - } void fix_witness(const uint32_t witness_index, const barretenberg::fr& witness_value) { diff --git a/cpp/src/barretenberg/honk/composer/standard_plonk_composer.hpp b/cpp/src/barretenberg/honk/composer/standard_plonk_composer.hpp index bc8cfcacc7..12e5b6bf56 100644 --- a/cpp/src/barretenberg/honk/composer/standard_plonk_composer.hpp +++ b/cpp/src/barretenberg/honk/composer/standard_plonk_composer.hpp @@ -102,18 +102,6 @@ class StandardPlonkComposer { } void create_big_mul_gate(const mul_quad& in) { circuit_constructor.create_big_mul_gate(in); } void create_balanced_add_gate(const add_quad& in) { circuit_constructor.create_balanced_add_gate(in); } - void create_fixed_group_add_gate(const fixed_group_add_quad& in) - { - circuit_constructor.create_fixed_group_add_gate(in); - } - void create_fixed_group_add_gate_with_init(const fixed_group_add_quad& in, const fixed_group_init_quad& init) - { - circuit_constructor.create_fixed_group_add_gate_with_init(in, init); - } - void create_fixed_group_add_gate_final(const add_quad& in) - { - circuit_constructor.create_fixed_group_add_gate_final(in); - } void fix_witness(const uint32_t witness_index, const barretenberg::fr& witness_value) { diff --git a/cpp/src/barretenberg/honk/composer/standard_plonk_composer.test.cpp b/cpp/src/barretenberg/honk/composer/standard_plonk_composer.test.cpp index d0053283d6..86d0dfb6b4 100644 --- 
a/cpp/src/barretenberg/honk/composer/standard_plonk_composer.test.cpp +++ b/cpp/src/barretenberg/honk/composer/standard_plonk_composer.test.cpp @@ -495,182 +495,3 @@ TEST(standard_composer, test_check_circuit_broken) bool result = composer.check_circuit(); EXPECT_EQ(result, false); } - -TEST(standard_composer, test_fixed_group_add_gate_with_init) -{ - plonk::StandardPlonkComposer composer = plonk::StandardPlonkComposer(); - auto gen_data = crypto::pedersen::get_generator_data({ 0, 0 }); - - // 1. generate two origin points P, Q - // 2. derive gate constant values from P, Q - // 3. instantiate P as accumulator - // 4. generate accumulator initial value 1 and instantiate as circuit variable - // 5. use the above to call `create_fixed_group_add_gate_with_init` - // 6. validate proof passes - constexpr size_t num_bits = 63; - constexpr size_t initial_exponent = ((num_bits & 1) == 1) ? num_bits - 1 : num_bits; - - const crypto::pedersen::fixed_base_ladder* ladder = gen_data.get_ladder(num_bits); - grumpkin::g1::affine_element generator = gen_data.aux_generator; - - grumpkin::g1::element origin_points[2]; - origin_points[0] = grumpkin::g1::element(ladder[0].one); // this is P - origin_points[1] = origin_points[0] + generator; - origin_points[1] = origin_points[1].normalize(); // this is Q - - fr accumulator_offset = (fr::one() + fr::one()).pow(static_cast(initial_exponent)).invert(); - fr origin_accumulators[2]{ fr::one(), accumulator_offset + fr::one() }; - - for (size_t i = 0; i < 2; ++i) { - fr starting_accumulator = origin_accumulators[i]; // skew = 0 - - fixed_group_init_quad init_quad{ origin_points[0].x, - (origin_points[0].x - origin_points[1].x), - origin_points[0].y, - (origin_points[0].y - origin_points[1].y) }; - - fixed_group_add_quad round_quad{ - .a = composer.add_variable(origin_points[i].x), - .b = composer.add_variable(origin_points[i].y), - .c = composer.add_variable(accumulator_offset), - .d = composer.add_variable(starting_accumulator), - .q_x_1 = 0, - .q_x_2 = 0, - .q_y_1 = 0, - .q_y_2 = 0, - }; - composer.create_fixed_group_add_gate_with_init(round_quad, init_quad); - } - plonk::Prover prover = composer.preprocess(); - - plonk::Verifier verifier = composer.create_verifier(); - - plonk::proof proof = prover.construct_proof(); - - bool result = verifier.verify_proof(proof); - - EXPECT_EQ(result, true); -} - -TEST(standard_composer, test_fixed_group_add_gate) -{ - auto composer = plonk::StandardPlonkComposer(); - auto gen_data = crypto::pedersen::get_generator_data({ 0, 0 }); - - constexpr size_t num_bits = 63; - constexpr size_t num_quads_base = (num_bits - 1) >> 1; - constexpr size_t num_quads = ((num_quads_base << 1) + 1 < num_bits) ? num_quads_base + 1 : num_quads_base; - constexpr size_t num_wnaf_bits = (num_quads << 1) + 1; - constexpr size_t initial_exponent = ((num_bits & 1) == 1) ? num_bits - 1 : num_bits; - constexpr uint64_t bit_mask = (1ULL << num_bits) - 1UL; - const crypto::pedersen::fixed_base_ladder* ladder = gen_data.get_hash_ladder(num_bits); - grumpkin::g1::affine_element generator = gen_data.aux_generator; // also passes with aux_generator? 
- - grumpkin::g1::element origin_points[2]; - origin_points[0] = grumpkin::g1::element(ladder[0].one); - origin_points[1] = origin_points[0] + generator; - origin_points[1] = origin_points[1].normalize(); - - grumpkin::fr scalar_multiplier_entropy = grumpkin::fr::random_element(); - grumpkin::fr scalar_multiplier_base{ scalar_multiplier_entropy.data[0] & bit_mask, 0, 0, 0 }; - scalar_multiplier_base.data[0] = scalar_multiplier_base.data[0] & (~1ULL); - - uint64_t wnaf_entries[num_quads + 1] = { 0 }; - if ((scalar_multiplier_base.data[0] & 1) == 0) { - scalar_multiplier_base.data[0] -= 2; - } - bool skew = false; - barretenberg::wnaf::fixed_wnaf(&scalar_multiplier_base.data[0], &wnaf_entries[0], skew, 0); - - fr accumulator_offset = (fr::one() + fr::one()).pow(static_cast(initial_exponent)).invert(); - fr origin_accumulators[2]{ fr::one(), accumulator_offset + fr::one() }; - - grumpkin::g1::element* multiplication_transcript = - static_cast(aligned_alloc(64, sizeof(grumpkin::g1::element) * (num_quads + 1))); - fr* accumulator_transcript = static_cast(aligned_alloc(64, sizeof(fr) * (num_quads + 1))); - - if (skew) { - multiplication_transcript[0] = origin_points[1]; - accumulator_transcript[0] = origin_accumulators[1]; - } else { - multiplication_transcript[0] = origin_points[0]; - accumulator_transcript[0] = origin_accumulators[0]; - } - - fr one = fr::one(); - fr three = ((one + one) + one); - for (size_t i = 0; i < num_quads; ++i) { - uint64_t entry = wnaf_entries[i + 1] & crypto::pedersen::WNAF_MASK; - fr prev_accumulator = accumulator_transcript[i] + accumulator_transcript[i]; - prev_accumulator = prev_accumulator + prev_accumulator; - - grumpkin::g1::affine_element point_to_add = (entry == 1) ? ladder[i + 1].three : ladder[i + 1].one; - fr scalar_to_add = (entry == 1) ? three : one; - uint64_t predicate = (wnaf_entries[i + 1] >> 31U) & 1U; - if (predicate) { - point_to_add = -point_to_add; - scalar_to_add.self_neg(); - } - accumulator_transcript[i + 1] = prev_accumulator + scalar_to_add; - multiplication_transcript[i + 1] = multiplication_transcript[i] + point_to_add; - } - grumpkin::g1::element::batch_normalize(&multiplication_transcript[0], num_quads + 1); - - fixed_group_init_quad init_quad{ origin_points[0].x, - (origin_points[0].x - origin_points[1].x), - origin_points[0].y, - (origin_points[0].y - origin_points[1].y) }; - - fr x_alpha = accumulator_offset; - for (size_t i = 0; i < 2; ++i) { - fixed_group_add_quad round_quad; - round_quad.d = composer.add_variable(accumulator_transcript[i]); - round_quad.a = composer.add_variable(multiplication_transcript[i].x); - round_quad.b = composer.add_variable(multiplication_transcript[i].y); - - if (i == 0) { - // we need to ensure that the first value of x_alpha is a defined constant. - // However, repeated applications of the pedersen hash will use the same constant value. 
- // `put_constant_variable` will create a gate that fixes the value of x_alpha, but only once - round_quad.c = composer.put_constant_variable(x_alpha); - } else { - round_quad.c = composer.add_variable(x_alpha); - } - - if ((wnaf_entries[i + 1] & 0xffffffU) == 0) { - x_alpha = ladder[i + 1].one.x; - } else { - x_alpha = ladder[i + 1].three.x; - } - round_quad.q_x_1 = ladder[i + 1].q_x_1; - round_quad.q_x_2 = ladder[i + 1].q_x_2; - round_quad.q_y_1 = ladder[i + 1].q_y_1; - round_quad.q_y_2 = ladder[i + 1].q_y_2; - - if (i > 0) { - composer.create_fixed_group_add_gate(round_quad); - } else { - composer.create_fixed_group_add_gate_with_init(round_quad, init_quad); - } - } - - add_quad add_quad{ composer.add_variable(multiplication_transcript[2].x), - composer.add_variable(multiplication_transcript[2].y), - composer.add_variable(x_alpha), - composer.add_variable(accumulator_transcript[2]), - fr::zero(), - fr::zero(), - fr::zero(), - fr::zero(), - fr::zero() }; - composer.create_fixed_group_add_gate_final(add_quad); - plonk::Prover prover = composer.create_prover(); - - plonk::Verifier verifier = composer.create_verifier(); - - plonk::proof proof = prover.construct_proof(); - - bool result = verifier.verify_proof(proof); - - EXPECT_EQ(result, true); -} diff --git a/cpp/src/barretenberg/plonk/composer/standard_composer.cpp b/cpp/src/barretenberg/plonk/composer/standard_composer.cpp index 56b2c28206..dfa084e79f 100644 --- a/cpp/src/barretenberg/plonk/composer/standard_composer.cpp +++ b/cpp/src/barretenberg/plonk/composer/standard_composer.cpp @@ -257,246 +257,6 @@ void StandardComposer::create_poly_gate(const poly_triple& in) ++num_gates; } -void StandardComposer::create_fixed_group_add_gate_with_init(const fixed_group_add_quad& in, - const fixed_group_init_quad& init) -{ - uint32_t x_0_idx = in.a; - uint32_t y_0_idx = in.b; - uint32_t x_alpha_idx = in.c; - uint32_t a_0_idx = in.d; - - fr x_alpha = get_variable(x_alpha_idx); - fr a_0 = get_variable(a_0_idx); - - // weird names here follow the Turbo notation - fr q_4 = init.q_x_1; - fr q_5 = init.q_x_2; - fr q_m = init.q_y_1; - fr q_c = init.q_y_2; - - // We will think of s = 1-a_0 as an auxiliary "switch" which is equal to either -x_alpha or 0 - // during the initialization step, but we will not add this variable to the composer for reasons of efficiency. - - // (ɑ^4 identity) impose 1-a_0 = 0 or -x_alpha - // // first check formula for sx_alpha - fr sx_alpha = (fr(1) - a_0) * x_alpha; - uint32_t sx_alpha_idx = add_variable(sx_alpha); - create_poly_gate( - { .a = a_0_idx, .b = x_alpha_idx, .c = sx_alpha_idx, .q_m = 1, .q_l = 0, .q_r = -1, .q_o = 1, .q_c = 0 }); - - // // now add the desired constraint on sx_alpha - // // s(s + x_alpha) = s*s + s*x_alpha = 0 - create_poly_gate( - { .a = a_0_idx, .b = a_0_idx, .c = sx_alpha_idx, .q_m = 1, .q_l = -2, .q_r = 0, .q_o = 1, .q_c = 1 }); - - // (ɑ^5 identity) - create_poly_gate( - { .a = x_0_idx, .b = x_alpha_idx, .c = a_0_idx, .q_m = -1, .q_l = 0, .q_r = q_4, .q_o = -q_5, .q_c = q_5 }); - - // (ɑ^6 identity) - create_poly_gate( - { .a = y_0_idx, .b = x_alpha_idx, .c = a_0_idx, .q_m = -1, .q_l = 0, .q_r = q_m, .q_o = -q_c, .q_c = q_c }); - - // There is no previous add quad. 
- previous_add_quad = in; -} - -void StandardComposer::create_fixed_group_add_gate(const fixed_group_add_quad& in) -{ - assert_valid_variables({ in.a, in.b, in.c, in.d }); - - auto row_1 = previous_add_quad; - auto row_2 = in; - previous_add_quad = in; - - fr a_1 = get_variable(row_1.d); - fr a_2 = get_variable(row_2.d); - fr x_1 = get_variable(row_1.a); - fr y_1 = get_variable(row_1.b); - fr x_2 = get_variable(row_2.a); - fr y_2 = get_variable(row_2.b); - fr x_alpha = get_variable(row_2.c); - - fr q_x_alpha_1 = row_1.q_x_1; - fr q_x_alpha_2 = row_1.q_x_2; - fr q_y_alpha_1 = row_1.q_y_1; - fr q_y_alpha_2 = row_1.q_y_2; - - uint32_t a_1_idx = row_1.d; - uint32_t a_2_idx = row_2.d; - uint32_t x_1_idx = row_1.a; - uint32_t y_1_idx = row_1.b; - uint32_t x_2_idx = row_2.a; - uint32_t y_2_idx = row_2.b; - uint32_t x_alpha_idx = row_2.c; - - // add variable δ = a_2 - 4a_1 - fr delta = a_2 - (a_1 + a_1 + a_1 + a_1); - uint32_t delta_idx = add_variable(delta); - create_add_gate({ .a = a_2_idx, - .b = a_1_idx, - .c = delta_idx, - .a_scaling = 1, - .b_scaling = -4, - .c_scaling = -1, - .const_scaling = 0 }); - - // constraint: (δ + 3)(δ + 1)(δ - 1)(δ - 3) - // (δ + 3)(δ + 1)(δ - 1)(δ - 3) = (δ^2 - 9)(δ^2 - 1)=0 - // // first: (δ^2 - δ_sqr = 0) - fr delta_sqr = delta * delta; - uint32_t delta_sqr_idx = add_variable(delta_sqr); - create_mul_gate( - { .a = delta_idx, .b = delta_idx, .c = delta_sqr_idx, .mul_scaling = 1, .c_scaling = -1, .const_scaling = 0 }); - // // next (δ^2 - 9)( δ^2 - 1) = δ^2*δ^2 - 10 * δ^2 + 9 = 0 - create_mul_gate({ .a = delta_sqr_idx, - .b = delta_sqr_idx, - .c = delta_sqr_idx, - .mul_scaling = 1, - .c_scaling = -10, - .const_scaling = 9 }); - - // validate correctness of x_ɑ - // constraint: (δ^2) * q_x_ɑ,1 + q_x_ɑ,2 - x,ɑ = 0 - create_add_gate({ .a = delta_sqr_idx, - .b = x_alpha_idx, - .c = zero_idx, - .a_scaling = q_x_alpha_1, - .b_scaling = -1, - .c_scaling = 0, - .const_scaling = q_x_alpha_2 }); - - // compute y_alpha using lookup formula, instantiate as witness and validate - fr y_alpha = (x_alpha * q_y_alpha_1 + q_y_alpha_2) * delta; - uint32_t y_alpha_idx = add_variable(y_alpha); - create_poly_gate({ .a = delta_idx, - .b = x_alpha_idx, - .c = y_alpha_idx, - .q_m = q_y_alpha_1, - .q_l = q_y_alpha_2, - .q_r = 0, - .q_o = -1, - .q_c = 0 }); - - // show that (x_1, y_1) + (x_ɑ, y_ɑ) = (x_2, y_2) in 11 gates - // // 4 gates to compute commonly used expressions - // // // 2 differences: - fr diff_x_alpha_x_1 = x_alpha - x_1; - uint32_t diff_x_alpha_x_1_idx = add_variable(diff_x_alpha_x_1); - create_add_gate({ .a = diff_x_alpha_x_1_idx, - .b = x_1_idx, - .c = x_alpha_idx, - .a_scaling = 1, - .b_scaling = 1, - .c_scaling = -1, - .const_scaling = 0 }); - - fr diff_y_alpha_y_1 = y_alpha - y_1; - uint32_t diff_y_alpha_y_1_idx = add_variable(diff_y_alpha_y_1); - create_add_gate({ .a = diff_y_alpha_y_1_idx, - .b = y_1_idx, - .c = y_alpha_idx, - .a_scaling = 1, - .b_scaling = 1, - .c_scaling = -1, - .const_scaling = 0 }); - - // // // now the squares of these 2 differences - fr diff_x_alpha_x_1_sqr = diff_x_alpha_x_1 * diff_x_alpha_x_1; - uint32_t diff_x_alpha_x_1_sqr_idx = add_variable(diff_x_alpha_x_1_sqr); - create_mul_gate({ .a = diff_x_alpha_x_1_idx, - .b = diff_x_alpha_x_1_idx, - .c = diff_x_alpha_x_1_sqr_idx, - .mul_scaling = 1, - .c_scaling = -1, - .const_scaling = 0 }); - - fr diff_y_alpha_y_1_sqr = diff_y_alpha_y_1 * diff_y_alpha_y_1; - uint32_t diff_y_alpha_y_1_sqr_idx = add_variable(diff_y_alpha_y_1_sqr); - create_mul_gate({ .a = diff_y_alpha_y_1_idx, - .b = 
diff_y_alpha_y_1_idx, - .c = diff_y_alpha_y_1_sqr_idx, - .mul_scaling = 1, - .c_scaling = -1, - .const_scaling = 0 }); - - // // 3 gates to build identity for x_2 - // // // compute x_2 + x_ɑ + x_1 using 2 poly_gates via create_big_add_gate - fr sum_x_1_2_alpha = x_2 + x_alpha + x_1; - uint32_t sum_x_1_2_alpha_idx = add_variable(sum_x_1_2_alpha); - create_big_add_gate({ .a = x_2_idx, - .b = x_alpha_idx, - .c = x_1_idx, - .d = sum_x_1_2_alpha_idx, - .a_scaling = 1, - .b_scaling = 1, - .c_scaling = 1, - .d_scaling = -1, - .const_scaling = 0 }); - - // // // constraint: identity for x_2 - create_poly_gate({ .a = sum_x_1_2_alpha_idx, - .b = diff_x_alpha_x_1_sqr_idx, - .c = diff_y_alpha_y_1_sqr_idx, - .q_m = 1, - .q_l = 0, - .q_r = 0, - .q_o = -1, - .q_c = 0 }); - - // // 4 gates to build identity for y_2: - // // // 3 auxiliary - fr sum_y_1_y_2 = y_1 + y_2; - uint32_t sum_y_1_y_2_idx = add_variable(sum_y_1_y_2); - create_add_gate({ .a = y_1_idx, - .b = y_2_idx, - .c = sum_y_1_y_2_idx, - .a_scaling = 1, - .b_scaling = 1, - .c_scaling = -1, - .const_scaling = 0 }); - - fr diff_x_1_x_2 = x_1 - x_2; - uint32_t diff_x_1_x_2_idx = add_variable(diff_x_1_x_2); - create_add_gate({ .a = diff_x_1_x_2_idx, - .b = x_2_idx, - .c = x_1_idx, - .a_scaling = 1, - .b_scaling = 1, - .c_scaling = -1, - .const_scaling = 0 }); - - fr prod_y_diff_x_diff = diff_y_alpha_y_1 * diff_x_1_x_2; - uint32_t prod_y_diff_x_diff_idx = add_variable(prod_y_diff_x_diff); - create_mul_gate({ .a = diff_y_alpha_y_1_idx, - .b = diff_x_1_x_2_idx, - .c = prod_y_diff_x_diff_idx, - .mul_scaling = 1, - .c_scaling = -1, - .const_scaling = 0 }); - - // // // identity for y_2 - create_mul_gate({ .a = sum_y_1_y_2_idx, - .b = diff_x_alpha_x_1_idx, - .c = prod_y_diff_x_diff_idx, - .mul_scaling = 1, - .c_scaling = -1, - .const_scaling = 0 }); -} - -void StandardComposer::create_fixed_group_add_gate_final(const add_quad& in) -{ - fixed_group_add_quad final_round_quad{ .a = in.a, - .b = in.b, - .c = in.c, - .d = in.d, - .q_x_1 = fr::zero(), - .q_x_2 = fr::zero(), - .q_y_1 = fr::zero(), - .q_y_2 = fr::zero() }; - create_fixed_group_add_gate(final_round_quad); -} - std::vector StandardComposer::decompose_into_base4_accumulators(const uint32_t witness_index, const size_t num_bits, std::string const& msg) diff --git a/cpp/src/barretenberg/plonk/composer/standard_composer.hpp b/cpp/src/barretenberg/plonk/composer/standard_composer.hpp index a621f9840f..20f490e22c 100644 --- a/cpp/src/barretenberg/plonk/composer/standard_composer.hpp +++ b/cpp/src/barretenberg/plonk/composer/standard_composer.hpp @@ -97,11 +97,6 @@ class StandardComposer : public ComposerBase { void create_big_add_gate_with_bit_extraction(const add_quad& in); void create_big_mul_gate(const mul_quad& in); void create_balanced_add_gate(const add_quad& in); - void create_fixed_group_add_gate(const fixed_group_add_quad& in); - void create_fixed_group_add_gate_with_init(const fixed_group_add_quad& in, const fixed_group_init_quad& init); - void create_fixed_group_add_gate_final(const add_quad& in); - - fixed_group_add_quad previous_add_quad; void fix_witness(const uint32_t witness_index, const barretenberg::fr& witness_value); diff --git a/cpp/src/barretenberg/plonk/composer/standard_composer.test.cpp b/cpp/src/barretenberg/plonk/composer/standard_composer.test.cpp index f1894b941b..632b40da09 100644 --- a/cpp/src/barretenberg/plonk/composer/standard_composer.test.cpp +++ b/cpp/src/barretenberg/plonk/composer/standard_composer.test.cpp @@ -540,183 +540,4 @@ TEST(standard_composer, 
test_check_circuit_broken) bool result = composer.check_circuit(); EXPECT_EQ(result, false); } - -TEST(standard_composer, test_fixed_group_add_gate_with_init) -{ - StandardComposer composer = StandardComposer(); - auto gen_data = crypto::pedersen::get_generator_data({ 0, 0 }); - - // 1. generate two origin points P, Q - // 2. derive gate constant values from P, Q - // 3. instantiate P as accumulator - // 4. generate accumulator initial value 1 and instantiate as circuit variable - // 5. use the above to call `create_fixed_group_add_gate_with_init` - // 6. validate proof passes - constexpr size_t num_bits = 63; - constexpr size_t initial_exponent = ((num_bits & 1) == 1) ? num_bits - 1 : num_bits; - - const crypto::pedersen::fixed_base_ladder* ladder = gen_data.get_ladder(num_bits); - grumpkin::g1::affine_element generator = gen_data.aux_generator; - - grumpkin::g1::element origin_points[2]; - origin_points[0] = grumpkin::g1::element(ladder[0].one); // this is P - origin_points[1] = origin_points[0] + generator; - origin_points[1] = origin_points[1].normalize(); // this is Q - - fr accumulator_offset = (fr::one() + fr::one()).pow(static_cast(initial_exponent)).invert(); - fr origin_accumulators[2]{ fr::one(), accumulator_offset + fr::one() }; - - for (size_t i = 0; i < 2; ++i) { - fr starting_accumulator = origin_accumulators[i]; // skew = 0 - - fixed_group_init_quad init_quad{ origin_points[0].x, - (origin_points[0].x - origin_points[1].x), - origin_points[0].y, - (origin_points[0].y - origin_points[1].y) }; - - fixed_group_add_quad round_quad{ - .a = composer.add_variable(origin_points[i].x), - .b = composer.add_variable(origin_points[i].y), - .c = composer.add_variable(accumulator_offset), - .d = composer.add_variable(starting_accumulator), - .q_x_1 = 0, - .q_x_2 = 0, - .q_y_1 = 0, - .q_y_2 = 0, - }; - composer.create_fixed_group_add_gate_with_init(round_quad, init_quad); - } - auto prover = composer.create_prover(); - - auto verifier = composer.create_verifier(); - - proof proof = prover.construct_proof(); - - bool result = verifier.verify_proof(proof); - - EXPECT_EQ(result, true); -} - -TEST(standard_composer, test_fixed_group_add_gate) -{ - auto composer = StandardComposer(); - auto gen_data = crypto::pedersen::get_generator_data({ 0, 0 }); - - constexpr size_t num_bits = 63; - constexpr size_t num_quads_base = (num_bits - 1) >> 1; - constexpr size_t num_quads = ((num_quads_base << 1) + 1 < num_bits) ? num_quads_base + 1 : num_quads_base; - constexpr size_t num_wnaf_bits = (num_quads << 1) + 1; - constexpr size_t initial_exponent = ((num_bits & 1) == 1) ? num_bits - 1 : num_bits; - constexpr uint64_t bit_mask = (1ULL << num_bits) - 1UL; - const crypto::pedersen::fixed_base_ladder* ladder = gen_data.get_hash_ladder(num_bits); - grumpkin::g1::affine_element generator = gen_data.aux_generator; // also passes with aux_generator? 
- - grumpkin::g1::element origin_points[2]; - origin_points[0] = grumpkin::g1::element(ladder[0].one); - origin_points[1] = origin_points[0] + generator; - origin_points[1] = origin_points[1].normalize(); - - grumpkin::fr scalar_multiplier_entropy = grumpkin::fr::random_element(); - grumpkin::fr scalar_multiplier_base{ scalar_multiplier_entropy.data[0] & bit_mask, 0, 0, 0 }; - scalar_multiplier_base.data[0] = scalar_multiplier_base.data[0] & (~1ULL); - - uint64_t wnaf_entries[num_quads + 1] = { 0 }; - if ((scalar_multiplier_base.data[0] & 1) == 0) { - scalar_multiplier_base.data[0] -= 2; - } - bool skew = false; - barretenberg::wnaf::fixed_wnaf(&scalar_multiplier_base.data[0], &wnaf_entries[0], skew, 0); - - fr accumulator_offset = (fr::one() + fr::one()).pow(static_cast(initial_exponent)).invert(); - fr origin_accumulators[2]{ fr::one(), accumulator_offset + fr::one() }; - - grumpkin::g1::element* multiplication_transcript = - static_cast(aligned_alloc(64, sizeof(grumpkin::g1::element) * (num_quads + 1))); - fr* accumulator_transcript = static_cast(aligned_alloc(64, sizeof(fr) * (num_quads + 1))); - - if (skew) { - multiplication_transcript[0] = origin_points[1]; - accumulator_transcript[0] = origin_accumulators[1]; - } else { - multiplication_transcript[0] = origin_points[0]; - accumulator_transcript[0] = origin_accumulators[0]; - } - - fr one = fr::one(); - fr three = ((one + one) + one); - for (size_t i = 0; i < num_quads; ++i) { - uint64_t entry = wnaf_entries[i + 1] & crypto::pedersen::WNAF_MASK; - fr prev_accumulator = accumulator_transcript[i] + accumulator_transcript[i]; - prev_accumulator = prev_accumulator + prev_accumulator; - - grumpkin::g1::affine_element point_to_add = (entry == 1) ? ladder[i + 1].three : ladder[i + 1].one; - fr scalar_to_add = (entry == 1) ? three : one; - uint64_t predicate = (wnaf_entries[i + 1] >> 31U) & 1U; - if (predicate) { - point_to_add = -point_to_add; - scalar_to_add.self_neg(); - } - accumulator_transcript[i + 1] = prev_accumulator + scalar_to_add; - multiplication_transcript[i + 1] = multiplication_transcript[i] + point_to_add; - } - grumpkin::g1::element::batch_normalize(&multiplication_transcript[0], num_quads + 1); - - fixed_group_init_quad init_quad{ origin_points[0].x, - (origin_points[0].x - origin_points[1].x), - origin_points[0].y, - (origin_points[0].y - origin_points[1].y) }; - - fr x_alpha = accumulator_offset; - for (size_t i = 0; i < 2; ++i) { - fixed_group_add_quad round_quad; - round_quad.d = composer.add_variable(accumulator_transcript[i]); - round_quad.a = composer.add_variable(multiplication_transcript[i].x); - round_quad.b = composer.add_variable(multiplication_transcript[i].y); - - if (i == 0) { - // we need to ensure that the first value of x_alpha is a defined constant. - // However, repeated applications of the pedersen hash will use the same constant value. 
- // `put_constant_variable` will create a gate that fixes the value of x_alpha, but only once - round_quad.c = composer.put_constant_variable(x_alpha); - } else { - round_quad.c = composer.add_variable(x_alpha); - } - - if ((wnaf_entries[i + 1] & 0xffffffU) == 0) { - x_alpha = ladder[i + 1].one.x; - } else { - x_alpha = ladder[i + 1].three.x; - } - round_quad.q_x_1 = ladder[i + 1].q_x_1; - round_quad.q_x_2 = ladder[i + 1].q_x_2; - round_quad.q_y_1 = ladder[i + 1].q_y_1; - round_quad.q_y_2 = ladder[i + 1].q_y_2; - - if (i > 0) { - composer.create_fixed_group_add_gate(round_quad); - } else { - composer.create_fixed_group_add_gate_with_init(round_quad, init_quad); - } - } - - add_quad add_quad{ composer.add_variable(multiplication_transcript[2].x), - composer.add_variable(multiplication_transcript[2].y), - composer.add_variable(x_alpha), - composer.add_variable(accumulator_transcript[2]), - fr::zero(), - fr::zero(), - fr::zero(), - fr::zero(), - fr::zero() }; - composer.create_fixed_group_add_gate_final(add_quad); - auto prover = composer.create_prover(); - - auto verifier = composer.create_verifier(); - - proof proof = prover.construct_proof(); - - bool result = verifier.verify_proof(proof); - - EXPECT_EQ(result, true); -} -} // namespace plonk +} // namespace plonk \ No newline at end of file diff --git a/cpp/src/barretenberg/plonk/composer/ultra_composer.cpp b/cpp/src/barretenberg/plonk/composer/ultra_composer.cpp index c6bbf7cbae..10141371bb 100644 --- a/cpp/src/barretenberg/plonk/composer/ultra_composer.cpp +++ b/cpp/src/barretenberg/plonk/composer/ultra_composer.cpp @@ -39,7 +39,6 @@ namespace plonk { auto& q_3 = selectors[UltraSelectors::Q3]; \ auto& q_4 = selectors[UltraSelectors::Q4]; \ auto& q_arith = selectors[UltraSelectors::QARITH]; \ - auto& q_fixed_base = selectors[UltraSelectors::QFIXED]; \ auto& q_sort = selectors[UltraSelectors::QSORT]; \ auto& q_elliptic = selectors[UltraSelectors::QELLIPTIC]; \ auto& q_aux = selectors[UltraSelectors::QAUX]; \ @@ -48,9 +47,9 @@ namespace plonk { std::vector ultra_selector_properties() { std::vector result{ - { "q_m", true }, { "q_c", true }, { "q_1", true }, { "q_2", true }, - { "q_3", true }, { "q_4", false }, { "q_arith", false }, { "q_fixed_base", false }, - { "q_sort", false }, { "q_elliptic", false }, { "q_aux", false }, { "table_type", true }, + { "q_m", true }, { "q_c", true }, { "q_1", true }, { "q_2", true }, + { "q_3", true }, { "q_4", false }, { "q_arith", false }, { "q_sort", false }, + { "q_elliptic", false }, { "q_aux", false }, { "table_type", true }, }; return result; } @@ -108,7 +107,6 @@ void UltraComposer::create_add_gate(const add_triple& in) q_c.emplace_back(in.const_scaling); q_arith.emplace_back(1); q_4.emplace_back(0); - q_fixed_base.emplace_back(0); q_sort.emplace_back(0); q_lookup_type.emplace_back(0); q_elliptic.emplace_back(0); @@ -140,7 +138,6 @@ void UltraComposer::create_big_add_gate(const add_quad& in, const bool include_n q_c.emplace_back(in.const_scaling); q_arith.emplace_back(include_next_gate_w_4 ? 
2 : 1); q_4.emplace_back(in.d_scaling); - q_fixed_base.emplace_back(0); q_sort.emplace_back(0); q_lookup_type.emplace_back(0); q_elliptic.emplace_back(0); @@ -233,7 +230,6 @@ void UltraComposer::create_big_mul_gate(const mul_quad& in) q_c.emplace_back(in.const_scaling); q_arith.emplace_back(1); q_4.emplace_back(in.d_scaling); - q_fixed_base.emplace_back(0); q_sort.emplace_back(0); q_lookup_type.emplace_back(0); q_elliptic.emplace_back(0); @@ -259,7 +255,6 @@ void UltraComposer::create_balanced_add_gate(const add_quad& in) q_c.emplace_back(in.const_scaling); q_arith.emplace_back(1); q_4.emplace_back(in.d_scaling); - q_fixed_base.emplace_back(0); q_sort.emplace_back(0); q_lookup_type.emplace_back(0); q_elliptic.emplace_back(0); @@ -301,7 +296,6 @@ void UltraComposer::create_mul_gate(const mul_triple& in) q_c.emplace_back(in.const_scaling); q_arith.emplace_back(1); q_4.emplace_back(0); - q_fixed_base.emplace_back(0); q_sort.emplace_back(0); q_lookup_type.emplace_back(0); q_elliptic.emplace_back(0); @@ -331,7 +325,6 @@ void UltraComposer::create_bool_gate(const uint32_t variable_index) q_arith.emplace_back(1); q_4.emplace_back(0); - q_fixed_base.emplace_back(0); q_lookup_type.emplace_back(0); q_elliptic.emplace_back(0); q_aux.emplace_back(0); @@ -361,75 +354,12 @@ void UltraComposer::create_poly_gate(const poly_triple& in) q_arith.emplace_back(1); q_4.emplace_back(0); - q_fixed_base.emplace_back(0); q_lookup_type.emplace_back(0); q_elliptic.emplace_back(0); q_aux.emplace_back(0); ++num_gates; } -// adds a grumpkin point, from a 2-bit lookup table, into an accumulator point -void UltraComposer::create_fixed_group_add_gate(const fixed_group_add_quad& in) -{ - ULTRA_SELECTOR_REFS - assert_valid_variables({ in.a, in.b, in.c, in.d }); - - w_l.emplace_back(in.a); - w_r.emplace_back(in.b); - w_o.emplace_back(in.c); - w_4.emplace_back(in.d); - - q_1.emplace_back(in.q_x_1); - q_2.emplace_back(in.q_x_2); - q_3.emplace_back(in.q_y_1); - q_fixed_base.emplace_back(in.q_y_2); - - q_arith.emplace_back(0); - q_4.emplace_back(0); - q_m.emplace_back(0); - q_c.emplace_back(0); - q_lookup_type.emplace_back(0); - q_sort.emplace_back(0); - q_elliptic.emplace_back(0); - q_aux.emplace_back(0); - ++num_gates; -} - -// adds a grumpkin point into an accumulator, while also initializing the accumulator -void UltraComposer::create_fixed_group_add_gate_with_init(const fixed_group_add_quad& in, - const fixed_group_init_quad& init) -{ - ULTRA_SELECTOR_REFS - assert_valid_variables({ in.a, in.b, in.c, in.d }); - - w_l.emplace_back(in.a); - w_r.emplace_back(in.b); - w_o.emplace_back(in.c); - w_4.emplace_back(in.d); - - // Initialization differs slightly with that in TurboComposer. 
- q_m.emplace_back(init.q_y_1); - q_c.emplace_back(init.q_y_2); - - q_1.emplace_back(in.q_x_1); - q_2.emplace_back(in.q_x_2); - q_3.emplace_back(in.q_y_1); - q_fixed_base.emplace_back(in.q_y_2); - - q_4.emplace_back(0); - q_aux.emplace_back(0); - q_arith.emplace_back(0); - q_lookup_type.emplace_back(0); - q_sort.emplace_back(0); - q_elliptic.emplace_back(0); - - ++num_gates; -} - -void UltraComposer::create_fixed_group_add_gate_final(const add_quad& in) -{ - create_big_add_gate(in); -} /** * @brief Create an elliptic curve addition gate * @@ -481,7 +411,6 @@ void UltraComposer::create_ecc_add_gate(const ecc_add_gate& in) q_2.emplace_back(0); q_m.emplace_back(0); q_c.emplace_back(0); - q_fixed_base.emplace_back(0); q_sort.emplace_back(0); q_lookup_type.emplace_back(0); q_elliptic.emplace_back(1); @@ -500,7 +429,6 @@ void UltraComposer::create_ecc_add_gate(const ecc_add_gate& in) q_c.emplace_back(0); q_arith.emplace_back(0); q_4.emplace_back(0); - q_fixed_base.emplace_back(0); q_sort.emplace_back(0); q_lookup_type.emplace_back(0); q_elliptic.emplace_back(0); @@ -530,7 +458,6 @@ void UltraComposer::fix_witness(const uint32_t witness_index, const barretenberg q_c.emplace_back(-witness_value); q_arith.emplace_back(1); q_4.emplace_back(0); - q_fixed_base.emplace_back(0); q_sort.emplace_back(0); q_lookup_type.emplace_back(0); q_elliptic.emplace_back(0); @@ -612,7 +539,6 @@ std::shared_ptr UltraComposer::compute_proving_key() ASSERT(num_gates == q_3.size()); ASSERT(num_gates == q_4.size()); ASSERT(num_gates == q_arith.size()); - ASSERT(num_gates == q_fixed_base.size()); ASSERT(num_gates == q_elliptic.size()); ASSERT(num_gates == q_sort.size()); ASSERT(num_gates == q_lookup_type.size()); @@ -886,9 +812,6 @@ UltraProver UltraComposer::create_prover() std::unique_ptr> arithmetic_widget = std::make_unique>(circuit_proving_key.get()); - std::unique_ptr> fixed_base_widget = - std::make_unique>(circuit_proving_key.get()); - std::unique_ptr> sort_widget = std::make_unique>(circuit_proving_key.get()); @@ -902,7 +825,6 @@ UltraProver UltraComposer::create_prover() output_state.random_widgets.emplace_back(std::move(plookup_widget)); output_state.transition_widgets.emplace_back(std::move(arithmetic_widget)); - output_state.transition_widgets.emplace_back(std::move(fixed_base_widget)); output_state.transition_widgets.emplace_back(std::move(sort_widget)); output_state.transition_widgets.emplace_back(std::move(elliptic_widget)); output_state.transition_widgets.emplace_back(std::move(auxiliary_widget)); @@ -934,9 +856,6 @@ UltraToStandardProver UltraComposer::create_ultra_to_standard_prover() std::unique_ptr> arithmetic_widget = std::make_unique>(circuit_proving_key.get()); - std::unique_ptr> fixed_base_widget = - std::make_unique>(circuit_proving_key.get()); - std::unique_ptr> sort_widget = std::make_unique>(circuit_proving_key.get()); @@ -950,7 +869,6 @@ UltraToStandardProver UltraComposer::create_ultra_to_standard_prover() output_state.random_widgets.emplace_back(std::move(plookup_widget)); output_state.transition_widgets.emplace_back(std::move(arithmetic_widget)); - output_state.transition_widgets.emplace_back(std::move(fixed_base_widget)); output_state.transition_widgets.emplace_back(std::move(sort_widget)); output_state.transition_widgets.emplace_back(std::move(elliptic_widget)); output_state.transition_widgets.emplace_back(std::move(auxiliary_widget)); @@ -1061,7 +979,6 @@ plookup::ReadData UltraComposer::create_gates_from_plookup_accumulator q_c.emplace_back((i == (num_lookups - 1) ? 
0 : -multi_table.column_3_step_sizes[i + 1])); q_arith.emplace_back(0); q_4.emplace_back(0); - q_fixed_base.emplace_back(0); q_sort.emplace_back(0); q_elliptic.emplace_back(0); q_aux.emplace_back(0); @@ -1219,9 +1136,15 @@ std::vector UltraComposer::decompose_into_default_range(const uint32_t * @param variable_index * @param target_range */ -void UltraComposer::create_new_range_constraint(const uint32_t variable_index, const uint64_t target_range) +void UltraComposer::create_new_range_constraint(const uint32_t variable_index, + const uint64_t target_range, + std::string const msg) { - ASSERT(target_range != 0); + if (uint256_t(get_variable(variable_index)).data[0] > target_range) { + if (!failed()) { + failure(msg); + } + } if (range_lists.count(target_range) == 0) { range_lists.insert({ target_range, create_range_list(target_range) }); } @@ -1310,7 +1233,6 @@ void UltraComposer::create_sort_constraint(const std::vector& variable q_c.emplace_back(0); q_arith.emplace_back(0); q_4.emplace_back(0); - q_fixed_base.emplace_back(0); q_sort.emplace_back(1); q_elliptic.emplace_back(0); q_lookup_type.emplace_back(0); @@ -1329,7 +1251,6 @@ void UltraComposer::create_sort_constraint(const std::vector& variable q_c.emplace_back(0); q_arith.emplace_back(0); q_4.emplace_back(0); - q_fixed_base.emplace_back(0); q_sort.emplace_back(0); q_elliptic.emplace_back(0); q_lookup_type.emplace_back(0); @@ -1363,7 +1284,6 @@ void UltraComposer::create_dummy_constraints(const std::vector& variab q_c.emplace_back(0); q_arith.emplace_back(0); q_4.emplace_back(0); - q_fixed_base.emplace_back(0); q_sort.emplace_back(0); q_elliptic.emplace_back(0); q_lookup_type.emplace_back(0); @@ -1395,7 +1315,6 @@ void UltraComposer::create_sort_constraint_with_edges(const std::vector UltraComposer::decompose_into_default_range_better_for_odd void UltraComposer::apply_aux_selectors(const AUX_SELECTORS type) { ULTRA_SELECTOR_REFS; - q_fixed_base.emplace_back(0); q_aux.emplace_back(type == AUX_SELECTORS::NONE ? 
0 : 1); q_sort.emplace_back(0); q_lookup_type.emplace_back(0); @@ -2182,7 +2097,6 @@ std::array UltraComposer::evaluate_non_native_field_addition( q_arith.emplace_back(1); for (size_t i = 0; i < 4; ++i) { - q_fixed_base.emplace_back(0); q_sort.emplace_back(0); q_lookup_type.emplace_back(0); q_elliptic.emplace_back(0); @@ -2309,7 +2223,6 @@ std::array UltraComposer::evaluate_non_native_field_subtraction( q_arith.emplace_back(1); for (size_t i = 0; i < 4; ++i) { - q_fixed_base.emplace_back(0); q_sort.emplace_back(0); q_lookup_type.emplace_back(0); q_elliptic.emplace_back(0); @@ -2846,6 +2759,7 @@ void UltraComposer::process_RAM_array(const size_t ram_id, const size_t gate_off std::vector timestamp_deltas; for (size_t i = 0; i < ram_array.records.size() - 1; ++i) { + // create_RAM_timestamp_gate(sorted_records[i], sorted_records[i + 1]) const auto& current = ram_array.records[i]; const auto& next = ram_array.records[i + 1]; diff --git a/cpp/src/barretenberg/plonk/composer/ultra_composer.hpp b/cpp/src/barretenberg/plonk/composer/ultra_composer.hpp index 967a351bc5..599e9e921b 100644 --- a/cpp/src/barretenberg/plonk/composer/ultra_composer.hpp +++ b/cpp/src/barretenberg/plonk/composer/ultra_composer.hpp @@ -11,7 +11,7 @@ class UltraComposer : public ComposerBase { public: static constexpr ComposerType type = ComposerType::PLOOKUP; - static constexpr MerkleHashType merkle_hash_type = MerkleHashType::FIXED_BASE_PEDERSEN; + static constexpr MerkleHashType merkle_hash_type = MerkleHashType::LOOKUP_PEDERSEN; static constexpr size_t NUM_RESERVED_GATES = 4; // This must be >= num_roots_cut_out_of_vanishing_polynomial // See the comment in plonk/proof_system/prover/prover.cpp // ProverBase::compute_quotient_commitments() for why 4 exactly. @@ -24,6 +24,8 @@ class UltraComposer : public ComposerBase { static constexpr size_t DEFAULT_PLOOKUP_RANGE_SIZE = (1 << DEFAULT_PLOOKUP_RANGE_BITNUM) - 1; static constexpr size_t DEFAULT_NON_NATIVE_FIELD_LIMB_BITS = 68; static constexpr uint32_t UNINITIALIZED_MEMORY_RECORD = UINT32_MAX; + static constexpr size_t NUMBER_OF_GATES_PER_RAM_ACCESS = 2; + static constexpr size_t NUMBER_OF_ARITHMETIC_GATES_PER_RAM_ARRAY = 1; struct non_native_field_witnesses { // first 4 array elements = limbs @@ -105,20 +107,16 @@ class UltraComposer : public ComposerBase { * */ struct RamTranscript { - // Represents the current state of the array. Elements are variable indices. - // Every update requires a new entry in the `records` vector below. + // Contains the value of each index of the array std::vector state; // A vector of records, each of which contains: - // - Witnesses for [index, timestamp, value, record] - // (record is initialized during the proof creation, and points to 0 until then) - // - Index of the element in the `state` vector - // - READ/WRITE flag - // - Real timestamp value, initialized to the current `access_count` + // + The constant witness with the index + // + The value in the memory slot + // + The actual index value std::vector records; // used for RAM records, to compute the timestamp when performing a read/write - // Incremented at every init/read/write operation. 
size_t access_count = 0; }; @@ -139,7 +137,7 @@ class UltraComposer : public ComposerBase { std::vector records; }; - enum UltraSelectors { QM, QC, Q1, Q2, Q3, Q4, QARITH, QFIXED, QSORT, QELLIPTIC, QAUX, QLOOKUPTYPE, NUM }; + enum UltraSelectors { QM, QC, Q1, Q2, Q3, Q4, QARITH, QSORT, QELLIPTIC, QAUX, QLOOKUPTYPE, NUM }; UltraComposer(); UltraComposer(std::string const& crs_path, const size_t size_hint = 0); @@ -171,10 +169,6 @@ class UltraComposer : public ComposerBase { void create_mul_gate(const mul_triple& in) override; void create_bool_gate(const uint32_t a) override; void create_poly_gate(const poly_triple& in) override; - void create_fixed_group_add_gate(const fixed_group_add_quad& in); - void create_fixed_group_add_gate_with_init(const fixed_group_add_quad& in, const fixed_group_init_quad& init); - void create_fixed_group_add_gate_final(const add_quad& in); - void create_ecc_add_gate(const ecc_add_gate& in); void fix_witness(const uint32_t witness_index, const barretenberg::fr& witness_value); @@ -192,8 +186,10 @@ class UltraComposer : public ComposerBase { } } - void create_new_range_constraint(const uint32_t variable_index, const uint64_t target_range); - void create_range_constraint(const uint32_t variable_index, const size_t num_bits, std::string const&) + void create_new_range_constraint(const uint32_t variable_index, + const uint64_t target_range, + std::string const msg = "create_new_range_constraint"); + void create_range_constraint(const uint32_t variable_index, const size_t num_bits, std::string const& msg) { if (num_bits <= DEFAULT_PLOOKUP_RANGE_BITNUM) { /** @@ -205,9 +201,9 @@ class UltraComposer : public ComposerBase { * and throwing an error would require a refactor of the Composer to catelog all 'orphan' variables not * assigned to gates. **/ - create_new_range_constraint(variable_index, 1ULL << num_bits); + create_new_range_constraint(variable_index, 1ULL << num_bits, msg); } else { - decompose_into_default_range(variable_index, num_bits); + decompose_into_default_range(variable_index, num_bits, DEFAULT_PLOOKUP_RANGE_BITNUM, msg); } } @@ -241,9 +237,6 @@ class UltraComposer : public ComposerBase { size_t& ramcount) const { count = num_gates; - rangecount = 0; - romcount = 0; - ramcount = 0; // each ROM gate adds +1 extra gate due to the rom reads being copied to a sorted list set for (size_t i = 0; i < rom_arrays.size(); ++i) { for (size_t j = 0; j < rom_arrays[i].state.size(); ++j) { @@ -258,20 +251,24 @@ class UltraComposer : public ComposerBase { constexpr size_t gate_width = ultra_settings::program_width; // each RAM gate adds +2 extra gates due to the ram reads being copied to a sorted list set, // as well as an extra gate to validate timestamps + std::vector ram_timestamps; + std::vector ram_range_sizes; + std::vector ram_range_exists; for (size_t i = 0; i < ram_arrays.size(); ++i) { for (size_t j = 0; j < ram_arrays[i].state.size(); ++j) { if (ram_arrays[i].state[j] == UNINITIALIZED_MEMORY_RECORD) { - ramcount += 2; + ramcount += NUMBER_OF_GATES_PER_RAM_ACCESS; } } - ramcount += (ram_arrays[i].records.size() * 2); - ramcount += 1; // we add an addition gate after procesing a ram array + ramcount += (ram_arrays[i].records.size() * NUMBER_OF_GATES_PER_RAM_ACCESS); + ramcount += NUMBER_OF_ARITHMETIC_GATES_PER_RAM_ARRAY; // we add an addition gate after procesing a ram array // there will be 'max_timestamp' number of range checks, need to calculate. 
const auto max_timestamp = ram_arrays[i].access_count - 1; - // TODO: if a range check of length `max_timestamp` already exists, this will be innacurate! - // TODO: fix this + // if a range check of length `max_timestamp` already exists, we are double counting. + // We record `ram_timestamps` to detect and correct for this error when we process range lists. + ram_timestamps.push_back(max_timestamp); size_t padding = (gate_width - (max_timestamp % gate_width)) % gate_width; if (max_timestamp == gate_width) padding += gate_width; @@ -280,7 +277,9 @@ class UltraComposer : public ComposerBase { size_t ram_range_check_gate_count = (ram_range_check_list_size / gate_width); ram_range_check_gate_count += 1; // we need to add 1 extra addition gates for every distinct range list - ramcount += ram_range_check_gate_count; + ram_range_sizes.push_back(ram_range_check_gate_count); + ram_range_exists.push_back(false); + // rangecount += ram_range_check_gate_count; } for (const auto& list : range_lists) { auto list_size = list.second.variable_indices.size(); @@ -288,9 +287,21 @@ class UltraComposer : public ComposerBase { if (list.second.variable_indices.size() == gate_width) padding += gate_width; list_size += padding; + + for (size_t i = 0; i < ram_timestamps.size(); ++i) { + if (list.second.target_range == ram_timestamps[i]) { + ram_range_exists[i] = true; + } + } rangecount += (list_size / gate_width); rangecount += 1; // we need to add 1 extra addition gates for every distinct range list } + // update rangecount to include the ram range checks the composer will eventually be creating + for (size_t i = 0; i < ram_range_sizes.size(); ++i) { + if (!ram_range_exists[i]) { + rangecount += ram_range_sizes[i]; + } + } } /** @@ -318,33 +329,16 @@ class UltraComposer : public ComposerBase { virtual void print_num_gates() const override { - size_t count = 0; size_t rangecount = 0; size_t romcount = 0; size_t ramcount = 0; - size_t constant_rangecount = 0; - - size_t plookupcount = 0; get_num_gates_split_into_components(count, rangecount, romcount, ramcount); - for (const auto& table : lookup_tables) { - plookupcount += table.lookup_gates.size(); - count -= table.lookup_gates.size(); - } - - for (const auto& list : range_lists) { - // rough estimate - const auto constant_cost = static_cast(list.second.target_range / 6); - constant_rangecount += constant_cost; - rangecount -= constant_cost; - } - size_t total = count + romcount + rangecount; - std::cout << "gates = " << total << " (arith " << count << ", plookup " << plookupcount << ", rom " << romcount - << ", ram " << ramcount << romcount << ", range " << rangecount - << ", range table init cost = " << constant_rangecount << "), pubinp = " << public_inputs.size() - << std::endl; + size_t total = count + romcount + ramcount + rangecount; + std::cout << "gates = " << total << " (arith " << count << ", rom " << romcount << ", ram " << ramcount + << ", range " << rangecount << "), pubinp = " << public_inputs.size() << std::endl; } void assert_equal_constant(const uint32_t a_idx, @@ -508,7 +502,7 @@ class UltraComposer : public ComposerBase { std::vector ram_arrays; /** - * @brief Each entry in rom_arrays represents an independent ROM table. + * @brief Each entry in ram_arrays represents an independent ROM table. * RomTranscript tracks the current table state, * as well as the 'records' produced by each read operation. 
* Used in `compute_proving_key` to generate consistency check gates required to validate the ROM read history @@ -523,6 +517,17 @@ class UltraComposer : public ComposerBase { std::vector recursive_proof_public_input_indices; bool contains_recursive_proof = false; + /** + * Program Manifests + **/ + + /** + * @brief Create a manifest object + * + * @note UltraPlonk manifest does not use linearisation trick + * @param num_public_inputs + * @return transcript::Manifest + */ static transcript::Manifest create_manifest(const size_t num_public_inputs) { // add public inputs.... @@ -597,7 +602,6 @@ class UltraComposer : public ComposerBase { { "q_sort", fr_size, false, 14 }, // * { "q_elliptic", fr_size, false, 15 }, // * { "q_aux", fr_size, false, 16 }, - { "q_fixed_base", fr_size, false, 30 }, { "sigma_1", fr_size, false, 17 }, { "sigma_2", fr_size, false, 18 }, { "sigma_3", fr_size, false, 19 }, @@ -638,5 +642,22 @@ class UltraComposer : public ComposerBase { return output; } + + // @note 'unrolled' means "don't use linearisation techniques from the plonk paper". + /** + * @brief Create a unrolled manifest object + * + * @note UP rolled/unrolled manifests are the same. Difference between regulur && unrolled Prover/Verifier is that + * unrolled Prover/Verifier uses 16-byte challenges and a SNARK-friendly hash algorithm to generate challenges. + * (i.e. unrolled Prover/Verifier is used in recursive setting) + * + * TODO: remove linearisation trick entirely from barretenberg and relabel `unrolled` to `recursive`! + * @param num_public_inputs + * @return transcript::Manifest + */ + static transcript::Manifest create_unrolled_manifest(const size_t num_public_inputs) + { + return create_manifest(num_public_inputs); + } }; } // namespace plonk diff --git a/cpp/src/barretenberg/plonk/composer/ultra_composer.test.cpp b/cpp/src/barretenberg/plonk/composer/ultra_composer.test.cpp index 9755d4bb20..51e0838b5f 100644 --- a/cpp/src/barretenberg/plonk/composer/ultra_composer.test.cpp +++ b/cpp/src/barretenberg/plonk/composer/ultra_composer.test.cpp @@ -855,4 +855,5 @@ TEST(ultra_composer, ram) bool result = verifier.verify_proof(proof); EXPECT_EQ(result, true); } + } // namespace plonk diff --git a/cpp/src/barretenberg/plonk/proof_system/types/program_settings.hpp b/cpp/src/barretenberg/plonk/proof_system/types/program_settings.hpp index 16b086292c..8e4b072c67 100644 --- a/cpp/src/barretenberg/plonk/proof_system/types/program_settings.hpp +++ b/cpp/src/barretenberg/plonk/proof_system/types/program_settings.hpp @@ -120,7 +120,6 @@ class ultra_verifier_settings : public ultra_settings { typedef barretenberg::g1 g1; typedef transcript::StandardTranscript Transcript; typedef VerifierPlookupArithmeticWidget PlookupArithmeticWidget; - typedef VerifierUltraFixedBaseWidget UltraFixedBaseWidget; typedef VerifierGenPermSortWidget GenPermSortWidget; typedef VerifierTurboLogicWidget TurboLogicWidget; typedef VerifierPermutationWidget PermutationWidget; @@ -141,8 +140,6 @@ class ultra_verifier_settings : public ultra_settings { updated_alpha = PlookupWidget::append_scalar_multiplication_inputs(key, updated_alpha, transcript, scalars); updated_alpha = PlookupArithmeticWidget::append_scalar_multiplication_inputs(key, updated_alpha, transcript, scalars); - updated_alpha = - UltraFixedBaseWidget::append_scalar_multiplication_inputs(key, updated_alpha, transcript, scalars); updated_alpha = GenPermSortWidget::append_scalar_multiplication_inputs(key, updated_alpha, transcript, scalars); updated_alpha = 
EllipticWidget::append_scalar_multiplication_inputs(key, updated_alpha, transcript, scalars); updated_alpha = @@ -162,8 +159,6 @@ class ultra_verifier_settings : public ultra_settings { key, updated_alpha_base, transcript, quotient_numerator_eval); updated_alpha_base = PlookupArithmeticWidget::compute_quotient_evaluation_contribution( key, updated_alpha_base, transcript, quotient_numerator_eval); - updated_alpha_base = UltraFixedBaseWidget::compute_quotient_evaluation_contribution( - key, updated_alpha_base, transcript, quotient_numerator_eval); updated_alpha_base = GenPermSortWidget::compute_quotient_evaluation_contribution( key, updated_alpha_base, transcript, quotient_numerator_eval); updated_alpha_base = EllipticWidget::compute_quotient_evaluation_contribution( @@ -181,8 +176,6 @@ class ultra_to_standard_verifier_settings : public ultra_verifier_settings { public: typedef VerifierPlookupArithmeticWidget PlookupArithmeticWidget; - typedef VerifierUltraFixedBaseWidget - UltraFixedBaseWidget; typedef VerifierGenPermSortWidget GenPermSortWidget; typedef VerifierTurboLogicWidget TurboLogicWidget; typedef VerifierPermutationWidget PermutationWidget; diff --git a/cpp/src/barretenberg/plonk/proof_system/widgets/transition_widgets/plookup_arithmetic_widget.hpp b/cpp/src/barretenberg/plonk/proof_system/widgets/transition_widgets/plookup_arithmetic_widget.hpp index 85f27e0e28..bf6458fb6e 100644 --- a/cpp/src/barretenberg/plonk/proof_system/widgets/transition_widgets/plookup_arithmetic_widget.hpp +++ b/cpp/src/barretenberg/plonk/proof_system/widgets/transition_widgets/plookup_arithmetic_widget.hpp @@ -128,7 +128,7 @@ template class PlookupArith Getters::template get_value(polynomials, i); const Field& alpha_base = challenges.alpha_powers[0]; - const Field& alpha = challenges.alpha_powers[1]; + const Field& alpha = challenges.elements[ChallengeIndex::ALPHA]; // basic arithmetic gate identity // (w_1 . w_2 . q_m) + (w_1 . q_1) + (w_2 . q_2) + (w_3 . q_3) + (w_4 . 
q_4) + q_c = 0 diff --git a/cpp/src/barretenberg/proof_system/types/polynomial_manifest.hpp b/cpp/src/barretenberg/proof_system/types/polynomial_manifest.hpp index 1b3a80c915..0196173dd7 100644 --- a/cpp/src/barretenberg/proof_system/types/polynomial_manifest.hpp +++ b/cpp/src/barretenberg/proof_system/types/polynomial_manifest.hpp @@ -129,39 +129,38 @@ static constexpr PolynomialDescriptor turbo_polynomial_manifest[TURBO_MANIFEST_S PolynomialDescriptor("SIGMA_4", "sigma_4", false, PERMUTATION, SIGMA_4), // }; -static constexpr size_t ULTRA_MANIFEST_SIZE = 31; +static constexpr size_t ULTRA_MANIFEST_SIZE = 30; static constexpr PolynomialDescriptor ultra_polynomial_manifest[ULTRA_MANIFEST_SIZE]{ - PolynomialDescriptor("W_1", "w_1", true, WITNESS, W_1), // - PolynomialDescriptor("W_2", "w_2", true, WITNESS, W_2), // - PolynomialDescriptor("W_3", "w_3", true, WITNESS, W_3), // - PolynomialDescriptor("W_4", "w_4", true, WITNESS, W_4), // - PolynomialDescriptor("S", "s", true, WITNESS, S), // - PolynomialDescriptor("Z_PERM", "z_perm", true, WITNESS, Z), // - PolynomialDescriptor("Z_LOOKUP", "z_lookup", true, WITNESS, Z_LOOKUP), // - PolynomialDescriptor("Q_1", "q_1", false, SELECTOR, Q_1), // - PolynomialDescriptor("Q_2", "q_2", false, SELECTOR, Q_2), // - PolynomialDescriptor("Q_3", "q_3", false, SELECTOR, Q_3), // - PolynomialDescriptor("Q_4", "q_4", false, SELECTOR, Q_4), // - PolynomialDescriptor("Q_M", "q_m", false, SELECTOR, Q_M), // - PolynomialDescriptor("Q_C", "q_c", false, SELECTOR, Q_C), // - PolynomialDescriptor("Q_ARITHMETIC", "q_arith", false, SELECTOR, Q_ARITHMETIC), // - PolynomialDescriptor("Q_FIXED_BASE", "q_fixed_base", false, SELECTOR, Q_FIXED_BASE), // - PolynomialDescriptor("Q_SORT", "q_sort", false, SELECTOR, Q_SORT), // - PolynomialDescriptor("Q_ELLIPTIC", "q_elliptic", false, SELECTOR, Q_ELLIPTIC), // - PolynomialDescriptor("Q_AUX", "q_aux", false, SELECTOR, Q_AUX), // - PolynomialDescriptor("SIGMA_1", "sigma_1", false, PERMUTATION, SIGMA_1), // - PolynomialDescriptor("SIGMA_2", "sigma_2", false, PERMUTATION, SIGMA_2), // - PolynomialDescriptor("SIGMA_3", "sigma_3", false, PERMUTATION, SIGMA_3), // - PolynomialDescriptor("SIGMA_4", "sigma_4", false, PERMUTATION, SIGMA_4), // - PolynomialDescriptor("TABLE_1", "table_value_1", true, SELECTOR, TABLE_1), // - PolynomialDescriptor("TABLE_2", "table_value_2", true, SELECTOR, TABLE_2), // - PolynomialDescriptor("TABLE_3", "table_value_3", true, SELECTOR, TABLE_3), // - PolynomialDescriptor("TABLE_4", "table_value_4", true, SELECTOR, TABLE_4), // - PolynomialDescriptor("TABLE_TYPE", "table_type", false, SELECTOR, TABLE_TYPE), // - PolynomialDescriptor("ID_1", "id_1", false, PERMUTATION, ID_1), // - PolynomialDescriptor("ID_2", "id_2", false, PERMUTATION, ID_2), // - PolynomialDescriptor("ID_3", "id_3", false, PERMUTATION, ID_3), // - PolynomialDescriptor("ID_4", "id_4", false, PERMUTATION, ID_4), // + PolynomialDescriptor("W_1", "w_1", true, WITNESS, W_1), // + PolynomialDescriptor("W_2", "w_2", true, WITNESS, W_2), // + PolynomialDescriptor("W_3", "w_3", true, WITNESS, W_3), // + PolynomialDescriptor("W_4", "w_4", true, WITNESS, W_4), // + PolynomialDescriptor("S", "s", true, WITNESS, S), // + PolynomialDescriptor("Z_PERM", "z_perm", true, WITNESS, Z), // + PolynomialDescriptor("Z_LOOKUP", "z_lookup", true, WITNESS, Z_LOOKUP), // + PolynomialDescriptor("Q_1", "q_1", false, SELECTOR, Q_1), // + PolynomialDescriptor("Q_2", "q_2", false, SELECTOR, Q_2), // + PolynomialDescriptor("Q_3", "q_3", false, SELECTOR, Q_3), // + 
PolynomialDescriptor("Q_4", "q_4", false, SELECTOR, Q_4), // + PolynomialDescriptor("Q_M", "q_m", false, SELECTOR, Q_M), // + PolynomialDescriptor("Q_C", "q_c", false, SELECTOR, Q_C), // + PolynomialDescriptor("Q_ARITHMETIC", "q_arith", false, SELECTOR, Q_ARITHMETIC), // + PolynomialDescriptor("Q_SORT", "q_sort", false, SELECTOR, Q_SORT), // + PolynomialDescriptor("Q_ELLIPTIC", "q_elliptic", false, SELECTOR, Q_ELLIPTIC), // + PolynomialDescriptor("Q_AUX", "q_aux", false, SELECTOR, Q_AUX), // + PolynomialDescriptor("SIGMA_1", "sigma_1", false, PERMUTATION, SIGMA_1), // + PolynomialDescriptor("SIGMA_2", "sigma_2", false, PERMUTATION, SIGMA_2), // + PolynomialDescriptor("SIGMA_3", "sigma_3", false, PERMUTATION, SIGMA_3), // + PolynomialDescriptor("SIGMA_4", "sigma_4", false, PERMUTATION, SIGMA_4), // + PolynomialDescriptor("TABLE_1", "table_value_1", true, SELECTOR, TABLE_1), // + PolynomialDescriptor("TABLE_2", "table_value_2", true, SELECTOR, TABLE_2), // + PolynomialDescriptor("TABLE_3", "table_value_3", true, SELECTOR, TABLE_3), // + PolynomialDescriptor("TABLE_4", "table_value_4", true, SELECTOR, TABLE_4), // + PolynomialDescriptor("TABLE_TYPE", "table_type", false, SELECTOR, TABLE_TYPE), // + PolynomialDescriptor("ID_1", "id_1", false, PERMUTATION, ID_1), // + PolynomialDescriptor("ID_2", "id_2", false, PERMUTATION, ID_2), // + PolynomialDescriptor("ID_3", "id_3", false, PERMUTATION, ID_3), // + PolynomialDescriptor("ID_4", "id_4", false, PERMUTATION, ID_4), // }; // Simple class allowing for access to a polynomial manifest based on composer type diff --git a/cpp/src/barretenberg/proof_system/verification_key/sol_gen.hpp b/cpp/src/barretenberg/proof_system/verification_key/sol_gen.hpp index 03a9efdf72..096085b577 100644 --- a/cpp/src/barretenberg/proof_system/verification_key/sol_gen.hpp +++ b/cpp/src/barretenberg/proof_system/verification_key/sol_gen.hpp @@ -4,7 +4,9 @@ namespace bonk { * Write a solidity file containing the vk params to the given stream. * Uses StandardPlonk **/ -inline void output_vk_sol(std::ostream& os, std::shared_ptr const& key, std::string const& class_name) +inline void output_vk_sol_standard(std::ostream& os, + std::shared_ptr const& key, + std::string const& class_name) { const auto print_u256 = [&](const std::string& offset, const barretenberg::fr& element, const std::string& name) { os << " mstore(add(_vk, " << offset << "), " << element << ") // " << name << std::endl; @@ -57,4 +59,108 @@ inline void output_vk_sol(std::ostream& os, std::shared_ptr co os << std::flush; } -} // namespace bonk \ No newline at end of file + + +/** + * Write a solidity file containing the vk params to the given stream. 
+ * Uses UltraPlonk + **/ +inline void output_vk_sol_ultra(std::ostream& os, std::shared_ptr const& key, std::string const& class_name) +{ + const auto print_u256 = [&](const std::string& offset, const barretenberg::fr& element, const std::string& name) { + os << " mstore(add(_vk, " << offset << "), " << element << ") // " << name << std::endl; + }; + + const auto print_g1 = [&](const std::string& offsetX, + const std::string& offsetY, + const barretenberg::g1::affine_element& element, + const std::string& name) { + os << " mstore(add(_vk, " << offsetX << "), " << element.x << ") // " << name << ".x" << std::endl; + os << " mstore(add(_vk, " << offsetY << "), " << element.y << ") // " << name << ".y" << std::endl; + }; + + // clang-format off + os << + "// Verification Key Hash: " << key->sha256_hash() << "\n" + "// SPDX-License-Identifier: Apache-2.0\n" + "// Copyright 2022 Aztec\n" + "pragma solidity >=0.8.4;\n" + "\n" + "library " << class_name << " {\n" + " function verificationKeyHash() internal pure returns(bytes32) {\n" + " return 0x" << key->sha256_hash() << ";\n" + " }\n\n" + " function loadVerificationKey(uint256 _vk, uint256 _omegaInverseLoc) internal pure {\n" + " assembly {\n"; + print_u256("0x00", key->domain.size, "vk.circuit_size"); + print_u256("0x20", key->num_public_inputs, "vk.num_inputs"); + print_u256("0x40", key->domain.root, "vk.work_root"); + print_u256("0x60", key->domain.domain_inverse, "vk.domain_inverse"); + print_g1("0x80", "0xa0", key->constraint_selectors.at("Q_1"), "vk.Q1"); + print_g1("0xc0", "0xe0", key->constraint_selectors.at("Q_2"), "vk.Q2"); + print_g1("0x100", "0x120", key->constraint_selectors.at("Q_3"), "vk.Q3"); + print_g1("0x140", "0x160", key->constraint_selectors.at("Q_4"), "vk.Q4"); + print_g1("0x180", "0x1a0", key->constraint_selectors.at("Q_M"), "vk.Q_M"); + print_g1("0x1c0", "0x1e0", key->constraint_selectors.at("Q_C"), "vk.Q_C"); + print_g1("0x200", "0x220", key->constraint_selectors.at("Q_ARITHMETIC"), "vk.Q_ARITHMETIC"); + print_g1("0x240", "0x260", key->constraint_selectors.at("Q_SORT"), "vk.QSORT"); + print_g1("0x280", "0x2a0", key->constraint_selectors.at("Q_ELLIPTIC"), "vk.Q_ELLIPTIC"); + print_g1("0x2c0", "0x2e0", key->constraint_selectors.at("Q_AUX"), "vk.Q_AUX"); + print_g1("0x300", "0x320", key->permutation_selectors.at("SIGMA_1"), "vk.SIGMA1"); + print_g1("0x340", "0x360", key->permutation_selectors.at("SIGMA_2"), "vk.SIGMA2"); + print_g1("0x380", "0x3a0", key->permutation_selectors.at("SIGMA_3"), "vk.SIGMA3"); + print_g1("0x3c0", "0x3e0", key->permutation_selectors.at("SIGMA_4"), "vk.SIGMA4"); + print_g1("0x400", "0x420", key->constraint_selectors.at("TABLE_1"), "vk.TABLE1"); + print_g1("0x440", "0x460", key->constraint_selectors.at("TABLE_2"), "vk.TABLE2"); + print_g1("0x480", "0x4a0", key->constraint_selectors.at("TABLE_3"), "vk.TABLE3"); + print_g1("0x4c0", "0x4e0", key->constraint_selectors.at("TABLE_4"), "vk.TABLE4"); + print_g1("0x500", "0x520", key->constraint_selectors.at("TABLE_TYPE"), "vk.TABLE_TYPE"); + print_g1("0x540", "0x560", key->permutation_selectors.at("ID_1"), "vk.ID1"); + print_g1("0x580", "0x5a0", key->permutation_selectors.at("ID_2"), "vk.ID2"); + print_g1("0x5c0", "0x5e0", key->permutation_selectors.at("ID_3"), "vk.ID3"); + print_g1("0x600", "0x620", key->permutation_selectors.at("ID_4"), "vk.ID4"); + os << + " mstore(add(_vk, 0x640), " << (key->contains_recursive_proof ? "0x01" : "0x00") << ") // vk.contains_recursive_proof\n" + " mstore(add(_vk, 0x660), " << (key->contains_recursive_proof ? 
key->recursive_proof_public_input_indices[0] : 0) << ") // vk.recursive_proof_public_input_indices\n" + " mstore(add(_vk, 0x680), " << key->reference_string->get_g2x().x.c1 << ") // vk.g2_x.X.c1 \n" + " mstore(add(_vk, 0x6a0), " << key->reference_string->get_g2x().x.c0 << ") // vk.g2_x.X.c0 \n" + " mstore(add(_vk, 0x6c0), " << key->reference_string->get_g2x().y.c1 << ") // vk.g2_x.Y.c1 \n" + " mstore(add(_vk, 0x6e0), " << key->reference_string->get_g2x().y.c0 << ") // vk.g2_x.Y.c0 \n" + " mstore(_omegaInverseLoc, " << key->domain.root_inverse << ") // vk.work_root_inverse\n" + " }\n" + " }\n" + "}\n"; + + os << std::flush; +} + +/** + * @brief Wrapper method to output a solidity verification key. Composer type determined from key + * + * @param os + * @param key + * @param class_name + */ +inline void output_vk_sol(std::ostream& os, std::shared_ptr const& key, std::string const& class_name) +{ + plonk::ComposerType composer_type = static_cast(key->composer_type); + switch (composer_type) { + case plonk::ComposerType::STANDARD: { + return output_vk_sol_standard(os, key, class_name); + break; + } + // case plonk::ComposerType::TURBO: { + // return output_vk_sol_turbo(os, key, class_name); + // break; + // } + case plonk::ComposerType::PLOOKUP: { + return output_vk_sol_ultra(os, key, class_name); + break; + } + default: { + std::cerr << "bonk::output_vk_sol unsupported composer type. Defaulting to standard composer" << std::endl; + return output_vk_sol_standard(os, key, class_name); + } + } +} +} // namespace bonk diff --git a/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen.cpp b/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen.cpp index 2d24f76bfd..5db83ffca1 100644 --- a/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen.cpp +++ b/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen.cpp @@ -2,6 +2,7 @@ #include "pedersen_plookup.hpp" #include "barretenberg/crypto/pedersen/pedersen.hpp" #include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp" +#include "pedersen_gates.hpp" #include "../../primitives/composers/composers.hpp" #include "../../primitives/packed_byte_array/packed_byte_array.hpp" @@ -195,6 +196,7 @@ point pedersen::hash_single(const field_t& in, * * where x_{α,i} is decided based on the corresponding quad value. 
*/ + pedersen_gates gates(ctx); fr x_alpha = accumulator_offset; std::vector accumulator_witnesses; for (size_t i = 0; i < num_quads; ++i) { @@ -221,7 +223,7 @@ point pedersen::hash_single(const field_t& in, round_quad.q_y_2 = ladder[i + 1].q_y_2; if (i > 0) { - ctx->create_fixed_group_add_gate(round_quad); + gates.create_fixed_group_add_gate(round_quad); } else { if constexpr (C::type == plonk::PLOOKUP && C::merkle_hash_type == plonk::MerkleHashType::FIXED_BASE_PEDERSEN) { @@ -255,7 +257,7 @@ point pedersen::hash_single(const field_t& in, .const_scaling = init_quad.q_x_2 }; ctx->create_big_mul_gate(x_init_quad); } - ctx->create_fixed_group_add_gate_with_init(round_quad, init_quad); + gates.create_fixed_group_add_gate_with_init(round_quad, init_quad); }; accumulator_witnesses.push_back(round_quad.d); @@ -272,7 +274,7 @@ point pedersen::hash_single(const field_t& in, fr::zero(), fr::zero(), fr::zero() }; - ctx->create_fixed_group_add_gate_final(add_quad); + gates.create_fixed_group_add_gate_final(add_quad); accumulator_witnesses.push_back(add_quad.d); point result; @@ -516,10 +518,6 @@ field_t pedersen::compress_unsafe(const field_t& in_left, const size_t hash_index, const bool validate_input_is_in_field) { - if constexpr (C::type == ComposerType::PLOOKUP && C::merkle_hash_type == plonk::MerkleHashType::LOOKUP_PEDERSEN) { - return pedersen_plookup::compress({ in_left, in_right }); - } - std::vector accumulators; generator_index_t index_1 = { hash_index, 0 }; generator_index_t index_2 = { hash_index, 1 }; @@ -530,10 +528,6 @@ field_t pedersen::compress_unsafe(const field_t& in_left, template point pedersen::commit(const std::vector& inputs, const size_t hash_index) { - if constexpr (C::type == ComposerType::PLOOKUP && C::merkle_hash_type == plonk::MerkleHashType::LOOKUP_PEDERSEN) { - return pedersen_plookup::commit(inputs, hash_index); - } - std::vector to_accumulate; for (size_t i = 0; i < inputs.size(); ++i) { generator_index_t index = { hash_index, i }; @@ -544,10 +538,6 @@ template point pedersen::commit(const std::vector& i template field_t pedersen::compress(const std::vector& inputs, const size_t hash_index) { - if constexpr (C::type == ComposerType::PLOOKUP && C::merkle_hash_type == plonk::MerkleHashType::LOOKUP_PEDERSEN) { - return pedersen_plookup::compress(inputs, hash_index); - } - return commit(inputs, hash_index).x; } diff --git a/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_gates.hpp b/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_gates.hpp new file mode 100644 index 0000000000..637f206d03 --- /dev/null +++ b/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_gates.hpp @@ -0,0 +1,316 @@ +#pragma once +#include "barretenberg/proof_system/composer/composer_base.hpp" +#include "../../primitives/composers/composers_fwd.hpp" +#include "../../primitives/field/field.hpp" +#include "../../primitives/point/point.hpp" +#include "../../primitives/byte_array/byte_array.hpp" + +namespace plonk { +namespace stdlib { + +/** + * @brief Creates constraints required for TurboPlonk pedersen hash algorithm + * (see https://hackmd.io/@aztec-network/S1mRod9wF?type=view for details) + * + * StandardPlonk and UltraPlonk do not have support the custom TurboPlonk pedersen hash gate. + * This class reduces the TP gate to a sequence of regular arithmetic gates for compatability purposes. + * + * N.B. wherever possible, UltraPlonk should use pedersen_plookup as it is MUCH more efficient! 
+ * pedersen_plookup produces different hash outputs to the TurboPlonk pedersen hash, use this if interoperability + * between proof systems is required + * @tparam Composer + */ +template class pedersen_gates { + public: + using fixed_group_add_quad = bonk::fixed_group_add_quad; + using fixed_group_init_quad = bonk::fixed_group_init_quad; + using add_quad = bonk::add_quad; + using ComposerType = plonk::ComposerType; + + Composer* context; + fixed_group_add_quad previous_add_quad; + + pedersen_gates(Composer* input_context = nullptr) + : context(input_context) + {} + + void create_fixed_group_add_gate(const fixed_group_add_quad& in) + { + if constexpr (Composer::type == ComposerType::TURBO) { + context->create_fixed_group_add_gate(in); + } else { + + // TODO: not supported by honk composer? + // context->assert_valid_variables({ in.a, in.b, in.c, in.d }); + + auto row_1 = previous_add_quad; + auto row_2 = in; + previous_add_quad = in; + + fr a_1 = context->get_variable(row_1.d); + fr a_2 = context->get_variable(row_2.d); + fr x_1 = context->get_variable(row_1.a); + fr y_1 = context->get_variable(row_1.b); + fr x_2 = context->get_variable(row_2.a); + fr y_2 = context->get_variable(row_2.b); + fr x_alpha = context->get_variable(row_2.c); + + fr q_x_alpha_1 = row_1.q_x_1; + fr q_x_alpha_2 = row_1.q_x_2; + fr q_y_alpha_1 = row_1.q_y_1; + fr q_y_alpha_2 = row_1.q_y_2; + + uint32_t a_1_idx = row_1.d; + uint32_t a_2_idx = row_2.d; + uint32_t x_1_idx = row_1.a; + uint32_t y_1_idx = row_1.b; + uint32_t x_2_idx = row_2.a; + uint32_t y_2_idx = row_2.b; + uint32_t x_alpha_idx = row_2.c; + + // add variable δ = a_2 - 4a_1 + fr delta = a_2 - (a_1 + a_1 + a_1 + a_1); + uint32_t delta_idx = context->add_variable(delta); + context->create_add_gate({ .a = a_2_idx, + .b = a_1_idx, + .c = delta_idx, + .a_scaling = 1, + .b_scaling = -4, + .c_scaling = -1, + .const_scaling = 0 }); + + // constraint: (δ + 3)(δ + 1)(δ - 1)(δ - 3) + // (δ + 3)(δ + 1)(δ - 1)(δ - 3) = (δ^2 - 9)(δ^2 - 1)=0 + // // first: (δ^2 - δ_sqr = 0) + fr delta_sqr = delta * delta; + uint32_t delta_sqr_idx = context->add_variable(delta_sqr); + context->create_mul_gate({ .a = delta_idx, + .b = delta_idx, + .c = delta_sqr_idx, + .mul_scaling = 1, + .c_scaling = -1, + .const_scaling = 0 }); + // // next (δ^2 - 9)( δ^2 - 1) = δ^2*δ^2 - 10 * δ^2 + 9 = 0 + context->create_mul_gate({ .a = delta_sqr_idx, + .b = delta_sqr_idx, + .c = delta_sqr_idx, + .mul_scaling = 1, + .c_scaling = -10, + .const_scaling = 9 }); + + // validate correctness of x_ɑ + // constraint: (δ^2) * q_x_ɑ,1 + q_x_ɑ,2 - x,ɑ = 0 + context->create_add_gate({ .a = delta_sqr_idx, + .b = x_alpha_idx, + .c = context->zero_idx, + .a_scaling = q_x_alpha_1, + .b_scaling = -1, + .c_scaling = 0, + .const_scaling = q_x_alpha_2 }); + + // compute y_alpha using lookup formula, instantiate as witness and validate + fr y_alpha = (x_alpha * q_y_alpha_1 + q_y_alpha_2) * delta; + uint32_t y_alpha_idx = context->add_variable(y_alpha); + context->create_poly_gate({ .a = delta_idx, + .b = x_alpha_idx, + .c = y_alpha_idx, + .q_m = q_y_alpha_1, + .q_l = q_y_alpha_2, + .q_r = 0, + .q_o = -1, + .q_c = 0 }); + + // show that (x_1, y_1) + (x_ɑ, y_ɑ) = (x_2, y_2) in 11 gates + // // 4 gates to compute commonly used expressions + // // // 2 differences: + fr diff_x_alpha_x_1 = x_alpha - x_1; + uint32_t diff_x_alpha_x_1_idx = context->add_variable(diff_x_alpha_x_1); + context->create_add_gate({ .a = diff_x_alpha_x_1_idx, + .b = x_1_idx, + .c = x_alpha_idx, + .a_scaling = 1, + .b_scaling = 1, + .c_scaling = -1, + 
.const_scaling = 0 }); + + fr diff_y_alpha_y_1 = y_alpha - y_1; + uint32_t diff_y_alpha_y_1_idx = context->add_variable(diff_y_alpha_y_1); + context->create_add_gate({ .a = diff_y_alpha_y_1_idx, + .b = y_1_idx, + .c = y_alpha_idx, + .a_scaling = 1, + .b_scaling = 1, + .c_scaling = -1, + .const_scaling = 0 }); + + // // // now the squares of these 2 differences + fr diff_x_alpha_x_1_sqr = diff_x_alpha_x_1 * diff_x_alpha_x_1; + uint32_t diff_x_alpha_x_1_sqr_idx = context->add_variable(diff_x_alpha_x_1_sqr); + context->create_mul_gate({ .a = diff_x_alpha_x_1_idx, + .b = diff_x_alpha_x_1_idx, + .c = diff_x_alpha_x_1_sqr_idx, + .mul_scaling = 1, + .c_scaling = -1, + .const_scaling = 0 }); + + fr diff_y_alpha_y_1_sqr = diff_y_alpha_y_1 * diff_y_alpha_y_1; + uint32_t diff_y_alpha_y_1_sqr_idx = context->add_variable(diff_y_alpha_y_1_sqr); + context->create_mul_gate({ .a = diff_y_alpha_y_1_idx, + .b = diff_y_alpha_y_1_idx, + .c = diff_y_alpha_y_1_sqr_idx, + .mul_scaling = 1, + .c_scaling = -1, + .const_scaling = 0 }); + + // // 3 gates to build identity for x_2 + // // // compute x_2 + x_ɑ + x_1 using 2 poly_gates via create_big_add_gate + fr sum_x_1_2_alpha = x_2 + x_alpha + x_1; + uint32_t sum_x_1_2_alpha_idx = context->add_variable(sum_x_1_2_alpha); + context->create_big_add_gate({ .a = x_2_idx, + .b = x_alpha_idx, + .c = x_1_idx, + .d = sum_x_1_2_alpha_idx, + .a_scaling = 1, + .b_scaling = 1, + .c_scaling = 1, + .d_scaling = -1, + .const_scaling = 0 }); + + // // // constraint: identity for x_2 + context->create_poly_gate({ .a = sum_x_1_2_alpha_idx, + .b = diff_x_alpha_x_1_sqr_idx, + .c = diff_y_alpha_y_1_sqr_idx, + .q_m = 1, + .q_l = 0, + .q_r = 0, + .q_o = -1, + .q_c = 0 }); + + // // 4 gates to build identity for y_2: + // // // 3 auxiliary + fr sum_y_1_y_2 = y_1 + y_2; + uint32_t sum_y_1_y_2_idx = context->add_variable(sum_y_1_y_2); + context->create_add_gate({ .a = y_1_idx, + .b = y_2_idx, + .c = sum_y_1_y_2_idx, + .a_scaling = 1, + .b_scaling = 1, + .c_scaling = -1, + .const_scaling = 0 }); + + fr diff_x_1_x_2 = x_1 - x_2; + uint32_t diff_x_1_x_2_idx = context->add_variable(diff_x_1_x_2); + context->create_add_gate({ .a = diff_x_1_x_2_idx, + .b = x_2_idx, + .c = x_1_idx, + .a_scaling = 1, + .b_scaling = 1, + .c_scaling = -1, + .const_scaling = 0 }); + + fr prod_y_diff_x_diff = diff_y_alpha_y_1 * diff_x_1_x_2; + uint32_t prod_y_diff_x_diff_idx = context->add_variable(prod_y_diff_x_diff); + context->create_mul_gate({ .a = diff_y_alpha_y_1_idx, + .b = diff_x_1_x_2_idx, + .c = prod_y_diff_x_diff_idx, + .mul_scaling = 1, + .c_scaling = -1, + .const_scaling = 0 }); + + // // // identity for y_2 + context->create_mul_gate({ .a = sum_y_1_y_2_idx, + .b = diff_x_alpha_x_1_idx, + .c = prod_y_diff_x_diff_idx, + .mul_scaling = 1, + .c_scaling = -1, + .const_scaling = 0 }); + } + } + + void create_fixed_group_add_gate_with_init(const fixed_group_add_quad& in, const fixed_group_init_quad& init) + { + if constexpr (Composer::type == ComposerType::TURBO) { + context->create_fixed_group_add_gate_with_init(in, init); + } else { + uint32_t x_0_idx = in.a; + uint32_t y_0_idx = in.b; + uint32_t x_alpha_idx = in.c; + uint32_t a_0_idx = in.d; + + fr x_alpha = context->get_variable(x_alpha_idx); + fr a_0 = context->get_variable(a_0_idx); + + // weird names here follow the Turbo notation + fr q_4 = init.q_x_1; + fr q_5 = init.q_x_2; + fr q_m = init.q_y_1; + fr q_c = init.q_y_2; + + // We will think of s = 1-a_0 as an auxiliary "switch" which is equal to either -x_alpha or 0 + // during the initialization step, but 
we will not add this variable to the composer for reasons of + // efficiency. + + // (ɑ^4 identity) impose 1-a_0 = 0 or -x_alpha + // // first check formula for sx_alpha + fr sx_alpha = (fr(1) - a_0) * x_alpha; + uint32_t sx_alpha_idx = context->add_variable(sx_alpha); + context->create_poly_gate({ .a = a_0_idx, + .b = x_alpha_idx, + .c = sx_alpha_idx, + .q_m = 1, + .q_l = 0, + .q_r = -1, + .q_o = 1, + .q_c = 0 }); + + // // now add the desired constraint on sx_alpha + // // s(s + x_alpha) = s*s + s*x_alpha = 0 + context->create_poly_gate( + { .a = a_0_idx, .b = a_0_idx, .c = sx_alpha_idx, .q_m = 1, .q_l = -2, .q_r = 0, .q_o = 1, .q_c = 1 }); + + // (ɑ^5 identity) + context->create_poly_gate({ .a = x_0_idx, + .b = x_alpha_idx, + .c = a_0_idx, + .q_m = -1, + .q_l = 0, + .q_r = q_4, + .q_o = -q_5, + .q_c = q_5 }); + + // (ɑ^6 identity) + context->create_poly_gate({ .a = y_0_idx, + .b = x_alpha_idx, + .c = a_0_idx, + .q_m = -1, + .q_l = 0, + .q_r = q_m, + .q_o = -q_c, + .q_c = q_c }); + + // There is no previous add quad. + previous_add_quad = in; + } + } + + void create_fixed_group_add_gate_final(const add_quad& in) + { + if constexpr (Composer::type == ComposerType::TURBO) { + context->create_fixed_group_add_gate_final(in); + } else { + + fixed_group_add_quad final_round_quad{ .a = in.a, + .b = in.b, + .c = in.c, + .d = in.d, + .q_x_1 = fr::zero(), + .q_x_2 = fr::zero(), + .q_y_1 = fr::zero(), + .q_y_2 = fr::zero() }; + create_fixed_group_add_gate(final_round_quad); + } + } +}; + +} // namespace stdlib +} // namespace plonk \ No newline at end of file diff --git a/cpp/src/barretenberg/stdlib/primitives/group/group.hpp b/cpp/src/barretenberg/stdlib/primitives/group/group.hpp index 9697231122..5886cc964f 100644 --- a/cpp/src/barretenberg/stdlib/primitives/group/group.hpp +++ b/cpp/src/barretenberg/stdlib/primitives/group/group.hpp @@ -6,6 +6,7 @@ #include "barretenberg/crypto/pedersen/pedersen.hpp" #include "../../hash/pedersen/pedersen.hpp" +#include "../../hash/pedersen/pedersen_gates.hpp" using namespace bonk; @@ -162,6 +163,7 @@ auto group::fixed_base_scalar_mul_internal(const field_t accumulator_witnesses; + pedersen_gates pedersen_gates(ctx); for (size_t i = 0; i < num_quads; ++i) { fixed_group_add_quad round_quad; round_quad.d = ctx->add_variable(accumulator_transcript[i]); @@ -187,9 +189,9 @@ auto group::fixed_base_scalar_mul_internal(const field_t 0) { - ctx->create_fixed_group_add_gate(round_quad); + pedersen_gates.create_fixed_group_add_gate(round_quad); } else { - ctx->create_fixed_group_add_gate_with_init(round_quad, init_quad); + pedersen_gates.create_fixed_group_add_gate_with_init(round_quad, init_quad); } accumulator_witnesses.push_back(round_quad.d); } diff --git a/cpp/src/barretenberg/stdlib/primitives/memory/dynamic_array.cpp b/cpp/src/barretenberg/stdlib/primitives/memory/dynamic_array.cpp new file mode 100644 index 0000000000..94ab20d074 --- /dev/null +++ b/cpp/src/barretenberg/stdlib/primitives/memory/dynamic_array.cpp @@ -0,0 +1,279 @@ +#include "dynamic_array.hpp" + +#include "../composers/composers.hpp" +#include "../bool/bool.hpp" + +namespace plonk { +namespace stdlib { + +/** + * @brief Construct a new Dynamic Array< Composer>:: Dynamic Array object + * + * @details Dynamic arrays require a maximum size when created, that cannot be exceeded. + * Read and write operations cost 3.25 UltraPlonk gates. + * Each dynamic array requires an additional 3.25 * maximum_size number of gates. 
+ * If the dynamic array also requires a unique range constraint table due to its length (e.g. not a power of + * 2), this will add an additional (maximum_size / 6) gates. + * + * @tparam Composer + * @param composer + * @param maximum_size The maximum size of the array + */ +template +DynamicArray::DynamicArray(Composer* composer, const size_t maximum_size) + : _context(composer) + , _max_size(maximum_size) + , _length(0) +{ + static_assert(Composer::type == plonk::ComposerType::PLOOKUP); + ASSERT(_context != nullptr); + _inner_table = ram_table(_context, maximum_size); + // Initialize the ram table with all zeroes + for (size_t i = 0; i < maximum_size; ++i) { + _inner_table.write(i, 0); + } +} + +/** + * @brief Construct a new Dynamic Array< Composer>:: Dynamic Array object + * + * @tparam Composer + * @param other + */ +template +DynamicArray::DynamicArray(const DynamicArray& other) + : _context(other._context) + , _max_size(other._max_size) + , _length(other._length) + , _inner_table(other._inner_table) +{} + +/** + * @brief Construct a new Dynamic Array< Composer>:: Dynamic Array object + * + * @tparam Composer + * @param other + */ +template +DynamicArray::DynamicArray(DynamicArray&& other) + : _context(other._context) + , _max_size(other._max_size) + , _length(other._length) + , _inner_table(other._inner_table) +{} + +/** + * @brief Assignment Operator + * + * @tparam Composer + * @param other + * @return DynamicArray& + */ +template DynamicArray& DynamicArray::operator=(const DynamicArray& other) +{ + _context = other._context; + _max_size = other._max_size; + _length = other._length; + _inner_table = other._inner_table; + return *this; +} + +/** + * @brief Move Assignment Operator + * + * @tparam Composer + * @param other + * @return DynamicArray& + */ +template DynamicArray& DynamicArray::operator=(DynamicArray&& other) +{ + _context = other._context; + _max_size = other._max_size; + _length = other._length; + _inner_table = other._inner_table; + return *this; +} + +/** + * @brief Resize array. Current method v. inefficient! + * + * @tparam Composer + * @param new_length + */ +template +void DynamicArray::resize(const field_pt& new_length, const field_pt default_value) +{ + // 1: assert new_length < max_size + field_pt max_bounds_check = (field_pt(_max_size) - new_length - 1); + if (max_bounds_check.is_constant()) { + ASSERT(uint256_t(new_length.get_value()) <= _max_size); + } else { + _context->create_new_range_constraint(max_bounds_check.normalize().get_witness_index(), _max_size); + } + + /** + * Iterate over max array size + * if i is currently >= length but will be < new_length, write `default_value` into ram table + */ + for (size_t i = 0; i < _max_size; ++i) { + bool_pt index_valid = bool_pt(witness_pt(_context, (uint256_t)(new_length.get_value()) > i)); + { + // index_delta will be between 0 and length - 1 if index valid + // i.e. will pass check that index_delta < _max_size + field_pt index_delta = (new_length - i - 1); + + // reverse_delta will be between 0 and (_max_size - length) if *invalid* + // i.e. 
will pass check that reverse_delta < _max_size + field_pt reverse_delta = (-new_length + i); + + field_pt bounds_check = field_pt::conditional_assign(index_valid, index_delta, reverse_delta); + + // this should do the same for only 2 gates, but hard to read + // field_pt t1 = new_length - i; + // field_pt t2 = field_pt(index_valid); + // field_pt bounds_check = (t2 + t2).madd(t1 - 1, -t1); + + _context->create_new_range_constraint(bounds_check.normalize().get_witness_index(), _max_size); + } + + bool_pt index_currently_invalid = bool_pt(witness_pt(_context, i >= native_size())); + { + // index_delta will be between 0 and length - 1 if index valid + // i.e. will pass check that index_delta < _max_size + field_pt index_delta = (_length - i - 1); + + // reverse_delta will be between 0 and (_max_size - length) if *invalid* + // i.e. will pass check that reverse_delta < _max_size + field_pt reverse_delta = (-_length + i); + + field_pt bounds_check = field_pt::conditional_assign(index_currently_invalid, reverse_delta, index_delta); + + _context->create_new_range_constraint(bounds_check.normalize().get_witness_index(), _max_size); + } + + field_pt old_value = _inner_table.read(i); + field_pt new_value = + field_pt::conditional_assign(index_currently_invalid && index_valid, default_value, old_value); + _inner_table.write(i, new_value); + } + + _length = new_length; +} + +/** + * @brief Read a field element from the dynamic array at an index value + * + * @tparam Composer + * @param index + * @return field_t + */ +template field_t DynamicArray::read(const field_pt& index) const +{ + const field_pt index_delta = (_length - index - 1); + + if (index_delta.is_constant()) { + bool valid = (uint256_t(index_delta.get_value()) < _max_size); + if (!valid) { + _context->failure("DynamicArray::read access out of bounds"); + } + } else { + _context->create_new_range_constraint( + index_delta.normalize().get_witness_index(), _max_size, "DynamicArray::read access out of bounds"); + } + + return _inner_table.read(index); +} + +/** + * @brief Write a field element into the dynamic array at an index value + * + * @tparam Composer + * @param index + * @param value + */ +template void DynamicArray::write(const field_pt& index, const field_pt& value) +{ + const field_pt index_delta = (_length - index - 1); + + if (index_delta.is_constant()) { + bool valid = (uint256_t(index_delta.get_value()) < _max_size); + if (!valid) { + _context->failure("DynamicArray::read access out of bounds"); + } + } else { + _context->create_new_range_constraint( + index_delta.normalize().get_witness_index(), _max_size, "DynamicArray::read access out of bounds"); + } + + _inner_table.write(index, value); +} + +/** + * @brief Push a field element onto the dynamic array + * + * @tparam Composer + * @param value + */ +template void DynamicArray::push(const field_pt& value) +{ + if (native_size() >= _max_size) { + _context->failure("DynamicArray::push array is already at its maximum size"); + } + + _inner_table.write(_length, value); + _length += 1; +} + +/** + * @brief Pop a field element off of the dynamic array + * + * @tparam Composer + */ +template void DynamicArray::pop() +{ + if (native_size() == 0) { + _context->failure("DynamicArray::pop array is already empty"); + } + + _length.assert_is_not_zero(); + _length -= 1; +} + +/** + * @brief Conditionally push a field element onto the dynamic array + * + * @tparam Composer + * @param predicate + * @param value + */ +template +void DynamicArray::conditional_push(const bool_pt& predicate, 
const field_pt& value) +{ + if (native_size() >= _max_size) { + _context->failure("DynamicArray::push array is already at its maximum size"); + } + + _inner_table.write(_length, value); + _length += predicate; +} + +/** + * @brief Conditionallhy pop a field element off of the dynamic array + * + * @tparam Composer + * @param predicate + */ +template void DynamicArray::conditional_pop(const bool_pt& predicate) +{ + if (native_size() == 0) { + _context->failure("DynamicArray::pop array is already empty"); + } + + field_pt length_check = field_pt::conditional_assign(predicate, _length, 1); + length_check.assert_is_not_zero(); + _length -= predicate; +} + +INSTANTIATE_STDLIB_ULTRA_TYPE(DynamicArray); +} // namespace stdlib +} // namespace plonk \ No newline at end of file diff --git a/cpp/src/barretenberg/stdlib/primitives/memory/dynamic_array.hpp b/cpp/src/barretenberg/stdlib/primitives/memory/dynamic_array.hpp new file mode 100644 index 0000000000..f5d81f71ff --- /dev/null +++ b/cpp/src/barretenberg/stdlib/primitives/memory/dynamic_array.hpp @@ -0,0 +1,54 @@ +#pragma once +#include "ram_table.hpp" + +namespace plonk { +namespace stdlib { + +/** + * @brief A dynamic array of field elements + * + * @tparam Composer (must support plookup) + */ +template class DynamicArray { + private: + typedef field_t field_pt; + typedef bool_t bool_pt; + typedef witness_t witness_pt; + + public: + DynamicArray(Composer* composer, const size_t maximum_size); + + DynamicArray(const DynamicArray& other); + DynamicArray(DynamicArray&& other); + + DynamicArray& operator=(const DynamicArray& other); + DynamicArray& operator=(DynamicArray&& other); + + void resize(const field_pt& new_length, const field_pt default_value = 0); + + field_pt read(const field_pt& index) const; + void write(const field_pt& index, const field_pt& value); + + void push(const field_pt& index); + void pop(); + + void conditional_push(const bool_pt& predicate, const field_pt& index); + void conditional_pop(const bool_pt& predicate); + + field_pt size() const { return _length; } + size_t native_size() const { return static_cast(static_cast(_length.get_value())); } + size_t max_size() const { return _max_size; } + + Composer* get_context() const { return _context; } + + private: + Composer* _context = nullptr; + size_t _max_size; + field_pt _length = 0; + mutable ram_table _inner_table; +}; + +EXTERN_STDLIB_ULTRA_TYPE(DynamicArray); + +} // namespace stdlib +} // namespace plonk \ No newline at end of file diff --git a/cpp/src/barretenberg/stdlib/primitives/memory/dynamic_array.test.cpp b/cpp/src/barretenberg/stdlib/primitives/memory/dynamic_array.test.cpp new file mode 100644 index 0000000000..9db1d609dc --- /dev/null +++ b/cpp/src/barretenberg/stdlib/primitives/memory/dynamic_array.test.cpp @@ -0,0 +1,72 @@ +#include "dynamic_array.hpp" + +#include + +#include "barretenberg/numeric/random/engine.hpp" + +#include "barretenberg/plonk/composer/ultra_composer.hpp" + +#include "../bool/bool.hpp" + +namespace test_stdlib_dynamic_array { +using namespace barretenberg; +using namespace plonk; + +namespace { +auto& engine = numeric::random::get_debug_engine(); +} + +// Defining ultra-specific types for local testing. 
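// A note on the range-check trick used in DynamicArray::resize above (illustrative reasoning only,
// no extra constraints are implied): for each slot i, exactly one of
//     index_delta   = new_length - i - 1   (small only when i <  new_length)
//     reverse_delta = i - new_length       (small only when i >= new_length)
// stays below _max_size; the other underflows and wraps to a huge field element. Range-constraining
// conditional_assign(index_valid, index_delta, reverse_delta) against _max_size therefore forces the
// witness bool `index_valid` to agree with the true predicate (i < new_length).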
+using Composer = plonk::UltraComposer; +using bool_ct = stdlib::bool_t; +using field_ct = stdlib::field_t; +using witness_ct = stdlib::witness_t; +using DynamicArray_ct = stdlib::DynamicArray; + +TEST(DynamicArray, DynamicArrayReadWriteConsistency) +{ + + Composer composer; + const size_t max_size = 10; + + DynamicArray_ct array(&composer, max_size); + + for (size_t i = 0; i < max_size; ++i) { + array.push(field_ct::from_witness(&composer, i)); + EXPECT_EQ(array.read(i).get_value(), i); + } + + EXPECT_EQ(array.native_size(), max_size); + for (size_t i = 0; i < max_size; ++i) { + array.pop(); + } + EXPECT_EQ(array.native_size(), 0); + + array.resize(max_size - 1, 7); + + EXPECT_EQ(array.native_size(), max_size - 1); + for (size_t i = 0; i < max_size - 1; ++i) { + EXPECT_EQ(array.read(i).get_value(), 7); + } + + array.conditional_push(false, 100); + EXPECT_EQ(array.native_size(), max_size - 1); + + array.conditional_push(true, 100); + EXPECT_EQ(array.native_size(), max_size); + EXPECT_EQ(array.read(max_size - 1).get_value(), 100); + + array.conditional_pop(false); + EXPECT_EQ(array.native_size(), max_size); + + array.conditional_pop(true); + EXPECT_EQ(array.native_size(), max_size - 1); + + auto prover = composer.create_prover(); + auto verifier = composer.create_verifier(); + auto proof = prover.construct_proof(); + bool verified = verifier.verify_proof(proof); + EXPECT_EQ(verified, true); +} + +} // namespace test_stdlib_dynamic_array \ No newline at end of file diff --git a/cpp/src/barretenberg/stdlib/primitives/memory/ram_table.cpp b/cpp/src/barretenberg/stdlib/primitives/memory/ram_table.cpp new file mode 100644 index 0000000000..d7a3e1ac16 --- /dev/null +++ b/cpp/src/barretenberg/stdlib/primitives/memory/ram_table.cpp @@ -0,0 +1,257 @@ +#include "ram_table.hpp" + +#include "../composers/composers.hpp" + +namespace plonk { +namespace stdlib { + +/** + * @brief Construct a new ram table::ram table object. It's dynamic memory! + * + * @tparam Composer + * @param table_entries vector of field elements that will initialize the RAM table + */ +template ram_table::ram_table(Composer* composer, const size_t table_size) +{ + static_assert(Composer::type == plonk::ComposerType::PLOOKUP); + _context = composer; + _length = table_size; + _index_initialized.resize(table_size); + for (size_t i = 0; i < _index_initialized.size(); ++i) { + _index_initialized[i] = false; + } + + // do not initialize the table yet. The input entries might all be constant, + // if this is the case we might not have a valid pointer to a Composer + // We get around this, by initializing the table when `read` or `write` operator is called + // with a non-const field element. +} + +/** + * @brief Construct a new ram table::ram table object. It's dynamic memory! + * + * @tparam Composer + * @param table_entries vector of field elements that will initialize the RAM table + */ +template ram_table::ram_table(const std::vector& table_entries) +{ + static_assert(Composer::type == plonk::ComposerType::PLOOKUP); + // get the composer _context + for (const auto& entry : table_entries) { + if (entry.get_context() != nullptr) { + _context = entry.get_context(); + break; + } + } + _raw_entries = table_entries; + _length = _raw_entries.size(); + _index_initialized.resize(_length); + for (size_t i = 0; i < _index_initialized.size(); ++i) { + _index_initialized[i] = false; + } + // do not initialize the table yet. 
The input entries might all be constant, + // if this is the case we might not have a valid pointer to a Composer + // We get around this, by initializing the table when `read` or `write` operator is called + // with a non-const field element. +} + +/** + * @brief internal method, is used to call Composer methods that will generate RAM table. + * + * @details initialize the table once we perform a read. This ensures we always have a pointer to a Composer. + * (if both the table entries and the index are constant, we don't need a composer as we + * can directly extract the desired value fram `_raw_entries`) + * + * @tparam Composer + */ +template void ram_table::initialize_table() const +{ + if (_ram_table_generated_in_composer) { + return; + } + ASSERT(_context != nullptr); + + _ram_id = _context->create_RAM_array(_length); + + if (_raw_entries.size() > 0) { + for (size_t i = 0; i < _length; ++i) { + if (!_index_initialized[i]) { + field_pt entry; + if (_raw_entries[i].is_constant()) { + entry = field_pt::from_witness_index(_context, + _context->put_constant_variable(_raw_entries[i].get_value())); + } else { + entry = _raw_entries[i].normalize(); + } + _context->init_RAM_element(_ram_id, i, entry.get_witness_index()); + _index_initialized[i] = true; + } + } + } + + _ram_table_generated_in_composer = true; +} + +/** + * @brief Construct a new ram table::ram table object + * + * @tparam Composer + * @param other + */ +template +ram_table::ram_table(const ram_table& other) + : _raw_entries(other._raw_entries) + , _index_initialized(other._index_initialized) + , _length(other._length) + , _ram_id(other._ram_id) + , _ram_table_generated_in_composer(other._ram_table_generated_in_composer) + , _all_entries_written_to_with_constant_index(other._all_entries_written_to_with_constant_index) + , _context(other._context) +{} + +/** + * @brief Construct a new ram table::ram table object + * + * @tparam Composer + * @param other + */ +template +ram_table::ram_table(ram_table&& other) + : _raw_entries(other._raw_entries) + , _index_initialized(other._index_initialized) + , _length(other._length) + , _ram_id(other._ram_id) + , _ram_table_generated_in_composer(other._ram_table_generated_in_composer) + , _all_entries_written_to_with_constant_index(other._all_entries_written_to_with_constant_index) + , _context(other._context) +{} + +/** + * @brief Copy assignment operator + * + * @tparam Composer + * @param other + * @return ram_table& + */ +template ram_table& ram_table::operator=(const ram_table& other) +{ + _raw_entries = other._raw_entries; + _length = other._length; + _ram_id = other._ram_id; + _index_initialized = other._index_initialized; + _ram_table_generated_in_composer = other._ram_table_generated_in_composer; + _all_entries_written_to_with_constant_index = other._all_entries_written_to_with_constant_index; + + _context = other._context; + return *this; +} + +/** + * @brief Move assignment operator + * + * @tparam Composer + * @param other + * @return ram_table& + */ +template ram_table& ram_table::operator=(ram_table&& other) +{ + _raw_entries = other._raw_entries; + _length = other._length; + _ram_id = other._ram_id; + _index_initialized = other._index_initialized; + _ram_table_generated_in_composer = other._ram_table_generated_in_composer; + _all_entries_written_to_with_constant_index = other._all_entries_written_to_with_constant_index; + _context = other._context; + return *this; +} + +/** + * @brief Read a field element from the RAM table at an index value + * + * @tparam Composer + * @param 
index + * @return field_t + */ +template field_t ram_table::read(const field_pt& index) const +{ + if (_context == nullptr) { + _context = index.get_context(); + } + + if (uint256_t(index.get_value()) >= _length) { + // TODO: what's best practise here? We are assuming that this action will generate failing constraints, + // and we set failure message here so that it better describes the point of failure. + // However, we are not *ensuring* that failing constraints are generated at the point that `failure()` is + // called. Is this ok? + _context->failure("ram_table: RAM array access out of bounds"); + } + + initialize_table(); + + if (!check_indices_initialized()) { + _context->failure("ram_table: must write to every RAM entry at least once (with constant index value) before " + "table can be read"); + } + + field_pt index_wire = index; + if (index.is_constant()) { + index_wire = field_pt::from_witness_index(_context, _context->put_constant_variable(index.get_value())); + } + + uint32_t output_idx = _context->read_RAM_array(_ram_id, index_wire.normalize().get_witness_index()); + return field_pt::from_witness_index(_context, output_idx); +} + +/** + * @brief Write a field element from the RAM table at an index value + * + * @tparam Composer + * @param index + * @param value + */ +template void ram_table::write(const field_pt& index, const field_pt& value) +{ + if (_context == nullptr) { + _context = index.get_context(); + } + + if (uint256_t(index.get_value()) >= _length) { + // TODO: what's best practise here? We are assuming that this action will generate failing constraints, + // and we set failure message here so that it better describes the point of failure. + // However, we are not *ensuring* that failing constraints are generated at the point that `failure()` is + // called. Is this ok? 
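        // For intuition, the initialization contract that read() and write() enforce (a rough
        // usage sketch; `t`, `ctx`, `runtime_index` and the literal size are hypothetical):
        //     ram_table<Composer> t(ctx, 4);
        //     for (size_t k = 0; k < 4; ++k) { t.write(k, 0); }  // constant-index writes initialize every slot
        //     t.write(runtime_index, value);                     // runtime-index access is only now permitted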
+        _context->failure("ram_table: RAM array access out of bounds");
+    }
+
+    initialize_table();
+    field_pt index_wire = index;
+    auto native_index = index.get_value();
+    if (index.is_constant()) {
+        // need to write every array element at a constant index before doing reads/writes at prover-defined indices
+        index_wire = field_pt::from_witness_index(_context, _context->put_constant_variable(native_index));
+    } else {
+        if (!check_indices_initialized()) {
+            _context->failure("ram_table: must write to every RAM entry at least once (with constant index value) "
+                              "before table can be written to at an unknown index");
+        }
+    }
+
+    field_pt value_wire = value;
+    auto native_value = value.get_value();
+    if (value.is_constant()) {
+        value_wire = field_pt::from_witness_index(_context, _context->put_constant_variable(native_value));
+    }
+
+    const size_t cast_index = static_cast<size_t>(static_cast<uint256_t>(native_index));
+    if (index.is_constant() && _index_initialized[cast_index] == false) {
+        _context->init_RAM_element(_ram_id, cast_index, value_wire.get_witness_index());
+
+        _index_initialized[cast_index] = true;
+    } else {
+        _context->write_RAM_array(_ram_id, index_wire.normalize().get_witness_index(), value_wire.get_witness_index());
+    }
+}
+
+INSTANTIATE_STDLIB_ULTRA_TYPE(ram_table);
+} // namespace stdlib
+} // namespace plonk
\ No newline at end of file
diff --git a/cpp/src/barretenberg/stdlib/primitives/memory/ram_table.hpp b/cpp/src/barretenberg/stdlib/primitives/memory/ram_table.hpp
new file mode 100644
index 0000000000..dd94b48dde
--- /dev/null
+++ b/cpp/src/barretenberg/stdlib/primitives/memory/ram_table.hpp
@@ -0,0 +1,63 @@
+#pragma once
+#include "../composers/composers_fwd.hpp"
+#include "../field/field.hpp"
+
+namespace plonk {
+namespace stdlib {
+
+// A runtime-defined read-write memory table. Each entry must be initialized (written to at a
+// constant index, or supplied via the vector constructor) before the table can be read, or
+// written to, at a runtime-defined index.
+// N.B. Only works with the UltraComposer at the moment!
+template <typename Composer> class ram_table {
+  private:
+    typedef field_t<Composer> field_pt;
+
+  public:
+    ram_table() {}
+    ram_table(Composer* composer, const size_t table_size);
+    ram_table(const std::vector<field_pt>& table_entries);
+    ram_table(const ram_table& other);
+    ram_table(ram_table&& other);
+
+    void initialize_table() const;
+
+    ram_table& operator=(const ram_table& other);
+    ram_table& operator=(ram_table&& other);
+
+    field_pt read(const field_pt& index) const;
+
+    void write(const field_pt& index, const field_pt& value);
+
+    size_t size() const { return _length; }
+
+    Composer* get_context() const { return _context; }
+
+    bool check_indices_initialized() const
+    {
+        if (_all_entries_written_to_with_constant_index) {
+            return true;
+        }
+        if (_length == 0) {
+            return false;
+        }
+        bool init = true;
+        for (auto i : _index_initialized) {
+            init = init && i;
+        }
+        _all_entries_written_to_with_constant_index = init;
+        return _all_entries_written_to_with_constant_index;
+    }
+
+  private:
+    std::vector<field_pt> _raw_entries;
+    mutable std::vector<bool> _index_initialized;
+    size_t _length = 0;
+    mutable size_t _ram_id = 0; // Composer identifier for this RAM table
+    mutable bool _ram_table_generated_in_composer = false;
+    mutable bool _all_entries_written_to_with_constant_index = false;
+    mutable Composer* _context = nullptr;
+};
+
+EXTERN_STDLIB_ULTRA_TYPE(ram_table);
+
+} // namespace stdlib
+} // namespace plonk
\ No newline at end of file
diff --git a/cpp/src/barretenberg/stdlib/primitives/memory/ram_table.test.cpp b/cpp/src/barretenberg/stdlib/primitives/memory/ram_table.test.cpp
new file mode 100644
index 0000000000..9731be6498
--- /dev/null
+++ b/cpp/src/barretenberg/stdlib/primitives/memory/ram_table.test.cpp
@@ -0,0 +1,111 @@
+#include "ram_table.hpp"
+
+#include <gtest/gtest.h>
+
+#include "barretenberg/numeric/random/engine.hpp"
+
+#include "barretenberg/plonk/composer/ultra_composer.hpp"
+
+namespace test_stdlib_ram_table {
+using namespace plonk;
+
+// Defining ultra-specific types for local testing.
+using Composer = plonk::UltraComposer;
+using field_ct = stdlib::field_t<Composer>;
+using witness_ct = stdlib::witness_t<Composer>;
+using ram_table_ct = stdlib::ram_table<Composer>;
+
+namespace {
+auto& engine = numeric::random::get_debug_engine();
+}
+
+TEST(ram_table, ram_table_init_read_consistency)
+{
+    Composer composer;
+
+    std::vector<field_ct> table_values;
+    const size_t table_size = 10;
+    for (size_t i = 0; i < table_size; ++i) {
+        table_values.emplace_back(witness_ct(&composer, fr::random_element()));
+    }
+
+    ram_table_ct table(table_values);
+
+    field_ct result(0);
+    fr expected(0);
+
+    for (size_t i = 0; i < 10; ++i) {
+        field_ct index(witness_ct(&composer, (uint64_t)i));
+
+        if (i % 2 == 0) {
+            const auto to_add = table.read(index);
+            result += to_add; // variable lookup
+        } else {
+            const auto to_add = table.read(i); // constant lookup
+            result += to_add;
+        }
+        expected += table_values[i].get_value();
+    }
+
+    EXPECT_EQ(result.get_value(), expected);
+
+    auto prover = composer.create_prover();
+    auto verifier = composer.create_verifier();
+    auto proof = prover.construct_proof();
+    bool verified = verifier.verify_proof(proof);
+    EXPECT_EQ(verified, true);
+}
+
+TEST(ram_table, ram_table_read_write_consistency)
+{
+    Composer composer;
+    const size_t table_size = 10;
+
+    std::vector<fr> table_values(table_size);
+
+    ram_table_ct table(&composer, table_size);
+
+    for (size_t i = 0; i < table_size; ++i) {
+        table.write(i, 0);
+    }
+    field_ct result(0);
+    fr expected(0);
+
+    const auto update = [&]() {
+        for (size_t i = 0; i < table_size / 2; ++i) {
+            table_values[2 * i] = fr::random_element();
+            table_values[2 * i + 1] = fr::random_element();
+
+            // init with both constant and variable values
+            table.write(2 * i, table_values[2 * i]);
+            table.write(2 * i + 1, witness_ct(&composer, table_values[2 * i + 1]));
+        }
+    };
+
+    const auto read = [&]() {
+        for (size_t i = 0; i < table_size / 2; ++i) {
+            const size_t index = table_size - 2 - (i * 2); // access in something other than basic incremental order
+
+            result += table.read(witness_ct(&composer, index));
+            result += table.read(index + 1);
+
+            expected += table_values[index];
+            expected += table_values[index + 1];
+        }
+    };
+
+    update();
+    read();
+    update();
+    read();
+    update();
+
+    EXPECT_EQ(result.get_value(), expected);
+
+    auto prover = composer.create_prover();
+    auto verifier = composer.create_verifier();
+    auto proof = prover.construct_proof();
+    bool verified = verifier.verify_proof(proof);
+    EXPECT_EQ(verified, true);
+}
+} // namespace test_stdlib_ram_table
\ No newline at end of file
diff --git a/cpp/src/barretenberg/stdlib/recursion/verifier/program_settings.hpp b/cpp/src/barretenberg/stdlib/recursion/verifier/program_settings.hpp
index c7f50f9ce7..4fc602071a 100644
--- a/cpp/src/barretenberg/stdlib/recursion/verifier/program_settings.hpp
+++ b/cpp/src/barretenberg/stdlib/recursion/verifier/program_settings.hpp
@@ -19,7 +19,6 @@ template class recursive_ultra_verifier_settings : public plonk
 
     typedef plonk::ultra_settings base_settings;
 
-    typedef plonk::VerifierUltraFixedBaseWidget UltraFixedBaseWidget;
     typedef plonk::VerifierPlookupArithmeticWidget PlookupArithmeticWidget;
     typedef plonk::VerifierTurboLogicWidget TurboLogicWidget;
     typedef plonk::VerifierGenPermSortWidget GenPermSortWidget;
@@ -45,9 +44,6 @@ template class recursive_ultra_verifier_settings : public plonk
         updated_alpha =
             PlookupArithmeticWidget::append_scalar_multiplication_inputs(key, updated_alpha, transcript, scalars);
 
-        updated_alpha =
-            UltraFixedBaseWidget::append_scalar_multiplication_inputs(key, updated_alpha, transcript, scalars);
-
         updated_alpha = GenPermSortWidget::append_scalar_multiplication_inputs(key, updated_alpha, transcript, scalars);
 
         updated_alpha = EllipticWidget::append_scalar_multiplication_inputs(key, updated_alpha, transcript, scalars);
@@ -72,9 +68,6 @@ template class recursive_ultra_verifier_settings : public plonk
         updated_alpha_base = PlookupArithmeticWidget::compute_quotient_evaluation_contribution(
             key, updated_alpha_base, transcript, quotient_numerator_eval);
 
-        updated_alpha_base = UltraFixedBaseWidget::compute_quotient_evaluation_contribution(
-            key, updated_alpha_base, transcript, quotient_numerator_eval);
-
         updated_alpha_base = GenPermSortWidget::compute_quotient_evaluation_contribution(
             key, updated_alpha_base, transcript, quotient_numerator_eval);
 
@@ -102,7 +95,6 @@ class recursive_ultra_to_standard_verifier_settings : public recursive_ultra_ver
 
     typedef plonk::ultra_to_standard_settings base_settings;
 
-    typedef plonk::VerifierUltraFixedBaseWidget UltraFixedBaseWidget;
     typedef plonk::VerifierPlookupArithmeticWidget PlookupArithmeticWidget;
    typedef plonk::VerifierTurboLogicWidget TurboLogicWidget;
    typedef plonk::VerifierGenPermSortWidget GenPermSortWidget;
diff --git a/cpp/src/barretenberg/stdlib/types/types.hpp b/cpp/src/barretenberg/stdlib/types/types.hpp
index 5a1f40ef7f..4fdc71e1c0 100644
--- a/cpp/src/barretenberg/stdlib/types/types.hpp
+++ b/cpp/src/barretenberg/stdlib/types/types.hpp
@@ -14,12 +14,16 @@
 #include "barretenberg/stdlib/primitives/bigfield/bigfield.hpp"
 #include "barretenberg/stdlib/primitives/biggroup/biggroup.hpp"
 #include "barretenberg/stdlib/hash/pedersen/pedersen.hpp"
+#include "barretenberg/stdlib/hash/pedersen/pedersen_plookup.hpp"
 #include "barretenberg/stdlib/merkle_tree/hash_path.hpp"
 #include "barretenberg/stdlib/encryption/schnorr/schnorr.hpp"
 #include "barretenberg/stdlib/primitives/curves/bn254.hpp"
 #include "barretenberg/stdlib/primitives/curves/secp256k1.hpp"
 #include "barretenberg/stdlib/primitives/memory/rom_table.hpp"
 #include "barretenberg/stdlib/recursion/verifier/program_settings.hpp"
+#include "barretenberg/stdlib/primitives/memory/ram_table.hpp"
+#include "barretenberg/stdlib/primitives/memory/rom_table.hpp"
+#include "barretenberg/stdlib/primitives/memory/dynamic_array.hpp"
 
 namespace plonk::stdlib::types {
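
For reviewers, a brief usage sketch of the write-before-read discipline the new ram_table enforces. This is not part of the patch: it only reuses the constructors and read/write calls exercised in ram_table.test.cpp above, the composer/field/witness aliases mirror that test file, and the function name ram_table_usage_sketch is invented purely for illustration.

    // Hypothetical usage sketch only -- mirrors the aliases used in ram_table.test.cpp.
    #include "barretenberg/plonk/composer/ultra_composer.hpp"
    #include "barretenberg/stdlib/primitives/memory/ram_table.hpp"

    using Composer = plonk::UltraComposer;
    using field_ct = plonk::stdlib::field_t<Composer>;
    using witness_ct = plonk::stdlib::witness_t<Composer>;
    using ram_table_ct = plonk::stdlib::ram_table<Composer>;

    void ram_table_usage_sketch(Composer& composer)
    {
        const size_t table_size = 4;
        ram_table_ct table(&composer, table_size);

        // Step 1: write every slot at a constant index. These writes route through
        // init_RAM_element and mark each slot as initialized.
        for (size_t i = 0; i < table_size; ++i) {
            table.write(i, 0);
        }

        // Step 2: once all slots are initialized, reads and writes may use
        // prover-defined (witness) indices via read_RAM_array / write_RAM_array.
        field_ct runtime_index = witness_ct(&composer, uint64_t(2));
        table.write(runtime_index, witness_ct(&composer, barretenberg::fr(42)));
        field_ct value = table.read(runtime_index);
        static_cast<void>(value); // silence unused-variable warnings in this sketch
    }

The constant-index initialization pass matches the checks in ram_table::write above: every cell is first seeded through init_RAM_element, and only after check_indices_initialized() passes do accesses at witness indices reach the composer's RAM argument.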