From 2acca41581bca2dfa5186a516dfb5d2646abef89 Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Tue, 15 Aug 2023 18:21:09 +0000 Subject: [PATCH 01/50] added cycle_group class --- .../arithmetization/gate_data.hpp | 7 + .../circuit_builder/circuit_builder_base.hpp | 7 + .../standard_circuit_builder.cpp | 231 ++++++++++++ .../standard_circuit_builder.hpp | 2 + .../circuit_builder/turbo_circuit_builder.cpp | 230 ++++++++++++ .../circuit_builder/turbo_circuit_builder.hpp | 13 +- .../circuit_builder/ultra_circuit_builder.cpp | 99 ++++++ .../circuit_builder/ultra_circuit_builder.hpp | 3 +- .../stdlib/primitives/group/cycle_group.cpp | 291 +++++++++++++++ .../stdlib/primitives/group/cycle_group.hpp | 130 +++++++ .../primitives/group/cycle_group.test.cpp | 335 ++++++++++++++++++ 11 files changed, 1342 insertions(+), 6 deletions(-) create mode 100644 circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp create mode 100644 circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp create mode 100644 circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/arithmetization/gate_data.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/arithmetization/gate_data.hpp index dbdd25061d5..6b10d1adfe9 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/arithmetization/gate_data.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/arithmetization/gate_data.hpp @@ -127,4 +127,11 @@ template struct ecc_add_gate_ { FF endomorphism_coefficient; FF sign_coefficient; }; + +template struct ecc_dbl_gate_ { + uint32_t x1; + uint32_t y1; + uint32_t x3; + uint32_t y3; +}; } // namespace proof_system diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/circuit_builder_base.hpp 
b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/circuit_builder_base.hpp index 4e5e29510d6..a9d3f83d614 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/circuit_builder_base.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/circuit_builder_base.hpp @@ -1,6 +1,7 @@ #pragma once #include "barretenberg/common/slab_allocator.hpp" #include "barretenberg/ecc/curves/bn254/fr.hpp" +#include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp" #include "barretenberg/proof_system/arithmetization/arithmetization.hpp" #include "barretenberg/proof_system/arithmetization/gate_data.hpp" #include @@ -11,6 +12,9 @@ static constexpr uint32_t DUMMY_TAG = 0; template class CircuitBuilderBase { public: using FF = typename Arithmetization::FF; + using EmbeddedCurve = + std::conditional_t, barretenberg::g1, grumpkin::g1>; + static constexpr size_t NUM_WIRES = Arithmetization::NUM_WIRES; // Keeping NUM_WIRES, at least temporarily, for backward compatibility static constexpr size_t program_width = Arithmetization::NUM_WIRES; @@ -86,6 +90,9 @@ template class CircuitBuilderBase { virtual void create_mul_gate(const mul_triple_& in) = 0; virtual void create_bool_gate(const uint32_t a) = 0; virtual void create_poly_gate(const poly_triple_& in) = 0; + virtual void create_ecc_add_gate(const ecc_add_gate_& in) = 0; + virtual void create_ecc_dbl_gate(const ecc_dbl_gate_& in) = 0; + virtual size_t get_num_constant_gates() const = 0; /** diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/standard_circuit_builder.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/standard_circuit_builder.cpp index 100e4045706..c7752996e99 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/standard_circuit_builder.cpp +++ 
b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/standard_circuit_builder.cpp @@ -240,6 +240,237 @@ template void StandardCircuitBuilder_::create_poly_gate(const ++this->num_gates; } +/** + * @brief Create a gate where we validate an elliptic curve point addition + * (x1, y1) + (x2, y2) = (x3, y3) + * N.B. uses incomplete addition formula. Use with caution + * @tparam FF + * @param in + */ +template void StandardCircuitBuilder_::create_ecc_add_gate(const ecc_add_gate_& in) +{ + const auto sign_coefficient = in.sign_coefficient; + const auto x1 = this->get_variable(in.x1); + const auto x2 = this->get_variable(in.x2); + const auto x3 = this->get_variable(in.x3); + const auto y1 = this->get_variable(in.y1); + const auto y2 = sign_coefficient * this->get_variable(in.y2); + + bool collision = x2 == x1; + if (collision) { + this->failure("create_ecc_add_gate incomplete formula collision"); + } + const auto lambda_v = collision ? 0 : (y2 - y1) / (x2 - x1); + const auto lambda = this->add_variable(lambda_v); + + // (x2 - x1) * lambda - y2 + y1 = 0 + const auto x2_minus_x1_v = x2 - x1; + const auto x2_minus_x1 = this->add_variable(x2_minus_x1_v); + create_poly_gate({ + .a = in.x2, + .b = in.x1, + .c = x2_minus_x1, + .q_m = 0, + .q_l = 1, + .q_r = -1, + .q_o = -1, + .q_c = 0, + }); + const auto t2_v = lambda_v * x2_minus_x1_v; + const auto t2 = this->add_variable(t2_v); + create_poly_gate({ + .a = lambda, + .b = x2_minus_x1, + .c = t2, + .q_m = 1, + .q_l = 0, + .q_r = 0, + .q_o = -1, + .q_c = 0, + }); + create_poly_gate({ + .a = t2, + .b = in.y2, + .c = in.y1, + .q_m = 0, + .q_l = 1, + .q_r = -sign_coefficient, + .q_o = 1, + .q_c = 0, + }); + + // lambda * lambda - x2 - x1 = x3 + const auto x2_plus_x1_v = x2 + x1; + const auto x2_plus_x1 = this->add_variable(x2_plus_x1_v); + create_poly_gate({ + .a = in.x2, + .b = in.x1, + .c = x2_plus_x1, + .q_m = 0, + .q_l = 1, + .q_r = 1, + .q_o = -1, + .q_c = 0, + }); + const auto lambda_sqr_v = lambda_v * 
lambda_v; + const auto lambda_sqr = this->add_variable(lambda_sqr_v); + create_poly_gate({ + .a = lambda, + .b = lambda, + .c = lambda_sqr, + .q_m = 1, + .q_l = 0, + .q_r = 0, + .q_o = -1, + .q_c = 0, + }); + create_poly_gate({ + .a = lambda_sqr, + .b = x2_plus_x1, + .c = in.x3, + .q_m = 0, + .q_l = 1, + .q_r = -1, + .q_o = -1, + .q_c = 0, + }); + + // lambda * (x1 - x3) - y1 - y3 = 0 + const auto x1_sub_x3_v = x1 - x3; + const auto x1_sub_x3 = this->add_variable(x1_sub_x3_v); + create_poly_gate({ + .a = in.x1, + .b = in.x3, + .c = x1_sub_x3, + .q_m = 0, + .q_l = 1, + .q_r = -1, + .q_o = -1, + .q_c = 0, + }); + const auto lambda_mul_x1_sub_x3_v = lambda_v * x1_sub_x3_v; + const auto lambda_mul_x1_sub_x3 = this->add_variable(lambda_mul_x1_sub_x3_v); + create_poly_gate({ + .a = lambda, + .b = x1_sub_x3, + .c = lambda_mul_x1_sub_x3, + .q_m = 1, + .q_l = 0, + .q_r = 0, + .q_o = -1, + .q_c = 0, + }); + create_poly_gate({ + .a = lambda_mul_x1_sub_x3, + .b = in.y1, + .c = in.y3, + .q_m = 0, + .q_l = 1, + .q_r = -1, + .q_o = -1, + .q_c = 0, + }); +} + +/** + * @brief Create a gate where we validate an elliptic curve point doubling + * (x1, y1) * 2 = (x3, y3) + * @tparam FF + * @param in + */ +template void StandardCircuitBuilder_::create_ecc_dbl_gate(const ecc_dbl_gate_& in) +{ + const auto x1 = this->get_variable(in.x1); + const auto x3 = this->get_variable(in.x3); + const auto y1 = this->get_variable(in.y1); + + // lambda = 3x^2 / 2y + const auto three_x1_sqr_v = x1 * x1 * 3; + const auto three_x1_sqr = this->add_variable(three_x1_sqr_v); + create_poly_gate({ + .a = in.x1, + .b = in.x1, + .c = three_x1_sqr, + .q_m = 3, + .q_l = 0, + .q_r = 0, + .q_o = -1, + .q_c = 0, + }); + const auto lambda_v = three_x1_sqr_v / (y1 + y1); + const auto lambda = this->add_variable(lambda_v); + create_poly_gate({ + .a = lambda, + .b = in.y1, + .c = three_x1_sqr, + .q_m = 2, + .q_l = 0, + .q_r = 0, + .q_o = -1, + .q_c = 0, + }); + + // lambda * lambda - x2 - x1 = 0 + const auto 
lambda_sqr_v = lambda_v * lambda_v; + const auto lambda_sqr = this->add_variable(lambda_sqr_v); + create_poly_gate({ + .a = lambda, + .b = lambda, + .c = lambda_sqr, + .q_m = 1, + .q_l = 0, + .q_r = 0, + .q_o = -1, + .q_c = 0, + }); + create_poly_gate({ + .a = lambda_sqr, + .b = in.x1, + .c = this->zero_idx, + .q_m = 0, + .q_l = 1, + .q_r = -2, + .q_o = 0, + .q_c = 0, + }); + + // lambda * (x1 - x3) - y1 = 0 + const auto x1_sub_x3_v = x1 - x3; + const auto x1_sub_x3 = this->add_variable(x1_sub_x3_v); + create_poly_gate({ + .a = in.x1, + .b = in.x3, + .c = x1_sub_x3, + .q_m = 0, + .q_l = 1, + .q_r = -1, + .q_o = -1, + .q_c = 0, + }); + const auto lambda_mul_x1_sub_x3_v = lambda_v * x1_sub_x3_v; + const auto lambda_mul_x1_sub_x3 = this->add_variable(lambda_mul_x1_sub_x3_v); + create_poly_gate({ + .a = lambda, + .b = x1_sub_x3, + .c = lambda_mul_x1_sub_x3, + .q_m = 1, + .q_l = 0, + .q_r = 0, + .q_o = -1, + .q_c = 0, + }); + create_poly_gate({ + .a = lambda_mul_x1_sub_x3, + .b = in.y1, + .c = in.y3, + .q_m = 0, + .q_l = 1, + .q_r = -1, + .q_o = -1, + .q_c = 0, + }); +} + template std::vector StandardCircuitBuilder_::decompose_into_base4_accumulators(const uint32_t witness_index, const size_t num_bits, diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/standard_circuit_builder.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/standard_circuit_builder.hpp index 2c023001609..dbd2838092e 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/standard_circuit_builder.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/standard_circuit_builder.hpp @@ -83,6 +83,8 @@ template class StandardCircuitBuilder_ : public CircuitBuilderBase void create_fixed_group_add_gate_with_init(const fixed_group_add_quad_& in, const fixed_group_init_quad_& init); void create_fixed_group_add_gate_final(const add_quad_& in); + void create_ecc_add_gate(const 
ecc_add_gate_& in) override; + void create_ecc_dbl_gate(const ecc_dbl_gate_& in) override; fixed_group_add_quad_ previous_add_quad; diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/turbo_circuit_builder.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/turbo_circuit_builder.cpp index ba7a5d213ce..50f1c622794 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/turbo_circuit_builder.cpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/turbo_circuit_builder.cpp @@ -372,6 +372,236 @@ template void TurboCircuitBuilder_::fix_witness(const uint32_t ++this->num_gates; } +/** + * @brief Create a gate where we validate an elliptic curve point addition + * (x1, y1) + (x2, y2) = (x3, y3) + * N.B. uses incomplete addition formula. Use with caution + * @tparam FF + * @param in + */ +template void TurboCircuitBuilder_::create_ecc_add_gate(const ecc_add_gate_& in) +{ + const auto sign_coefficient = in.sign_coefficient; + const auto x1 = this->get_variable(in.x1); + const auto x2 = this->get_variable(in.x2); + const auto x3 = this->get_variable(in.x3); + const auto y1 = this->get_variable(in.y1); + const auto y2 = sign_coefficient * this->get_variable(in.y2); + + bool collision = x2 == x1; + if (collision) { + this->failure("create_ecc_add_gate incomplete formula collision"); + } + const auto lambda_v = collision ? 
0 : (y2 - y1) / (x2 - x1); + const auto lambda = this->add_variable(lambda_v); + + // (x2 - x1) * lambda - y2 + y1 = 0 + const auto x2_minus_x1_v = x2 - x1; + const auto x2_minus_x1 = this->add_variable(x2_minus_x1_v); + create_poly_gate({ + .a = in.x2, + .b = in.x1, + .c = x2_minus_x1, + .q_m = 0, + .q_l = 1, + .q_r = -1, + .q_o = -1, + .q_c = 0, + }); + const auto t2_v = lambda_v * x2_minus_x1_v; + const auto t2 = this->add_variable(t2_v); + create_poly_gate({ + .a = lambda, + .b = x2_minus_x1, + .c = t2, + .q_m = 1, + .q_l = 0, + .q_r = 0, + .q_o = -1, + .q_c = 0, + }); + create_poly_gate({ + .a = t2, + .b = in.y2, + .c = in.y1, + .q_m = 0, + .q_l = 1, + .q_r = -sign_coefficient, + .q_o = 1, + .q_c = 0, + }); + + // lambda * lambda - x2 - x1 = x3 + const auto x2_plus_x1_v = x2 + x1; + const auto x2_plus_x1 = this->add_variable(x2_plus_x1_v); + create_poly_gate({ + .a = in.x2, + .b = in.x1, + .c = x2_plus_x1, + .q_m = 0, + .q_l = 1, + .q_r = 1, + .q_o = -1, + .q_c = 0, + }); + const auto lambda_sqr_v = lambda_v * lambda_v; + const auto lambda_sqr = this->add_variable(lambda_sqr_v); + create_poly_gate({ + .a = lambda, + .b = lambda, + .c = lambda_sqr, + .q_m = 1, + .q_l = 0, + .q_r = 0, + .q_o = -1, + .q_c = 0, + }); + create_poly_gate({ + .a = lambda_sqr, + .b = x2_plus_x1, + .c = in.x3, + .q_m = 0, + .q_l = 1, + .q_r = -1, + .q_o = -1, + .q_c = 0, + }); + + // lambda * (x1 - x3) - y1 - y3 = 0 + const auto x1_sub_x3_v = x1 - x3; + const auto x1_sub_x3 = this->add_variable(x1_sub_x3_v); + create_poly_gate({ + .a = in.x1, + .b = in.x3, + .c = x1_sub_x3, + .q_m = 0, + .q_l = 1, + .q_r = -1, + .q_o = -1, + .q_c = 0, + }); + const auto lambda_mul_x1_sub_x3_v = lambda_v * x1_sub_x3_v; + const auto lambda_mul_x1_sub_x3 = this->add_variable(lambda_mul_x1_sub_x3_v); + create_poly_gate({ + .a = lambda, + .b = x1_sub_x3, + .c = lambda_mul_x1_sub_x3, + .q_m = 1, + .q_l = 0, + .q_r = 0, + .q_o = -1, + .q_c = 0, + }); + create_poly_gate({ + .a = lambda_mul_x1_sub_x3, + .b = 
in.y1, + .c = in.y3, + .q_m = 0, + .q_l = 1, + .q_r = -1, + .q_o = -1, + .q_c = 0, + }); +} + +/** + * @brief Create a gate where we validate an elliptic curve point doubling + * (x1, y1) * 2 = (x3, y3) + * @tparam FF + * @param in + */ +template void TurboCircuitBuilder_::create_ecc_dbl_gate(const ecc_dbl_gate_& in) +{ + const auto x1 = this->get_variable(in.x1); + const auto x3 = this->get_variable(in.x3); + const auto y1 = this->get_variable(in.y1); + + // lambda = 3x^2 / 2y + const auto three_x1_sqr_v = x1 * x1 * 3; + const auto three_x1_sqr = this->add_variable(three_x1_sqr_v); + create_poly_gate({ + .a = in.x1, + .b = in.x1, + .c = three_x1_sqr, + .q_m = 3, + .q_l = 0, + .q_r = 0, + .q_o = -1, + .q_c = 0, + }); + const auto lambda_v = three_x1_sqr_v / (y1 + y1); + const auto lambda = this->add_variable(lambda_v); + create_poly_gate({ + .a = lambda, + .b = in.y1, + .c = three_x1_sqr, + .q_m = 2, + .q_l = 0, + .q_r = 0, + .q_o = -1, + .q_c = 0, + }); + + // lambda * lambda - x2 - x1 = 0 + const auto lambda_sqr_v = lambda_v * lambda_v; + const auto lambda_sqr = this->add_variable(lambda_sqr_v); + create_poly_gate({ + .a = lambda, + .b = lambda, + .c = lambda_sqr, + .q_m = 1, + .q_l = 0, + .q_r = 0, + .q_o = -1, + .q_c = 0, + }); + create_poly_gate({ + .a = lambda_sqr, + .b = in.x1, + .c = this->zero_idx, + .q_m = 0, + .q_l = 1, + .q_r = -2, + .q_o = 0, + .q_c = 0, + }); + + // lambda * (x1 - x3) - y1 = 0 + const auto x1_sub_x3_v = x1 - x3; + const auto x1_sub_x3 = this->add_variable(x1_sub_x3_v); + create_poly_gate({ + .a = in.x1, + .b = in.x3, + .c = x1_sub_x3, + .q_m = 0, + .q_l = 1, + .q_r = -1, + .q_o = -1, + .q_c = 0, + }); + const auto lambda_mul_x1_sub_x3_v = lambda_v * x1_sub_x3_v; + const auto lambda_mul_x1_sub_x3 = this->add_variable(lambda_mul_x1_sub_x3_v); + create_poly_gate({ + .a = lambda, + .b = x1_sub_x3, + .c = lambda_mul_x1_sub_x3, + .q_m = 1, + .q_l = 0, + .q_r = 0, + .q_o = -1, + .q_c = 0, + }); + create_poly_gate({ + .a = 
lambda_mul_x1_sub_x3, + .b = in.y1, + .c = in.y3, + .q_m = 0, + .q_l = 1, + .q_r = -1, + .q_o = -1, + .q_c = 0, + }); +} /** * Create a constraint placing the witness in 2^{num_bits} range. * diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/turbo_circuit_builder.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/turbo_circuit_builder.hpp index 7ab81feef2d..ca2247b3fb6 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/turbo_circuit_builder.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/turbo_circuit_builder.hpp @@ -54,20 +54,23 @@ template class TurboCircuitBuilder_ : public CircuitBuilderBase& in); + void create_add_gate(const add_triple_& in) override; void create_big_add_gate(const add_quad_& in); void create_big_add_gate_with_bit_extraction(const add_quad_& in); void create_big_mul_gate(const mul_quad_& in); void create_balanced_add_gate(const add_quad_& in); - void create_mul_gate(const mul_triple_& in); - void create_bool_gate(const uint32_t a); - void create_poly_gate(const poly_triple_& in); + void create_mul_gate(const mul_triple_& in) override; + void create_bool_gate(const uint32_t a) override; + void create_poly_gate(const poly_triple_& in) override; void create_fixed_group_add_gate(const fixed_group_add_quad_& in); void create_fixed_group_add_gate_with_init(const fixed_group_add_quad_& in, const fixed_group_init_quad_& init); void create_fixed_group_add_gate_final(const add_quad_& in); + void create_ecc_add_gate(const ecc_add_gate_& in) override; + void create_ecc_dbl_gate(const ecc_dbl_gate_& in) override; + void fix_witness(const uint32_t witness_index, const FF& witness_value); FF arithmetic_gate_evaluation(const size_t index, const FF alpha_base); @@ -100,7 +103,7 @@ template class TurboCircuitBuilder_ : public CircuitBuilderBase void UltraCircuitBuilder_::create_ecc_add_gate(const ++this->num_gates; } 
+/** + * @brief Create a gate where we validate an elliptic curve point doubling + * (x1, y1) * 2 = (x3, y3) + * @tparam FF + * @param in + */ +template void UltraCircuitBuilder_::create_ecc_dbl_gate(const ecc_dbl_gate_& in) +{ + const auto x1 = this->get_variable(in.x1); + const auto x3 = this->get_variable(in.x3); + const auto y1 = this->get_variable(in.y1); + + // lambda = 3x^2 / 2y + const auto three_x1_sqr_v = x1 * x1 * 3; + const auto three_x1_sqr = this->add_variable(three_x1_sqr_v); + create_poly_gate({ + .a = in.x1, + .b = in.x1, + .c = three_x1_sqr, + .q_m = 3, + .q_l = 0, + .q_r = 0, + .q_o = -1, + .q_c = 0, + }); + const auto lambda_v = three_x1_sqr_v / (y1 + y1); + const auto lambda = this->add_variable(lambda_v); + create_poly_gate({ + .a = lambda, + .b = in.y1, + .c = three_x1_sqr, + .q_m = 2, + .q_l = 0, + .q_r = 0, + .q_o = -1, + .q_c = 0, + }); + + // lambda * lambda - x2 - x1 = 0 + const auto lambda_sqr_v = lambda_v * lambda_v; + const auto lambda_sqr = this->add_variable(lambda_sqr_v); + create_poly_gate({ + .a = lambda, + .b = lambda, + .c = lambda_sqr, + .q_m = 1, + .q_l = 0, + .q_r = 0, + .q_o = -1, + .q_c = 0, + }); + create_poly_gate({ + .a = lambda_sqr, + .b = in.x1, + .c = this->zero_idx, + .q_m = 0, + .q_l = 1, + .q_r = -2, + .q_o = 0, + .q_c = 0, + }); + + // lambda * (x1 - x3) - y1 = 0 + const auto x1_sub_x3_v = x1 - x3; + const auto x1_sub_x3 = this->add_variable(x1_sub_x3_v); + create_poly_gate({ + .a = in.x1, + .b = in.x3, + .c = x1_sub_x3, + .q_m = 0, + .q_l = 1, + .q_r = -1, + .q_o = -1, + .q_c = 0, + }); + const auto lambda_mul_x1_sub_x3_v = lambda_v * x1_sub_x3_v; + const auto lambda_mul_x1_sub_x3 = this->add_variable(lambda_mul_x1_sub_x3_v); + create_poly_gate({ + .a = lambda, + .b = x1_sub_x3, + .c = lambda_mul_x1_sub_x3, + .q_m = 1, + .q_l = 0, + .q_r = 0, + .q_o = -1, + .q_c = 0, + }); + create_poly_gate({ + .a = lambda_mul_x1_sub_x3, + .b = in.y1, + .c = in.y3, + .q_m = 0, + .q_l = 1, + .q_r = -1, + .q_o = -1, + .q_c = 0, + 
}); +} + /** * @brief Add a gate equating a particular witness to a constant, fixing it the value * diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.hpp index 2ff9f0eb70a..c81ba1f2734 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.hpp @@ -654,7 +654,8 @@ template class UltraCircuitBuilder_ : public CircuitBuilderBase& in) override; void create_bool_gate(const uint32_t a) override; void create_poly_gate(const poly_triple_& in) override; - void create_ecc_add_gate(const ecc_add_gate_& in); + void create_ecc_add_gate(const ecc_add_gate_& in) override; + void create_ecc_dbl_gate(const ecc_dbl_gate_& in) override; void fix_witness(const uint32_t witness_index, const FF& witness_value); diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp new file mode 100644 index 00000000000..6aaed689233 --- /dev/null +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp @@ -0,0 +1,291 @@ +#include "../field/field.hpp" +#include "barretenberg/crypto/pedersen_commitment/pedersen.hpp" +#include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp" + +#include "../../hash/pedersen/pedersen.hpp" +#include "../../hash/pedersen/pedersen_gates.hpp" + +#include "./cycle_group.hpp" +namespace proof_system::plonk::stdlib { + +template Composer* cycle_group::get_context(const cycle_group& other) +{ + if (get_context() != nullptr) { + return get_context(); + } + if (other.get_context() != nullptr) { + return other.get_context(); + } + return nullptr; +} + +/** + * @brief Evaluates a doubling + * + * 
@tparam Composer + * @return cycle_group + */ +template cycle_group cycle_group::dbl() +{ + auto context = get_context(); + + auto p1 = get_value(); + affine_element p3(element(p1).dbl()); + cycle_group result = cycle_group::from_witness(context, p3); + + proof_system::ecc_dbl_gate_ dbl_gate{ + .x1 = x.get_witness_index(), + .y1 = y.get_witness_index(), + .x3 = result.x.get_witness_index(), + .y3 = result.y.get_witness_index(), + }; + + context->create_ecc_dbl_gate(dbl_gate); + return result; +} + +/** + * @brief Will evaluate ECC point addition over `*this` and `other`. + * Incomplete addition formula edge cases are *NOT* checked! + * Only use this method if you know the x-coordinates of the operands cannot collide + * + * @tparam Composer + * @param other + * @return cycle_group + */ +template cycle_group cycle_group::unconditional_add(const cycle_group& other) +{ + auto context = get_context(other); + + const bool lhs_constant = is_constant(); + const bool rhs_constant = other.is_constant(); + if (lhs_constant && !rhs_constant) { + auto lhs = cycle_group::from_witness(context, get_value()); + return lhs.unconditional_add(other); + } + if (!lhs_constant && rhs_constant) { + auto rhs = cycle_group::from_witness(context, other.get_value()); + return unconditional_add(rhs); + } + + const auto p1 = get_value(); + const auto p2 = other.get_value(); + affine_element p3(element(p1) + element(p2)); + if (lhs_constant && rhs_constant) { + return cycle_group(p3); + } + cycle_group result = cycle_group::from_witness(context, p3); + + proof_system::ecc_add_gate_ add_gate{ + .x1 = x.get_witness_index(), + .y1 = y.get_witness_index(), + .x2 = other.x.get_witness_index(), + .y2 = other.y.get_witness_index(), + .x3 = result.x.get_witness_index(), + .y3 = result.y.get_witness_index(), + .endomorphism_coefficient = 1, + .sign_coefficient = 1, + }; + context->create_ecc_add_gate(add_gate); + + return result; +} + +/** + * @brief will evaluate ECC point subtraction over `*this` and 
`other`. + * Incomplete addition formula edge cases are *NOT* checked! + * Only use this method if you know the x-coordinates of the operands cannot collide + * + * @tparam Composer + * @param other + * @return cycle_group + */ +template +cycle_group cycle_group::unconditional_subtract(const cycle_group& other) +{ + auto context = get_context(other); + + const bool lhs_constant = is_constant(); + const bool rhs_constant = other.is_constant(); + + if (lhs_constant && !rhs_constant) { + auto lhs = cycle_group::from_witness(context, get_value()); + return lhs.unconditional_subtract(other); + } + if (!lhs_constant && rhs_constant) { + auto rhs = cycle_group::from_witness(context, other.get_value()); + return unconditional_subtract(rhs); + } + auto p1 = get_value(); + auto p2 = other.get_value(); + affine_element p3(element(p1) - element(p2)); + if (lhs_constant && rhs_constant) { + return cycle_group(p3); + } + cycle_group result = cycle_group::from_witness(context, p3); + + proof_system::ecc_add_gate_ add_gate{ + .x1 = x.get_witness_index(), + .y1 = y.get_witness_index(), + .x2 = other.x.get_witness_index(), + .y2 = other.y.get_witness_index(), + .x3 = result.x.get_witness_index(), + .y3 = result.y.get_witness_index(), + .endomorphism_coefficient = 1, + .sign_coefficient = -1, + }; + context->create_ecc_add_gate(add_gate); + + return result; +} + +/** + * @brief Will evaluate ECC point addition over `*this` and `other`. + * Uses incomplete addition formula + * If incomplete addition formula edge cases are triggered (x-coordinates of operands collide), + * the constraints produced by this method will be unsatisfiable. + * Useful when an honest prover will not produce a point collision with overwhelming probability, + * but a cheating prover will be able to. 
+ * + * @tparam Composer + * @param other + * @return cycle_group + */ +template +cycle_group cycle_group::constrained_unconditional_add(const cycle_group& other) +{ + field_t x_delta = x - other.x; + x_delta.assert_is_not_zero("cycle_group::constrained_unconditional_add, x-coordinate collision"); + return unconditional_add(other); +} + +/** + * @brief Will evaluate ECC point subtraction over `*this` and `other`. + * Uses incomplete addition formula + * If incomplete addition formula edge cases are triggered (x-coordinates of operands collide), + * the constraints produced by this method will be unsatisfiable. + * Useful when an honest prover will not produce a point collision with overwhelming probability, + * but a cheating prover will be able to. + * + * @tparam Composer + * @param other + * @return cycle_group + */ +template +cycle_group cycle_group::constrained_unconditional_subtract(const cycle_group& other) +{ + field_t x_delta = x - other.x; + x_delta.assert_is_not_zero("cycle_group::constrained_unconditional_subtract, x-coordinate collision"); + return unconditional_subtract(other); +} + +/** + * @brief Will evaluate ECC point addition over `*this` and `other`. + * This method uses complete addition i.e. is compatible with edge cases. + * Method is expensive due to needing to evaluate both an addition, a doubling, + * plus conditional logic to handle points at infinity. 
+ * + * @tparam Composer + * @param other + * @return cycle_group + */ +template cycle_group cycle_group::operator+(const cycle_group& other) +{ + + Composer* context = get_context(other); + auto add_result = unconditional_add(other); + auto dbl_result = dbl(); + + // dbl if x_match, y_match + // infinity if x_match, !y_match + const bool_t x_coordinates_match = (x == other.x); + const bool_t y_coordinates_match = (y == other.y); + const bool_t double_predicate = (x_coordinates_match && y_coordinates_match).normalize(); + const bool_t infinity_predicate = (x_coordinates_match && !y_coordinates_match).normalize(); + cycle_group result(context); + result.x = field_t::conditional_assign(double_predicate, dbl_result.x, add_result.x); + result.y = field_t::conditional_assign(double_predicate, dbl_result.y, add_result.y); + + const bool_t lhs_infinity = is_infinity; + const bool_t rhs_infinity = other.is_infinity; + // if lhs infinity, return rhs + result.x = field_t::conditional_assign(lhs_infinity, other.x, result.x); + result.y = field_t::conditional_assign(lhs_infinity, other.y, result.y); + + // if rhs infinity, return lhs + result.x = field_t::conditional_assign(rhs_infinity, x, result.x); + result.y = field_t::conditional_assign(rhs_infinity, y, result.y); + + // is result point at infinity? + // yes = infinity_predicate && !lhs_infinity && !rhs_infinity + // yes = lhs_infinity && rhs_infinity + // n.b. can likely optimise this + bool_t result_is_infinity = infinity_predicate && (!lhs_infinity && !rhs_infinity); + result_is_infinity = result_is_infinity || (lhs_infinity && rhs_infinity); + result.is_infinity = result_is_infinity; + return result; +} + +/** + * @brief Will evaluate ECC point subtraction over `*this` and `other`. + * This method uses complete addition i.e. is compatible with edge cases. + * Method is expensive due to needing to evaluate both an addition, a doubling, + * plus conditional logic to handle points at infinity. 
+ * + * @tparam Composer + * @param other + * @return cycle_group + */ +template cycle_group cycle_group::operator-(const cycle_group& other) +{ + + Composer* context = get_context(other); + auto add_result = unconditional_subtract(other); + auto dbl_result = dbl(); + + // dbl if x_match, !y_match + // infinity if x_match, y_match + const bool_t x_coordinates_match = (x == other.x); + const bool_t y_coordinates_match = (y == other.y); + const bool_t double_predicate = (x_coordinates_match && !y_coordinates_match).normalize(); + const bool_t infinity_predicate = (x_coordinates_match && y_coordinates_match).normalize(); + cycle_group result(context); + result.x = field_t::conditional_assign(double_predicate, dbl_result.x, add_result.x); + result.y = field_t::conditional_assign(double_predicate, dbl_result.y, add_result.y); + + const bool_t lhs_infinity = is_infinity; + const bool_t rhs_infinity = other.is_infinity; + // if lhs infinity, return -rhs + result.x = field_t::conditional_assign(lhs_infinity, other.x, result.x); + result.y = field_t::conditional_assign(lhs_infinity, (-other.y).normalize(), result.y); + + // if rhs infinity, return lhs + result.x = field_t::conditional_assign(rhs_infinity, x, result.x); + result.y = field_t::conditional_assign(rhs_infinity, y, result.y); + + // is result point at infinity? + // yes = infinity_predicate && !lhs_infinity && !rhs_infinity + // yes = lhs_infinity && rhs_infinity + // n.b. 
can likely optimise this + bool_t result_is_infinity = infinity_predicate && (!lhs_infinity && !rhs_infinity); + result_is_infinity = result_is_infinity || (lhs_infinity && rhs_infinity); + result.is_infinity = result_is_infinity; + + return result; +} + +template cycle_group& cycle_group::operator+=(const cycle_group& other) +{ + *this = *this + other; + return *this; +} + +template cycle_group& cycle_group::operator-=(const cycle_group& other) +{ + *this = *this - other; + return *this; +} + +INSTANTIATE_STDLIB_TYPE(cycle_group); + +} // namespace proof_system::plonk::stdlib diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp new file mode 100644 index 00000000000..d5fc6518aba --- /dev/null +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp @@ -0,0 +1,130 @@ +#pragma once + +#include "../field/field.hpp" +#include "barretenberg/crypto/pedersen_commitment/pedersen.hpp" +#include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp" + +#include "../../hash/pedersen/pedersen.hpp" +#include "../../hash/pedersen/pedersen_gates.hpp" +#include + +namespace proof_system::plonk::stdlib { + +using namespace barretenberg; +using namespace crypto::generators; + +/** + * @brief cycle_group represents a group element of the proving system's embedded curve + * i.e. 
a curve with a cofactor 1 defined over a field equal to the circuit's native field Composer::FF + * + * (todo @zac-williamson) once the pedersen refactor project is finished, this class will supersede + * `stdlib::group` + * + * @tparam Composer + */ +template class cycle_group { + public: + using field_t = field_t; + using bool_t = bool_t; + using witness_t = witness_t; + using FF = typename Composer::FF; + using G1 = typename Composer::EmbeddedCurve; + using element = typename G1::element; + using affine_element = typename G1::affine_element; + + Composer* get_context(const cycle_group& other); + + cycle_group(Composer* _context = nullptr) + : context(_context) + , x(0) + , y(0) + , is_infinity(true) + , _is_constant(true) + {} + + cycle_group(Composer* _context, field_t _x, field_t _y, bool_t _is_infinity) + : context(_context) + , x(_x.normalize()) + , y(_y.normalize()) + , is_infinity(_is_infinity) + , _is_constant(_x.is_constant() && _y.is_constant() && _is_infinity.is_constant()) + {} + + cycle_group(const FF& _x, const FF& _y, bool _is_infinity) + : context(nullptr) + , x(_x) + , y(_y) + , is_infinity(_is_infinity) + , _is_constant(true) + {} + + cycle_group(const affine_element& _in) + : context(nullptr) + , x(_in.x) + , y(_in.y) + , is_infinity(_in.is_point_at_infinity()) + , _is_constant(true) + {} + + /** + * @brief + * + * N.B. make sure _in is not the point at infinity! + * (todo: should we validate on curve?)
+ * @param _context + * @param _in + * @return cycle_group + */ + static cycle_group from_witness(Composer* _context, const affine_element& _in) + { + cycle_group result(_context); + result.x = field_t(witness_t(_context, _in.x)); + result.y = field_t(witness_t(_context, _in.y)); + result.is_infinity = false; + result._is_constant = false; + return result; + } + + Composer* get_context() const { return context; } + [[nodiscard]] bool is_constant() const { return _is_constant; } + + affine_element get_value() const + { + affine_element result(x.get_value(), y.get_value()); + if (is_infinity.get_value()) { + result.self_set_infinity(); + } + return result; + } + + cycle_group dbl(); + cycle_group unconditional_add(const cycle_group& other); + cycle_group constrained_unconditional_add(const cycle_group& other); + cycle_group operator+(const cycle_group& other); + cycle_group unconditional_subtract(const cycle_group& other); + cycle_group constrained_unconditional_subtract(const cycle_group& other); + cycle_group operator-(const cycle_group& other); + cycle_group& operator+=(const cycle_group& other); + cycle_group& operator-=(const cycle_group& other); + + static cycle_group fixed_base_batch_mul(const std::vector& scalars, + const std::vector& generator_indices); + static cycle_group variable_base_batch_mul(const std::vector& scalars, + const std::vector& base_points); + + Composer* context; + field_t x; + field_t y; + bool_t is_infinity; + bool _is_constant; +}; + +template +inline std::ostream& operator<<(std::ostream& os, cycle_group const& v) +{ + return os << v.get_value(); +} + +EXTERN_STDLIB_TYPE(cycle_group); + +} // namespace proof_system::plonk::stdlib diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp new file mode 100644 index 00000000000..3d25ae420c7 --- /dev/null +++ 
b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp @@ -0,0 +1,335 @@ +#include "barretenberg/numeric/random/engine.hpp" +#include "barretenberg/stdlib/primitives/field/field.hpp" +#include "barretenberg/stdlib/primitives/group/cycle_group.hpp" +#include "barretenberg/stdlib/primitives/witness/witness.hpp" +#include + +#define STDLIB_TYPE_ALIASES \ + using Composer = TypeParam; \ + using cycle_group_ct = stdlib::cycle_group; \ + using G1 = typename Composer::EmbeddedCurve; \ + using element = typename G1::element; \ + using affine_element = typename G1::affine_element; + +namespace stdlib_cycle_group_tests { +using namespace barretenberg; +using namespace proof_system::plonk; + +namespace { +auto& engine = numeric::random::get_debug_engine(); +} + +template class CycleGroupTest : public ::testing::Test { + public: + using G1 = typename Composer::EmbeddedCurve; + using FF = typename G1::subgroup_field; + + using element = typename G1::element; + using affine_element = typename G1::affine_element; + + static constexpr size_t num_generators = 10; + static inline std::array generators{}; + + static void SetUpTestSuite() + { + for (size_t i = 0; i < num_generators; ++i) { + generators[i] = G1::one * FF::random_element(&engine); + } + }; +}; + +using CircuitTypes = ::testing:: + Types; +TYPED_TEST_SUITE(CycleGroupTest, CircuitTypes); + +TYPED_TEST(CycleGroupTest, TestDbl) +{ + STDLIB_TYPE_ALIASES; + auto composer = Composer(); + + auto lhs = TestFixture::generators[0]; + cycle_group_ct a = cycle_group_ct::from_witness(&composer, lhs); + cycle_group_ct c = a.dbl(); + affine_element expected(element(lhs).dbl()); + affine_element result = c.get_value(); + EXPECT_EQ(result, expected); + + bool proof_result = composer.check_circuit(); + EXPECT_EQ(proof_result, false); +} + +TYPED_TEST(CycleGroupTest, TestUnconditionalAdd) +{ + STDLIB_TYPE_ALIASES; + auto composer = Composer(); + + auto add = + [&](const affine_element& lhs, const 
affine_element& rhs, const bool lhs_constant, const bool rhs_constant) { + cycle_group_ct a = lhs_constant ? cycle_group_ct(lhs) : cycle_group_ct::from_witness(&composer, lhs); + cycle_group_ct b = rhs_constant ? cycle_group_ct(rhs) : cycle_group_ct::from_witness(&composer, rhs); + cycle_group_ct c = a.unconditional_add(b); + affine_element expected(element(lhs) + element(rhs)); + affine_element result = c.get_value(); + EXPECT_EQ(result, expected); + }; + + add(TestFixture::generators[0], TestFixture::generators[1], false, false); + add(TestFixture::generators[0], TestFixture::generators[1], false, true); + add(TestFixture::generators[0], TestFixture::generators[1], true, false); + add(TestFixture::generators[0], TestFixture::generators[1], true, true); + + bool proof_result = composer.check_circuit(); + EXPECT_EQ(proof_result, true); +} + +TYPED_TEST(CycleGroupTest, TestConstrainedUnconditionalAddSucceed) +{ + STDLIB_TYPE_ALIASES; + auto composer = Composer(); + + auto lhs = TestFixture::generators[0]; + auto rhs = TestFixture::generators[1]; + + // case 1. valid unconditional add + cycle_group_ct a = cycle_group_ct::from_witness(&composer, lhs); + cycle_group_ct b = cycle_group_ct::from_witness(&composer, rhs); + cycle_group_ct c = a.constrained_unconditional_add(b); + affine_element expected(element(lhs) + element(rhs)); + affine_element result = c.get_value(); + EXPECT_EQ(result, expected); + + bool proof_result = composer.check_circuit(); + EXPECT_EQ(proof_result, true); +} + +TYPED_TEST(CycleGroupTest, TestConstrainedUnconditionalAddFail) +{ + using Composer = TypeParam; + using cycle_group_ct = stdlib::cycle_group; + auto composer = Composer(); + + auto lhs = TestFixture::generators[0]; + auto rhs = -TestFixture::generators[0]; // ruh roh + + // case 2. 
invalid unconditional add + cycle_group_ct a = cycle_group_ct::from_witness(&composer, lhs); + cycle_group_ct b = cycle_group_ct::from_witness(&composer, rhs); + a.constrained_unconditional_add(b); + + EXPECT_TRUE(composer.failed()); + + bool proof_result = composer.check_circuit(); + EXPECT_EQ(proof_result, false); +} + +TYPED_TEST(CycleGroupTest, TestAdd) +{ + STDLIB_TYPE_ALIASES; + using bool_ct = stdlib::bool_t; + using witness_ct = stdlib::witness_t; + auto composer = Composer(); + + auto lhs = TestFixture::generators[0]; + auto rhs = -TestFixture::generators[1]; + + cycle_group_ct point_at_infinity = cycle_group_ct::from_witness(&composer, rhs); + point_at_infinity.is_infinity = bool_ct(witness_ct(&composer, true)); + + // case 1. no edge-cases triggered + { + cycle_group_ct a = cycle_group_ct::from_witness(&composer, lhs); + cycle_group_ct b = cycle_group_ct::from_witness(&composer, rhs); + cycle_group_ct c = a + b; + affine_element expected(element(lhs) + element(rhs)); + affine_element result = c.get_value(); + EXPECT_EQ(result, expected); + } + + // case 2. lhs is point at infinity + { + cycle_group_ct a = point_at_infinity; + cycle_group_ct b = cycle_group_ct::from_witness(&composer, rhs); + cycle_group_ct c = a + b; + affine_element result = c.get_value(); + EXPECT_EQ(result, rhs); + } + + // case 3. rhs is point at infinity + { + cycle_group_ct a = cycle_group_ct::from_witness(&composer, lhs); + cycle_group_ct b = point_at_infinity; + cycle_group_ct c = a + b; + affine_element result = c.get_value(); + EXPECT_EQ(result, lhs); + } + + // case 4. both points are at infinity + { + cycle_group_ct a = point_at_infinity; + cycle_group_ct b = point_at_infinity; + cycle_group_ct c = a + b; + EXPECT_TRUE(c.is_infinity.get_value()); + EXPECT_TRUE(c.get_value().is_point_at_infinity()); + } + + // case 5. 
lhs = -rhs + { + cycle_group_ct a = cycle_group_ct::from_witness(&composer, lhs); + cycle_group_ct b = cycle_group_ct::from_witness(&composer, -lhs); + cycle_group_ct c = a + b; + EXPECT_TRUE(c.is_infinity.get_value()); + EXPECT_TRUE(c.get_value().is_point_at_infinity()); + } + + // case 6. lhs = rhs + { + cycle_group_ct a = cycle_group_ct::from_witness(&composer, lhs); + cycle_group_ct b = cycle_group_ct::from_witness(&composer, lhs); + cycle_group_ct c = a + b; + affine_element expected((element(lhs)).dbl()); + affine_element result = c.get_value(); + EXPECT_EQ(result, expected); + } + + bool proof_result = composer.check_circuit(); + EXPECT_EQ(proof_result, false); +} + +TYPED_TEST(CycleGroupTest, TestUnconditionalSubtract) +{ + STDLIB_TYPE_ALIASES; + auto composer = Composer(); + + auto add = + [&](const affine_element& lhs, const affine_element& rhs, const bool lhs_constant, const bool rhs_constant) { + cycle_group_ct a = lhs_constant ? cycle_group_ct(lhs) : cycle_group_ct::from_witness(&composer, lhs); + cycle_group_ct b = rhs_constant ? cycle_group_ct(rhs) : cycle_group_ct::from_witness(&composer, rhs); + cycle_group_ct c = a.unconditional_subtract(b); + affine_element expected(element(lhs) - element(rhs)); + affine_element result = c.get_value(); + EXPECT_EQ(result, expected); + }; + + add(TestFixture::generators[0], TestFixture::generators[1], false, false); + add(TestFixture::generators[0], TestFixture::generators[1], false, true); + add(TestFixture::generators[0], TestFixture::generators[1], true, false); + add(TestFixture::generators[0], TestFixture::generators[1], true, true); + + bool proof_result = composer.check_circuit(); + EXPECT_EQ(proof_result, true); +} + +TYPED_TEST(CycleGroupTest, TestConstrainedUnconditionalSubtractSucceed) +{ + STDLIB_TYPE_ALIASES; + auto composer = Composer(); + + auto lhs = TestFixture::generators[0]; + auto rhs = TestFixture::generators[1]; + + // case 1. 
valid unconditional add + cycle_group_ct a = cycle_group_ct::from_witness(&composer, lhs); + cycle_group_ct b = cycle_group_ct::from_witness(&composer, rhs); + cycle_group_ct c = a.constrained_unconditional_subtract(b); + affine_element expected(element(lhs) - element(rhs)); + affine_element result = c.get_value(); + EXPECT_EQ(result, expected); + + bool proof_result = composer.check_circuit(); + EXPECT_EQ(proof_result, true); +} + +TYPED_TEST(CycleGroupTest, TestConstrainedUnconditionalSubtractFail) +{ + using Composer = TypeParam; + using cycle_group_ct = stdlib::cycle_group; + auto composer = Composer(); + + auto lhs = TestFixture::generators[0]; + auto rhs = -TestFixture::generators[0]; // ruh roh + + // case 2. invalid unconditional add + cycle_group_ct a = cycle_group_ct::from_witness(&composer, lhs); + cycle_group_ct b = cycle_group_ct::from_witness(&composer, rhs); + a.constrained_unconditional_subtract(b); + + EXPECT_TRUE(composer.failed()); + + bool proof_result = composer.check_circuit(); + EXPECT_EQ(proof_result, false); +} + +TYPED_TEST(CycleGroupTest, TestSubtract) +{ + STDLIB_TYPE_ALIASES; + using bool_ct = stdlib::bool_t; + using witness_ct = stdlib::witness_t; + auto composer = Composer(); + + auto lhs = TestFixture::generators[0]; + auto rhs = -TestFixture::generators[1]; + + cycle_group_ct point_at_infinity = cycle_group_ct::from_witness(&composer, rhs); + point_at_infinity.is_infinity = bool_ct(witness_ct(&composer, true)); + + // case 1. no edge-cases triggered + { + cycle_group_ct a = cycle_group_ct::from_witness(&composer, lhs); + cycle_group_ct b = cycle_group_ct::from_witness(&composer, rhs); + cycle_group_ct c = a - b; + affine_element expected(element(lhs) - element(rhs)); + affine_element result = c.get_value(); + EXPECT_EQ(result, expected); + } + + // case 2. 
lhs is point at infinity + { + cycle_group_ct a = point_at_infinity; + cycle_group_ct b = cycle_group_ct::from_witness(&composer, rhs); + cycle_group_ct c = a - b; + affine_element result = c.get_value(); + EXPECT_EQ(result, -rhs); + } + + // case 3. rhs is point at infinity + { + cycle_group_ct a = cycle_group_ct::from_witness(&composer, lhs); + cycle_group_ct b = point_at_infinity; + cycle_group_ct c = a - b; + affine_element result = c.get_value(); + EXPECT_EQ(result, lhs); + } + + // case 4. both points are at infinity + { + cycle_group_ct a = point_at_infinity; + cycle_group_ct b = point_at_infinity; + cycle_group_ct c = a - b; + EXPECT_TRUE(c.is_infinity.get_value()); + EXPECT_TRUE(c.get_value().is_point_at_infinity()); + } + + // case 5. lhs = -rhs + { + cycle_group_ct a = cycle_group_ct::from_witness(&composer, lhs); + cycle_group_ct b = cycle_group_ct::from_witness(&composer, -lhs); + cycle_group_ct c = a - b; + affine_element expected((element(lhs)).dbl()); + affine_element result = c.get_value(); + EXPECT_EQ(result, expected); + } + + // case 6. lhs = rhs + { + cycle_group_ct a = cycle_group_ct::from_witness(&composer, lhs); + cycle_group_ct b = cycle_group_ct::from_witness(&composer, lhs); + cycle_group_ct c = a - b; + EXPECT_TRUE(c.is_infinity.get_value()); + EXPECT_TRUE(c.get_value().is_point_at_infinity()); + } + + bool proof_result = composer.check_circuit(); + EXPECT_EQ(proof_result, false); +} + +} // namespace stdlib_cycle_group_tests \ No newline at end of file From 4f885f62edca9e6df60b883e9c584aad965a8308 Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Wed, 16 Aug 2023 21:02:22 +0000 Subject: [PATCH 02/50] fixed bugs in cycle_group add/sub/dbl added variable-base method that isn't quite passing tests. tests will fail ~33% of the time! 
--- .../standard_circuit_builder.cpp | 10 +- .../circuit_builder/turbo_circuit_builder.cpp | 6 +- .../circuit_builder/ultra_circuit_builder.cpp | 6 +- .../stdlib/primitives/field/field.hpp | 31 +- .../stdlib/primitives/group/cycle_group.cpp | 318 ++++++++++++++++-- .../stdlib/primitives/group/cycle_group.hpp | 63 +++- .../primitives/group/cycle_group.test.cpp | 36 +- 7 files changed, 411 insertions(+), 59 deletions(-) diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/standard_circuit_builder.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/standard_circuit_builder.cpp index c7752996e99..99557c30f6b 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/standard_circuit_builder.cpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/standard_circuit_builder.cpp @@ -410,7 +410,7 @@ template void StandardCircuitBuilder_::create_ecc_dbl_gate(con .q_c = 0, }); - // lambda * lambda - x2 - x1 = 0 + // lambda * lambda - 2x1 - x3 = 0 const auto lambda_sqr_v = lambda_v * lambda_v; const auto lambda_sqr = this->add_variable(lambda_sqr_v); create_poly_gate({ @@ -426,15 +426,15 @@ template void StandardCircuitBuilder_::create_ecc_dbl_gate(con create_poly_gate({ .a = lambda_sqr, .b = in.x1, - .c = this->zero_idx, + .c = in.x3, .q_m = 0, .q_l = 1, .q_r = -2, - .q_o = 0, + .q_o = -1, .q_c = 0, }); - // lambda * (x1 - x3) - y1 = 0 + // lambda * (x1 - x3) - y1 - y3 = 0 const auto x1_sub_x3_v = x1 - x3; const auto x1_sub_x3 = this->add_variable(x1_sub_x3_v); create_poly_gate({ @@ -733,6 +733,8 @@ template bool StandardCircuitBuilder_::check_circuit() gate_sum = q_m[i] * left * right + q_1[i] * left + q_2[i] * right + q_3[i] * output + q_c[i]; if (!gate_sum.is_zero()) { info("gate number", i); + info("l, r, o = ", left, ", ", right, ", ", output); + info("wl,wr,wo = ", w_l[i], ", ", w_r[i], ", ", w_o[i]); return false; } } diff --git 
a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/turbo_circuit_builder.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/turbo_circuit_builder.cpp index 50f1c622794..8f7372ff727 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/turbo_circuit_builder.cpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/turbo_circuit_builder.cpp @@ -542,7 +542,7 @@ template void TurboCircuitBuilder_::create_ecc_dbl_gate(const .q_c = 0, }); - // lambda * lambda - x2 - x1 = 0 + // lambda * lambda - 2x3 - x3 = 0 const auto lambda_sqr_v = lambda_v * lambda_v; const auto lambda_sqr = this->add_variable(lambda_sqr_v); create_poly_gate({ @@ -558,11 +558,11 @@ template void TurboCircuitBuilder_::create_ecc_dbl_gate(const create_poly_gate({ .a = lambda_sqr, .b = in.x1, - .c = this->zero_idx, + .c = in.x3, .q_m = 0, .q_l = 1, .q_r = -2, - .q_o = 0, + .q_o = -1, .q_c = 0, }); diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.cpp index b8ee8e9fd2d..f24c8e836c5 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.cpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.cpp @@ -493,7 +493,7 @@ template void UltraCircuitBuilder_::create_ecc_dbl_gate(const .q_c = 0, }); - // lambda * lambda - x2 - x1 = 0 + // lambda * lambda - 2x1 - x3 = 0 const auto lambda_sqr_v = lambda_v * lambda_v; const auto lambda_sqr = this->add_variable(lambda_sqr_v); create_poly_gate({ @@ -509,11 +509,11 @@ template void UltraCircuitBuilder_::create_ecc_dbl_gate(const create_poly_gate({ .a = lambda_sqr, .b = in.x1, - .c = this->zero_idx, + .c = in.x3, .q_m = 0, .q_l = 1, .q_r = -2, - .q_o = 0, + .q_o = -1, .q_c = 0, }); diff 
--git a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.hpp index 9d81760b932..9b53fa57788 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.hpp @@ -4,15 +4,13 @@ #include "barretenberg/common/assert.hpp" #include -namespace proof_system::plonk { -namespace stdlib { +namespace proof_system::plonk::stdlib { template class bool_t; template class field_t { public: field_t(ComposerContext* parent_context = nullptr); field_t(ComposerContext* parent_context, const barretenberg::fr& value); - field_t(const int value) : context(nullptr) { @@ -68,7 +66,7 @@ template class field_t { , witness_index(other.witness_index) {} - field_t(field_t&& other) + field_t(field_t&& other) noexcept : context(other.context) , additive_constant(other.additive_constant) , multiplicative_constant(other.multiplicative_constant) @@ -77,15 +75,20 @@ template class field_t { field_t(const bool_t& other); + ~field_t() = default; + static constexpr bool is_composite = false; static constexpr uint256_t modulus = barretenberg::fr::modulus; - static field_t from_witness_index(ComposerContext* parent_context, const uint32_t witness_index); + static field_t from_witness_index(ComposerContext* parent_context, uint32_t witness_index); explicit operator bool_t() const; field_t& operator=(const field_t& other) { + if (this == &other) { + return *this; + } additive_constant = other.additive_constant; multiplicative_constant = other.multiplicative_constant; witness_index = other.witness_index; @@ -93,7 +96,7 @@ template class field_t { return *this; } - field_t& operator=(field_t&& other) + field_t& operator=(field_t&& other) noexcept { additive_constant = other.additive_constant; multiplicative_constant = other.multiplicative_constant; @@ -149,7 +152,8 @@ template class 
field_t { }; // Postfix increment (x++) - field_t operator++(int) + // NOLINTNEXTLINE + field_t operator++(const int) { field_t this_before_operation = field_t(*this); *this = *this + 1; @@ -244,7 +248,7 @@ template class field_t { * Slices a `field_t` at given indices (msb, lsb) both included in the slice, * returns three parts: [low, slice, high]. */ - std::array slice(const uint8_t msb, const uint8_t lsb) const; + std::array slice(uint8_t msb, uint8_t lsb) const; /** * is_zero will return a bool_t, and add constraints that enforce its correctness @@ -252,7 +256,7 @@ template class field_t { **/ bool_t is_zero() const; - void create_range_constraint(const size_t num_bits, std::string const& msg = "field_t::range_constraint") const; + void create_range_constraint(size_t num_bits, std::string const& msg = "field_t::range_constraint") const; void assert_is_not_zero(std::string const& msg = "field_t::assert_is_not_zero") const; void assert_is_zero(std::string const& msg = "field_t::assert_is_zero") const; bool is_constant() const { return witness_index == IS_CONSTANT; } @@ -288,10 +292,12 @@ template class field_t { uint32_t get_witness_index() const { return witness_index; } + // std::vector> decompose_into_slices(size_t num_bits = 256, size_t slice_bits = 1) const; + std::vector> decompose_into_bits( - const size_t num_bits = 256, + size_t num_bits = 256, std::function(ComposerContext* ctx, uint64_t, uint256_t)> get_bit = - [](ComposerContext* ctx, uint64_t j, uint256_t val) { + [](ComposerContext* ctx, uint64_t j, const uint256_t& val) { return witness_t(ctx, val.get_bit(j)); }) const; @@ -421,5 +427,4 @@ template inline std::ostream& operator<<(std::ostream EXTERN_STDLIB_TYPE(field_t); -} // namespace stdlib -} // namespace proof_system::plonk +} // namespace proof_system::plonk::stdlib diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp 
b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp index 6aaed689233..fd65d472854 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp @@ -8,7 +8,7 @@ #include "./cycle_group.hpp" namespace proof_system::plonk::stdlib { -template Composer* cycle_group::get_context(const cycle_group& other) +template Composer* cycle_group::get_context(const cycle_group& other) const { if (get_context() != nullptr) { return get_context(); @@ -25,14 +25,25 @@ template Composer* cycle_group::get_context(const * @tparam Composer * @return cycle_group */ -template cycle_group cycle_group::dbl() +template cycle_group cycle_group::dbl() const { + // n.b. if p1 is point at infinity, calling p1.dbl() does not give us an output that satisfies the double gate :o) + // (native code just checks out of the dbl() method if point is at infinity) + auto x1 = x.get_value(); + auto y1 = y.get_value(); + auto lambda = (x1 * x1 * 3) / (y1 + y1); + auto x3 = lambda * lambda - x1 - x1; + auto y3 = lambda * (x1 - x3) - y1; + affine_element p3(x3, y3); + + if (is_constant()) { + return cycle_group(p3); + } + auto context = get_context(); - auto p1 = get_value(); - affine_element p3(element(p1).dbl()); cycle_group result = cycle_group::from_witness(context, p3); - + result.is_infinity = is_infinity; proof_system::ecc_dbl_gate_ dbl_gate{ .x1 = x.get_witness_index(), .y1 = y.get_witness_index(), @@ -53,7 +64,8 @@ template cycle_group cycle_group::dbl() * @param other * @return cycle_group */ -template cycle_group cycle_group::unconditional_add(const cycle_group& other) +template +cycle_group cycle_group::unconditional_add(const cycle_group& other) const { auto context = get_context(other); @@ -101,7 +113,7 @@ template cycle_group cycle_group::uncond * @return cycle_group */ template -cycle_group 
cycle_group::unconditional_subtract(const cycle_group& other) +cycle_group cycle_group::unconditional_subtract(const cycle_group& other) const { auto context = get_context(other); @@ -152,7 +164,7 @@ cycle_group cycle_group::unconditional_subtract(const cycle_ * @return cycle_group */ template -cycle_group cycle_group::constrained_unconditional_add(const cycle_group& other) +cycle_group cycle_group::constrained_unconditional_add(const cycle_group& other) const { field_t x_delta = x - other.x; x_delta.assert_is_not_zero("cycle_group::constrained_unconditional_add, x-coordinate collision"); @@ -172,7 +184,7 @@ cycle_group cycle_group::constrained_unconditional_add(const * @return cycle_group */ template -cycle_group cycle_group::constrained_unconditional_subtract(const cycle_group& other) +cycle_group cycle_group::constrained_unconditional_subtract(const cycle_group& other) const { field_t x_delta = x - other.x; x_delta.assert_is_not_zero("cycle_group::constrained_unconditional_subtract, x-coordinate collision"); @@ -189,19 +201,28 @@ cycle_group cycle_group::constrained_unconditional_subtract( * @param other * @return cycle_group */ -template cycle_group cycle_group::operator+(const cycle_group& other) +template cycle_group cycle_group::operator+(const cycle_group& other) const { - Composer* context = get_context(other); - auto add_result = unconditional_add(other); + const bool_t x_coordinates_match = (x == other.x); + const bool_t y_coordinates_match = (y == other.y); + const bool_t double_predicate = (x_coordinates_match && y_coordinates_match); + const bool_t infinity_predicate = (x_coordinates_match && !y_coordinates_match); + + auto x1 = x; + auto y1 = y; + auto x2 = other.x; + auto y2 = other.y; + auto x_diff = x2.add_two(-x1, x_coordinates_match); // todo document this oddity + auto lambda = (y2 - y1) / x_diff; + auto x3 = lambda.madd(lambda, -(x2 + x1)); + auto y3 = lambda.madd(x1 - x3, -y1); + cycle_group add_result(context, x3, y3, x_coordinates_match); + 
auto dbl_result = dbl(); // dbl if x_match, y_match // infinity if x_match, !y_match - const bool_t x_coordinates_match = (x == other.x); - const bool_t y_coordinates_match = (y == other.y); - const bool_t double_predicate = (x_coordinates_match && y_coordinates_match).normalize(); - const bool_t infinity_predicate = (x_coordinates_match && !y_coordinates_match).normalize(); cycle_group result(context); result.x = field_t::conditional_assign(double_predicate, dbl_result.x, add_result.x); result.y = field_t::conditional_assign(double_predicate, dbl_result.y, add_result.y); @@ -236,19 +257,29 @@ template cycle_group cycle_group::operat * @param other * @return cycle_group */ -template cycle_group cycle_group::operator-(const cycle_group& other) +template cycle_group cycle_group::operator-(const cycle_group& other) const { Composer* context = get_context(other); - auto add_result = unconditional_subtract(other); - auto dbl_result = dbl(); - - // dbl if x_match, !y_match - // infinity if x_match, y_match const bool_t x_coordinates_match = (x == other.x); const bool_t y_coordinates_match = (y == other.y); const bool_t double_predicate = (x_coordinates_match && !y_coordinates_match).normalize(); const bool_t infinity_predicate = (x_coordinates_match && y_coordinates_match).normalize(); + + auto x1 = x; + auto y1 = y; + auto x2 = other.x; + auto y2 = other.y; + auto x_diff = x2.add_two(-x1, x_coordinates_match); + auto lambda = (-y2 - y1) / x_diff; + auto x3 = lambda.madd(lambda, -(x2 + x1)); + auto y3 = lambda.madd(x1 - x3, -y1); + cycle_group add_result(context, x3, y3, x_coordinates_match); + + auto dbl_result = dbl(); + + // dbl if x_match, !y_match + // infinity if x_match, y_match cycle_group result(context); result.x = field_t::conditional_assign(double_predicate, dbl_result.x, add_result.x); result.y = field_t::conditional_assign(double_predicate, dbl_result.y, add_result.y); @@ -286,6 +317,249 @@ template cycle_group& cycle_group::opera return *this; } +template 
cycle_group::offset_generators::offset_generators(size_t num_points) +{ + auto generator_temp = G1::template derive_generators<100>(); // hmm bad + const size_t num_generators = num_points + 1; + for (size_t i = 0; i < num_generators; ++i) { + generators.emplace_back(generator_temp[i]); + } + + auto init_generator = generators[0]; +} +template +cycle_group::cycle_scalar::cycle_scalar(field_t _lo, field_t _hi) + : lo(_lo) + , hi(_hi) +{} + +template cycle_group::cycle_scalar::cycle_scalar(field_t _in) +{ + const uint256_t value(_in.get_value()); + const uint256_t lo_v = value.slice(0, LO_BITS); + const uint256_t hi_v = value.slice(LO_BITS, HI_BITS); + constexpr uint256_t shift = uint256_t(1) << LO_BITS; + if (_in.is_constant()) { + lo = lo_v; + hi = hi_v; + } else { + lo = witness_t(_in.get_context(), lo_v); + hi = witness_t(_in.get_context(), hi_v); + (lo + hi * shift).assert_equal(_in); + } +} + +template +typename cycle_group::cycle_scalar cycle_group::cycle_scalar::from_witness(Composer* context, + const ScalarField& value) +{ + const uint256_t value_u256(value); + const uint256_t lo_v = value_u256.slice(0, LO_BITS); + const uint256_t hi_v = value_u256.slice(LO_BITS, HI_BITS); + field_t lo = witness_t(context, lo_v); + field_t hi = witness_t(context, hi_v); + return cycle_scalar(lo, hi); +} + +template +cycle_group::straus_scalar_slice::straus_scalar_slice(Composer* context, + const cycle_scalar& scalar, + const size_t table_bits) + : _table_bits(table_bits) +{ + // convert an input cycle_scalar object into a vector of slices, each containing `table_bits` bits. 
+ // this also performs an implicit range check on the input slices + const auto slice_scalar = [&](const field_t& scalar, const size_t num_bits) { + std::vector result; + if (scalar.is_constant()) { + const size_t num_slices = num_bits / table_bits; + const uint64_t table_mask = (1ULL << table_bits) - 1ULL; + uint256_t raw_value = scalar.get_value(); + for (size_t i = 0; i < num_slices; ++i) { + uint64_t slice_v = static_cast(raw_value.data[0]) & table_mask; + field_t slice(context, slice_v); + result.push_back(slice); + } + return result; + } + if constexpr (IS_ULTRA) { + const auto slice_indices = + context->decompose_into_default_range(scalar.normalize().get_witness_index(), + num_bits, + table_bits, + "straus_scalar_slice decompose_into_default_range"); + for (auto& idx : slice_indices) { + result.emplace_back(field_t::from_witness_index(context, idx)); + } + } else { + uint256_t raw_value = scalar.get_value(); + const uint64_t table_mask = (1ULL << table_bits) - 1ULL; + const size_t num_slices = num_bits / table_bits; + + for (size_t i = 0; i < num_slices; ++i) { + uint64_t slice_v = static_cast(raw_value.data[0]) & table_mask; + field_t slice(witness_t(context, slice_v)); + + context->create_range_constraint( + slice.get_witness_index(), table_bits, "straus_scalar_slice create_range_constraint"); + + result.emplace_back(slice); + raw_value = raw_value >> table_bits; + } + std::vector linear_elements; + FF scaling_factor = 1; + for (size_t i = 0; i < num_slices; ++i) { + linear_elements.emplace_back(result[i] * scaling_factor); + scaling_factor += scaling_factor; + } + field_t::accumulate(linear_elements).assert_equal(scalar); + } + return result; + }; + + auto hi_slices = slice_scalar(scalar.hi, cycle_scalar::HI_BITS); + auto lo_slices = slice_scalar(scalar.lo, cycle_scalar::LO_BITS); + + // Check that scalar.hi * 2^LO_BITS + scalar.lo < cycle_group_modulus when evaluated over the integers + { + constexpr uint256_t cycle_group_modulus = 
cycle_scalar::ScalarField::modulus; + constexpr uint256_t r_lo = cycle_group_modulus.slice(0, cycle_scalar::LO_BITS); + constexpr uint256_t r_hi = cycle_group_modulus.slice(cycle_scalar::LO_BITS, cycle_scalar::HI_BITS); + + bool need_borrow = uint256_t(scalar.lo.get_value()) > r_lo; + field_t borrow = scalar.lo.is_constant() ? need_borrow : field_t::from_witness(context, need_borrow); + + // directly call `create_new_range_constraint` to avoid creating an arithmetic gate + if (!scalar.lo.is_constant()) { + if constexpr (IS_ULTRA) { + context->create_new_range_constraint(borrow.get_witness_index(), 1, "borrow"); + } else { + borrow.assert_equal(borrow * borrow); + } + } + // Hi range check = r_hi - y_hi - borrow + // Lo range check = r_lo - y_lo + borrow * 2^{126} + field_t hi = (-scalar.hi + r_hi) - borrow; + field_t lo = (-scalar.lo + r_lo) + (borrow * (uint256_t(1) << 126)); + + hi.create_range_constraint(cycle_scalar::HI_BITS); + lo.create_range_constraint(cycle_scalar::LO_BITS); + } + + std::copy(lo_slices.begin(), lo_slices.end(), std::back_inserter(slices)); + std::copy(hi_slices.begin(), hi_slices.end(), std::back_inserter(slices)); +} + +template field_t cycle_group::straus_scalar_slice::read(size_t index) +{ + ASSERT(slices.size() > index); + return slices[index]; +} + +template +cycle_group::straus_lookup_table::straus_lookup_table(Composer* context, + const cycle_group& base_point, + const cycle_group& generator_point, + size_t table_bits) + : _table_bits(table_bits) + , _context(context) +{ + const size_t table_size = 1UL << table_bits; + + point_table.resize(table_size); + point_table[0] = generator_point; + for (size_t i = 1; i < table_size; ++i) { + point_table[i] = point_table[i - 1].constrained_unconditional_add(base_point); + } + + if constexpr (IS_ULTRA) { + rom_id = context->create_ROM_array(table_size); + for (size_t i = 0; i < table_size; ++i) { + if (point_table[i].is_constant()) { + auto element = point_table[i].get_value(); + point_table[i] 
= cycle_group::from_witness(_context, element); + point_table[i].x.assert_equal(element.x); + point_table[i].y.assert_equal(element.y); + } + context->set_ROM_element_pair( + rom_id, + i, + std::array{ point_table[i].x.get_witness_index(), point_table[i].y.get_witness_index() }); + } + } else { + ASSERT(table_bits == 1); + } +} + +template +cycle_group cycle_group::straus_lookup_table::read(const field_t& index) +{ + if constexpr (IS_ULTRA) { + auto output_indices = _context->read_ROM_array_pair(rom_id, index.get_witness_index()); + field_t x = field_t::from_witness_index(_context, output_indices[0]); + field_t y = field_t::from_witness_index(_context, output_indices[1]); + return cycle_group(_context, x, y, false); + } + // idx * point_table[1] + (1 - idx) * point_table[0] + // idx (point_table[1] - point_table[0]) + point_table[0] + field_t x = index * (point_table[1].x - point_table[0].x) + point_table[0].x; + field_t y = index * (point_table[1].y - point_table[0].y) + point_table[0].y; + return cycle_group(_context, x, y, false); +} + +template +cycle_group cycle_group::variable_base_batch_mul(const std::vector& scalars, + const std::vector& base_points) +{ + Composer* context = nullptr; + for (auto& point : base_points) { + if (point.get_context() != nullptr) { + context = point.get_context(); + break; + } + } + // core algorithm + // define a `table_bits` size lookup table + ASSERT(scalars.size() == base_points.size()); + const size_t num_points = scalars.size(); + + auto generators = offset_generators(num_points); + std::vector scalar_slices; + std::vector point_tables; + for (size_t i = 0; i < num_points; ++i) { + scalar_slices.emplace_back(straus_scalar_slice(context, scalars[i], table_bits)); + point_tables.emplace_back( + straus_lookup_table(context, base_points[i], generators.generators[i + 1], table_bits)); + } + + element debug_acc = G1::point_at_infinity; + uint256_t debug_scalar = uint256_t(scalars[0].lo.get_value()) + + 
(uint256_t(scalars[0].hi.get_value()) * (uint256_t(1) << (cycle_scalar::LO_BITS))); + + element offset_generator_accumulator = generators.generators[0]; + cycle_group accumulator = generators.generators[0]; + for (size_t i = 0; i < num_rounds; ++i) { + if (i != 0) { + + for (size_t j = 0; j < table_bits; ++j) { + accumulator = accumulator.dbl(); + offset_generator_accumulator = offset_generator_accumulator.dbl(); + debug_acc = debug_acc.dbl(); + } + } + + for (size_t j = 0; j < num_points; ++j) { + const field_t scalar_slice = scalar_slices[j].read(num_rounds - i - 1); + const cycle_group point = point_tables[j].read(scalar_slice); + accumulator = accumulator.constrained_unconditional_add(point); + offset_generator_accumulator = offset_generator_accumulator + element(generators.generators[j + 1]); + } + } + cycle_group offset_generator_delta(affine_element(-offset_generator_accumulator)); + accumulator = accumulator.constrained_unconditional_add(offset_generator_delta); + + return accumulator; +} INSTANTIATE_STDLIB_TYPE(cycle_group); } // namespace proof_system::plonk::stdlib diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp index d5fc6518aba..3d95c946bd8 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp @@ -32,7 +32,12 @@ template class cycle_group { using element = typename G1::element; using affine_element = typename G1::affine_element; - Composer* get_context(const cycle_group& other); + static constexpr bool IS_ULTRA = Composer::CIRCUIT_TYPE == CircuitType::ULTRA; + static constexpr size_t table_bits = IS_ULTRA ? 
4 : 1; + static constexpr size_t num_bits = FF::modulus.get_msb() + 1; + static constexpr size_t num_rounds = (num_bits + table_bits - 1) / table_bits; + + Composer* get_context(const cycle_group& other) const; cycle_group(Composer* _context = nullptr) : context(_context) @@ -97,19 +102,57 @@ template class cycle_group { return result; } - cycle_group dbl(); - cycle_group unconditional_add(const cycle_group& other); - cycle_group constrained_unconditional_add(const cycle_group& other); - cycle_group operator+(const cycle_group& other); - cycle_group unconditional_subtract(const cycle_group& other); - cycle_group constrained_unconditional_subtract(const cycle_group& other); - cycle_group operator-(const cycle_group& other); + cycle_group dbl() const; + cycle_group unconditional_add(const cycle_group& other) const; + cycle_group constrained_unconditional_add(const cycle_group& other) const; + cycle_group operator+(const cycle_group& other) const; + cycle_group unconditional_subtract(const cycle_group& other) const; + cycle_group constrained_unconditional_subtract(const cycle_group& other) const; + cycle_group operator-(const cycle_group& other) const; cycle_group& operator+=(const cycle_group& other); cycle_group& operator-=(const cycle_group& other); - static cycle_group fixed_base_batch_mul(const std::vector& scalars, + class offset_generators { + public: + offset_generators(size_t num_points); + // cycle_group get_generator(size_t generator_idx); + // cycle_group get_final_generator_offset(); + std::vector generators; + }; + + struct cycle_scalar { + using ScalarField = typename G1::subgroup_field; + static constexpr size_t LO_BITS = 128; + static constexpr size_t HI_BITS = ScalarField::modulus.get_msb() + 1 - LO_BITS; + static cycle_scalar from_witness(Composer* context, const ScalarField& value); + cycle_scalar(field_t _lo, field_t _hi); + cycle_scalar(field_t _in); + field_t lo; + field_t hi; + }; + class straus_scalar_slice { + public: + 
straus_scalar_slice(Composer* context, const cycle_scalar& scalars, size_t table_bits); + field_t read(size_t index); + size_t _table_bits; + std::vector slices; + }; + class straus_lookup_table { + public: + straus_lookup_table(Composer* context, + const cycle_group& base_point, + const cycle_group& generator_point, + size_t table_bits); + cycle_group read(const field_t& index); + size_t _table_bits; + Composer* _context; + std::vector point_table; + size_t rom_id = 0; + }; + + static cycle_group fixed_base_batch_mul(const std::vector& scalars, const std::vector& generator_indices); - static cycle_group variable_base_batch_mul(const std::vector& scalars, + static cycle_group variable_base_batch_mul(const std::vector& scalars, const std::vector& base_points); Composer* context; diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp index 3d25ae420c7..1ad79e2be23 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp @@ -33,7 +33,7 @@ template class CycleGroupTest : public ::testing::Test { static void SetUpTestSuite() { for (size_t i = 0; i < num_generators; ++i) { - generators[i] = G1::one * FF::random_element(&engine); + generators[i] = G1::one * FF::random_element(); } }; }; @@ -55,7 +55,7 @@ TYPED_TEST(CycleGroupTest, TestDbl) EXPECT_EQ(result, expected); bool proof_result = composer.check_circuit(); - EXPECT_EQ(proof_result, false); + EXPECT_EQ(proof_result, true); } TYPED_TEST(CycleGroupTest, TestUnconditionalAdd) @@ -192,7 +192,7 @@ TYPED_TEST(CycleGroupTest, TestAdd) } bool proof_result = composer.check_circuit(); - EXPECT_EQ(proof_result, false); + EXPECT_EQ(proof_result, true); } TYPED_TEST(CycleGroupTest, TestUnconditionalSubtract) @@ -329,7 +329,35 @@ 
TYPED_TEST(CycleGroupTest, TestSubtract) } bool proof_result = composer.check_circuit(); - EXPECT_EQ(proof_result, false); + EXPECT_EQ(proof_result, true); } +TYPED_TEST(CycleGroupTest, TestVariableBaseBatchMul) +{ + STDLIB_TYPE_ALIASES; + auto composer = Composer(); + + const size_t num_muls = 1; + std::vector points; + std::vector scalars; + + element expected = G1::point_at_infinity; + + for (size_t i = 0; i < num_muls; ++i) { + auto element = TestFixture::generators[i]; + typename G1::subgroup_field scalar = G1::subgroup_field::random_element(); + + expected += (element * scalar); + points.emplace_back(cycle_group_ct::from_witness(&composer, element)); + scalars.emplace_back(cycle_group_ct::cycle_scalar::from_witness(&composer, scalar)); + } + + auto result = cycle_group_ct::variable_base_batch_mul(scalars, points); + + EXPECT_EQ(result.get_value(), affine_element(expected)); + + std::cout << "num gates = " << composer.get_num_gates() << std::endl; + bool proof_result = composer.check_circuit(); + EXPECT_EQ(proof_result, true); +} } // namespace stdlib_cycle_group_tests \ No newline at end of file From fbd9b40ddcb3b1bdab409c0f18cd866f20db3f0b Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Thu, 17 Aug 2023 14:49:49 +0000 Subject: [PATCH 03/50] variable-base scalar multiplication passes tests --- .../stdlib/primitives/group/cycle_group.cpp | 101 ++++++++++++++---- .../stdlib/primitives/group/cycle_group.hpp | 8 +- .../primitives/group/cycle_group.test.cpp | 65 +++++++++-- 3 files changed, 140 insertions(+), 34 deletions(-) diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp index fd65d472854..6f5273617bd 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp @@ -43,7 +43,7 @@ template 
cycle_group cycle_group::dbl() auto context = get_context(); cycle_group result = cycle_group::from_witness(context, p3); - result.is_infinity = is_infinity; + result.is_infinity = is_point_at_infinity(); proof_system::ecc_dbl_gate_ dbl_gate{ .x1 = x.get_witness_index(), .y1 = y.get_witness_index(), @@ -227,8 +227,8 @@ template cycle_group cycle_group::operat result.x = field_t::conditional_assign(double_predicate, dbl_result.x, add_result.x); result.y = field_t::conditional_assign(double_predicate, dbl_result.y, add_result.y); - const bool_t lhs_infinity = is_infinity; - const bool_t rhs_infinity = other.is_infinity; + const bool_t lhs_infinity = is_point_at_infinity(); + const bool_t rhs_infinity = other.is_point_at_infinity(); // if lhs infinity, return rhs result.x = field_t::conditional_assign(lhs_infinity, other.x, result.x); result.y = field_t::conditional_assign(lhs_infinity, other.y, result.y); @@ -284,8 +284,8 @@ template cycle_group cycle_group::operat result.x = field_t::conditional_assign(double_predicate, dbl_result.x, add_result.x); result.y = field_t::conditional_assign(double_predicate, dbl_result.y, add_result.y); - const bool_t lhs_infinity = is_infinity; - const bool_t rhs_infinity = other.is_infinity; + const bool_t lhs_infinity = is_point_at_infinity(); + const bool_t rhs_infinity = other.is_point_at_infinity(); // if lhs infinity, return -rhs result.x = field_t::conditional_assign(lhs_infinity, other.x, result.x); result.y = field_t::conditional_assign(lhs_infinity, (-other.y).normalize(), result.y); @@ -328,12 +328,12 @@ template cycle_group::offset_generators::offset_ge auto init_generator = generators[0]; } template -cycle_group::cycle_scalar::cycle_scalar(field_t _lo, field_t _hi) +cycle_group::cycle_scalar::cycle_scalar(const field_t& _lo, const field_t& _hi) : lo(_lo) , hi(_hi) {} -template cycle_group::cycle_scalar::cycle_scalar(field_t _in) +template cycle_group::cycle_scalar::cycle_scalar(const field_t& _in) { const uint256_t 
value(_in.get_value()); const uint256_t lo_v = value.slice(0, LO_BITS); @@ -349,6 +349,15 @@ template cycle_group::cycle_scalar::cycle_scalar(f } } +template cycle_group::cycle_scalar::cycle_scalar(const ScalarField& _in) +{ + const uint256_t value(_in); + const uint256_t lo_v = value.slice(0, LO_BITS); + const uint256_t hi_v = value.slice(LO_BITS, HI_BITS); + lo = lo_v; + hi = hi_v; +} + template typename cycle_group::cycle_scalar cycle_group::cycle_scalar::from_witness(Composer* context, const ScalarField& value) @@ -361,6 +370,19 @@ typename cycle_group::cycle_scalar cycle_group::cycle_scalar return cycle_scalar(lo, hi); } +template bool cycle_group::cycle_scalar::is_constant() const +{ + return (lo.is_constant() && hi.is_constant()); +} + +template +typename cycle_group::cycle_scalar::ScalarField cycle_group::cycle_scalar::get_value() const +{ + uint256_t lo_v(lo.get_value()); + uint256_t hi_v(hi.get_value()); + return ScalarField(lo_v + (hi_v << LO_BITS)); +} + template cycle_group::straus_scalar_slice::straus_scalar_slice(Composer* context, const cycle_scalar& scalar, @@ -372,13 +394,13 @@ cycle_group::straus_scalar_slice::straus_scalar_slice(Composer* contex const auto slice_scalar = [&](const field_t& scalar, const size_t num_bits) { std::vector result; if (scalar.is_constant()) { - const size_t num_slices = num_bits / table_bits; + const size_t num_slices = (num_bits + table_bits - 1) / table_bits; const uint64_t table_mask = (1ULL << table_bits) - 1ULL; uint256_t raw_value = scalar.get_value(); for (size_t i = 0; i < num_slices; ++i) { uint64_t slice_v = static_cast(raw_value.data[0]) & table_mask; - field_t slice(context, slice_v); - result.push_back(slice); + result.push_back(field_t(slice_v)); + raw_value = raw_value >> table_bits; } return result; } @@ -394,8 +416,7 @@ cycle_group::straus_scalar_slice::straus_scalar_slice(Composer* contex } else { uint256_t raw_value = scalar.get_value(); const uint64_t table_mask = (1ULL << table_bits) - 1ULL; - 
const size_t num_slices = num_bits / table_bits; - + const size_t num_slices = (num_bits + table_bits - 1) / table_bits; for (size_t i = 0; i < num_slices; ++i) { uint64_t slice_v = static_cast(raw_value.data[0]) & table_mask; field_t slice(witness_t(context, slice_v)); @@ -420,8 +441,8 @@ cycle_group::straus_scalar_slice::straus_scalar_slice(Composer* contex auto hi_slices = slice_scalar(scalar.hi, cycle_scalar::HI_BITS); auto lo_slices = slice_scalar(scalar.lo, cycle_scalar::LO_BITS); - // Check that scalar.hi * 2^LO_BITS + scalar.lo < cycle_group_modulus when evaluated over the integers - { + if (!scalar.is_constant()) { + // Check that scalar.hi * 2^LO_BITS + scalar.lo < cycle_group_modulus when evaluated over the integers constexpr uint256_t cycle_group_modulus = cycle_scalar::ScalarField::modulus; constexpr uint256_t r_lo = cycle_group_modulus.slice(0, cycle_scalar::LO_BITS); constexpr uint256_t r_hi = cycle_group_modulus.slice(cycle_scalar::LO_BITS, cycle_scalar::HI_BITS); @@ -440,7 +461,7 @@ cycle_group::straus_scalar_slice::straus_scalar_slice(Composer* contex // Hi range check = r_hi - y_hi - borrow // Lo range check = r_lo - y_lo + borrow * 2^{126} field_t hi = (-scalar.hi + r_hi) - borrow; - field_t lo = (-scalar.lo + r_lo) + (borrow * (uint256_t(1) << 126)); + field_t lo = (-scalar.lo + r_lo) + (borrow * (uint256_t(1) << cycle_scalar::LO_BITS)); hi.create_range_constraint(cycle_scalar::HI_BITS); lo.create_range_constraint(cycle_scalar::LO_BITS); @@ -492,9 +513,14 @@ cycle_group::straus_lookup_table::straus_lookup_table(Composer* contex } template -cycle_group cycle_group::straus_lookup_table::read(const field_t& index) +cycle_group cycle_group::straus_lookup_table::read(const field_t& _index) { if constexpr (IS_ULTRA) { + field_t index(_index); + if (index.is_constant()) { + index = witness_t(_context, _index.get_value()); + index.assert_equal(_index.get_value()); + } auto output_indices = _context->read_ROM_array_pair(rom_id, 
index.get_witness_index()); field_t x = field_t::from_witness_index(_context, output_indices[0]); field_t y = field_t::from_witness_index(_context, output_indices[1]); @@ -502,25 +528,45 @@ cycle_group cycle_group::straus_lookup_table::read(const fie } // idx * point_table[1] + (1 - idx) * point_table[0] // idx (point_table[1] - point_table[0]) + point_table[0] - field_t x = index * (point_table[1].x - point_table[0].x) + point_table[0].x; - field_t y = index * (point_table[1].y - point_table[0].y) + point_table[0].y; + field_t x = _index * (point_table[1].x - point_table[0].x) + point_table[0].x; + field_t y = _index * (point_table[1].y - point_table[0].y) + point_table[0].y; return cycle_group(_context, x, y, false); } template -cycle_group cycle_group::variable_base_batch_mul(const std::vector& scalars, - const std::vector& base_points) +cycle_group cycle_group::variable_base_batch_mul(const std::vector& _scalars, + const std::vector& _base_points) { + ASSERT(_scalars.size() == _base_points.size()); + Composer* context = nullptr; - for (auto& point : base_points) { + for (auto& point : _base_points) { if (point.get_context() != nullptr) { context = point.get_context(); break; } } + + std::vector scalars; + std::vector base_points; + bool has_constant_component = false; + bool has_non_constant_component = false; + element constant_component = G1::point_at_infinity; + for (size_t i = 0; i < _scalars.size(); ++i) { + if (_scalars[i].is_constant() && _base_points[i].is_constant()) { + has_constant_component = true; + constant_component += _base_points[i].get_value() * _scalars[i].get_value(); + } else { + has_non_constant_component = true; + scalars.emplace_back(_scalars[i]); + base_points.emplace_back(_base_points[i]); + } + } + if (!has_non_constant_component) { + return cycle_group(constant_component); + } // core algorithm // define a `table_bits` size lookup table - ASSERT(scalars.size() == base_points.size()); const size_t num_points = scalars.size(); auto 
generators = offset_generators(num_points); @@ -555,8 +601,17 @@ cycle_group cycle_group::variable_base_batch_mul(const std:: offset_generator_accumulator = offset_generator_accumulator + element(generators.generators[j + 1]); } } + + // NOTE: should this be a general addition? + // e.g. x.[P] + -x.[P] . We want to be able to support this :/ + if (has_constant_component) { + // we subtract off the offset_generator_accumulator, so subtract constant component from the accumulator! + offset_generator_accumulator -= constant_component; + } cycle_group offset_generator_delta(affine_element(-offset_generator_accumulator)); - accumulator = accumulator.constrained_unconditional_add(offset_generator_delta); + // use a full conditional add here in case we end with a point at infinity or a point doubling. + // e.g. x[P] + x[P], or x[P] + -x[P] + accumulator = accumulator + offset_generator_delta; return accumulator; } diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp index 3d95c946bd8..ef64f770e84 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp @@ -102,6 +102,7 @@ template class cycle_group { return result; } + bool_t is_point_at_infinity() const { return is_infinity; } cycle_group dbl() const; cycle_group unconditional_add(const cycle_group& other) const; cycle_group constrained_unconditional_add(const cycle_group& other) const; @@ -125,8 +126,11 @@ template class cycle_group { static constexpr size_t LO_BITS = 128; static constexpr size_t HI_BITS = ScalarField::modulus.get_msb() + 1 - LO_BITS; static cycle_scalar from_witness(Composer* context, const ScalarField& value); - cycle_scalar(field_t _lo, field_t _hi); - cycle_scalar(field_t _in); + cycle_scalar(const ScalarField& _in); + 
cycle_scalar(const field_t& _lo, const field_t& _hi); + cycle_scalar(const field_t& _in); + [[nodiscard]] bool is_constant() const; + ScalarField get_value() const; field_t lo; field_t hi; }; diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp index 1ad79e2be23..6e74efc7ddf 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp @@ -338,25 +338,72 @@ TYPED_TEST(CycleGroupTest, TestVariableBaseBatchMul) auto composer = Composer(); const size_t num_muls = 1; - std::vector points; - std::vector scalars; element expected = G1::point_at_infinity; - for (size_t i = 0; i < num_muls; ++i) { - auto element = TestFixture::generators[i]; - typename G1::subgroup_field scalar = G1::subgroup_field::random_element(); + // case 1, general MSM with inputs that are combinations of constant and witnesses + { + std::vector points; + std::vector scalars; + + for (size_t i = 0; i < num_muls; ++i) { + auto element = TestFixture::generators[i]; + typename G1::subgroup_field scalar = G1::subgroup_field::random_element(); + + // 1: add entry where point, scalar are witnesses + expected += (element * scalar); + points.emplace_back(cycle_group_ct::from_witness(&composer, element)); + scalars.emplace_back(cycle_group_ct::cycle_scalar::from_witness(&composer, scalar)); + + // 2: add entry where point is constant, scalar is witness + expected += (element * scalar); + points.emplace_back(cycle_group_ct(element)); + scalars.emplace_back(cycle_group_ct::cycle_scalar::from_witness(&composer, scalar)); + + // 3: add entry where point is witness, scalar is constant + expected += (element * scalar); + points.emplace_back(cycle_group_ct::from_witness(&composer, element)); + scalars.emplace_back(typename 
cycle_group_ct::cycle_scalar(scalar)); + + // 4: add entry where point is constant, scalar is constant + expected += (element * scalar); + points.emplace_back(cycle_group_ct(element)); + scalars.emplace_back(typename cycle_group_ct::cycle_scalar(scalar)); + } + auto result = cycle_group_ct::variable_base_batch_mul(scalars, points); + EXPECT_EQ(result.get_value(), affine_element(expected)); + } - expected += (element * scalar); + // case 2, MSM that produces point at infinity + { + std::vector points; + std::vector scalars; + + auto element = TestFixture::generators[0]; + typename G1::subgroup_field scalar = G1::subgroup_field::random_element(); points.emplace_back(cycle_group_ct::from_witness(&composer, element)); scalars.emplace_back(cycle_group_ct::cycle_scalar::from_witness(&composer, scalar)); + + points.emplace_back(cycle_group_ct::from_witness(&composer, element)); + scalars.emplace_back(cycle_group_ct::cycle_scalar::from_witness(&composer, -scalar)); + + auto result = cycle_group_ct::variable_base_batch_mul(scalars, points); + EXPECT_TRUE(result.is_point_at_infinity().get_value()); } - auto result = cycle_group_ct::variable_base_batch_mul(scalars, points); + // case 3. 
Multiply by zero + { + std::vector points; + std::vector scalars; - EXPECT_EQ(result.get_value(), affine_element(expected)); + auto element = TestFixture::generators[0]; + typename G1::subgroup_field scalar = 0; + points.emplace_back(cycle_group_ct::from_witness(&composer, element)); + scalars.emplace_back(cycle_group_ct::cycle_scalar::from_witness(&composer, scalar)); + auto result = cycle_group_ct::variable_base_batch_mul(scalars, points); + EXPECT_TRUE(result.is_point_at_infinity().get_value()); + } - std::cout << "num gates = " << composer.get_num_gates() << std::endl; bool proof_result = composer.check_circuit(); EXPECT_EQ(proof_result, true); } From 55f70196886ec106f644eac1c4812ad087b75e10 Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Thu, 17 Aug 2023 15:40:14 +0000 Subject: [PATCH 04/50] cycle_group::variable_batch_mul now supports input points that are at infinity! mul function should now be "computationally" complete in the case of an honest Prover (prob. of triggering unsatisfiable constraints from incomplete addition formulae is equiv to solving the dlog problem, for inputs that include points at infinity, duplicates and scalar multipliers that are zero) --- .../stdlib/primitives/field/field.cpp | 10 +++++++ .../stdlib/primitives/group/cycle_group.cpp | 19 ++++++++++++- .../stdlib/primitives/group/cycle_group.hpp | 1 + .../primitives/group/cycle_group.test.cpp | 27 +++++++++++++++++++ 4 files changed, 56 insertions(+), 1 deletion(-) diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.cpp index a9b756f6180..733b8dd2534 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.cpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.cpp @@ -706,6 +706,9 @@ bool_t field_t::operator!=(const field_t& othe template field_t field_t::conditional_negate(const bool_t& predicate) 
const { + if (predicate.is_constant()) { + return predicate.get_value() ? -(*this) : *this; + } field_t predicate_field(predicate); field_t multiplicand = -(predicate_field + predicate_field); return multiplicand.madd(*this, *this); @@ -717,6 +720,13 @@ field_t field_t::conditional_assign(const bool const field_t& lhs, const field_t& rhs) { + if (predicate.is_constant()) { + return predicate.get_value() ? lhs : rhs; + } + // if lhs and rhs are the same witness, just return it! + if (lhs.get_witness_index() == rhs.get_witness_index()) { + return lhs; + } return (lhs - rhs).madd(predicate, rhs); } diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp index 6f5273617bd..15decaeea28 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp @@ -489,8 +489,25 @@ cycle_group::straus_lookup_table::straus_lookup_table(Composer* contex point_table.resize(table_size); point_table[0] = generator_point; + + // We want to support the case where input points are points at infinity. + // If base point is at infinity, we want every point in the table to just be `generator_point`. 
+ // We achieve this via the following: + // 1: We create a "work_point" that is base_point if not at infinity, otherwise is just 1 + // 2: When computing the point table, we use "work_point" in additions instead of the "base_point" (to prevent + // x-coordinate collisions in honest case) 3: When assigning to the point table, we conditionally assign either + // the output of the point addition (if not at infinity) or the generator point (if at infinity) + // Note: if `base_point.is_point_at_infinity()` is constant, these conditional assigns produce zero gate overhead + cycle_group fallback_point(G1::affine_one); + field_t modded_x = field_t::conditional_assign(base_point.is_point_at_infinity(), fallback_point.x, base_point.x); + field_t modded_y = field_t::conditional_assign(base_point.is_point_at_infinity(), fallback_point.y, base_point.y); + cycle_group modded_base_point(context, modded_x, modded_y, false); for (size_t i = 1; i < table_size; ++i) { - point_table[i] = point_table[i - 1].constrained_unconditional_add(base_point); + + auto add_output = point_table[i - 1].constrained_unconditional_add(modded_base_point); + field_t x = field_t::conditional_assign(base_point.is_point_at_infinity(), generator_point.x, add_output.x); + field_t y = field_t::conditional_assign(base_point.is_point_at_infinity(), generator_point.y, add_output.y); + point_table[i] = cycle_group(context, x, y, false); } if constexpr (IS_ULTRA) { diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp index ef64f770e84..bb5f872b776 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp @@ -106,6 +106,7 @@ template class cycle_group { cycle_group dbl() const; cycle_group unconditional_add(const cycle_group& other) const; 
cycle_group constrained_unconditional_add(const cycle_group& other) const; + cycle_group conditional_add(const cycle_group& other) const; cycle_group operator+(const cycle_group& other) const; cycle_group unconditional_subtract(const cycle_group& other) const; cycle_group constrained_unconditional_subtract(const cycle_group& other) const; diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp index 6e74efc7ddf..269daec8195 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp @@ -335,6 +335,7 @@ TYPED_TEST(CycleGroupTest, TestSubtract) TYPED_TEST(CycleGroupTest, TestVariableBaseBatchMul) { STDLIB_TYPE_ALIASES; + using witness_ct = stdlib::witness_t; auto composer = Composer(); const size_t num_muls = 1; @@ -404,6 +405,32 @@ TYPED_TEST(CycleGroupTest, TestVariableBaseBatchMul) EXPECT_TRUE(result.is_point_at_infinity().get_value()); } + // case 4. 
Inputs are points at infinity + { + std::vector points; + std::vector scalars; + + auto element = TestFixture::generators[0]; + typename G1::subgroup_field scalar = G1::subgroup_field::random_element(); + + // is_infinity = witness + { + cycle_group_ct point = cycle_group_ct::from_witness(&composer, element); + point.is_infinity = witness_ct(&composer, true); + points.emplace_back(point); + scalars.emplace_back(cycle_group_ct::cycle_scalar::from_witness(&composer, scalar)); + } + // is_infinity = constant + { + cycle_group_ct point = cycle_group_ct::from_witness(&composer, element); + point.is_infinity = true; + points.emplace_back(point); + scalars.emplace_back(cycle_group_ct::cycle_scalar::from_witness(&composer, scalar)); + } + auto result = cycle_group_ct::variable_base_batch_mul(scalars, points); + EXPECT_TRUE(result.is_point_at_infinity().get_value()); + } + bool proof_result = composer.check_circuit(); EXPECT_EQ(proof_result, true); } From e339ba026e4d6aefe4eeca28825220b15462e49e Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Thu, 17 Aug 2023 18:18:29 +0000 Subject: [PATCH 05/50] added an elliptic curve point doubling gate to the UltraPlonk arithmetisation+circuit builder --- .../arithmetization/arithmetization.hpp | 4 +- .../circuit_builder/ultra_circuit_builder.cpp | 298 ++++++++++++------ .../circuit_builder/ultra_circuit_builder.hpp | 20 +- .../ultra_circuit_builder.test.cpp | 27 ++ 4 files changed, 249 insertions(+), 100 deletions(-) diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/arithmetization/arithmetization.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/arithmetization/arithmetization.hpp index 671ab2e4304..969aac893fc 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/arithmetization/arithmetization.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/arithmetization/arithmetization.hpp @@ -125,7 +125,7 @@ template class Turbo : public Arithmetization class 
Ultra : public Arithmetization { +template class Ultra : public Arithmetization { public: using FF = _FF; struct Selectors : SelectorsBase { @@ -140,6 +140,7 @@ template class Ultra : public Arithmetization>& q_elliptic = std::get<8>(this->_data); std::vector>& q_aux = std::get<9>(this->_data); std::vector>& q_lookup_type = std::get<10>(this->_data); + std::vector>& q_elliptic_double = std::get<11>(this->_data); Selectors() : SelectorsBase(){}; Selectors(const Selectors& other) @@ -159,6 +160,7 @@ template class Ultra : public Arithmetizationq_elliptic = std::get<8>(this->_data); this->q_aux = std::get<9>(this->_data); this->q_lookup_type = std::get<10>(this->_data); + this->q_elliptic_double = std::get<11>(this->_data); }; Selectors& operator=(Selectors&& other) { diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.cpp index f24c8e836c5..f0acddfd040 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.cpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.cpp @@ -79,6 +79,7 @@ template void UltraCircuitBuilder_::add_gates_to_ensure_all_po q_lookup_type.emplace_back(0); q_elliptic.emplace_back(1); q_aux.emplace_back(1); + q_elliptic_double.emplace_back(1); ++this->num_gates; // Some relations depend on wire shifts so we add another gate with @@ -136,6 +137,7 @@ template void UltraCircuitBuilder_::create_add_gate(const add_ q_sort.emplace_back(0); q_lookup_type.emplace_back(0); q_elliptic.emplace_back(0); + q_elliptic_double.emplace_back(0); q_aux.emplace_back(0); ++this->num_gates; } @@ -166,6 +168,7 @@ void UltraCircuitBuilder_::create_big_add_gate(const add_quad_& in, cons q_sort.emplace_back(0); q_lookup_type.emplace_back(0); q_elliptic.emplace_back(0); + q_elliptic_double.emplace_back(0); 
q_aux.emplace_back(0); ++this->num_gates; } @@ -257,6 +260,7 @@ template void UltraCircuitBuilder_::create_big_mul_gate(const q_sort.emplace_back(0); q_lookup_type.emplace_back(0); q_elliptic.emplace_back(0); + q_elliptic_double.emplace_back(0); q_aux.emplace_back(0); ++this->num_gates; } @@ -281,6 +285,7 @@ template void UltraCircuitBuilder_::create_balanced_add_gate(c q_sort.emplace_back(0); q_lookup_type.emplace_back(0); q_elliptic.emplace_back(0); + q_elliptic_double.emplace_back(0); q_aux.emplace_back(0); ++this->num_gates; // Why 3? TODO: return to this @@ -321,6 +326,7 @@ template void UltraCircuitBuilder_::create_mul_gate(const mul_ q_sort.emplace_back(0); q_lookup_type.emplace_back(0); q_elliptic.emplace_back(0); + q_elliptic_double.emplace_back(0); q_aux.emplace_back(0); ++this->num_gates; } @@ -348,6 +354,7 @@ template void UltraCircuitBuilder_::create_bool_gate(const uin q_4.emplace_back(0); q_lookup_type.emplace_back(0); q_elliptic.emplace_back(0); + q_elliptic_double.emplace_back(0); q_aux.emplace_back(0); ++this->num_gates; } @@ -377,6 +384,7 @@ template void UltraCircuitBuilder_::create_poly_gate(const pol q_4.emplace_back(0); q_lookup_type.emplace_back(0); q_elliptic.emplace_back(0); + q_elliptic_double.emplace_back(0); q_aux.emplace_back(0); ++this->num_gates; } @@ -394,11 +402,11 @@ template void UltraCircuitBuilder_::create_poly_gate(const pol template void UltraCircuitBuilder_::create_ecc_add_gate(const ecc_add_gate_& in) { /** + * gate structure: * | 1 | 2 | 3 | 4 | - * | a1 | a2 | x1 | y1 | - * | x2 | y2 | x3 | y3 | - * | -- | -- | x4 | y4 | - * + * | -- | x1 | y1 | -- | + * | x2 | x3 | y3 | y2 | + * we can chain successive ecc_add_gates if x3 y3 of previous gate equals x1 y1 of current gate **/ this->assert_valid_variables({ in.x1, in.x2, in.x3, in.y1, in.y2, in.y3 }); @@ -433,10 +441,10 @@ template void UltraCircuitBuilder_::create_ecc_add_gate(const q_sort.emplace_back(0); q_lookup_type.emplace_back(0); q_elliptic.emplace_back(1); + 
q_elliptic_double.emplace_back(0); q_aux.emplace_back(0); ++this->num_gates; } - w_l.emplace_back(in.x2); w_4.emplace_back(in.y2); w_r.emplace_back(in.x3); @@ -451,108 +459,141 @@ template void UltraCircuitBuilder_::create_ecc_add_gate(const q_sort.emplace_back(0); q_lookup_type.emplace_back(0); q_elliptic.emplace_back(0); + q_elliptic_double.emplace_back(0); q_aux.emplace_back(0); ++this->num_gates; } /** - * @brief Create a gate where we validate an elliptic curve point doubling - * (x1, y1) * 2 = (x3, y3) - * @tparam FF - * @param in + * @brief Create an elliptic curve addition gate + * + * @details x and y are defined over scalar field. Addition can handle applying the curve endomorphism to one of the + * points being summed at the time of addition. + * + * @param in Elliptic curve point addition gate parameters, including the the affine coordinates of the two points being + * added, the resulting point coordinates and the selector values that describe whether the endomorphism is used on the + * second point and whether it is negated. 
*/ template void UltraCircuitBuilder_::create_ecc_dbl_gate(const ecc_dbl_gate_& in) { - const auto x1 = this->get_variable(in.x1); - const auto x3 = this->get_variable(in.x3); - const auto y1 = this->get_variable(in.y1); - - // lambda = 3x^2 / 2y - const auto three_x1_sqr_v = x1 * x1 * 3; - const auto three_x1_sqr = this->add_variable(three_x1_sqr_v); - create_poly_gate({ - .a = in.x1, - .b = in.x1, - .c = three_x1_sqr, - .q_m = 3, - .q_l = 0, - .q_r = 0, - .q_o = -1, - .q_c = 0, - }); - const auto lambda_v = three_x1_sqr_v / (y1 + y1); - const auto lambda = this->add_variable(lambda_v); - create_poly_gate({ - .a = lambda, - .b = in.y1, - .c = three_x1_sqr, - .q_m = 2, - .q_l = 0, - .q_r = 0, - .q_o = -1, - .q_c = 0, - }); - - // lambda * lambda - 2x1 - x3 = 0 - const auto lambda_sqr_v = lambda_v * lambda_v; - const auto lambda_sqr = this->add_variable(lambda_sqr_v); - create_poly_gate({ - .a = lambda, - .b = lambda, - .c = lambda_sqr, - .q_m = 1, - .q_l = 0, - .q_r = 0, - .q_o = -1, - .q_c = 0, - }); - create_poly_gate({ - .a = lambda_sqr, - .b = in.x1, - .c = in.x3, - .q_m = 0, - .q_l = 1, - .q_r = -2, - .q_o = -1, - .q_c = 0, - }); + // q_elliptic_double.emplace_back(1); + w_l.emplace_back(in.x1); + w_4.emplace_back(in.y1); + w_r.emplace_back(in.x3); + w_o.emplace_back(in.y3); - // lambda * (x1 - x3) - y1 = 0 - const auto x1_sub_x3_v = x1 - x3; - const auto x1_sub_x3 = this->add_variable(x1_sub_x3_v); - create_poly_gate({ - .a = in.x1, - .b = in.x3, - .c = x1_sub_x3, - .q_m = 0, - .q_l = 1, - .q_r = -1, - .q_o = -1, - .q_c = 0, - }); - const auto lambda_mul_x1_sub_x3_v = lambda_v * x1_sub_x3_v; - const auto lambda_mul_x1_sub_x3 = this->add_variable(lambda_mul_x1_sub_x3_v); - create_poly_gate({ - .a = lambda, - .b = x1_sub_x3, - .c = lambda_mul_x1_sub_x3, - .q_m = 1, - .q_l = 0, - .q_r = 0, - .q_o = -1, - .q_c = 0, - }); - create_poly_gate({ - .a = lambda_mul_x1_sub_x3, - .b = in.y1, - .c = in.y3, - .q_m = 0, - .q_l = 1, - .q_r = -1, - .q_o = -1, - .q_c = 0, - 
}); + q_elliptic_double.emplace_back(1); + q_m.emplace_back(0); + q_1.emplace_back(0); + q_2.emplace_back(0); + q_3.emplace_back(0); + q_c.emplace_back(0); + q_arith.emplace_back(0); + q_4.emplace_back(0); + q_sort.emplace_back(0); + q_lookup_type.emplace_back(0); + q_elliptic.emplace_back(0); + q_aux.emplace_back(0); + ++this->num_gates; } +// /** +// * @brief Create a gate where we validate an elliptic curve point doubling +// * (x1, y1) * 2 = (x3, y3) +// * @tparam FF +// * @param in +// */ +// template void UltraCircuitBuilder_::create_ecc_dbl_gate(const ecc_dbl_gate_& in) +// { +// const auto x1 = this->get_variable(in.x1); +// const auto x3 = this->get_variable(in.x3); +// const auto y1 = this->get_variable(in.y1); + +// // lambda = 3x^2 / 2y +// const auto three_x1_sqr_v = x1 * x1 * 3; +// const auto three_x1_sqr = this->add_variable(three_x1_sqr_v); +// create_poly_gate({ +// .a = in.x1, +// .b = in.x1, +// .c = three_x1_sqr, +// .q_m = 3, +// .q_l = 0, +// .q_r = 0, +// .q_o = -1, +// .q_c = 0, +// }); +// const auto lambda_v = three_x1_sqr_v / (y1 + y1); +// const auto lambda = this->add_variable(lambda_v); +// create_poly_gate({ +// .a = lambda, +// .b = in.y1, +// .c = three_x1_sqr, +// .q_m = 2, +// .q_l = 0, +// .q_r = 0, +// .q_o = -1, +// .q_c = 0, +// }); + +// // lambda * lambda - 2x1 - x3 = 0 +// const auto lambda_sqr_v = lambda_v * lambda_v; +// const auto lambda_sqr = this->add_variable(lambda_sqr_v); +// create_poly_gate({ +// .a = lambda, +// .b = lambda, +// .c = lambda_sqr, +// .q_m = 1, +// .q_l = 0, +// .q_r = 0, +// .q_o = -1, +// .q_c = 0, +// }); +// create_poly_gate({ +// .a = lambda_sqr, +// .b = in.x1, +// .c = in.x3, +// .q_m = 0, +// .q_l = 1, +// .q_r = -2, +// .q_o = -1, +// .q_c = 0, +// }); + +// // lambda * (x1 - x3) - y1 = 0 +// const auto x1_sub_x3_v = x1 - x3; +// const auto x1_sub_x3 = this->add_variable(x1_sub_x3_v); +// create_poly_gate({ +// .a = in.x1, +// .b = in.x3, +// .c = x1_sub_x3, +// .q_m = 0, +// .q_l = 1, 
+// .q_r = -1, +// .q_o = -1, +// .q_c = 0, +// }); +// const auto lambda_mul_x1_sub_x3_v = lambda_v * x1_sub_x3_v; +// const auto lambda_mul_x1_sub_x3 = this->add_variable(lambda_mul_x1_sub_x3_v); +// create_poly_gate({ +// .a = lambda, +// .b = x1_sub_x3, +// .c = lambda_mul_x1_sub_x3, +// .q_m = 1, +// .q_l = 0, +// .q_r = 0, +// .q_o = -1, +// .q_c = 0, +// }); +// create_poly_gate({ +// .a = lambda_mul_x1_sub_x3, +// .b = in.y1, +// .c = in.y3, +// .q_m = 0, +// .q_l = 1, +// .q_r = -1, +// .q_o = -1, +// .q_c = 0, +// }); +// } /** * @brief Add a gate equating a particular witness to a constant, fixing it the value @@ -578,6 +619,7 @@ template void UltraCircuitBuilder_::fix_witness(const uint32_t q_sort.emplace_back(0); q_lookup_type.emplace_back(0); q_elliptic.emplace_back(0); + q_elliptic_double.emplace_back(0); q_aux.emplace_back(0); ++this->num_gates; } @@ -798,6 +840,7 @@ plookup::ReadData UltraCircuitBuilder_::create_gates_from_plookup_ q_4.emplace_back(0); q_sort.emplace_back(0); q_elliptic.emplace_back(0); + q_elliptic_double.emplace_back(0); q_aux.emplace_back(0); ++this->num_gates; } @@ -1107,6 +1150,7 @@ void UltraCircuitBuilder_::create_sort_constraint(const std::vector::create_sort_constraint(const std::vector::create_dummy_constraints(const std::vector::create_sort_constraint_with_edges(const std::vect q_4.emplace_back(0); q_sort.emplace_back(1); q_elliptic.emplace_back(0); + q_elliptic_double.emplace_back(0); q_lookup_type.emplace_back(0); q_aux.emplace_back(0); // enforce range check for middle rows @@ -1208,6 +1255,7 @@ void UltraCircuitBuilder_::create_sort_constraint_with_edges(const std::vect q_4.emplace_back(0); q_sort.emplace_back(1); q_elliptic.emplace_back(0); + q_elliptic_double.emplace_back(0); q_lookup_type.emplace_back(0); q_aux.emplace_back(0); } @@ -1227,6 +1275,7 @@ void UltraCircuitBuilder_::create_sort_constraint_with_edges(const std::vect q_4.emplace_back(0); q_sort.emplace_back(1); q_elliptic.emplace_back(0); + 
q_elliptic_double.emplace_back(0); q_lookup_type.emplace_back(0); q_aux.emplace_back(0); } @@ -1247,6 +1296,7 @@ void UltraCircuitBuilder_::create_sort_constraint_with_edges(const std::vect q_4.emplace_back(0); q_sort.emplace_back(0); q_elliptic.emplace_back(0); + q_elliptic_double.emplace_back(0); q_lookup_type.emplace_back(0); q_aux.emplace_back(0); } @@ -1354,6 +1404,7 @@ template void UltraCircuitBuilder_::apply_aux_selectors(const q_sort.emplace_back(0); q_lookup_type.emplace_back(0); q_elliptic.emplace_back(0); + q_elliptic_double.emplace_back(0); switch (type) { case AUX_SELECTORS::LIMB_ACCUMULATE_1: { q_1.emplace_back(0); @@ -2019,6 +2070,7 @@ std::array UltraCircuitBuilder_::evaluate_non_native_field_addi q_sort.emplace_back(0); q_lookup_type.emplace_back(0); q_elliptic.emplace_back(0); + q_elliptic_double.emplace_back(0); q_aux.emplace_back(0); } @@ -2140,6 +2192,7 @@ std::array UltraCircuitBuilder_::evaluate_non_native_field_subt q_sort.emplace_back(0); q_lookup_type.emplace_back(0); q_elliptic.emplace_back(0); + q_elliptic_double.emplace_back(0); q_aux.emplace_back(0); } @@ -3362,6 +3415,45 @@ inline FF UltraCircuitBuilder_::compute_auxilary_identity(FF q_aux_value, return auxiliary_identity; } +/** + * @brief Compute a single general permutation sorting identity + * + * @param w_1_value + * @param w_2_value + * @param w_3_value + * @param w_4_value + * @param w_1_shifted_value + * @param alpha_base + * @param alpha + * @return fr + */ +template +inline FF UltraCircuitBuilder_::compute_elliptic_double_identity( + FF q_elliptic_double_value, FF w_1_value, FF w_2_value, FF w_3_value, FF w_4_value, FF alpha_base, FF alpha) const +{ + constexpr FF curve_b = CircuitBuilderBase>::EmbeddedCurve::curve_b; + static_assert(CircuitBuilderBase>::EmbeddedCurve::curve_a == 0); + const auto x1 = w_1_value; + const auto y1 = w_4_value; + const auto x3 = w_2_value; + const auto y3 = w_3_value; + + // x-coordinate relation + // (x3 + 2x1)(4y^2) - (9x^4) = 0 + // This is 
degree 4...but + // we can use x^3 = y^2 - b + // hon hon hon + // (x3 + 2x1)(4y^2) - (9x(y^2 - b)) is degree 3 + const FF x_pow_4 = (y1 * y1 - curve_b) * x1; + const FF x_relation = (x3 + x1 + x1) * (y1 + y1) * (y1 + y1) - x_pow_4 * FF(9); + + // Y relation: (x1 - x3)(3x^2) - (2y1)(y1 + y3) = 0 + const FF x_pow_2 = (x1 * x1); + const FF y_relation = x_pow_2 * (x1 - x3) * 3 - (y1 + y1) * (y1 + y3); + + return q_elliptic_double_value * alpha_base * (x_relation + y_relation * alpha); +} + /** * @brief Check that the circuit is correct in its current state * @@ -3384,6 +3476,7 @@ template bool UltraCircuitBuilder_::check_circuit() const FF elliptic_base = FF::random_element(); const FF genperm_sort_base = FF::random_element(); const FF auxillary_base = FF::random_element(); + const FF elliptic_double_base = FF::random_element(); const FF alpha = FF::random_element(); const FF eta = FF::random_element(); @@ -3464,6 +3557,7 @@ template bool UltraCircuitBuilder_::check_circuit() FF q_elliptic_value; FF q_sort_value; FF q_lookup_type_value; + FF q_elliptic_double_value; FF q_1_value; FF q_2_value; FF q_3_value; @@ -3481,6 +3575,7 @@ template bool UltraCircuitBuilder_::check_circuit() q_elliptic_value = q_elliptic[i]; q_sort_value = q_sort[i]; q_lookup_type_value = q_lookup_type[i]; + q_elliptic_double_value = q_elliptic_double[i]; q_1_value = q_1[i]; q_2_value = q_2[i]; q_3_value = q_3[i]; @@ -3620,6 +3715,15 @@ template bool UltraCircuitBuilder_::check_circuit() break; } } + if (!compute_elliptic_double_identity( + q_elliptic_double_value, w_1_value, w_2_value, w_3_value, w_4_value, elliptic_double_base, alpha) + .is_zero()) { +#ifndef FUZZING + info("Elliptic doubling identity fails at gate ", i); +#endif + result = false; + break; + } } if (left_tag_product != right_tag_product) { #ifndef FUZZING diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.hpp 
b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.hpp index c81ba1f2734..7f296f7550a 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.hpp @@ -219,8 +219,10 @@ template class UltraCircuitBuilder_ : public CircuitBuilderBase ultra_selector_names() { - std::vector result{ "q_m", "q_c", "q_1", "q_2", "q_3", "q_4", - "q_arith", "q_sort", "q_elliptic", "q_aux", "table_type" }; + std::vector result{ + "q_m", "q_c", "q_1", "q_2", "q_3", "q_4", + "q_arith", "q_sort", "q_elliptic", "q_aux", "table_type", "q_elliptic_double" + }; return result; } struct non_native_field_multiplication_cross_terms { @@ -269,6 +271,7 @@ template class UltraCircuitBuilder_ : public CircuitBuilderBase tau; @@ -321,6 +324,7 @@ template class UltraCircuitBuilder_ : public CircuitBuilderBase class UltraCircuitBuilder_ : public CircuitBuilderBaseq_elliptic.resize(num_gates); builder->q_aux.resize(num_gates); builder->q_lookup_type.resize(num_gates); + builder->q_elliptic_double.resize(num_gates); } /** * @brief Checks that the circuit state is the same as the stored circuit's one @@ -496,6 +501,9 @@ template class UltraCircuitBuilder_ : public CircuitBuilderBase class UltraCircuitBuilder_ : public CircuitBuilderBaseselectors.q_elliptic; SelectorVector& q_aux = this->selectors.q_aux; SelectorVector& q_lookup_type = this->selectors.q_lookup_type; + SelectorVector& q_elliptic_double = this->selectors.q_elliptic_double; // These are variables that we have used a gate on, to enforce that they are // equal to a defined value. 
@@ -1198,6 +1207,13 @@ template class UltraCircuitBuilder_ : public CircuitBuilderBase Date: Mon, 21 Aug 2023 12:52:53 +0000 Subject: [PATCH 06/50] hash to curve --- .../cpp/src/barretenberg/ecc/groups/group.hpp | 75 ++++++++++++++++++- .../stdlib/primitives/group/cycle_group.cpp | 6 +- .../stdlib/primitives/group/cycle_group.hpp | 8 ++ .../primitives/group/cycle_group.test.cpp | 35 ++++++++- 4 files changed, 121 insertions(+), 3 deletions(-) diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/groups/group.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/groups/group.hpp index 3f59987d8e2..2110c9ff880 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/groups/group.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/groups/group.hpp @@ -4,12 +4,12 @@ #include "./affine_element.hpp" #include "./element.hpp" #include "./wnaf.hpp" +#include "barretenberg/crypto/sha256/sha256.hpp" #include #include #include #include #include - namespace barretenberg { /** @@ -62,6 +62,79 @@ template + */ + static std::vector derive_generators_secure(size_t num_generators, + const std::string& domain_separator) + { + std::vector result; + std::array domain_hash = sha256::sha256(domain_separator); + std::vector generator_preimage; + generator_preimage.reserve(64); + std::copy(domain_hash.begin(), domain_hash.end(), std::back_inserter(generator_preimage)); + for (size_t i = 0; i < 32; ++i) { + generator_preimage.emplace_back(0); + } + for (size_t i = 0; i < num_generators; ++i) { + auto generator_index = static_cast(i); + { + uint32_t mask = 0xff; + generator_preimage[32] = static_cast(generator_index >> 24); + generator_preimage[33] = static_cast((generator_index >> 16) & mask); + generator_preimage[34] = static_cast((generator_index >> 8) & mask); + generator_preimage[35] = static_cast(generator_index & mask); + } + result.push_back(affine_element::hash_to_curve(generator_preimage)); + } + return result; + } + + static affine_element 
get_secure_generator_from_index(size_t generator_index, const std::string& domain_separator) + { + std::array domain_hash = sha256::sha256(domain_separator); + std::vector generator_preimage; + generator_preimage.reserve(64); + std::copy(domain_hash.begin(), domain_hash.end(), std::back_inserter(generator_preimage)); + for (size_t i = 0; i < 32; ++i) { + generator_preimage.emplace_back(0); + } + auto gen_idx = static_cast(generator_index); + uint32_t mask = 0xff; + generator_preimage[32] = static_cast(gen_idx >> 24); + generator_preimage[33] = static_cast((gen_idx >> 16) & mask); + generator_preimage[34] = static_cast((gen_idx >> 8) & mask); + generator_preimage[35] = static_cast(gen_idx & mask); + return affine_element::hash_to_curve(generator_preimage); + } + BBERG_INLINE static void conditional_negate_affine(const affine_element* src, affine_element* dest, uint64_t predicate); diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp index 15decaeea28..60d9fc59a35 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp @@ -319,7 +319,7 @@ template cycle_group& cycle_group::opera template cycle_group::offset_generators::offset_generators(size_t num_points) { - auto generator_temp = G1::template derive_generators<100>(); // hmm bad + auto generator_temp = G1::template derive_generators<300>(); // hmm bad const size_t num_generators = num_points + 1; for (size_t i = 0; i < num_generators; ++i) { generators.emplace_back(generator_temp[i]); @@ -550,6 +550,10 @@ cycle_group cycle_group::straus_lookup_table::read(const fie return cycle_group(_context, x, y, false); } +// for fixed base batch mul... +// 1. take each generator point and split into table_bits chunks +// 2. 
precompute multiples of each generator point and store in lookup table +// 3. template cycle_group cycle_group::variable_base_batch_mul(const std::vector& _scalars, const std::vector& _base_points) diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp index bb5f872b776..ee9085063a0 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp @@ -103,6 +103,14 @@ template class cycle_group { } bool_t is_point_at_infinity() const { return is_infinity; } + void validate_is_on_curve() const + { + auto xx = x * x; + auto xxx = xx * x; + auto res = y.madd(y, -xxx - G1::curve_b); + res *= is_point_at_infinity(); + res.assert_is_zero(); + } cycle_group dbl() const; cycle_group unconditional_add(const cycle_group& other) const; cycle_group constrained_unconditional_add(const cycle_group& other) const; diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp index 269daec8195..220b291af85 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp @@ -27,7 +27,7 @@ template class CycleGroupTest : public ::testing::Test { using element = typename G1::element; using affine_element = typename G1::affine_element; - static constexpr size_t num_generators = 10; + static constexpr size_t num_generators = 110; static inline std::array generators{}; static void SetUpTestSuite() @@ -434,4 +434,37 @@ TYPED_TEST(CycleGroupTest, TestVariableBaseBatchMul) bool proof_result = composer.check_circuit(); EXPECT_EQ(proof_result, true); } + 
+TYPED_TEST(CycleGroupTest, ProfileVariableBaseBatcMul) +{ + STDLIB_TYPE_ALIASES; + auto composer = Composer(); + + const size_t num_muls = 2; + + element expected = G1::point_at_infinity; + + // case 1, general MSM with inputs that are combinations of constant and witnesses + { + std::vector points; + std::vector scalars; + + for (size_t i = 0; i < num_muls; ++i) { + auto element = TestFixture::generators[i]; + typename G1::subgroup_field scalar = G1::subgroup_field::random_element(); + + // 1: add entry where point, scalar are witnesses + expected += (element * scalar); + points.emplace_back(cycle_group_ct::from_witness(&composer, element)); + scalars.emplace_back(cycle_group_ct::cycle_scalar::from_witness(&composer, scalar)); + } + auto result = cycle_group_ct::variable_base_batch_mul(scalars, points); + EXPECT_EQ(result.get_value(), affine_element(expected)); + } + + std::cout << "composer gates = " << composer.get_num_gates() << std::endl; + bool proof_result = composer.check_circuit(); + EXPECT_EQ(proof_result, true); +} + } // namespace stdlib_cycle_group_tests \ No newline at end of file From f5a9a5fc3324499ae0507843db7e1d33d8c2df9d Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Fri, 25 Aug 2023 08:36:16 +0000 Subject: [PATCH 07/50] wip --- .../crypto/pedersen_hash/pedersen.cpp | 22 +- .../crypto/pedersen_hash/pedersen.hpp | 18 +- .../ecc/curves/grumpkin/grumpkin.cpp | 22 +- .../ecc/curves/secp256k1/secp256k1.cpp | 23 +- .../ecc/curves/secp256r1/secp256r1.cpp | 14 +- .../cpp/src/barretenberg/ecc/fields/field.hpp | 10 + .../ecc/groups/affine_element.hpp | 3 + .../ecc/groups/affine_element_impl.hpp | 121 +++++--- .../src/barretenberg/ecc/groups/element.hpp | 2 +- .../barretenberg/ecc/groups/element_impl.hpp | 1 + .../cpp/src/barretenberg/ecc/groups/group.hpp | 30 +- .../new_pedersen_experiment.cpp | 41 +++ .../new_pedersen_experiment.hpp | 218 +++++++++++++++ .../plookup_tables/plookup_tables.cpp | 25 +- .../plookup_tables/plookup_tables.hpp | 29 +- 
.../proof_system/plookup_tables/types.hpp | 18 +- .../stdlib/primitives/group/cycle_group.cpp | 261 +++++++++++++++++- .../stdlib/primitives/group/cycle_group.hpp | 31 ++- .../primitives/group/cycle_group.test.cpp | 38 +++ 19 files changed, 820 insertions(+), 107 deletions(-) create mode 100644 circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/new_pedersen_experiment.cpp create mode 100644 circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/new_pedersen_experiment.hpp diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen.cpp index 6debd1b9ff3..32b28862fe6 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen.cpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen.cpp @@ -4,11 +4,19 @@ #include #endif -namespace crypto { -namespace pedersen_hash { +namespace crypto::pedersen_hash { using namespace generators; +grumpkin::g1::affine_element generator_info::get_lhs_generator() +{ + return lhs_generator; +} +grumpkin::g1::affine_element generator_info::get_rhs_generator() +{ + return rhs_generator; +} + grumpkin::g1::element hash_single(const barretenberg::fr& in, generator_index_t const& index) { auto gen_data = get_generator_data(index); @@ -21,7 +29,7 @@ grumpkin::g1::element hash_single(const barretenberg::fr& in, generator_index_t const fixed_base_ladder* ladder = gen_data.get_hash_ladder(num_bits); - uint64_t wnaf_entries[num_quads + 2] = { 0 }; + std::array wnaf_entries = { 0 }; bool skew = false; barretenberg::wnaf::fixed_wnaf(&scalar_multiplier.data[0], &wnaf_entries[0], skew, 0); @@ -64,9 +72,11 @@ grumpkin::fq hash_multiple(const std::vector& inputs, const size_t r = out[i] + r; } grumpkin::g1::affine_element result = - r.is_point_at_infinity() ? 
grumpkin::g1::affine_element(0, 0) : grumpkin::g1::affine_element(r); + r.is_point_at_infinity() ? grumpkin::g1::affine_element(0, 0) : static_cast(r); return result.x; } -} // namespace pedersen_hash -} // namespace crypto \ No newline at end of file +struct foo; +struct generator_info; +// class grumpkin::g1::affine_element generator_info::get_rhs_generator(); +} // namespace crypto::pedersen_hash \ No newline at end of file diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen.hpp index 40bdfc7ff8d..4cd2b095cfd 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen.hpp @@ -4,12 +4,20 @@ #include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp" #include -namespace crypto { -namespace pedersen_hash { +namespace crypto::pedersen_hash { + +struct generator_info { + inline static const grumpkin::g1::affine_element lhs_generator = + grumpkin::g1::get_secure_generator_from_index(0, "pedersen_hash_generator"); + inline static const grumpkin::g1::affine_element rhs_generator = + grumpkin::g1::get_secure_generator_from_index(1, "pedersen_hash_generator"); + + static grumpkin::g1::affine_element get_lhs_generator(); + static grumpkin::g1::affine_element get_rhs_generator(); +}; grumpkin::g1::element hash_single(const barretenberg::fr& in, generators::generator_index_t const& index); -grumpkin::fq hash_multiple(const std::vector& inputs, const size_t hash_index = 0); +grumpkin::fq hash_multiple(const std::vector& inputs, size_t hash_index = 0); -} // namespace pedersen_hash -} // namespace crypto +} // namespace crypto::pedersen_hash diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/curves/grumpkin/grumpkin.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/curves/grumpkin/grumpkin.cpp index 17661c709e3..fdb0348b877 
100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/curves/grumpkin/grumpkin.cpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/curves/grumpkin/grumpkin.cpp @@ -3,18 +3,22 @@ namespace grumpkin { namespace { -constexpr size_t max_num_generators = 1 << 10; -static std::array generators; -static bool init_generators = false; +// constexpr size_t max_num_generators = 1 << 10; +// static std::array generators; +// static bool init_generators = false; } // namespace +// g1::affine_element get_generator(const size_t generator_index) +// { +// if (!init_generators) { +// generators = g1::derive_generators(); +// init_generators = true; +// } +// ASSERT(generator_index < max_num_generators); +// return generators[generator_index]; +// } g1::affine_element get_generator(const size_t generator_index) { - if (!init_generators) { - generators = g1::derive_generators(); - init_generators = true; - } - ASSERT(generator_index < max_num_generators); - return generators[generator_index]; + return g1::get_secure_generator_from_index(generator_index, "grumpkin_default_generator"); } } // namespace grumpkin \ No newline at end of file diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/curves/secp256k1/secp256k1.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/curves/secp256k1/secp256k1.cpp index ed7bd89172f..3d958a8d729 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/curves/secp256k1/secp256k1.cpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/curves/secp256k1/secp256k1.cpp @@ -1,23 +1,20 @@ #include "./secp256k1.hpp" namespace secp256k1 { -namespace { - -constexpr size_t max_num_generators = 1 << 10; -static std::array generators; -static bool init_generators = false; - -} // namespace /* In case where prime bit length is 256, the method produces a generator, but only with one less bit of randomness than the maximum possible, as the y coordinate in that case is determined by the x-coordinate. 
*/ +// g1::affine_element get_generator(const size_t generator_index) +// { +// if (!init_generators) { +// generators = g1::derive_generators(); +// init_generators = true; +// } +// ASSERT(generator_index < max_num_generators); +// return generators[generator_index]; +// } g1::affine_element get_generator(const size_t generator_index) { - if (!init_generators) { - generators = g1::derive_generators(); - init_generators = true; - } - ASSERT(generator_index < max_num_generators); - return generators[generator_index]; + return g1::get_secure_generator_from_index(generator_index, "secp256k1_default_generator"); } } // namespace secp256k1 \ No newline at end of file diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/curves/secp256r1/secp256r1.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/curves/secp256r1/secp256r1.cpp index 676ec6c3db1..c3057b7d476 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/curves/secp256r1/secp256r1.cpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/curves/secp256r1/secp256r1.cpp @@ -1,23 +1,11 @@ #include "./secp256r1.hpp" namespace secp256r1 { -namespace { - -constexpr size_t max_num_generators = 1 << 10; -static std::array generators; -static bool init_generators = false; - -} // namespace /* In case where prime bit length is 256, the method produces a generator, but only with one less bit of randomness than the maximum possible, as the y coordinate in that case is determined by the x-coordinate. 
*/ g1::affine_element get_generator(const size_t generator_index) { - if (!init_generators) { - generators = g1::derive_generators(); - init_generators = true; - } - ASSERT(generator_index < max_num_generators); - return generators[generator_index]; + return g1::get_secure_generator_from_index(generator_index, "secp256r1_default_generator"); } } // namespace secp256r1 \ No newline at end of file diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/fields/field.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/fields/field.hpp index 914793614e3..8d14a212a3d 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/fields/field.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/fields/field.hpp @@ -88,6 +88,16 @@ template struct alignas(32) field { constexpr field(const uint64_t a, const uint64_t b, const uint64_t c, const uint64_t d) noexcept : data{ a, b, c, d } {}; + constexpr explicit field(const uint512_t& input) noexcept + { + uint256_t value = (input % modulus).lo; + data[0] = value.data[0]; + data[1] = value.data[1]; + data[2] = value.data[2]; + data[3] = value.data[3]; + self_to_montgomery_form(); + } + constexpr explicit operator uint32_t() const { field out = from_montgomery_form(); diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element.hpp index f88a0c19f26..7a34cb971d2 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element.hpp @@ -68,6 +68,8 @@ template class alignas(64) affine_el constexpr bool on_curve() const noexcept; + static constexpr std::optional derive_from_x_coordinate(const Fq& x, bool sign_bit) noexcept; + /** * @brief Samples a random point on the curve. 
* @@ -81,6 +83,7 @@ template class alignas(64) affine_el * @return A point on the curve corresponding to the given seed */ static affine_element hash_to_curve(const uint64_t seed) noexcept; + static affine_element hash_to_curve(const std::vector& seed) noexcept; constexpr bool operator==(const affine_element& other) const noexcept; diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element_impl.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element_impl.hpp index a23d5a37751..fc034117a92 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element_impl.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element_impl.hpp @@ -1,9 +1,9 @@ #pragma once #include "./element.hpp" #include "barretenberg/crypto/keccak/keccak.hpp" +#include "barretenberg/crypto/sha256/sha256.hpp" -namespace barretenberg { -namespace group_elements { +namespace barretenberg::group_elements { template constexpr affine_element::affine_element(const Fq& a, const Fq& b) noexcept : x(a) @@ -83,6 +83,9 @@ constexpr affine_element affine_element::operator+( template constexpr affine_element& affine_element::operator=(const affine_element& other) noexcept { + if (this == &other) { + return *this; + } x = other.x; y = other.y; return *this; @@ -183,25 +186,46 @@ constexpr bool affine_element::operator>(const affine_element& other) // We are setting point at infinity to always be the lowest element if (is_point_at_infinity()) { return false; - } else if (other.is_point_at_infinity()) { + } + if (other.is_point_at_infinity()) { return true; } if (x > other.x) { return true; - } else if (x == other.x && y > other.y) { + } + if (x == other.x && y > other.y) { return true; } return false; } template -affine_element affine_element::hash_to_curve(const uint64_t seed) noexcept +constexpr std::optional> affine_element::derive_from_x_coordinate( + const Fq& x, bool sign_bit) noexcept { - 
static_assert(T::can_hash_to_curve == true); + auto yy = x.sqr() * x + T::b; + if constexpr (T::has_a) { + yy += (x * T::a); + } + auto [found_root, y] = yy.sqrt(); + + if (found_root) { + if (uint256_t(y).get_bit(0) != sign_bit) { + y = -y; + } + return affine_element(x, y); + } + return std::nullopt; +} + +template +affine_element affine_element::hash_to_curve(uint64_t seed) noexcept +{ + static_assert(static_cast(T::can_hash_to_curve)); Fq input(seed, 0, 0, 0); - keccak256 c = hash_field_element((uint64_t*)&input.data[0]); + keccak256 c = hash_field_element(&input.data[0]); uint256_t hash{ c.word64s[0], c.word64s[1], c.word64s[2], c.word64s[3] }; uint256_t x_coordinate = hash; @@ -212,22 +236,57 @@ affine_element affine_element::hash_to_curve(const uint64_ bool y_bit = hash.get_bit(255); - Fq x_out = Fq(x_coordinate); - Fq y_out = (x_out.sqr() * x_out + T::b); - if constexpr (T::has_a) { - y_out += (x_out * T::a); + std::optional result = derive_from_x_coordinate(x_coordinate, y_bit); + + if (result.has_value()) { + return result.value(); } + return affine_element(0, 0); +} - // When the sqrt of y_out doesn't exist, return 0. 
- auto [is_quadratic_remainder, y_out_] = y_out.sqrt(); - if (!is_quadratic_remainder) { - return affine_element(Fq::zero(), Fq::zero()); +template +affine_element affine_element::hash_to_curve(const std::vector& seed) noexcept +{ + std::vector target_seed(seed); + + // expand by 33 bytes to cover incremental hash attempts + const size_t seed_size = seed.size(); + for (size_t i = 0; i < 33; ++i) { + target_seed.emplace_back(0); } - if (uint256_t(y_out_).get_bit(0) != y_bit) { - y_out_ = -y_out_; + uint16_t attempt_counter = 0; + + while (true) { + auto hi = static_cast(attempt_counter >> static_cast(8)); + auto lo = static_cast(attempt_counter & static_cast(0xff)); + target_seed[seed_size] = hi; + target_seed[seed_size + 1] = lo; + target_seed[target_seed.size() - 1] = 0; + std::array hash_hi = sha256::sha256(target_seed); + target_seed[target_seed.size() - 1] = 1; + std::array hash_lo = sha256::sha256(target_seed); + std::vector gg(hash_hi.begin(), hash_hi.end()); + std::vector ff(hash_lo.begin(), hash_lo.end()); + uint256_t x_lo = 0; + uint256_t x_hi = 0; + // uint8_t* f = &hash_lo[0]; + // uint8_t* g = &hash_hi[0]; + read(ff, x_lo); + read(gg, x_hi); + // numeric::read(*f, x_lo); + // numeric::read(*g, x_hi); + uint512_t x_full(x_lo, x_hi); + Fq x(x_full); + bool sign_bit = false; + sign_bit = x_hi.get_bit(0); + std::optional result = derive_from_x_coordinate(x, sign_bit); + + if (result.has_value()) { + return result.value(); + } + attempt_counter++; } - - return affine_element(x_out, y_out_); + return affine_element(0, 0); } template @@ -237,30 +296,22 @@ affine_element affine_element::random_element(numeric::ran engine = &numeric::random::get_engine(); } - bool found_one = false; Fq yy; Fq x; Fq y; - while (!found_one) { + while (true) { // Sample a random x-coordinate and check if it satisfies curve equation. 
x = Fq::random_element(engine); - yy = x.sqr() * x + T::b; - if constexpr (T::has_a) { - yy += (x * T::a); - } - auto [found_root, y1] = yy.sqrt(); - y = y1; - // Negate the y-coordinate based on a randomly sampled bit. - bool random_bit = (engine->get_random_uint8() & 1); - if (random_bit) { - y = -y; - } + bool sign_bit = (engine->get_random_uint8() & 1) != 0; + + std::optional result = derive_from_x_coordinate(x, sign_bit); - found_one = found_root; + if (result.has_value()) { + return result.value(); + } } return affine_element(x, y); } -} // namespace group_elements -} // namespace barretenberg +} // namespace barretenberg::group_elements diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/groups/element.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/groups/element.hpp index c04bb0fce36..85d929487bf 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/groups/element.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/groups/element.hpp @@ -91,7 +91,7 @@ template class alignas(32) element { BBERG_INLINE constexpr bool on_curve() const noexcept; BBERG_INLINE constexpr bool operator==(const element& other) const noexcept; - static void batch_normalize(element* elements, const size_t num_elements) noexcept; + static void batch_normalize(element* elements, size_t num_elements) noexcept; static std::vector> batch_mul_with_endomorphism( const std::vector>& points, const Fr& exponent) noexcept; diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/groups/element_impl.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/groups/element_impl.hpp index 60f2faef284..59a67c38c8e 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/groups/element_impl.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/groups/element_impl.hpp @@ -1,5 +1,6 @@ #pragma once +#include "element.hpp" namespace barretenberg { namespace group_elements { template diff --git 
a/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/groups/group.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/groups/group.hpp index 2110c9ff880..6d656141520 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/groups/group.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/groups/group.hpp @@ -4,6 +4,7 @@ #include "./affine_element.hpp" #include "./element.hpp" #include "./wnaf.hpp" +#include "barretenberg/common/constexpr_utils.hpp" #include "barretenberg/crypto/sha256/sha256.hpp" #include #include @@ -30,10 +31,10 @@ template element; - typedef group_elements::affine_element affine_element; - typedef coordinate_field Fq; - typedef subgroup_field Fr; + using element = group_elements::element; + using affine_element = group_elements::affine_element; + using Fq = coordinate_field; + using Fr = subgroup_field; static constexpr bool USE_ENDOMORPHISM = GroupParams::USE_ENDOMORPHISM; static constexpr bool has_a = GroupParams::has_a; @@ -92,8 +93,8 @@ template */ - static std::vector derive_generators_secure(size_t num_generators, - const std::string& domain_separator) + static std::vector derive_generators_secure(const std::vector& domain_separator, + const size_t num_generators) { std::vector result; std::array domain_hash = sha256::sha256(domain_separator); @@ -105,13 +106,11 @@ template (i); - { - uint32_t mask = 0xff; - generator_preimage[32] = static_cast(generator_index >> 24); - generator_preimage[33] = static_cast((generator_index >> 16) & mask); - generator_preimage[34] = static_cast((generator_index >> 8) & mask); - generator_preimage[35] = static_cast(generator_index & mask); - } + uint32_t mask = 0xff; + generator_preimage[32] = static_cast(generator_index >> 24); + generator_preimage[33] = static_cast((generator_index >> 16) & mask); + generator_preimage[34] = static_cast((generator_index >> 8) & mask); + generator_preimage[35] = static_cast(generator_index & mask); 
result.push_back(affine_element::hash_to_curve(generator_preimage)); } return result; @@ -132,7 +131,10 @@ template ((gen_idx >> 16) & mask); generator_preimage[34] = static_cast((gen_idx >> 8) & mask); generator_preimage[35] = static_cast(gen_idx & mask); - return affine_element::hash_to_curve(generator_preimage); + auto result = affine_element::hash_to_curve(generator_preimage); + ASSERT(result.x != 0); + ASSERT(result.y != 0); + return result; } BBERG_INLINE static void conditional_negate_affine(const affine_element* src, diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/new_pedersen_experiment.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/new_pedersen_experiment.cpp new file mode 100644 index 00000000000..691848d9480 --- /dev/null +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/new_pedersen_experiment.cpp @@ -0,0 +1,41 @@ + +#include "./new_pedersen_experiment.hpp" + +namespace plookup::new_pedersen { + +bool table::lookup_table_exists_for_point(const grumpkin::g1::affine_element& input) +{ + return (input == crypto::pedersen_hash::generator_info::get_lhs_generator() || + input == crypto::pedersen_hash::generator_info::get_rhs_generator()); +} + +std::optional> table::get_lookup_table_ids_for_point( + const grumpkin::g1::affine_element& input) +{ + if (input == crypto::pedersen_hash::generator_info::get_lhs_generator()) { + return { { NEW_PEDERSEN_LEFT_LO, NEW_PEDERSEN_LEFT_HI } }; + } + if (input == crypto::pedersen_hash::generator_info::get_lhs_generator()) { + return { { NEW_PEDERSEN_RIGHT_LO, NEW_PEDERSEN_RIGHT_HI } }; + } + return {}; +} + +std::optional table::get_generator_offset_for_table_id(const MultiTableId table_id) +{ + if (table_id == NEW_PEDERSEN_LEFT_LO) { + return pedersen_table_offset_generators[0]; + } + if (table_id == NEW_PEDERSEN_LEFT_HI) { + return pedersen_table_offset_generators[1]; + } + if (table_id == NEW_PEDERSEN_RIGHT_LO) { + 
return pedersen_table_offset_generators[2]; + } + if (table_id == NEW_PEDERSEN_RIGHT_HI) { + return pedersen_table_offset_generators[3]; + } + return {}; +} + +} // namespace plookup::new_pedersen \ No newline at end of file diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/new_pedersen_experiment.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/new_pedersen_experiment.hpp new file mode 100644 index 00000000000..23e9dd700f5 --- /dev/null +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/new_pedersen_experiment.hpp @@ -0,0 +1,218 @@ +#pragma once + +#include "./types.hpp" + +#include "barretenberg/common/constexpr_utils.hpp" +#include "barretenberg/crypto/pedersen_hash/pedersen.hpp" +#include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp" +#include "barretenberg/numeric/bitop/pow.hpp" +#include "barretenberg/numeric/bitop/rotate.hpp" +#include "barretenberg/numeric/bitop/sparse_form.hpp" + +namespace plookup::new_pedersen { + +class table { + public: + // static constexpr size_t BITS_PER_HASH = 512; + static constexpr size_t BITS_PER_TABLE = 9; + // static constexpr size_t BITS_OF_BETA = 192; + static constexpr size_t BITS_ON_CURVE = 254; + static constexpr size_t BITS_PER_LAST_TABLE = 2; + static constexpr size_t PEDERSEN_TABLE_SIZE = (1UL) << BITS_PER_TABLE; + static constexpr size_t PEDERSEN_SMALL_TABLE_SIZE = (1UL) << BITS_PER_LAST_TABLE; + static constexpr size_t NUM_PEDERSEN_TABLES = + (BITS_ON_CURVE / BITS_PER_TABLE) + (BITS_ON_CURVE % BITS_PER_TABLE == 0 ? 
0 : 1); + static constexpr size_t NUM_PEDERSEN_POINTS = 2; + static constexpr size_t NUM_PEDERSEN_MULTI_TABLES = 4; + using affine_element = grumpkin::g1::affine_element; + using element = grumpkin::g1::element; + + using single_lookup_table = std::vector; + using fixed_base_scalar_mul_tables = std::vector; + using all_multi_tables = std::array; + + static single_lookup_table generate_single_lookup_table(const affine_element& base_point, + const affine_element& offset_generator) + { + std::vector table_raw(PEDERSEN_TABLE_SIZE); + + element accumulator = offset_generator; + for (size_t i = 0; i < PEDERSEN_TABLE_SIZE; ++i) { + table_raw[i] = accumulator; + accumulator += base_point; + } + element::batch_normalize(&table_raw[0], PEDERSEN_TABLE_SIZE); + single_lookup_table table(PEDERSEN_TABLE_SIZE); + for (size_t i = 0; i < table_raw.size(); ++i) { + if (i < 16) { + std::cout << "IUWAHRGIAWERUGH " << table_raw[i].x << std::endl; + } + table[i] = affine_element{ table_raw[i].x, table_raw[i].y }; + } + return table; + } + + template + static fixed_base_scalar_mul_tables generate_tables(const grumpkin::g1::affine_element& input) + { + constexpr size_t NUM_TABLES = (num_bits / BITS_PER_TABLE) + ((num_bits % BITS_PER_TABLE) ? 
1 : 0); + + fixed_base_scalar_mul_tables result; + result.resize(NUM_TABLES); + + std::vector input_buf; + serialize::write(input_buf, input); + const auto offset_generators = grumpkin::g1::derive_generators_secure(input_buf, PEDERSEN_TABLE_SIZE); + + grumpkin::g1::element accumulator = input; + for (size_t i = 0; i < NUM_TABLES; ++i) { + result.emplace_back(generate_single_lookup_table(accumulator, offset_generators[i])); + for (size_t j = 0; j < BITS_PER_TABLE; ++j) { + accumulator = accumulator.dbl(); + } + } + return result; + } + + template + static grumpkin::g1::affine_element generate_generator_offset(const grumpkin::g1::affine_element& input) + { + constexpr size_t NUM_TABLES = (num_table_bits / BITS_PER_TABLE) + ((num_table_bits % BITS_PER_TABLE) ? 1 : 0); + + std::vector input_buf; + serialize::write(input_buf, input); + const auto offset_generators = grumpkin::g1::derive_generators_secure(input_buf, NUM_TABLES); + grumpkin::g1::element acc = grumpkin::g1::point_at_infinity; + for (const auto& gen : offset_generators) { + acc += gen; + } + return acc; + } + + inline static const all_multi_tables pedersen_tables = { + table::generate_tables<128>(crypto::pedersen_hash::generator_info::get_lhs_generator()), + table::generate_tables<126>(grumpkin::g1::element(crypto::pedersen_hash::generator_info::get_lhs_generator()) * + (uint256_t(1) << 128)), + table::generate_tables<128>(crypto::pedersen_hash::generator_info::get_rhs_generator()), + table::generate_tables<126>(grumpkin::g1::element(crypto::pedersen_hash::generator_info::get_rhs_generator()) * + (uint256_t(1) << 128)), + }; + + inline static const std::array + pedersen_table_offset_generators = { + table::generate_generator_offset<128>(crypto::pedersen_hash::generator_info::get_lhs_generator()), + table::generate_generator_offset<126>( + grumpkin::g1::element(crypto::pedersen_hash::generator_info::get_lhs_generator()) * + (uint256_t(1) << 128)), + 
table::generate_generator_offset<128>(crypto::pedersen_hash::generator_info::get_rhs_generator()), + table::generate_generator_offset<126>( + grumpkin::g1::element(crypto::pedersen_hash::generator_info::get_rhs_generator()) * + (uint256_t(1) << 128)), + }; + template + inline static std::array get_basic_pedersen_table_values(const std::array key) + { + static_assert(multitable_index < NUM_PEDERSEN_MULTI_TABLES); + static_assert(table_index < NUM_PEDERSEN_TABLES); + const auto& basic_table = pedersen_tables[multitable_index][table_index]; + + // const auto& basic_table = pedersen_tables[generator_index][table_index]; + const auto index = static_cast(key[0]); + std::cout << "get basic table values. index = " << index << " x = " << basic_table[index].x << std::endl; + return { basic_table[index].x, basic_table[index].y }; + } + + template + static inline BasicTable generate_basic_pedersen_table(BasicTableId id, + size_t basic_table_index, + size_t table_index) + { + ASSERT(multitable_index < NUM_PEDERSEN_MULTI_TABLES); + ASSERT(table_index < NUM_PEDERSEN_TABLES); + + BasicTable table; + table.id = id; + table.table_index = basic_table_index; + table.size = PEDERSEN_TABLE_SIZE; + table.use_twin_keys = false; + + const auto& basic_table = pedersen_tables[multitable_index][table_index]; + // table::generate_tables( + // crypto::pedersen_hash::generator_info::get_lhs_generator())[table_index]; + + // const auto& basic_table = pedersen_tables[generator_index][table_index]; + + for (size_t i = 0; i < table.size; ++i) { + table.column_1.emplace_back(i); + table.column_2.emplace_back(basic_table[i].x); + table.column_3.emplace_back(basic_table[i].y); + } + table.get_values_from_key = nullptr; + barretenberg::constexpr_for<0, NUM_PEDERSEN_TABLES, 1>([&]() { + if (i == table_index) { + table.get_values_from_key = &get_basic_pedersen_table_values; + } + }); + ASSERT(table.get_values_from_key != nullptr); + table.column_1_step_size = table.size; + table.column_2_step_size = 0; + 
table.column_3_step_size = 0; + + return table; + } + + template + static inline MultiTable get_pedersen_table(const MultiTableId id = NEW_PEDERSEN_LEFT_LO) + { + constexpr size_t NUM_TABLES = (num_bits / BITS_PER_TABLE) + ((num_bits % BITS_PER_TABLE) ? 1 : 0); + + std::cout << "get pedersen table mtidx = " << multitable_index << " num bits " << num_bits << std::endl; + + // todo. split explicitly into 126 / 128 bit chunks. + // Construct 126 bit chunk out of 14 9-bit tables + // Construct 128 bit chunk out of 12 9-bit tables and 2 10-bit tables + MultiTable table(PEDERSEN_TABLE_SIZE, 0, 0, NUM_TABLES); + + std::cout << "q0" << std::endl; + std::cout << "NUM TABLES = " << NUM_TABLES << std::endl; + table.id = id; + for (size_t i = 0; i < NUM_TABLES; ++i) { + std::cout << "beep" << std::endl; + table.slice_sizes.emplace_back(PEDERSEN_TABLE_SIZE); + } + std::cout << "q1" << std::endl; + + table.get_table_values.resize(NUM_TABLES); + table.lookup_ids.resize(NUM_TABLES); + + std::cout << "q2" << std::endl; + + barretenberg::constexpr_for<0, NUM_TABLES, 1>([&]() { + table.get_table_values[i] = &get_basic_pedersen_table_values; + size_t idx = i; + if (multitable_index == 0) { + idx += static_cast(PEDERSEN_0_0); + } else if (multitable_index == 1) { + idx += static_cast(PEDERSEN_1_0); + } else if (multitable_index == 2) { + idx += static_cast(PEDERSEN_2_0); + } else { + idx += static_cast(PEDERSEN_3_0); + } + std::cout << "q3" << std::endl; + table.lookup_ids[i] = static_cast(idx); + std::cout << "q4" << std::endl; + }); + std::cout << "q5" << std::endl; + std::cout << "RETURNING TABLE W. 
SLICE SIZES SIZE = " << table.slice_sizes.size() << std::endl; + return table; + } + + static bool lookup_table_exists_for_point(const grumpkin::g1::affine_element& input); + static std::optional> get_lookup_table_ids_for_point( + const grumpkin::g1::affine_element& input); + + static std::optional get_generator_offset_for_table_id(MultiTableId table_id); +}; + +} // namespace plookup::new_pedersen \ No newline at end of file diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/plookup_tables.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/plookup_tables.cpp index 6580a47e8e4..d29a93a5848 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/plookup_tables.cpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/plookup_tables.cpp @@ -91,9 +91,17 @@ void init_multi_tables() keccak_tables::Chi::get_chi_output_table(MultiTableId::KECCAK_CHI_OUTPUT); MULTI_TABLES[MultiTableId::KECCAK_FORMAT_OUTPUT] = keccak_tables::KeccakOutput::get_keccak_output_table(MultiTableId::KECCAK_FORMAT_OUTPUT); + MULTI_TABLES[MultiTableId::NEW_PEDERSEN_LEFT_LO] = + new_pedersen::table::get_pedersen_table<0, 128>(MultiTableId::NEW_PEDERSEN_LEFT_LO); + MULTI_TABLES[MultiTableId::NEW_PEDERSEN_LEFT_HI] = + new_pedersen::table::get_pedersen_table<1, 126>(MultiTableId::NEW_PEDERSEN_LEFT_LO); + MULTI_TABLES[MultiTableId::NEW_PEDERSEN_RIGHT_LO] = + new_pedersen::table::get_pedersen_table<2, 128>(MultiTableId::NEW_PEDERSEN_RIGHT_LO); + MULTI_TABLES[MultiTableId::NEW_PEDERSEN_RIGHT_HI] = + new_pedersen::table::get_pedersen_table<3, 126>(MultiTableId::NEW_PEDERSEN_RIGHT_HI); barretenberg::constexpr_for<0, 25, 1>([&]() { - MULTI_TABLES[(size_t)MultiTableId::KECCAK_NORMALIZE_AND_ROTATE + i] = + MULTI_TABLES[static_cast(MultiTableId::KECCAK_NORMALIZE_AND_ROTATE) + i] = keccak_tables::Rho<8, i>::get_rho_output_table(MultiTableId::KECCAK_NORMALIZE_AND_ROTATE); }); 
MULTI_TABLES[MultiTableId::HONK_DUMMY_MULTI] = dummy_tables::get_honk_dummy_multitable(); @@ -115,17 +123,27 @@ ReadData get_lookup_accumulators(const MultiTableId id, const bool is_2_to_1_lookup) { // return multi-table, populating global array of all multi-tables if need be + std::cout << "z0" << std::endl; const auto& multi_table = create_table(id); + std::cout << "z1" << std::endl; const size_t num_lookups = multi_table.lookup_ids.size(); + std::cout << "z2" << std::endl; ReadData lookup; - + std::cout << "z3" << std::endl; + std::cout << "slice sizes = " << multi_table.slice_sizes[0] << std::endl; + std::cout << "key a = " << uint256_t(key_a) << std::endl; + std::cout << "key a msb = " << uint256_t(key_a).get_msb() << std::endl; + std::cout << "num slices = " << multi_table.slice_sizes.size() << std::endl; + std::cout << "multitableid = " << id << std::endl; const auto key_a_slices = numeric::slice_input_using_variable_bases(key_a, multi_table.slice_sizes); const auto key_b_slices = numeric::slice_input_using_variable_bases(key_b, multi_table.slice_sizes); + std::cout << "z4" << std::endl; std::vector column_1_raw_values; std::vector column_2_raw_values; std::vector column_3_raw_values; + std::cout << "z5" << std::endl; for (size_t i = 0; i < num_lookups; ++i) { // get i-th table query function and then submit query @@ -139,9 +157,12 @@ ReadData get_lookup_accumulators(const MultiTableId id, const BasicTable::KeyEntry key_entry{ { key_a_slices[i], key_b_slices[i] }, values }; lookup.key_entries.emplace_back(key_entry); } + std::cout << "z6" << std::endl; + lookup[ColumnIdx::C1].resize(num_lookups); lookup[ColumnIdx::C2].resize(num_lookups); lookup[ColumnIdx::C3].resize(num_lookups); + std::cout << "z7" << std::endl; /** * A multi-table consists of multiple basic tables (say L = 6). 
diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/plookup_tables.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/plookup_tables.hpp index d915409550a..d2707db51c4 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/plookup_tables.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/plookup_tables.hpp @@ -1,6 +1,7 @@ #pragma once #include "barretenberg/common/throw_or_abort.hpp" +#include "./new_pedersen_experiment.hpp" #include "aes128.hpp" #include "blake2s.hpp" #include "dummy.hpp" @@ -18,15 +19,37 @@ namespace plookup { -const MultiTable& create_table(const MultiTableId id); +const MultiTable& create_table(MultiTableId id); -ReadData get_lookup_accumulators(const MultiTableId id, +ReadData get_lookup_accumulators(MultiTableId id, const barretenberg::fr& key_a, const barretenberg::fr& key_b = 0, - const bool is_2_to_1_map = false); + bool is_2_to_1_lookup = false); inline BasicTable create_basic_table(const BasicTableId id, const size_t index) { + // TODO(@zac-williamson) improve + auto id_var = static_cast(id); + if (id_var >= static_cast(PEDERSEN_0_0) && + id_var < static_cast(PEDERSEN_0_0) + NUM_PEDERSEN_TABLES_LO) { + return new_pedersen::table::generate_basic_pedersen_table<0>( + id, index, id_var - static_cast(PEDERSEN_0_0)); + } + if (id_var >= static_cast(PEDERSEN_1_0) && + id_var < static_cast(PEDERSEN_1_0) + NUM_PEDERSEN_TABLES_HI) { + return new_pedersen::table::generate_basic_pedersen_table<1>( + id, index, id_var - static_cast(PEDERSEN_1_0)); + } + if (id_var >= static_cast(PEDERSEN_2_0) && + id_var < static_cast(PEDERSEN_2_0) + NUM_PEDERSEN_TABLES_LO) { + return new_pedersen::table::generate_basic_pedersen_table<2>( + id, index, id_var - static_cast(PEDERSEN_2_0)); + } + if (id_var >= static_cast(PEDERSEN_3_0) && + id_var < static_cast(PEDERSEN_3_0) + NUM_PEDERSEN_TABLES_HI) { + return 
new_pedersen::table::generate_basic_pedersen_table<3>( + id, index, id_var - static_cast(PEDERSEN_3_0)); + } switch (id) { case AES_SPARSE_MAP: { return sparse_tables::generate_sparse_table_with_rotation<9, 8, 0>(AES_SPARSE_MAP, index); diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/types.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/types.hpp index e7fd4e400ef..1d1ca4a88e5 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/types.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/types.hpp @@ -3,10 +3,12 @@ #include #include +// #include "./new_pedersen_experiment.hpp" #include "barretenberg/ecc/curves/bn254/fr.hpp" - namespace plookup { - +static constexpr size_t NUM_PEDERSEN_TABLES_LO = 15; // FIXFIXFIX +static constexpr size_t NUM_PEDERSEN_TABLES_HI = 14; // FIXFIXFIX +static constexpr size_t NUM_PEDERSEN_TABLES = 29; // FIXFIXFIX enum BasicTableId { XOR, AND, @@ -52,7 +54,11 @@ enum BasicTableId { BLAKE_XOR_ROTATE1, BLAKE_XOR_ROTATE2, BLAKE_XOR_ROTATE4, - PEDERSEN_29_SMALL, + PEDERSEN_0_0, + PEDERSEN_1_0 = PEDERSEN_0_0 + NUM_PEDERSEN_TABLES_LO, + PEDERSEN_2_0 = PEDERSEN_1_0 + NUM_PEDERSEN_TABLES_HI, + PEDERSEN_3_0 = PEDERSEN_2_0 + NUM_PEDERSEN_TABLES_LO, + PEDERSEN_29_SMALL = PEDERSEN_3_0 + NUM_PEDERSEN_TABLES_HI, PEDERSEN_28, PEDERSEN_27, PEDERSEN_26, @@ -115,6 +121,12 @@ enum MultiTableId { PEDERSEN_LEFT_LO, PEDERSEN_RIGHT_HI, PEDERSEN_RIGHT_LO, + NEW_PEDERSEN_LEFT, + NEW_PEDERSEN_RIGHT, + NEW_PEDERSEN_LEFT_HI, + NEW_PEDERSEN_LEFT_LO, + NEW_PEDERSEN_RIGHT_HI, + NEW_PEDERSEN_RIGHT_LO, UINT32_XOR, UINT32_AND, BN254_XLO, diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp index 60d9fc59a35..92dc6eaf347 100644 --- 
a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp @@ -6,6 +6,8 @@ #include "../../hash/pedersen/pedersen_gates.hpp" #include "./cycle_group.hpp" +#include "barretenberg/proof_system/plookup_tables/types.hpp" +#include "barretenberg/stdlib/primitives/plookup/plookup.hpp" namespace proof_system::plonk::stdlib { template Composer* cycle_group::get_context(const cycle_group& other) const @@ -607,7 +609,7 @@ cycle_group cycle_group::variable_base_batch_mul(const std:: cycle_group accumulator = generators.generators[0]; for (size_t i = 0; i < num_rounds; ++i) { if (i != 0) { - + // NOTE: IN FIXED BASE MODE WE CAN DOUBLE THE TABLES INSTEAD OF THE POINTS (if not using plookup) for (size_t j = 0; j < table_bits; ++j) { accumulator = accumulator.dbl(); offset_generator_accumulator = offset_generator_accumulator.dbl(); @@ -636,6 +638,263 @@ cycle_group cycle_group::variable_base_batch_mul(const std:: return accumulator; } + +template +cycle_group cycle_group::fixed_base_batch_mul( + const std::vector& _scalars, + const std::vector& _base_points) requires SupportsLookupTables +{ + ASSERT(_scalars.size() == _base_points.size()); + + Composer* context = nullptr; + for (auto& scalar : _scalars) { + if (scalar.get_context() != nullptr) { + context = scalar.get_context(); + break; + } + } + + std::vector scalars; + std::vector base_points; + bool has_constant_component = false; + bool has_non_constant_component = false; + element constant_component = G1::point_at_infinity; + for (size_t i = 0; i < _scalars.size(); ++i) { + if (_scalars[i].is_constant()) { + has_constant_component = true; + constant_component += _base_points[i] * _scalars[i].get_value(); + } else { + has_non_constant_component = true; + scalars.emplace_back(_scalars[i]); + base_points.emplace_back(_base_points[i]); + } + } + if (!has_non_constant_component) { + return 
cycle_group(constant_component); + } + + const size_t num_points = base_points.size(); + using MultiTableId = plookup::MultiTableId; + using ColumnIdx = plookup::ColumnIdx; + + std::vector plookup_table_ids; + std::vector plookup_base_points; + std::vector plookup_scalars; + std::vector leftover_scalars; + std::vector leftover_base_points; + + for (size_t i = 0; i < num_points; ++i) { + std::optional> table_id = + plookup::new_pedersen::table::get_lookup_table_ids_for_point(base_points[i]); + if (table_id.has_value()) { + plookup_table_ids.emplace_back(table_id.value()[0]); + plookup_table_ids.emplace_back(table_id.value()[1]); + plookup_base_points.emplace_back(base_points[i]); + plookup_base_points.emplace_back(element(base_points[i]) * (uint256_t(1) << 128)); + plookup_scalars.emplace_back(scalars[i].lo); + plookup_scalars.emplace_back(scalars[i].hi); + + } else { + leftover_base_points.emplace_back(base_points[i]); + leftover_scalars.emplace_back(scalars[i]); + } + ASSERT(table_id.has_value()); + } + + std::vector lookup_points; + element offset_generator_accumulator = G1::point_at_infinity; + for (size_t i = 0; i < plookup_scalars.size(); ++i) { + std::cout << "i = " << i << std::endl; + plookup::ReadData lookup_data = + plookup_read::get_lookup_accumulators(plookup_table_ids[i], plookup_scalars[i]); + std::cout << "t0" << std::endl; + for (size_t j = 0; j < lookup_data[ColumnIdx::C2].size(); ++j) { + const auto x = lookup_data[ColumnIdx::C2][j]; + const auto y = lookup_data[ColumnIdx::C3][j]; + std::cout << "x/y = " << x << " : " << y << std::endl; + lookup_points.emplace_back(cycle_group(context, x, y, false)); + } + + std::optional offset_1 = + plookup::new_pedersen::table::get_generator_offset_for_table_id(plookup_table_ids[i]); + + ASSERT(offset_1.has_value()); + // ASSERT(offset_2.has_value()); + offset_generator_accumulator += offset_1.value(); + // offset_generator_accumulator += offset_2.value(); + } + std::cout << "mark" << std::endl; + cycle_group 
accumulator; + const size_t leftover_points = leftover_scalars.size(); + if (leftover_points > 0) { + + auto generators = offset_generators(leftover_points); + std::vector scalar_slices; + std::vector point_tables; + for (size_t i = 0; i < leftover_points; ++i) { + scalar_slices.emplace_back(straus_scalar_slice(context, leftover_scalars[i], table_bits)); + point_tables.emplace_back(straus_lookup_table( + context, cycle_group(leftover_base_points[i]), generators.generators[i + 1], table_bits)); + } + + element debug_acc = G1::point_at_infinity; + uint256_t debug_scalar = + uint256_t(leftover_scalars[0].lo.get_value()) + + (uint256_t(leftover_scalars[0].hi.get_value()) * (uint256_t(1) << (cycle_scalar::LO_BITS))); + + offset_generator_accumulator += generators.generators[0]; + accumulator = generators.generators[0]; + for (size_t i = 0; i < num_rounds; ++i) { + if (i != 0) { + + for (size_t j = 0; j < table_bits; ++j) { + accumulator = accumulator.dbl(); + offset_generator_accumulator = offset_generator_accumulator.dbl(); + debug_acc = debug_acc.dbl(); + } + } + + for (size_t j = 0; j < leftover_points; ++j) { + const field_t scalar_slice = scalar_slices[j].read(num_rounds - i - 1); + const cycle_group point = point_tables[j].read(scalar_slice); + accumulator = accumulator.constrained_unconditional_add(point); + offset_generator_accumulator = offset_generator_accumulator + element(generators.generators[j + 1]); + } + } + } + std::cout << "mark 2" << std::endl; + // cycle_group accumulator = lookup_points[0]; + for (size_t i = 0; i < lookup_points.size(); ++i) { + std::cout << "i = " << i << std::endl; + if (i == 0) { + std::cout << "leftover empty? 
" << leftover_scalars.empty() << " : " << leftover_scalars.size() + << std::endl; + if (leftover_scalars.empty()) { + accumulator = lookup_points[i]; + } else { + accumulator = accumulator.unconditional_add(lookup_points[i]); + } + } else { + std::cout << "acc vs pt " << accumulator << " : " << lookup_points[i] << std::endl; + accumulator = accumulator.unconditional_add(lookup_points[i]); + } + } + std::cout << "mark 3" << std::endl; + + if (has_constant_component) { + // we subtract off the offset_generator_accumulator, so subtract constant component from the accumulator! + offset_generator_accumulator -= constant_component; + } + std::cout << "mark 4" << std::endl; + cycle_group offset_generator_delta(affine_element(-offset_generator_accumulator)); + accumulator = accumulator.unconditional_add(offset_generator_delta); + return accumulator; +} + +template +cycle_group cycle_group::fixed_base_batch_mul( + const std::vector& _scalars, + const std::vector& _base_points) requires DoesNotSupportLookupTables + +{ + ASSERT(_scalars.size() == _base_points.size()); + static constexpr size_t FIXED_BASE_TABLE_BITS = 1; + + Composer* context = nullptr; + for (auto& scalar : _scalars) { + if (scalar.get_context() != nullptr) { + context = scalar.get_context(); + break; + } + } + + std::vector scalars; + std::vector base_points; + bool has_constant_component = false; + bool has_non_constant_component = false; + element constant_component = G1::point_at_infinity; + for (size_t i = 0; i < _scalars.size(); ++i) { + if (_scalars[i].is_constant()) { + has_constant_component = true; + constant_component += _base_points[i] * _scalars[i].get_value(); + } else { + has_non_constant_component = true; + scalars.emplace_back(_scalars[i]); + base_points.emplace_back(_base_points[i]); + } + } + if (!has_non_constant_component) { + return cycle_group(constant_component); + } + // core algorithm + // define a `table_bits` size lookup table + const size_t num_points = scalars.size(); + + auto 
generators = offset_generators(num_points); + std::vector scalar_slices; + // std::vector point_tables; + + using straus_round_tables = std::vector; + std::vector point_tables(num_points); + // for (size_t i = 0; i < num_points; ++i) { + // scalar_slices.emplace_back(straus_scalar_slice(context, scalars[i], table_bits)); + // point_tables.emplace_back( + // straus_lookup_table(context, cycle_group(base_points[i]), generators.generators[i + 1], table_bits)); + // } + + // creating these point tables should cost 0 constraints if base points are constant + for (size_t i = 0; i < num_points; ++i) { + std::vector round_points(num_rounds); + std::vector round_offset_generators(num_rounds); + round_points[0] = base_points[i]; + round_offset_generators[0] = generators.generators[i + 1]; + for (size_t j = 1; j < num_rounds; ++j) { + round_points[j] = round_points[j - 1].dbl(); + round_offset_generators[j] = round_offset_generators[j - 1].dbl(); + } + element::batch_normalize(&round_points[0], num_rounds); + element::batch_normalize(&round_offset_generators[0], num_rounds); + point_tables[i].resize(num_rounds); + for (size_t j = 0; j < num_rounds; ++j) { + point_tables[i][num_rounds - j - 1] = straus_lookup_table( + context, cycle_group(round_points[j]), cycle_group(round_offset_generators[j]), FIXED_BASE_TABLE_BITS); + } + } + + for (size_t i = 0; i < num_points; ++i) { + scalar_slices.emplace_back(straus_scalar_slice(context, scalars[i], FIXED_BASE_TABLE_BITS)); + } + + element offset_generator_accumulator = generators.generators[0]; + cycle_group accumulator = cycle_group(element(generators.generators[0]) * (uint256_t(1) << 253)); + for (size_t i = 0; i < num_rounds; ++i) { + + if (i > 0) { + offset_generator_accumulator = offset_generator_accumulator.dbl(); + } + for (size_t j = 0; j < num_points; ++j) { + auto& point_table = point_tables[j][i]; + + const field_t scalar_slice = scalar_slices[j].read(num_rounds - i - 1); + + const cycle_group point = 
point_table.read(scalar_slice); + accumulator = accumulator.unconditional_add(point); + offset_generator_accumulator = offset_generator_accumulator + element(generators.generators[j + 1]); + } + } + + if (has_constant_component) { + // we subtract off the offset_generator_accumulator, so subtract constant component from the accumulator! + offset_generator_accumulator -= constant_component; + } + cycle_group offset_generator_delta(affine_element(-offset_generator_accumulator)); + // use a full conditional add here in case we end with a point at infinity or a point doubling. + // e.g. x[P] + x[P], or x[P] + -x[P] + accumulator = accumulator + offset_generator_delta; + + return accumulator; +} + INSTANTIATE_STDLIB_TYPE(cycle_group); } // namespace proof_system::plonk::stdlib diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp index ee9085063a0..18a16933c33 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp @@ -13,6 +13,10 @@ namespace proof_system::plonk::stdlib { using namespace barretenberg; using namespace crypto::generators; +template concept SupportsLookupTables = (Composer::CIRCUIT_TYPE == CircuitType::ULTRA); + +template concept DoesNotSupportLookupTables = (Composer::CIRCUIT_TYPE != CircuitType::ULTRA); + /** * @brief cycle_group represents a group element of the proving system's embedded curve * i.e. a curve with a cofactor 1 defined over a field equal to the circuit's native field Composer::FF @@ -142,6 +146,8 @@ template class cycle_group { ScalarField get_value() const; field_t lo; field_t hi; + + Composer* get_context() const { return lo.get_context() != nullptr ? 
lo.get_context() : hi.get_context(); } }; class straus_scalar_slice { public: @@ -152,6 +158,7 @@ template class cycle_group { }; class straus_lookup_table { public: + straus_lookup_table() = default; straus_lookup_table(Composer* context, const cycle_group& base_point, const cycle_group& generator_point, @@ -163,8 +170,18 @@ template class cycle_group { size_t rom_id = 0; }; - static cycle_group fixed_base_batch_mul(const std::vector& scalars, - const std::vector& generator_indices); + static cycle_group fixed_base_batch_mul( + const std::vector& _scalars, + const std::vector& _base_points) requires SupportsLookupTables; + + static cycle_group fixed_base_batch_mul( + const std::vector& _scalars, + const std::vector& _base_points) requires DoesNotSupportLookupTables; + + // static cycle_group fixed_base_batch_mul(const std::vector& scalars, + // const std::vector& base_points) + // requires(!cycle_group::IS_ULTRA); + static cycle_group variable_base_batch_mul(const std::vector& scalars, const std::vector& base_points); @@ -175,6 +192,16 @@ template class cycle_group { bool _is_constant; }; +// template +// requires(cycle_group::IS_ULTRA) +// class cycle_group_upper : public cycle_group { +// using cycle_scalar = typename cycle_group::cycle_scalar; +// using affine_element = typename cycle_group::affine_element; + +// static cycle_group fixed_base_batch_mul(const std::vector& _scalars, +// const std::vector& _base_points); +// }; + template inline std::ostream& operator<<(std::ostream& os, cycle_group const& v) { diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp index 220b291af85..21b150448b0 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp @@ -1,3 +1,4 @@ +#include 
"barretenberg/crypto/pedersen_hash/pedersen.hpp" #include "barretenberg/numeric/random/engine.hpp" #include "barretenberg/stdlib/primitives/field/field.hpp" #include "barretenberg/stdlib/primitives/group/cycle_group.hpp" @@ -467,4 +468,41 @@ TYPED_TEST(CycleGroupTest, ProfileVariableBaseBatcMul) EXPECT_EQ(proof_result, true); } +TYPED_TEST(CycleGroupTest, TestFixedBaseBatchMul) +{ + STDLIB_TYPE_ALIASES; + // using witness_ct = stdlib::witness_t; + auto composer = Composer(); + + const size_t num_muls = 1; + + element expected = G1::point_at_infinity; + + // case 1, general MSM with inputs that are combinations of constant and witnesses + { + std::vector points; + std::vector scalars; + + for (size_t i = 0; i < num_muls; ++i) { + auto element = crypto::pedersen_hash::generator_info::get_lhs_generator(); + typename G1::subgroup_field scalar = G1::subgroup_field::random_element(); + + // 2: add entry where point is constant, scalar is witness + expected += (element * scalar); + points.emplace_back((element)); + scalars.emplace_back(cycle_group_ct::cycle_scalar::from_witness(&composer, scalar)); + + // // 4: add entry where point is constant, scalar is constant + // expected += (element * scalar); + // points.emplace_back((element)); + // scalars.emplace_back(typename cycle_group_ct::cycle_scalar(scalar)); + } + auto result = cycle_group_ct::fixed_base_batch_mul(scalars, points); + EXPECT_EQ(result.get_value(), affine_element(expected)); + } + + bool proof_result = composer.check_circuit(); + EXPECT_EQ(proof_result, true); +} + } // namespace stdlib_cycle_group_tests \ No newline at end of file From 604ad3f2c19bf195358caf0851b125432af011f3 Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Sat, 26 Aug 2023 20:05:06 +0000 Subject: [PATCH 08/50] fixed linting errors in proof_system/plookup_tables cycle_group::fixed_base_scalar_mul now working with plookup tables --- .../proof_system/plookup_tables/aes128.hpp | 49 ++-- .../proof_system/plookup_tables/blake2s.hpp | 13 +- 
.../proof_system/plookup_tables/dummy.hpp | 6 +- .../plookup_tables/fixed_base/fixed_base.cpp | 187 +++++++++++++++ .../plookup_tables/fixed_base/fixed_base.hpp | 82 +++++++ .../fixed_base/fixed_base_params.hpp | 34 +++ .../plookup_tables/keccak/keccak_chi.hpp | 8 +- .../plookup_tables/keccak/keccak_input.hpp | 12 +- .../plookup_tables/keccak/keccak_output.hpp | 11 +- .../plookup_tables/keccak/keccak_rho.hpp | 38 ++- .../plookup_tables/keccak/keccak_theta.hpp | 8 +- .../new_pedersen_experiment.cpp | 41 ---- .../new_pedersen_experiment.hpp | 218 ------------------ .../non_native_group_generator.cpp | 50 ++-- .../non_native_group_generator.hpp | 63 ++--- .../plookup_tables/plookup_tables.cpp | 36 ++- .../plookup_tables/plookup_tables.hpp | 38 ++- .../proof_system/plookup_tables/sha256.hpp | 15 +- .../proof_system/plookup_tables/sparse.hpp | 18 +- .../proof_system/plookup_tables/types.hpp | 54 +++-- .../proof_system/plookup_tables/uint.hpp | 6 +- .../stdlib/primitives/group/cycle_group.cpp | 17 +- .../stdlib/primitives/group/cycle_group.hpp | 1 - .../primitives/group/cycle_group.test.cpp | 10 +- 24 files changed, 509 insertions(+), 506 deletions(-) create mode 100644 circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/fixed_base/fixed_base.cpp create mode 100644 circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/fixed_base/fixed_base.hpp create mode 100644 circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/fixed_base/fixed_base_params.hpp delete mode 100644 circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/new_pedersen_experiment.cpp delete mode 100644 circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/new_pedersen_experiment.hpp diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/aes128.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/aes128.hpp index 7c714effd43..bd05f6f8345 100644 
--- a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/aes128.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/aes128.hpp @@ -8,16 +8,15 @@ #include "sparse.hpp" #include "types.hpp" -namespace plookup { -namespace aes128_tables { +namespace plookup::aes128_tables { static constexpr uint64_t AES_BASE = 9; -static constexpr uint64_t aes_normalization_table[AES_BASE]{ +static constexpr std::array aes_normalization_table{ 1, 0, 0, 0, 0, 0, 0, 0, 0, }; inline std::array get_aes_sparse_values_from_key(const std::array key) { - const auto sparse = numeric::map_into_sparse_form(uint64_t(key[0])); + const auto sparse = numeric::map_into_sparse_form(static_cast(key[0])); return { barretenberg::fr(sparse), barretenberg::fr(0) }; } @@ -30,10 +29,10 @@ inline BasicTable generate_aes_sparse_table(BasicTableId id, const size_t table_ table.use_twin_keys = true; for (uint64_t i = 0; i < table.size; ++i) { uint64_t left = i; - const auto right = numeric::map_into_sparse_form((uint8_t)i); - table.column_1.emplace_back(barretenberg::fr(left)); - table.column_2.emplace_back(barretenberg::fr(0)); - table.column_3.emplace_back(barretenberg::fr(right)); + const auto right = numeric::map_into_sparse_form(static_cast(i)); + table.column_1.emplace_back(left); + table.column_2.emplace_back(0); + table.column_3.emplace_back(right); } table.get_values_from_key = &get_aes_sparse_values_from_key; @@ -56,21 +55,21 @@ inline BasicTable generate_aes_sparse_normalization_table(BasicTableId id, const table.table_index = table_index; for (uint64_t i = 0; i < AES_BASE; ++i) { uint64_t i_raw = i * AES_BASE * AES_BASE * AES_BASE; - uint64_t i_normalized = ((i & 1UL) == 1UL) * AES_BASE * AES_BASE * AES_BASE; + uint64_t i_normalized = static_cast((i & 1UL) == 1UL) * AES_BASE * AES_BASE * AES_BASE; for (uint64_t j = 0; j < AES_BASE; ++j) { uint64_t j_raw = j * AES_BASE * AES_BASE; - uint64_t j_normalized = ((j & 1UL) == 1UL) * AES_BASE * 
AES_BASE; + uint64_t j_normalized = static_cast((j & 1UL) == 1UL) * AES_BASE * AES_BASE; for (uint64_t k = 0; k < AES_BASE; ++k) { uint64_t k_raw = k * AES_BASE; - uint64_t k_normalized = ((k & 1UL) == 1UL) * AES_BASE; + uint64_t k_normalized = static_cast((k & 1UL) == 1UL) * AES_BASE; for (uint64_t m = 0; m < AES_BASE; ++m) { uint64_t m_raw = m; - uint64_t m_normalized = ((m & 1UL) == 1UL); + auto m_normalized = static_cast((m & 1UL) == 1UL); uint64_t left = i_raw + j_raw + k_raw + m_raw; uint64_t right = i_normalized + j_normalized + k_normalized + m_normalized; table.column_1.emplace_back(left); table.column_2.emplace_back(right); - table.column_3.emplace_back(barretenberg::fr(0)); + table.column_3.emplace_back(0); } } } @@ -127,10 +126,10 @@ inline MultiTable get_aes_input_table(const MultiTableId id = AES_INPUT) inline std::array get_aes_sbox_values_from_key(const std::array key) { const auto byte = numeric::map_from_sparse_form(key[0]); - uint8_t sbox_value = crypto::aes128::sbox[(uint8_t)byte]; - uint8_t swizzled = ((uint8_t)(sbox_value << 1) ^ (uint8_t)(((sbox_value >> 7) & 1) * 0x1b)); + uint8_t sbox_value = crypto::aes128::sbox[static_cast(byte)]; + uint8_t swizzled = (static_cast(sbox_value << 1) ^ static_cast(((sbox_value >> 7) & 1) * 0x1b)); return { barretenberg::fr(numeric::map_into_sparse_form(sbox_value)), - barretenberg::fr(numeric::map_into_sparse_form((uint8_t)(sbox_value ^ swizzled))) }; + barretenberg::fr(numeric::map_into_sparse_form(static_cast(sbox_value ^ swizzled))) }; } inline BasicTable generate_aes_sbox_table(BasicTableId id, const size_t table_index) @@ -141,15 +140,16 @@ inline BasicTable generate_aes_sbox_table(BasicTableId id, const size_t table_in table.size = 256; table.use_twin_keys = false; for (uint64_t i = 0; i < table.size; ++i) { - const auto first = numeric::map_into_sparse_form((uint8_t)i); - uint8_t sbox_value = crypto::aes128::sbox[(uint8_t)i]; - uint8_t swizzled = ((uint8_t)(sbox_value << 1) ^ (uint8_t)(((sbox_value >> 
7) & 1) * 0x1b)); + const auto first = numeric::map_into_sparse_form(static_cast(i)); + uint8_t sbox_value = crypto::aes128::sbox[static_cast(i)]; + uint8_t swizzled = + (static_cast(sbox_value << 1) ^ static_cast(((sbox_value >> 7) & 1) * 0x1b)); const auto second = numeric::map_into_sparse_form(sbox_value); - const auto third = numeric::map_into_sparse_form((uint8_t)(sbox_value ^ swizzled)); + const auto third = numeric::map_into_sparse_form(static_cast(sbox_value ^ swizzled)); - table.column_1.emplace_back(barretenberg::fr(first)); - table.column_2.emplace_back(barretenberg::fr(second)); - table.column_3.emplace_back(barretenberg::fr(third)); + table.column_1.emplace_back(first); + table.column_2.emplace_back(second); + table.column_3.emplace_back(third); } table.get_values_from_key = get_aes_sbox_values_from_key; @@ -173,5 +173,4 @@ inline MultiTable get_aes_sbox_table(const MultiTableId id = AES_SBOX) } return table; } -} // namespace aes128_tables -} // namespace plookup +} // namespace plookup::aes128_tables diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/blake2s.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/blake2s.hpp index 39df081ca01..159be79de08 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/blake2s.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/blake2s.hpp @@ -5,8 +5,7 @@ #include "sparse.hpp" #include "types.hpp" -namespace plookup { -namespace blake2s_tables { +namespace plookup::blake2s_tables { static constexpr size_t BITS_IN_LAST_SLICE = 5UL; static constexpr size_t SIZE_OF_LAST_SLICE = (1UL << BITS_IN_LAST_SLICE); @@ -21,8 +20,8 @@ inline std::array get_xor_rotate_values_from_key(const std: { uint64_t filtered_key0 = filter ? key[0] & 3ULL : key[0]; uint64_t filtered_key1 = filter ? 
key[1] & 3ULL : key[1]; - return { uint256_t(numeric::rotate32(uint32_t(filtered_key0) ^ uint32_t(filtered_key1), - uint32_t(num_rotated_output_bits))), + return { uint256_t{ numeric::rotate32(static_cast(filtered_key0) ^ static_cast(filtered_key1), + static_cast(num_rotated_output_bits)) }, 0ULL }; } @@ -50,7 +49,8 @@ inline BasicTable generate_xor_rotate_table(BasicTableId id, const size_t table_ j_copy &= 3ULL; } table.column_3.emplace_back( - uint256_t(numeric::rotate32(uint32_t(i_copy) ^ uint32_t(j_copy), uint32_t(num_rotated_output_bits)))); + uint256_t{ numeric::rotate32(static_cast(i_copy) ^ static_cast(j_copy), + static_cast(num_rotated_output_bits)) }); } } @@ -215,5 +215,4 @@ inline MultiTable get_blake2s_xor_rotate_7_table(const MultiTableId id = BLAKE_X return table; } -} // namespace blake2s_tables -} // namespace plookup +} // namespace plookup::blake2s_tables diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/dummy.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/dummy.hpp index e44ea72e8fb..81c491f6e7c 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/dummy.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/dummy.hpp @@ -9,8 +9,7 @@ #include "types.hpp" -namespace plookup { -namespace dummy_tables { +namespace plookup::dummy_tables { /** * @brief Lookup the value corresponding to a specific key @@ -93,5 +92,4 @@ inline MultiTable get_honk_dummy_multitable() table.get_table_values.emplace_back(&get_value_from_key); return table; } -} // namespace dummy_tables -} // namespace plookup \ No newline at end of file +} // namespace plookup::dummy_tables \ No newline at end of file diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/fixed_base/fixed_base.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/fixed_base/fixed_base.cpp new file mode 100644 
index 00000000000..98a6b0c60b7 --- /dev/null +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/fixed_base/fixed_base.cpp @@ -0,0 +1,187 @@ + +#include "./fixed_base.hpp" + +#include "barretenberg/common/constexpr_utils.hpp" +#include "barretenberg/numeric/bitop/pow.hpp" +#include "barretenberg/numeric/bitop/rotate.hpp" +#include "barretenberg/numeric/bitop/sparse_form.hpp" + +namespace plookup::fixed_base { + +table::single_lookup_table table::generate_single_lookup_table(const affine_element& base_point, + const affine_element& offset_generator) +{ + std::vector table_raw(MAX_TABLE_SIZE); + + element accumulator = offset_generator; + for (size_t i = 0; i < MAX_TABLE_SIZE; ++i) { + table_raw[i] = accumulator; + accumulator += base_point; + } + element::batch_normalize(&table_raw[0], MAX_TABLE_SIZE); + single_lookup_table table(MAX_TABLE_SIZE); + for (size_t i = 0; i < table_raw.size(); ++i) { + table[i] = affine_element{ table_raw[i].x, table_raw[i].y }; + } + return table; +} + +template table::fixed_base_scalar_mul_tables table::generate_tables(const affine_element& input) +{ + constexpr size_t NUM_TABLES = get_num_tables_per_multi_table(); + + fixed_base_scalar_mul_tables result; + result.reserve(NUM_TABLES); + + std::vector input_buf; + serialize::write(input_buf, input); + const auto offset_generators = grumpkin::g1::derive_generators_secure(input_buf, MAX_TABLE_SIZE); + + grumpkin::g1::element accumulator = input; + for (size_t i = 0; i < NUM_TABLES; ++i) { + result.emplace_back(generate_single_lookup_table(accumulator, offset_generators[i])); + for (size_t j = 0; j < BITS_PER_TABLE; ++j) { + accumulator = accumulator.dbl(); + } + } + return result; +} + +template +grumpkin::g1::affine_element table::generate_generator_offset(const grumpkin::g1::affine_element& input) +{ + constexpr size_t NUM_TABLES = get_num_tables_per_multi_table(); + + std::vector input_buf; + serialize::write(input_buf, input); + const auto 
offset_generators = grumpkin::g1::derive_generators_secure(input_buf, NUM_TABLES); + grumpkin::g1::element acc = grumpkin::g1::point_at_infinity; + for (const auto& gen : offset_generators) { + acc += gen; + } + return acc; +} +bool table::lookup_table_exists_for_point(const grumpkin::g1::affine_element& input) +{ + return (input == crypto::pedersen_hash::generator_info::get_lhs_generator() || + input == crypto::pedersen_hash::generator_info::get_rhs_generator()); +} + +std::optional> table::get_lookup_table_ids_for_point( + const grumpkin::g1::affine_element& input) +{ + if (input == crypto::pedersen_hash::generator_info::get_lhs_generator()) { + return { { FIXED_BASE_LEFT_LO, FIXED_BASE_LEFT_HI } }; + } + if (input == crypto::pedersen_hash::generator_info::get_rhs_generator()) { + return { { FIXED_BASE_RIGHT_LO, FIXED_BASE_RIGHT_HI } }; + } + return {}; +} + +std::optional table::get_generator_offset_for_table_id(const MultiTableId table_id) +{ + if (table_id == FIXED_BASE_LEFT_LO) { + return fixed_base_table_offset_generators[0]; + } + if (table_id == FIXED_BASE_LEFT_HI) { + return fixed_base_table_offset_generators[1]; + } + if (table_id == FIXED_BASE_RIGHT_LO) { + return fixed_base_table_offset_generators[2]; + } + if (table_id == FIXED_BASE_RIGHT_HI) { + return fixed_base_table_offset_generators[3]; + } + return {}; +} +template +BasicTable table::generate_basic_fixed_base_table(BasicTableId id, size_t basic_table_index, size_t table_index) +{ + static_assert(multitable_index < NUM_FIXED_BASE_MULTI_TABLES); + ASSERT(table_index < MAX_NUM_TABLES_IN_MULTITABLE); + + const size_t multitable_bits = get_num_bits_of_multi_table(multitable_index); + const size_t bits_covered_by_previous_tables_in_multitable = BITS_PER_TABLE * table_index; + const bool is_small_table = (multitable_bits - bits_covered_by_previous_tables_in_multitable) < BITS_PER_TABLE; + const size_t table_bits = + is_small_table ?
multitable_bits - bits_covered_by_previous_tables_in_multitable : BITS_PER_TABLE; + const size_t table_size = 1ULL << table_bits; + BasicTable table; + table.id = id; + table.table_index = basic_table_index; + table.size = table_size; + table.use_twin_keys = false; + + const auto& basic_table = fixed_base_tables[multitable_index][table_index]; + + for (size_t i = 0; i < table.size; ++i) { + table.column_1.emplace_back(i); + table.column_2.emplace_back(basic_table[i].x); + table.column_3.emplace_back(basic_table[i].y); + } + table.get_values_from_key = nullptr; + + // this needs to be a compile-time loop so we can convert `table_index, multitable_index` into a template parameter. + // prevents us having to make `table_index` a template parameter of this method, which simplifies upstream code + barretenberg::constexpr_for<0, MAX_NUM_TABLES_IN_MULTITABLE, 1>([&]() { + if (i == table_index) { + table.get_values_from_key = &get_basic_fixed_base_table_values; + } + }); + ASSERT(table.get_values_from_key != nullptr); + table.column_1_step_size = table.size; + table.column_2_step_size = 0; + table.column_3_step_size = 0; + + return table; +} + +template MultiTable table::get_fixed_base_table(const MultiTableId id) +{ + constexpr size_t NUM_TABLES = get_num_tables_per_multi_table(); + + MultiTable table(MAX_TABLE_SIZE, 0, 0, NUM_TABLES); + + table.id = id; + table.get_table_values.resize(NUM_TABLES); + table.lookup_ids.resize(NUM_TABLES); + + barretenberg::constexpr_for<0, NUM_TABLES, 1>([&]() { + table.slice_sizes.emplace_back(MAX_TABLE_SIZE); + table.get_table_values[i] = &get_basic_fixed_base_table_values; + size_t idx = i; + if constexpr (multitable_index == 0) { + idx += static_cast(FIXED_BASE_0_0); + } else if constexpr (multitable_index == 1) { + idx += static_cast(FIXED_BASE_1_0); + } else if constexpr (multitable_index == 2) { + idx += static_cast(FIXED_BASE_2_0); + } else if constexpr (multitable_index == 3) { + idx += static_cast(FIXED_BASE_3_0); + } + 
static_assert(multitable_index < NUM_FIXED_BASE_MULTI_TABLES); + table.lookup_ids[i] = static_cast(idx); + }); + return table; +} + +template grumpkin::g1::affine_element table::generate_generator_offset( + const grumpkin::g1::affine_element& input); +template grumpkin::g1::affine_element table::generate_generator_offset( + const grumpkin::g1::affine_element& input); +template table::fixed_base_scalar_mul_tables table::generate_tables( + const table::affine_element& input); +template table::fixed_base_scalar_mul_tables table::generate_tables( + const table::affine_element& input); + +template BasicTable table::generate_basic_fixed_base_table<0>(BasicTableId, size_t, size_t); +template BasicTable table::generate_basic_fixed_base_table<1>(BasicTableId, size_t, size_t); +template BasicTable table::generate_basic_fixed_base_table<2>(BasicTableId, size_t, size_t); +template BasicTable table::generate_basic_fixed_base_table<3>(BasicTableId, size_t, size_t); +template MultiTable table::get_fixed_base_table<0, table::BITS_PER_LO_SCALAR>(MultiTableId); +template MultiTable table::get_fixed_base_table<1, table::BITS_PER_HI_SCALAR>(MultiTableId); +template MultiTable table::get_fixed_base_table<2, table::BITS_PER_LO_SCALAR>(MultiTableId); +template MultiTable table::get_fixed_base_table<3, table::BITS_PER_HI_SCALAR>(MultiTableId); + +} // namespace plookup::fixed_base \ No newline at end of file diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/fixed_base/fixed_base.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/fixed_base/fixed_base.hpp new file mode 100644 index 00000000000..3a3c3031782 --- /dev/null +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/fixed_base/fixed_base.hpp @@ -0,0 +1,82 @@ +#pragma once + +#include "../types.hpp" +#include "./fixed_base_params.hpp" +#include "barretenberg/crypto/pedersen_hash/pedersen.hpp" +#include 
"barretenberg/ecc/curves/grumpkin/grumpkin.hpp" + +namespace plookup::fixed_base { + +class table : public FixedBaseParams { + public: + using affine_element = grumpkin::g1::affine_element; + using element = grumpkin::g1::element; + + using single_lookup_table = std::vector; + using fixed_base_scalar_mul_tables = std::vector; + using all_multi_tables = std::array; + + static inline single_lookup_table generate_single_lookup_table(const affine_element& base_point, + const affine_element& offset_generator); + template static fixed_base_scalar_mul_tables generate_tables(const affine_element& input); + + template static affine_element generate_generator_offset(const affine_element& input); + + static constexpr uint256_t MAX_LO_SCALAR = uint256_t(1) << BITS_PER_LO_SCALAR; + inline static const affine_element lhs_base_point_lo = crypto::pedersen_hash::generator_info::get_lhs_generator(); + inline static const affine_element lhs_base_point_hi = element(lhs_base_point_lo) * MAX_LO_SCALAR; + inline static const affine_element rhs_base_point_lo = crypto::pedersen_hash::generator_info::get_rhs_generator(); + inline static const affine_element rhs_base_point_hi = element(rhs_base_point_lo) * MAX_LO_SCALAR; + inline static const all_multi_tables fixed_base_tables = { + table::generate_tables(lhs_base_point_lo), + table::generate_tables(lhs_base_point_hi), + table::generate_tables(rhs_base_point_lo), + table::generate_tables(rhs_base_point_hi), + }; + + inline static const std::array + fixed_base_table_offset_generators = { + table::generate_generator_offset(lhs_base_point_lo), + table::generate_generator_offset(lhs_base_point_hi), + table::generate_generator_offset(rhs_base_point_lo), + table::generate_generator_offset(rhs_base_point_hi), + }; + + static bool lookup_table_exists_for_point(const affine_element& input); + static std::optional> get_lookup_table_ids_for_point(const affine_element& input); + static std::optional get_generator_offset_for_table_id(MultiTableId 
table_id); + + template + static BasicTable generate_basic_fixed_base_table(BasicTableId id, size_t basic_table_index, size_t table_index); + template static MultiTable get_fixed_base_table(MultiTableId id); + + template + static std::array get_basic_fixed_base_table_values(const std::array key) + { + static_assert(multitable_index < NUM_FIXED_BASE_MULTI_TABLES); + static_assert(table_index < get_num_bits_of_multi_table(multitable_index)); + const auto& basic_table = fixed_base_tables[multitable_index][table_index]; + const auto index = static_cast(key[0]); + return { basic_table[index].x, basic_table[index].y }; + } +}; + +extern template table::affine_element table::generate_generator_offset( + const table::affine_element&); +extern template table::affine_element table::generate_generator_offset( + const table::affine_element&); +extern template table::fixed_base_scalar_mul_tables table::generate_tables( + const table::affine_element&); +extern template table::fixed_base_scalar_mul_tables table::generate_tables( + const table::affine_element&); + +extern template BasicTable table::generate_basic_fixed_base_table<0>(BasicTableId, size_t, size_t); +extern template BasicTable table::generate_basic_fixed_base_table<1>(BasicTableId, size_t, size_t); +extern template BasicTable table::generate_basic_fixed_base_table<2>(BasicTableId, size_t, size_t); +extern template BasicTable table::generate_basic_fixed_base_table<3>(BasicTableId, size_t, size_t); +extern template MultiTable table::get_fixed_base_table<0, table::BITS_PER_LO_SCALAR>(MultiTableId); +extern template MultiTable table::get_fixed_base_table<1, table::BITS_PER_HI_SCALAR>(MultiTableId); +extern template MultiTable table::get_fixed_base_table<2, table::BITS_PER_LO_SCALAR>(MultiTableId); +extern template MultiTable table::get_fixed_base_table<3, table::BITS_PER_HI_SCALAR>(MultiTableId); + +} // namespace plookup::fixed_base \ No newline at end of file diff --git 
a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/fixed_base/fixed_base_params.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/fixed_base/fixed_base_params.hpp new file mode 100644 index 00000000000..f5ecfa32cde --- /dev/null +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/fixed_base/fixed_base_params.hpp @@ -0,0 +1,34 @@ +#pragma once + +#include +#include + +namespace plookup { +struct FixedBaseParams { + static constexpr size_t BITS_PER_TABLE = 9; + static constexpr size_t BITS_ON_CURVE = 254; + static constexpr size_t BITS_PER_LO_SCALAR = 128; + static constexpr size_t BITS_PER_HI_SCALAR = BITS_ON_CURVE - BITS_PER_LO_SCALAR; + static constexpr size_t MAX_TABLE_SIZE = (1UL) << BITS_PER_TABLE; + static constexpr size_t MAX_NUM_TABLES_IN_MULTITABLE = + (BITS_PER_LO_SCALAR / BITS_PER_TABLE) + (BITS_PER_LO_SCALAR % BITS_PER_TABLE == 0 ? 0 : 1); + static constexpr size_t NUM_FIXED_BASE_MULTI_TABLES = 4; + static constexpr size_t NUM_TABLES_PER_LO_MULTITABLE = + (BITS_PER_LO_SCALAR / BITS_PER_TABLE) + ((BITS_PER_LO_SCALAR % BITS_PER_TABLE == 0) ? 0 : 1); + static constexpr size_t NUM_TABLES_PER_HI_MULTITABLE = + (BITS_PER_HI_SCALAR / BITS_PER_TABLE) + ((BITS_PER_HI_SCALAR % BITS_PER_TABLE == 0) ? 0 : 1); + static constexpr size_t NUM_BASIC_TABLES_PER_BASE_POINT = + (NUM_TABLES_PER_LO_MULTITABLE + NUM_TABLES_PER_HI_MULTITABLE); + static constexpr size_t NUM_FIXED_BASE_BASIC_TABLES = NUM_BASIC_TABLES_PER_BASE_POINT * 2; + + template inline static constexpr size_t get_num_tables_per_multi_table() noexcept + { + return (num_bits / BITS_PER_TABLE) + ((num_bits % BITS_PER_TABLE == 0) ? 0 : 1); + } + static constexpr size_t get_num_bits_of_multi_table(const size_t multitable_index) + { + const bool is_lo_multi_table = (multitable_index & 1) == 0; + return is_lo_multi_table ?
BITS_PER_LO_SCALAR : BITS_PER_HI_SCALAR; + } +}; +} // namespace plookup \ No newline at end of file diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_chi.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_chi.hpp index 5db06fc8e78..396d4acbae0 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_chi.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_chi.hpp @@ -4,8 +4,7 @@ #include "barretenberg/common/constexpr_utils.hpp" #include "barretenberg/numeric/bitop/pow.hpp" -namespace plookup { -namespace keccak_tables { +namespace plookup::keccak_tables { /** * @brief Generates plookup tables required for CHI round of Keccak hash function @@ -59,7 +58,7 @@ namespace keccak_tables { class Chi { public: // 1 + 2a - b + c => a xor (~b & c) - static constexpr uint64_t CHI_NORMALIZATION_TABLE[5]{ + static constexpr std::array CHI_NORMALIZATION_TABLE{ 0, 0, 1, 1, 0, }; @@ -249,5 +248,4 @@ class Chi { return table; } }; -} // namespace keccak_tables -} // namespace plookup +} // namespace plookup::keccak_tables diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_input.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_input.hpp index ce161d24ebf..57ea1b06bd3 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_input.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_input.hpp @@ -5,8 +5,7 @@ #include "barretenberg/numeric/bitop/pow.hpp" #include "barretenberg/numeric/bitop/sparse_form.hpp" -namespace plookup { -namespace keccak_tables { +namespace plookup::keccak_tables { /** * @brief Generates plookup tables used convert 64-bit integers into a sparse representation used for Keccak hash @@ -65,9 
+64,9 @@ class KeccakInput { for (uint64_t i = 0; i < table.size; ++i) { const uint64_t source = i; const auto target = numeric::map_into_sparse_form(source); - table.column_1.emplace_back(barretenberg::fr(source)); - table.column_2.emplace_back(barretenberg::fr(target)); - table.column_3.emplace_back(barretenberg::fr(source >> msb_shift)); + table.column_1.emplace_back(source); + table.column_2.emplace_back(target); + table.column_3.emplace_back(source >> msb_shift); } table.get_values_from_key = &get_keccak_input_values; @@ -140,5 +139,4 @@ class KeccakInput { } }; -} // namespace keccak_tables -} // namespace plookup +} // namespace plookup::keccak_tables diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_output.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_output.hpp index 3aaebfcdc35..8a5483ad489 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_output.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_output.hpp @@ -7,8 +7,7 @@ #include "../sparse.hpp" #include "../types.hpp" -namespace plookup { -namespace keccak_tables { +namespace plookup::keccak_tables { /** * @brief Converts a base-11 sparse integer representation into a regular base-2 binary integer. 
@@ -25,6 +24,9 @@ class KeccakOutput { static constexpr uint64_t EFFECTIVE_BASE = 2; static constexpr size_t TABLE_BITS = 8; + // We're doing some degenerate compile-time work with this C-array that can't be done with std::array, + // We pass it as a uint64_t* template parameter, no easy way to do that with std::array + // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays) static constexpr uint64_t OUTPUT_NORMALIZATION_TABLE[2]{ 0, 1 }; /** @@ -107,7 +109,7 @@ class KeccakOutput { table.get_values_from_key = &sparse_tables::get_sparse_normalization_values; table.column_1_step_size = barretenberg::fr(numeric::pow64(static_cast(BASE), TABLE_BITS)); - table.column_2_step_size = barretenberg::fr(((uint64_t)1 << TABLE_BITS)); + table.column_2_step_size = barretenberg::fr((static_cast(1) << TABLE_BITS)); table.column_3_step_size = 0; return table; } @@ -171,5 +173,4 @@ class KeccakOutput { } }; -} // namespace keccak_tables -} // namespace plookup +} // namespace plookup::keccak_tables diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_rho.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_rho.hpp index d29f4009b05..70d56a0dd66 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_rho.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_rho.hpp @@ -4,8 +4,7 @@ #include "barretenberg/common/constexpr_utils.hpp" #include "barretenberg/numeric/bitop/pow.hpp" -namespace plookup { -namespace keccak_tables { +namespace plookup::keccak_tables { /** * @brief Generate the plookup tables used for the RHO round of the Keccak hash algorithm @@ -71,7 +70,7 @@ template class Rho { 0, 1, 62, 28, 27, 36, 44, 6, 55, 20, 3, 10, 43, 25, 39, 41, 45, 15, 21, 8, 18, 2, 61, 56, 14, }; - static constexpr uint64_t RHO_NORMALIZATION_TABLE[3]{ + static constexpr std::array RHO_NORMALIZATION_TABLE{ 0, 1, 0, @@ 
-100,7 +99,7 @@ template class Rho { base_shift *= BASE; } - return { barretenberg::fr(accumulator), barretenberg::fr(accumulator / divisor) }; + return { barretenberg::fr(accumulator), barretenberg::fr{ accumulator / divisor } }; } /** @@ -241,9 +240,9 @@ template class Rho { MultiTable table; table.id = id; - table.column_1_step_sizes.push_back(1); - table.column_2_step_sizes.push_back(1); - table.column_3_step_sizes.push_back(1); + table.column_1_step_sizes.emplace_back(1); + table.column_2_step_sizes.emplace_back(1); + table.column_3_step_sizes.emplace_back(1); // generate table selector values for the 'right' slice barretenberg::constexpr_for<0, num_right_tables, 1>([&] { @@ -254,18 +253,18 @@ template class Rho { constexpr uint64_t scaled_base = numeric::pow64(BASE, bit_slice); if (i == num_right_tables - 1) { - table.column_1_step_sizes.push_back(scaled_base); - table.column_2_step_sizes.push_back(0); - table.column_3_step_sizes.push_back(0); + table.column_1_step_sizes.emplace_back(scaled_base); + table.column_2_step_sizes.emplace_back(0); + table.column_3_step_sizes.emplace_back(0); } else { - table.column_1_step_sizes.push_back(scaled_base); - table.column_2_step_sizes.push_back(scaled_base); - table.column_3_step_sizes.push_back(0); + table.column_1_step_sizes.emplace_back(scaled_base); + table.column_2_step_sizes.emplace_back(scaled_base); + table.column_3_step_sizes.emplace_back(0); } table.slice_sizes.push_back(scaled_base); table.get_table_values.emplace_back(&get_rho_renormalization_values); - table.lookup_ids.push_back((BasicTableId)((size_t)KECCAK_RHO_1 + (bit_slice - 1))); + table.lookup_ids.push_back(static_cast(static_cast(KECCAK_RHO_1) + (bit_slice - 1))); }); // generate table selector values for the 'left' slice @@ -278,19 +277,18 @@ template class Rho { constexpr uint64_t scaled_base = numeric::pow64(BASE, bit_slice); if (i != num_left_tables - 1) { - table.column_1_step_sizes.push_back(scaled_base); - 
table.column_2_step_sizes.push_back(scaled_base); - table.column_3_step_sizes.push_back(0); + table.column_1_step_sizes.emplace_back(scaled_base); + table.column_2_step_sizes.emplace_back(scaled_base); + table.column_3_step_sizes.emplace_back(0); } table.slice_sizes.push_back(scaled_base); table.get_table_values.emplace_back(&get_rho_renormalization_values); - table.lookup_ids.push_back((BasicTableId)((size_t)KECCAK_RHO_1 + (bit_slice - 1))); + table.lookup_ids.push_back(static_cast(static_cast(KECCAK_RHO_1) + (bit_slice - 1))); }); return table; } }; -} // namespace keccak_tables -} // namespace plookup +} // namespace plookup::keccak_tables diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_theta.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_theta.hpp index f7ed27f908a..e93c8bf726e 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_theta.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_theta.hpp @@ -4,8 +4,7 @@ #include "barretenberg/common/constexpr_utils.hpp" #include "barretenberg/numeric/bitop/pow.hpp" -namespace plookup { -namespace keccak_tables { +namespace plookup::keccak_tables { /** * @brief Generates plookup tables required for THETA round of Keccak hash function @@ -55,7 +54,7 @@ class Theta { static constexpr size_t TABLE_BITS = 4; static constexpr uint64_t BASE = 11; - static constexpr uint64_t THETA_NORMALIZATION_TABLE[11]{ + static constexpr std::array THETA_NORMALIZATION_TABLE{ 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, }; @@ -251,5 +250,4 @@ class Theta { return table; } }; -} // namespace keccak_tables -} // namespace plookup +} // namespace plookup::keccak_tables diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/new_pedersen_experiment.cpp 
b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/new_pedersen_experiment.cpp deleted file mode 100644 index 691848d9480..00000000000 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/new_pedersen_experiment.cpp +++ /dev/null @@ -1,41 +0,0 @@ - -#include "./new_pedersen_experiment.hpp" - -namespace plookup::new_pedersen { - -bool table::lookup_table_exists_for_point(const grumpkin::g1::affine_element& input) -{ - return (input == crypto::pedersen_hash::generator_info::get_lhs_generator() || - input == crypto::pedersen_hash::generator_info::get_rhs_generator()); -} - -std::optional> table::get_lookup_table_ids_for_point( - const grumpkin::g1::affine_element& input) -{ - if (input == crypto::pedersen_hash::generator_info::get_lhs_generator()) { - return { { NEW_PEDERSEN_LEFT_LO, NEW_PEDERSEN_LEFT_HI } }; - } - if (input == crypto::pedersen_hash::generator_info::get_lhs_generator()) { - return { { NEW_PEDERSEN_RIGHT_LO, NEW_PEDERSEN_RIGHT_HI } }; - } - return {}; -} - -std::optional table::get_generator_offset_for_table_id(const MultiTableId table_id) -{ - if (table_id == NEW_PEDERSEN_LEFT_LO) { - return pedersen_table_offset_generators[0]; - } - if (table_id == NEW_PEDERSEN_LEFT_HI) { - return pedersen_table_offset_generators[1]; - } - if (table_id == NEW_PEDERSEN_RIGHT_LO) { - return pedersen_table_offset_generators[2]; - } - if (table_id == NEW_PEDERSEN_RIGHT_HI) { - return pedersen_table_offset_generators[3]; - } - return {}; -} - -} // namespace plookup::new_pedersen \ No newline at end of file diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/new_pedersen_experiment.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/new_pedersen_experiment.hpp deleted file mode 100644 index 23e9dd700f5..00000000000 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/new_pedersen_experiment.hpp +++ /dev/null @@ -1,218 +0,0 @@ 
-#pragma once - -#include "./types.hpp" - -#include "barretenberg/common/constexpr_utils.hpp" -#include "barretenberg/crypto/pedersen_hash/pedersen.hpp" -#include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp" -#include "barretenberg/numeric/bitop/pow.hpp" -#include "barretenberg/numeric/bitop/rotate.hpp" -#include "barretenberg/numeric/bitop/sparse_form.hpp" - -namespace plookup::new_pedersen { - -class table { - public: - // static constexpr size_t BITS_PER_HASH = 512; - static constexpr size_t BITS_PER_TABLE = 9; - // static constexpr size_t BITS_OF_BETA = 192; - static constexpr size_t BITS_ON_CURVE = 254; - static constexpr size_t BITS_PER_LAST_TABLE = 2; - static constexpr size_t PEDERSEN_TABLE_SIZE = (1UL) << BITS_PER_TABLE; - static constexpr size_t PEDERSEN_SMALL_TABLE_SIZE = (1UL) << BITS_PER_LAST_TABLE; - static constexpr size_t NUM_PEDERSEN_TABLES = - (BITS_ON_CURVE / BITS_PER_TABLE) + (BITS_ON_CURVE % BITS_PER_TABLE == 0 ? 0 : 1); - static constexpr size_t NUM_PEDERSEN_POINTS = 2; - static constexpr size_t NUM_PEDERSEN_MULTI_TABLES = 4; - using affine_element = grumpkin::g1::affine_element; - using element = grumpkin::g1::element; - - using single_lookup_table = std::vector; - using fixed_base_scalar_mul_tables = std::vector; - using all_multi_tables = std::array; - - static single_lookup_table generate_single_lookup_table(const affine_element& base_point, - const affine_element& offset_generator) - { - std::vector table_raw(PEDERSEN_TABLE_SIZE); - - element accumulator = offset_generator; - for (size_t i = 0; i < PEDERSEN_TABLE_SIZE; ++i) { - table_raw[i] = accumulator; - accumulator += base_point; - } - element::batch_normalize(&table_raw[0], PEDERSEN_TABLE_SIZE); - single_lookup_table table(PEDERSEN_TABLE_SIZE); - for (size_t i = 0; i < table_raw.size(); ++i) { - if (i < 16) { - std::cout << "IUWAHRGIAWERUGH " << table_raw[i].x << std::endl; - } - table[i] = affine_element{ table_raw[i].x, table_raw[i].y }; - } - return table; - } - - template - 
static fixed_base_scalar_mul_tables generate_tables(const grumpkin::g1::affine_element& input) - { - constexpr size_t NUM_TABLES = (num_bits / BITS_PER_TABLE) + ((num_bits % BITS_PER_TABLE) ? 1 : 0); - - fixed_base_scalar_mul_tables result; - result.resize(NUM_TABLES); - - std::vector input_buf; - serialize::write(input_buf, input); - const auto offset_generators = grumpkin::g1::derive_generators_secure(input_buf, PEDERSEN_TABLE_SIZE); - - grumpkin::g1::element accumulator = input; - for (size_t i = 0; i < NUM_TABLES; ++i) { - result.emplace_back(generate_single_lookup_table(accumulator, offset_generators[i])); - for (size_t j = 0; j < BITS_PER_TABLE; ++j) { - accumulator = accumulator.dbl(); - } - } - return result; - } - - template - static grumpkin::g1::affine_element generate_generator_offset(const grumpkin::g1::affine_element& input) - { - constexpr size_t NUM_TABLES = (num_table_bits / BITS_PER_TABLE) + ((num_table_bits % BITS_PER_TABLE) ? 1 : 0); - - std::vector input_buf; - serialize::write(input_buf, input); - const auto offset_generators = grumpkin::g1::derive_generators_secure(input_buf, NUM_TABLES); - grumpkin::g1::element acc = grumpkin::g1::point_at_infinity; - for (const auto& gen : offset_generators) { - acc += gen; - } - return acc; - } - - inline static const all_multi_tables pedersen_tables = { - table::generate_tables<128>(crypto::pedersen_hash::generator_info::get_lhs_generator()), - table::generate_tables<126>(grumpkin::g1::element(crypto::pedersen_hash::generator_info::get_lhs_generator()) * - (uint256_t(1) << 128)), - table::generate_tables<128>(crypto::pedersen_hash::generator_info::get_rhs_generator()), - table::generate_tables<126>(grumpkin::g1::element(crypto::pedersen_hash::generator_info::get_rhs_generator()) * - (uint256_t(1) << 128)), - }; - - inline static const std::array - pedersen_table_offset_generators = { - table::generate_generator_offset<128>(crypto::pedersen_hash::generator_info::get_lhs_generator()), - 
table::generate_generator_offset<126>( - grumpkin::g1::element(crypto::pedersen_hash::generator_info::get_lhs_generator()) * - (uint256_t(1) << 128)), - table::generate_generator_offset<128>(crypto::pedersen_hash::generator_info::get_rhs_generator()), - table::generate_generator_offset<126>( - grumpkin::g1::element(crypto::pedersen_hash::generator_info::get_rhs_generator()) * - (uint256_t(1) << 128)), - }; - template - inline static std::array get_basic_pedersen_table_values(const std::array key) - { - static_assert(multitable_index < NUM_PEDERSEN_MULTI_TABLES); - static_assert(table_index < NUM_PEDERSEN_TABLES); - const auto& basic_table = pedersen_tables[multitable_index][table_index]; - - // const auto& basic_table = pedersen_tables[generator_index][table_index]; - const auto index = static_cast(key[0]); - std::cout << "get basic table values. index = " << index << " x = " << basic_table[index].x << std::endl; - return { basic_table[index].x, basic_table[index].y }; - } - - template - static inline BasicTable generate_basic_pedersen_table(BasicTableId id, - size_t basic_table_index, - size_t table_index) - { - ASSERT(multitable_index < NUM_PEDERSEN_MULTI_TABLES); - ASSERT(table_index < NUM_PEDERSEN_TABLES); - - BasicTable table; - table.id = id; - table.table_index = basic_table_index; - table.size = PEDERSEN_TABLE_SIZE; - table.use_twin_keys = false; - - const auto& basic_table = pedersen_tables[multitable_index][table_index]; - // table::generate_tables( - // crypto::pedersen_hash::generator_info::get_lhs_generator())[table_index]; - - // const auto& basic_table = pedersen_tables[generator_index][table_index]; - - for (size_t i = 0; i < table.size; ++i) { - table.column_1.emplace_back(i); - table.column_2.emplace_back(basic_table[i].x); - table.column_3.emplace_back(basic_table[i].y); - } - table.get_values_from_key = nullptr; - barretenberg::constexpr_for<0, NUM_PEDERSEN_TABLES, 1>([&]() { - if (i == table_index) { - table.get_values_from_key = 
&get_basic_pedersen_table_values; - } - }); - ASSERT(table.get_values_from_key != nullptr); - table.column_1_step_size = table.size; - table.column_2_step_size = 0; - table.column_3_step_size = 0; - - return table; - } - - template - static inline MultiTable get_pedersen_table(const MultiTableId id = NEW_PEDERSEN_LEFT_LO) - { - constexpr size_t NUM_TABLES = (num_bits / BITS_PER_TABLE) + ((num_bits % BITS_PER_TABLE) ? 1 : 0); - - std::cout << "get pedersen table mtidx = " << multitable_index << " num bits " << num_bits << std::endl; - - // todo. split explicitly into 126 / 128 bit chunks. - // Construct 126 bit chunk out of 14 9-bit tables - // Construct 128 bit chunk out of 12 9-bit tables and 2 10-bit tables - MultiTable table(PEDERSEN_TABLE_SIZE, 0, 0, NUM_TABLES); - - std::cout << "q0" << std::endl; - std::cout << "NUM TABLES = " << NUM_TABLES << std::endl; - table.id = id; - for (size_t i = 0; i < NUM_TABLES; ++i) { - std::cout << "beep" << std::endl; - table.slice_sizes.emplace_back(PEDERSEN_TABLE_SIZE); - } - std::cout << "q1" << std::endl; - - table.get_table_values.resize(NUM_TABLES); - table.lookup_ids.resize(NUM_TABLES); - - std::cout << "q2" << std::endl; - - barretenberg::constexpr_for<0, NUM_TABLES, 1>([&]() { - table.get_table_values[i] = &get_basic_pedersen_table_values; - size_t idx = i; - if (multitable_index == 0) { - idx += static_cast(PEDERSEN_0_0); - } else if (multitable_index == 1) { - idx += static_cast(PEDERSEN_1_0); - } else if (multitable_index == 2) { - idx += static_cast(PEDERSEN_2_0); - } else { - idx += static_cast(PEDERSEN_3_0); - } - std::cout << "q3" << std::endl; - table.lookup_ids[i] = static_cast(idx); - std::cout << "q4" << std::endl; - }); - std::cout << "q5" << std::endl; - std::cout << "RETURNING TABLE W. 
SLICE SIZES SIZE = " << table.slice_sizes.size() << std::endl; - return table; - } - - static bool lookup_table_exists_for_point(const grumpkin::g1::affine_element& input); - static std::optional> get_lookup_table_ids_for_point( - const grumpkin::g1::affine_element& input); - - static std::optional get_generator_offset_for_table_id(MultiTableId table_id); -}; - -} // namespace plookup::new_pedersen \ No newline at end of file diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/non_native_group_generator.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/non_native_group_generator.cpp index 5a70295deaa..cb105b80dc3 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/non_native_group_generator.cpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/non_native_group_generator.cpp @@ -1,7 +1,6 @@ #include "non_native_group_generator.hpp" -namespace plookup { -namespace ecc_generator_tables { +namespace plookup::ecc_generator_tables { /** * Init 8-bit generator lookup tables @@ -31,9 +30,9 @@ template void ecc_generator_table::init_generator_tables() auto beta = G1::Fq::cube_root_of_unity(); for (size_t i = 0; i < 256; ++i) { - uint256_t endo_x = static_cast(point_table[i].x * beta); - uint256_t x = static_cast(point_table[i].x); - uint256_t y = static_cast(point_table[i].y); + auto endo_x = static_cast(point_table[i].x * beta); + auto x = static_cast(point_table[i].x); + auto y = static_cast(point_table[i].y); const uint256_t SHIFT = uint256_t(1) << 68; const uint256_t MASK = SHIFT - 1; @@ -60,18 +59,16 @@ template void ecc_generator_table::init_generator_tables() uint256_t y2 = y & MASK; y = y >> 68; uint256_t y3 = y & MASK; - ecc_generator_table::generator_xlo_table[i] = std::make_pair(x0, x1); - ecc_generator_table::generator_xhi_table[i] = std::make_pair(x2, x3); - ecc_generator_table::generator_endo_xlo_table[i] = - 
std::make_pair(endox0, endox1); - ecc_generator_table::generator_endo_xhi_table[i] = - std::make_pair(endox2, endox3); - ecc_generator_table::generator_ylo_table[i] = std::make_pair(y0, y1); - ecc_generator_table::generator_yhi_table[i] = std::make_pair(y2, y3); - ecc_generator_table::generator_xyprime_table[i] = std::make_pair( - barretenberg::fr(uint256_t(point_table[i].x)), barretenberg::fr(uint256_t(point_table[i].y))); - ecc_generator_table::generator_endo_xyprime_table[i] = std::make_pair( - barretenberg::fr(uint256_t(point_table[i].x * beta)), barretenberg::fr(uint256_t(point_table[i].y))); + ecc_generator_table::generator_xlo_table[i] = std::make_pair(x0, x1); + ecc_generator_table::generator_xhi_table[i] = std::make_pair(x2, x3); + ecc_generator_table::generator_endo_xlo_table[i] = std::make_pair(endox0, endox1); + ecc_generator_table::generator_endo_xhi_table[i] = std::make_pair(endox2, endox3); + ecc_generator_table::generator_ylo_table[i] = std::make_pair(y0, y1); + ecc_generator_table::generator_yhi_table[i] = std::make_pair(y2, y3); + ecc_generator_table::generator_xyprime_table[i] = std::make_pair( + barretenberg::fr{ uint256_t(point_table[i].x) }, barretenberg::fr{ uint256_t(point_table[i].y) }); + ecc_generator_table::generator_endo_xyprime_table[i] = std::make_pair( + barretenberg::fr{ uint256_t(point_table[i].x * beta) }, barretenberg::fr{ uint256_t(point_table[i].y) }); } init = true; } @@ -95,7 +92,7 @@ template std::array ecc_generator_table::get_xlo_values(const std::array key) { init_generator_tables(); - const size_t index = static_cast(key[0]); + const auto index = static_cast(key[0]); return { ecc_generator_table::generator_xlo_table[index].first, ecc_generator_table::generator_xlo_table[index].second }; } @@ -107,7 +104,7 @@ template std::array ecc_generator_table::get_xhi_values(const std::array key) { init_generator_tables(); - const size_t index = static_cast(key[0]); + const auto index = static_cast(key[0]); return { 
ecc_generator_table::generator_xhi_table[index].first, ecc_generator_table::generator_xhi_table[index].second }; } @@ -119,7 +116,7 @@ template std::array ecc_generator_table::get_xlo_endo_values(const std::array key) { init_generator_tables(); - const size_t index = static_cast(key[0]); + const auto index = static_cast(key[0]); return { ecc_generator_table::generator_endo_xlo_table[index].first, ecc_generator_table::generator_endo_xlo_table[index].second }; } @@ -131,7 +128,7 @@ template std::array ecc_generator_table::get_xhi_endo_values(const std::array key) { init_generator_tables(); - const size_t index = static_cast(key[0]); + const auto index = static_cast(key[0]); return { ecc_generator_table::generator_endo_xhi_table[index].first, ecc_generator_table::generator_endo_xhi_table[index].second }; } @@ -143,7 +140,7 @@ template std::array ecc_generator_table::get_ylo_values(const std::array key) { init_generator_tables(); - const size_t index = static_cast(key[0]); + const auto index = static_cast(key[0]); return { ecc_generator_table::generator_ylo_table[index].first, ecc_generator_table::generator_ylo_table[index].second }; } @@ -155,7 +152,7 @@ template std::array ecc_generator_table::get_yhi_values(const std::array key) { init_generator_tables(); - const size_t index = static_cast(key[0]); + const auto index = static_cast(key[0]); return { ecc_generator_table::generator_yhi_table[index].first, ecc_generator_table::generator_yhi_table[index].second }; } @@ -167,7 +164,7 @@ template std::array ecc_generator_table::get_xyprime_values(const std::array key) { init_generator_tables(); - const size_t index = static_cast(key[0]); + const auto index = static_cast(key[0]); return { ecc_generator_table::generator_xyprime_table[index].first, ecc_generator_table::generator_xyprime_table[index].second }; } @@ -179,7 +176,7 @@ template std::array ecc_generator_table::get_xyprime_endo_values(const std::array key) { init_generator_tables(); - const size_t index = 
static_cast(key[0]); + const auto index = static_cast(key[0]); return { ecc_generator_table::generator_endo_xyprime_table[index].first, ecc_generator_table::generator_endo_xyprime_table[index].second }; } @@ -494,5 +491,4 @@ MultiTable ecc_generator_table::get_xyprime_endo_table(const MultiTableId id template class ecc_generator_table; template class ecc_generator_table; -} // namespace ecc_generator_tables -} // namespace plookup \ No newline at end of file +} // namespace plookup::ecc_generator_tables \ No newline at end of file diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/non_native_group_generator.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/non_native_group_generator.hpp index b579d770264..79eed4ef66f 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/non_native_group_generator.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/non_native_group_generator.hpp @@ -6,16 +6,17 @@ #include "barretenberg/ecc/curves/secp256k1/secp256k1.hpp" #include -namespace plookup { -namespace ecc_generator_tables { +namespace plookup::ecc_generator_tables { template class ecc_generator_table { public: - typedef typename G1::element element; + using element = typename G1::element; /** * Store arrays of precomputed 8-bit lookup tables for generator point coordinates (and their endomorphism *equivalents) **/ + // TODO(@zac-williamson) convert these into static const class members + // NOLINTBEGIN(cppcoreguidelines-avoid-non-const-global-variables) inline static std::array, 256> generator_endo_xlo_table; inline static std::array, 256> generator_endo_xhi_table; inline static std::array, 256> generator_xlo_table; @@ -25,39 +26,39 @@ template class ecc_generator_table { inline static std::array, 256> generator_xyprime_table; inline static std::array, 256> generator_endo_xyprime_table; inline static bool init = false; + // 
NOLINTEND(cppcoreguidelines-avoid-non-const-global-variables) static void init_generator_tables(); - static size_t convert_position_to_shifted_naf(const size_t position); - static size_t convert_shifted_naf_to_position(const size_t shifted_naf); - static std::array get_xlo_endo_values(const std::array key); - static std::array get_xhi_endo_values(const std::array key); - static std::array get_xlo_values(const std::array key); - static std::array get_xhi_values(const std::array key); - static std::array get_ylo_values(const std::array key); - static std::array get_yhi_values(const std::array key); - static std::array get_xyprime_values(const std::array key); - static std::array get_xyprime_endo_values(const std::array key); - static BasicTable generate_xlo_table(BasicTableId id, const size_t table_index); - static BasicTable generate_xhi_table(BasicTableId id, const size_t table_index); - static BasicTable generate_xlo_endo_table(BasicTableId id, const size_t table_index); - static BasicTable generate_xhi_endo_table(BasicTableId id, const size_t table_index); - static BasicTable generate_ylo_table(BasicTableId id, const size_t table_index); - static BasicTable generate_yhi_table(BasicTableId id, const size_t table_index); - static BasicTable generate_xyprime_table(BasicTableId id, const size_t table_index); - static BasicTable generate_xyprime_endo_table(BasicTableId id, const size_t table_index); - static MultiTable get_xlo_table(const MultiTableId id, const BasicTableId basic_id); - static MultiTable get_xhi_table(const MultiTableId id, const BasicTableId basic_id); - static MultiTable get_xlo_endo_table(const MultiTableId id, const BasicTableId basic_id); - static MultiTable get_xhi_endo_table(const MultiTableId id, const BasicTableId basic_id); - static MultiTable get_ylo_table(const MultiTableId id, const BasicTableId basic_id); - static MultiTable get_yhi_table(const MultiTableId id, const BasicTableId basic_id); - static MultiTable get_xyprime_table(const 
MultiTableId id, const BasicTableId basic_id); - static MultiTable get_xyprime_endo_table(const MultiTableId id, const BasicTableId basic_id); + static size_t convert_position_to_shifted_naf(size_t position); + static size_t convert_shifted_naf_to_position(size_t shifted_naf); + static std::array get_xlo_endo_values(std::array key); + static std::array get_xhi_endo_values(std::array key); + static std::array get_xlo_values(std::array key); + static std::array get_xhi_values(std::array key); + static std::array get_ylo_values(std::array key); + static std::array get_yhi_values(std::array key); + static std::array get_xyprime_values(std::array key); + static std::array get_xyprime_endo_values(std::array key); + static BasicTable generate_xlo_table(BasicTableId id, size_t table_index); + static BasicTable generate_xhi_table(BasicTableId id, size_t table_index); + static BasicTable generate_xlo_endo_table(BasicTableId id, size_t table_index); + static BasicTable generate_xhi_endo_table(BasicTableId id, size_t table_index); + static BasicTable generate_ylo_table(BasicTableId id, size_t table_index); + static BasicTable generate_yhi_table(BasicTableId id, size_t table_index); + static BasicTable generate_xyprime_table(BasicTableId id, size_t table_index); + static BasicTable generate_xyprime_endo_table(BasicTableId id, size_t table_index); + static MultiTable get_xlo_table(MultiTableId id, BasicTableId basic_id); + static MultiTable get_xhi_table(MultiTableId id, BasicTableId basic_id); + static MultiTable get_xlo_endo_table(MultiTableId id, BasicTableId basic_id); + static MultiTable get_xhi_endo_table(MultiTableId id, BasicTableId basic_id); + static MultiTable get_ylo_table(MultiTableId id, BasicTableId basic_id); + static MultiTable get_yhi_table(MultiTableId id, BasicTableId basic_id); + static MultiTable get_xyprime_table(MultiTableId id, BasicTableId basic_id); + static MultiTable get_xyprime_endo_table(MultiTableId id, BasicTableId basic_id); }; extern template 
class ecc_generator_table; extern template class ecc_generator_table; -} // namespace ecc_generator_tables -} // namespace plookup +} // namespace plookup::ecc_generator_tables diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/plookup_tables.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/plookup_tables.cpp index d29a93a5848..9bd534982b6 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/plookup_tables.cpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/plookup_tables.cpp @@ -6,8 +6,11 @@ namespace plookup { using namespace barretenberg; namespace { -static std::array MULTI_TABLES; -static bool inited = false; +// TODO(@zac-williamson) convert these into static const members of a struct +// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) +std::array MULTI_TABLES; +// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) +bool inited = false; void init_multi_tables() { @@ -91,14 +94,14 @@ void init_multi_tables() keccak_tables::Chi::get_chi_output_table(MultiTableId::KECCAK_CHI_OUTPUT); MULTI_TABLES[MultiTableId::KECCAK_FORMAT_OUTPUT] = keccak_tables::KeccakOutput::get_keccak_output_table(MultiTableId::KECCAK_FORMAT_OUTPUT); - MULTI_TABLES[MultiTableId::NEW_PEDERSEN_LEFT_LO] = - new_pedersen::table::get_pedersen_table<0, 128>(MultiTableId::NEW_PEDERSEN_LEFT_LO); - MULTI_TABLES[MultiTableId::NEW_PEDERSEN_LEFT_HI] = - new_pedersen::table::get_pedersen_table<1, 126>(MultiTableId::NEW_PEDERSEN_LEFT_LO); - MULTI_TABLES[MultiTableId::NEW_PEDERSEN_RIGHT_LO] = - new_pedersen::table::get_pedersen_table<2, 128>(MultiTableId::NEW_PEDERSEN_RIGHT_LO); - MULTI_TABLES[MultiTableId::NEW_PEDERSEN_RIGHT_HI] = - new_pedersen::table::get_pedersen_table<3, 126>(MultiTableId::NEW_PEDERSEN_RIGHT_HI); + MULTI_TABLES[MultiTableId::FIXED_BASE_LEFT_LO] = + fixed_base::table::get_fixed_base_table<0, 
128>(MultiTableId::FIXED_BASE_LEFT_LO); + MULTI_TABLES[MultiTableId::FIXED_BASE_LEFT_HI] = + fixed_base::table::get_fixed_base_table<1, 126>(MultiTableId::FIXED_BASE_LEFT_HI); + MULTI_TABLES[MultiTableId::FIXED_BASE_RIGHT_LO] = + fixed_base::table::get_fixed_base_table<2, 128>(MultiTableId::FIXED_BASE_RIGHT_LO); + MULTI_TABLES[MultiTableId::FIXED_BASE_RIGHT_HI] = + fixed_base::table::get_fixed_base_table<3, 126>(MultiTableId::FIXED_BASE_RIGHT_HI); barretenberg::constexpr_for<0, 25, 1>([&]() { MULTI_TABLES[static_cast(MultiTableId::KECCAK_NORMALIZE_AND_ROTATE) + i] = @@ -123,27 +126,16 @@ ReadData get_lookup_accumulators(const MultiTableId id, const bool is_2_to_1_lookup) { // return multi-table, populating global array of all multi-tables if need be - std::cout << "z0" << std::endl; const auto& multi_table = create_table(id); - std::cout << "z1" << std::endl; const size_t num_lookups = multi_table.lookup_ids.size(); - std::cout << "z2" << std::endl; ReadData lookup; - std::cout << "z3" << std::endl; - std::cout << "slice sizes = " << multi_table.slice_sizes[0] << std::endl; - std::cout << "key a = " << uint256_t(key_a) << std::endl; - std::cout << "key a msb = " << uint256_t(key_a).get_msb() << std::endl; - std::cout << "num slices = " << multi_table.slice_sizes.size() << std::endl; - std::cout << "multitableid = " << id << std::endl; const auto key_a_slices = numeric::slice_input_using_variable_bases(key_a, multi_table.slice_sizes); const auto key_b_slices = numeric::slice_input_using_variable_bases(key_b, multi_table.slice_sizes); - std::cout << "z4" << std::endl; std::vector column_1_raw_values; std::vector column_2_raw_values; std::vector column_3_raw_values; - std::cout << "z5" << std::endl; for (size_t i = 0; i < num_lookups; ++i) { // get i-th table query function and then submit query @@ -157,12 +149,10 @@ ReadData get_lookup_accumulators(const MultiTableId id, const BasicTable::KeyEntry key_entry{ { key_a_slices[i], key_b_slices[i] }, values }; 
lookup.key_entries.emplace_back(key_entry); } - std::cout << "z6" << std::endl; lookup[ColumnIdx::C1].resize(num_lookups); lookup[ColumnIdx::C2].resize(num_lookups); lookup[ColumnIdx::C3].resize(num_lookups); - std::cout << "z7" << std::endl; /** * A multi-table consists of multiple basic tables (say L = 6). diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/plookup_tables.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/plookup_tables.hpp index d2707db51c4..145959017e3 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/plookup_tables.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/plookup_tables.hpp @@ -1,7 +1,7 @@ #pragma once #include "barretenberg/common/throw_or_abort.hpp" -#include "./new_pedersen_experiment.hpp" +#include "./fixed_base/fixed_base.hpp" #include "aes128.hpp" #include "blake2s.hpp" #include "dummy.hpp" @@ -28,27 +28,23 @@ ReadData get_lookup_accumulators(MultiTableId id, inline BasicTable create_basic_table(const BasicTableId id, const size_t index) { - // TODO(@zac-williamson) improve + // we have >50 basic fixed base tables so we match with some logic instead of a switch statement auto id_var = static_cast(id); - if (id_var >= static_cast(PEDERSEN_0_0) && - id_var < static_cast(PEDERSEN_0_0) + NUM_PEDERSEN_TABLES_LO) { - return new_pedersen::table::generate_basic_pedersen_table<0>( - id, index, id_var - static_cast(PEDERSEN_0_0)); - } - if (id_var >= static_cast(PEDERSEN_1_0) && - id_var < static_cast(PEDERSEN_1_0) + NUM_PEDERSEN_TABLES_HI) { - return new_pedersen::table::generate_basic_pedersen_table<1>( - id, index, id_var - static_cast(PEDERSEN_1_0)); - } - if (id_var >= static_cast(PEDERSEN_2_0) && - id_var < static_cast(PEDERSEN_2_0) + NUM_PEDERSEN_TABLES_LO) { - return new_pedersen::table::generate_basic_pedersen_table<2>( - id, index, id_var - static_cast(PEDERSEN_2_0)); - } - if (id_var >= 
static_cast(PEDERSEN_3_0) && - id_var < static_cast(PEDERSEN_3_0) + NUM_PEDERSEN_TABLES_HI) { - return new_pedersen::table::generate_basic_pedersen_table<3>( - id, index, id_var - static_cast(PEDERSEN_3_0)); + if (id_var >= static_cast(FIXED_BASE_0_0) && id_var < static_cast(FIXED_BASE_1_0)) { + return fixed_base::table::generate_basic_fixed_base_table<0>( + id, index, id_var - static_cast(FIXED_BASE_0_0)); + } + if (id_var >= static_cast(FIXED_BASE_1_0) && id_var < static_cast(FIXED_BASE_2_0)) { + return fixed_base::table::generate_basic_fixed_base_table<1>( + id, index, id_var - static_cast(FIXED_BASE_1_0)); + } + if (id_var >= static_cast(FIXED_BASE_2_0) && id_var < static_cast(FIXED_BASE_3_0)) { + return fixed_base::table::generate_basic_fixed_base_table<2>( + id, index, id_var - static_cast(FIXED_BASE_2_0)); + } + if (id_var >= static_cast(FIXED_BASE_3_0) && id_var < static_cast(PEDERSEN_29_SMALL)) { + return fixed_base::table::generate_basic_fixed_base_table<3>( + id, index, id_var - static_cast(FIXED_BASE_3_0)); } switch (id) { case AES_SPARSE_MAP: { diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/sha256.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/sha256.hpp index b738571db15..33e3d9c57fc 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/sha256.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/sha256.hpp @@ -8,9 +8,11 @@ #include "sparse.hpp" #include "types.hpp" -namespace plookup { -namespace sha256_tables { +namespace plookup::sha256_tables { +// We're doing some degenerate compile-time work with this C-array that can't be done with std::array, +// We pass it as a uint64_t* template parameter, no easy way to do that with std::array +// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays) static constexpr uint64_t choose_normalization_table[28]{ /* xor result = 0 */ 0, // e + 2f + 3g = 0 => e = 0, f = 0, g = 0 => 
t = 0 @@ -46,6 +48,9 @@ static constexpr uint64_t choose_normalization_table[28]{ 2, // e + 2f + 3g = 6 => e = 1, f = 1, g = 1 => t = 1 }; +// We're doing some degenerate compile-time work with this C-array that can't be done with std::array, +// We pass it as a uint64_t* template parameter, no easy way to do that with std::array +// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays) static constexpr uint64_t majority_normalization_table[16]{ /* xor result = 0 */ 0, // a + b + c = 0 => (a & b) ^ (a & c) ^ (b & c) = 0 @@ -69,6 +74,9 @@ static constexpr uint64_t majority_normalization_table[16]{ 2, }; +// We're doing some degenerate compile-time work with this C-array that can't be done with std::array, +// We pass it as a uint64_t* template parameter, no easy way to do that with std::array +// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays) static constexpr uint64_t witness_extension_normalization_table[16]{ /* xor result = 0 */ 0, @@ -423,5 +431,4 @@ inline MultiTable get_majority_input_table(const MultiTableId id = SHA256_MAJ_IN return table; } -} // namespace sha256_tables -} // namespace plookup +} // namespace plookup::sha256_tables diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/sparse.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/sparse.hpp index 006f9a3c7b0..6a9cf3a12a9 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/sparse.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/sparse.hpp @@ -7,8 +7,7 @@ #include "barretenberg/numeric/bitop/rotate.hpp" #include "barretenberg/numeric/bitop/sparse_form.hpp" -namespace plookup { -namespace sparse_tables { +namespace plookup::sparse_tables { template inline std::array get_sparse_table_with_rotation_values(const std::array key) @@ -16,7 +15,7 @@ inline std::array get_sparse_table_with_rotation_values(con const auto t0 = numeric::map_into_sparse_form(key[0]); 
barretenberg::fr t1; if constexpr (num_rotated_bits > 0) { - t1 = numeric::map_into_sparse_form(numeric::rotate32((uint32_t)key[0], num_rotated_bits)); + t1 = numeric::map_into_sparse_form(numeric::rotate32(static_cast(key[0]), num_rotated_bits)); } else { t1 = t0; } @@ -35,12 +34,12 @@ inline BasicTable generate_sparse_table_with_rotation(BasicTableId id, const siz for (uint64_t i = 0; i < table.size; ++i) { const uint64_t source = i; const auto target = numeric::map_into_sparse_form(source); - table.column_1.emplace_back(barretenberg::fr(source)); + table.column_1.emplace_back(source); table.column_2.emplace_back(barretenberg::fr(target)); if constexpr (num_rotated_bits > 0) { const auto rotated = - numeric::map_into_sparse_form(numeric::rotate32((uint32_t)source, num_rotated_bits)); + numeric::map_into_sparse_form(numeric::rotate32(static_cast(source), num_rotated_bits)); table.column_3.emplace_back(barretenberg::fr(rotated)); } else { table.column_3.emplace_back(barretenberg::fr(target)); @@ -98,22 +97,21 @@ inline BasicTable generate_sparse_normalization_table(BasicTableId id, const siz const auto& limbs = accumulator.get_limbs(); uint64_t key = 0; for (size_t j = 0; j < num_bits; ++j) { - const size_t table_idx = static_cast(limbs[j]); + const auto table_idx = static_cast(limbs[j]); key += ((base_table[table_idx]) << static_cast(j)); } table.column_1.emplace_back(accumulator.get_sparse_value()); table.column_2.emplace_back(key); - table.column_3.emplace_back(barretenberg::fr(0)); + table.column_3.emplace_back(0); accumulator += to_add; } table.get_values_from_key = &get_sparse_normalization_values; table.column_1_step_size = barretenberg::fr(table.size); - table.column_2_step_size = barretenberg::fr(((uint64_t)1 << num_bits)); + table.column_2_step_size = barretenberg::fr{ (static_cast(1) << num_bits) }; table.column_3_step_size = barretenberg::fr(0); return table; } -} // namespace sparse_tables -} // namespace plookup +} // namespace plookup::sparse_tables 
diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/types.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/types.hpp index 1d1ca4a88e5..d17aa27b810 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/types.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/types.hpp @@ -1,14 +1,13 @@ #pragma once #include +#include #include -// #include "./new_pedersen_experiment.hpp" +#include "./fixed_base/fixed_base_params.hpp" #include "barretenberg/ecc/curves/bn254/fr.hpp" namespace plookup { -static constexpr size_t NUM_PEDERSEN_TABLES_LO = 15; // FIXFIXFIX -static constexpr size_t NUM_PEDERSEN_TABLES_HI = 14; // FIXFIXFIX -static constexpr size_t NUM_PEDERSEN_TABLES = 29; // FIXFIXFIX + enum BasicTableId { XOR, AND, @@ -54,11 +53,11 @@ enum BasicTableId { BLAKE_XOR_ROTATE1, BLAKE_XOR_ROTATE2, BLAKE_XOR_ROTATE4, - PEDERSEN_0_0, - PEDERSEN_1_0 = PEDERSEN_0_0 + NUM_PEDERSEN_TABLES_LO, - PEDERSEN_2_0 = PEDERSEN_1_0 + NUM_PEDERSEN_TABLES_HI, - PEDERSEN_3_0 = PEDERSEN_2_0 + NUM_PEDERSEN_TABLES_LO, - PEDERSEN_29_SMALL = PEDERSEN_3_0 + NUM_PEDERSEN_TABLES_HI, + FIXED_BASE_0_0, + FIXED_BASE_1_0 = FIXED_BASE_0_0 + FixedBaseParams::NUM_TABLES_PER_LO_MULTITABLE, + FIXED_BASE_2_0 = FIXED_BASE_1_0 + FixedBaseParams::NUM_TABLES_PER_HI_MULTITABLE, + FIXED_BASE_3_0 = FIXED_BASE_2_0 + FixedBaseParams::NUM_TABLES_PER_LO_MULTITABLE, + PEDERSEN_29_SMALL = FIXED_BASE_3_0 + FixedBaseParams::NUM_TABLES_PER_HI_MULTITABLE, PEDERSEN_28, PEDERSEN_27, PEDERSEN_26, @@ -121,12 +120,10 @@ enum MultiTableId { PEDERSEN_LEFT_LO, PEDERSEN_RIGHT_HI, PEDERSEN_RIGHT_LO, - NEW_PEDERSEN_LEFT, - NEW_PEDERSEN_RIGHT, - NEW_PEDERSEN_LEFT_HI, - NEW_PEDERSEN_LEFT_LO, - NEW_PEDERSEN_RIGHT_HI, - NEW_PEDERSEN_RIGHT_LO, + FIXED_BASE_LEFT_LO, + FIXED_BASE_LEFT_HI, + FIXED_BASE_RIGHT_LO, + FIXED_BASE_RIGHT_HI, UINT32_XOR, UINT32_AND, BN254_XLO, @@ -160,6 +157,7 @@ enum MultiTableId { }; 
struct MultiTable { + ~MultiTable() = default; // Coefficients are accumulated products of corresponding step sizes until that point std::vector column_1_coefficients; std::vector column_2_coefficients; @@ -170,17 +168,17 @@ struct MultiTable { std::vector column_1_step_sizes; std::vector column_2_step_sizes; std::vector column_3_step_sizes; - typedef std::array table_out; - typedef std::array table_in; + using table_out = std::array; + using table_in = std::array; std::vector get_table_values; private: void init_step_sizes() { const size_t num_lookups = column_1_coefficients.size(); - column_1_step_sizes.emplace_back(barretenberg::fr(1)); - column_2_step_sizes.emplace_back(barretenberg::fr(1)); - column_3_step_sizes.emplace_back(barretenberg::fr(1)); + column_1_step_sizes.emplace_back(1); + column_2_step_sizes.emplace_back(1); + column_3_step_sizes.emplace_back(1); std::vector coefficient_inverses(column_1_coefficients.begin(), column_1_coefficients.end()); std::copy(column_2_coefficients.begin(), column_2_coefficients.end(), std::back_inserter(coefficient_inverses)); @@ -212,17 +210,17 @@ struct MultiTable { } init_step_sizes(); } - MultiTable(const std::vector& col_1_coeffs, - const std::vector& col_2_coeffs, - const std::vector& col_3_coeffs) - : column_1_coefficients(col_1_coeffs) - , column_2_coefficients(col_2_coeffs) - , column_3_coefficients(col_3_coeffs) + MultiTable(std::vector col_1_coeffs, + std::vector col_2_coeffs, + std::vector col_3_coeffs) + : column_1_coefficients(std::move(col_1_coeffs)) + , column_2_coefficients(std::move(col_2_coeffs)) + , column_3_coefficients(std::move(col_3_coeffs)) { init_step_sizes(); } - MultiTable(){}; + MultiTable() = default; MultiTable(const MultiTable& other) = default; MultiTable(MultiTable&& other) = default; @@ -307,7 +305,7 @@ struct BasicTable { return key[0] < other.key[0] || ((key[0] == other.key[0]) && key[1] < other.key[1]); } - std::array to_sorted_list_components(const bool use_two_keys) const + 
[[nodiscard]] std::array to_sorted_list_components(const bool use_two_keys) const { return { barretenberg::fr(key[0]), diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/uint.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/uint.hpp index 05ccb4275e6..9c059e4c3f8 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/uint.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/uint.hpp @@ -4,8 +4,7 @@ #include "barretenberg/numeric/bitop/rotate.hpp" -namespace plookup { -namespace uint_tables { +namespace plookup::uint_tables { template inline std::array get_xor_rotate_values_from_key(const std::array key) @@ -103,5 +102,4 @@ inline MultiTable get_uint32_and_table(const MultiTableId id = UINT32_AND) return table; } -} // namespace uint_tables -} // namespace plookup +} // namespace plookup::uint_tables diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp index 92dc6eaf347..374aca3cd63 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp @@ -685,7 +685,7 @@ cycle_group cycle_group::fixed_base_batch_mul( for (size_t i = 0; i < num_points; ++i) { std::optional> table_id = - plookup::new_pedersen::table::get_lookup_table_ids_for_point(base_points[i]); + plookup::fixed_base::table::get_lookup_table_ids_for_point(base_points[i]); if (table_id.has_value()) { plookup_table_ids.emplace_back(table_id.value()[0]); plookup_table_ids.emplace_back(table_id.value()[1]); @@ -704,26 +704,20 @@ cycle_group cycle_group::fixed_base_batch_mul( std::vector lookup_points; element offset_generator_accumulator = G1::point_at_infinity; for (size_t i = 0; i < plookup_scalars.size(); ++i) { - 
std::cout << "i = " << i << std::endl; plookup::ReadData lookup_data = plookup_read::get_lookup_accumulators(plookup_table_ids[i], plookup_scalars[i]); - std::cout << "t0" << std::endl; for (size_t j = 0; j < lookup_data[ColumnIdx::C2].size(); ++j) { const auto x = lookup_data[ColumnIdx::C2][j]; const auto y = lookup_data[ColumnIdx::C3][j]; - std::cout << "x/y = " << x << " : " << y << std::endl; lookup_points.emplace_back(cycle_group(context, x, y, false)); } std::optional offset_1 = - plookup::new_pedersen::table::get_generator_offset_for_table_id(plookup_table_ids[i]); + plookup::fixed_base::table::get_generator_offset_for_table_id(plookup_table_ids[i]); ASSERT(offset_1.has_value()); - // ASSERT(offset_2.has_value()); offset_generator_accumulator += offset_1.value(); - // offset_generator_accumulator += offset_2.value(); } - std::cout << "mark" << std::endl; cycle_group accumulator; const size_t leftover_points = leftover_scalars.size(); if (leftover_points > 0) { @@ -762,30 +756,23 @@ cycle_group cycle_group::fixed_base_batch_mul( } } } - std::cout << "mark 2" << std::endl; // cycle_group accumulator = lookup_points[0]; for (size_t i = 0; i < lookup_points.size(); ++i) { - std::cout << "i = " << i << std::endl; if (i == 0) { - std::cout << "leftover empty? " << leftover_scalars.empty() << " : " << leftover_scalars.size() - << std::endl; if (leftover_scalars.empty()) { accumulator = lookup_points[i]; } else { accumulator = accumulator.unconditional_add(lookup_points[i]); } } else { - std::cout << "acc vs pt " << accumulator << " : " << lookup_points[i] << std::endl; accumulator = accumulator.unconditional_add(lookup_points[i]); } } - std::cout << "mark 3" << std::endl; if (has_constant_component) { // we subtract off the offset_generator_accumulator, so subtract constant component from the accumulator! 
offset_generator_accumulator -= constant_component; } - std::cout << "mark 4" << std::endl; cycle_group offset_generator_delta(affine_element(-offset_generator_accumulator)); accumulator = accumulator.unconditional_add(offset_generator_delta); return accumulator; diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp index 18a16933c33..6b319ede051 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp @@ -14,7 +14,6 @@ using namespace barretenberg; using namespace crypto::generators; template concept SupportsLookupTables = (Composer::CIRCUIT_TYPE == CircuitType::ULTRA); - template concept DoesNotSupportLookupTables = (Composer::CIRCUIT_TYPE != CircuitType::ULTRA); /** diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp index 21b150448b0..123cd1cf235 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp @@ -487,15 +487,15 @@ TYPED_TEST(CycleGroupTest, TestFixedBaseBatchMul) auto element = crypto::pedersen_hash::generator_info::get_lhs_generator(); typename G1::subgroup_field scalar = G1::subgroup_field::random_element(); - // 2: add entry where point is constant, scalar is witness + // 1: add entry where point is constant, scalar is witness expected += (element * scalar); points.emplace_back((element)); scalars.emplace_back(cycle_group_ct::cycle_scalar::from_witness(&composer, scalar)); - // // 4: add entry where point is constant, scalar is constant - // expected += (element * scalar); - // 
points.emplace_back((element)); - // scalars.emplace_back(typename cycle_group_ct::cycle_scalar(scalar)); + // 2: add entry where point is constant, scalar is constant + expected += (element * scalar); + points.emplace_back((element)); + scalars.emplace_back(typename cycle_group_ct::cycle_scalar(scalar)); } auto result = cycle_group_ct::fixed_base_batch_mul(scalars, points); EXPECT_EQ(result.get_value(), affine_element(expected)); From c09483e1033786320ea905d0d2ae656b0edbc76c Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Tue, 29 Aug 2023 18:21:53 +0000 Subject: [PATCH 09/50] added refactored pedersen hash methods + stdlib::pedersen_hash (needs tests) --- .../crypto/pedersen_commitment/pedersen.hpp | 6 +- .../pedersen_commitment/pedersen_refactor.cpp | 100 ++++++++++++++++ .../pedersen_commitment/pedersen_refactor.hpp | 62 ++++++++++ .../crypto/pedersen_hash/pedersen.hpp | 4 +- .../pedersen_hash/pedersen_refactor.cpp | 30 +++++ .../pedersen_hash/pedersen_refactor.hpp | 23 ++++ .../cpp/src/barretenberg/ecc/groups/group.hpp | 5 +- .../hash/pedersen/pedersen_refactor.cpp | 38 ++++++ .../hash/pedersen/pedersen_refactor.hpp | 37 ++++++ .../stdlib/primitives/group/cycle_group.cpp | 112 ++++++++---------- .../stdlib/primitives/group/cycle_group.hpp | 8 ++ .../primitives/group/cycle_group.test.cpp | 30 +++++ 12 files changed, 382 insertions(+), 73 deletions(-) create mode 100644 circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_refactor.cpp create mode 100644 circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_refactor.hpp create mode 100644 circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen_refactor.cpp create mode 100644 circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen_refactor.hpp create mode 100644 circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_refactor.cpp create mode 100644 
circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_refactor.hpp diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen.hpp index 3571016ebd7..80add981996 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen.hpp @@ -4,8 +4,7 @@ #include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp" #include -namespace crypto { -namespace pedersen_commitment { +namespace crypto::pedersen_commitment { grumpkin::g1::element commit_single(const barretenberg::fr& in, generators::generator_index_t const& index); @@ -26,5 +25,4 @@ grumpkin::fq compress_native(const std::vector& input, const size_t has grumpkin::fq compress_native(const std::vector>& input_pairs); -} // namespace pedersen_commitment -} // namespace crypto +} // namespace crypto::pedersen_commitment \ No newline at end of file diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_refactor.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_refactor.cpp new file mode 100644 index 00000000000..71add33ec44 --- /dev/null +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_refactor.cpp @@ -0,0 +1,100 @@ +#include "./pedersen_refactor.hpp" +#include "./convert_buffer_to_field.hpp" +#include "barretenberg/common/serialize.hpp" +#include "barretenberg/common/throw_or_abort.hpp" +#include +#ifndef NO_OMP_MULTITHREADING +#include +#endif + +using namespace crypto::generators; + +namespace crypto::pedersen_commitment_refactor { + +std::vector generator_info_temp::get_default_generators() +{ + const std::vector default_domain_separator(DEFAULT_DOMAIN_SEPARATOR.begin(), + DEFAULT_DOMAIN_SEPARATOR.end()); + + return 
grumpkin::g1::derive_generators_secure(default_domain_separator, DEFAULT_NUM_GENERATORS); +} + +std::vector generator_info_temp::get_generators(size_t num_generators, + size_t starting_index, + const std::string& domain_separator) +{ + std::vector result; + size_t start = starting_index; + size_t number = num_generators; + if (domain_separator == DEFAULT_DOMAIN_SEPARATOR && starting_index <= DEFAULT_NUM_GENERATORS) { + const size_t default_end = starting_index + num_generators < DEFAULT_NUM_GENERATORS + ? starting_index + num_generators + : DEFAULT_NUM_GENERATORS; + std::copy(default_generators.begin() + static_cast(starting_index), + default_generators.begin() + static_cast(default_end), + std::back_inserter(result)); + if (default_end == starting_index + num_generators) { + return result; + } + start = default_end; + number = num_generators - (default_end - starting_index); + } + + const std::vector domain_separator_bytes(domain_separator.begin(), domain_separator.end()); + + auto remainder = grumpkin::g1::derive_generators_secure(domain_separator_bytes, number, start); + + std::copy(remainder.begin(), remainder.end(), std::back_inserter(result)); + return result; +} + +grumpkin::g1::affine_element generator_info_temp::get_generator(size_t generator_index, + const std::string& domain_separator) +{ + return grumpkin::g1::get_secure_generator_from_index(generator_index, domain_separator); +} + +grumpkin::g1::affine_element generator_info_temp::get_lhs_generator() +{ + return lhs_generator; +} +grumpkin::g1::affine_element generator_info_temp::get_rhs_generator() +{ + return rhs_generator; +} +grumpkin::g1::affine_element generator_info_temp::get_length_generator() +{ + return length_generator; +} +/** + * Given a vector of fields, generate a pedersen commitment using the indexed generators. 
+ */ +grumpkin::g1::affine_element commit_native(const std::vector& inputs, + const size_t hash_index, + const std::string& domain_separator) +{ + const auto base_points = generator_info_temp::get_generators(inputs.size(), hash_index, domain_separator); + + grumpkin::g1::element result = grumpkin::g1::point_at_infinity; + + for (size_t i = 0; i < inputs.size(); ++i) { + result += grumpkin::g1::element(base_points[i]) * static_cast(inputs[i]); + } + return result; +} + +grumpkin::g1::affine_element commit_native(const std::vector& inputs, + const size_t hash_index, + const std::string& domain_separator) +{ + const auto base_points = generator_info_temp::get_generators(inputs.size(), hash_index, domain_separator); + + grumpkin::g1::element result = grumpkin::g1::point_at_infinity; + + for (size_t i = 0; i < inputs.size(); ++i) { + std::cout << "base point[" << i << "] = " << base_points[i] << std::endl; + result += grumpkin::g1::element(base_points[i]) * (inputs[i]); + } + return result; +} +} // namespace crypto::pedersen_commitment_refactor diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_refactor.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_refactor.hpp new file mode 100644 index 00000000000..9d61993c4b8 --- /dev/null +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_refactor.hpp @@ -0,0 +1,62 @@ +#pragma once +#include "../generators/fixed_base_scalar_mul.hpp" +#include "../generators/generator_data.hpp" +#include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp" +#include + +namespace crypto::pedersen_commitment_refactor { + +struct generator_info_temp { + inline static constexpr size_t DEFAULT_NUM_GENERATORS = 32; + inline static const std::string DEFAULT_DOMAIN_SEPARATOR = "default_domain_separator"; + inline static const grumpkin::g1::affine_element lhs_generator = + grumpkin::g1::get_secure_generator_from_index(0, 
DEFAULT_DOMAIN_SEPARATOR); + inline static const grumpkin::g1::affine_element rhs_generator = + grumpkin::g1::get_secure_generator_from_index(1, DEFAULT_DOMAIN_SEPARATOR); + + inline static const grumpkin::g1::affine_element length_generator = + grumpkin::g1::get_secure_generator_from_index(0, "pedersen_hash_length"); + + static std::vector get_default_generators(); + + static std::vector get_generators( + size_t num_generators, + size_t starting_index = 0, + const std::string& domain_separator = DEFAULT_DOMAIN_SEPARATOR); + static grumpkin::g1::affine_element get_generator(size_t generator_index, + const std::string& domain_separator = DEFAULT_DOMAIN_SEPARATOR); + inline static const std::vector default_generators = get_default_generators(); + + static grumpkin::g1::affine_element get_lhs_generator(); + static grumpkin::g1::affine_element get_rhs_generator(); + static grumpkin::g1::affine_element get_length_generator(); +}; + +grumpkin::g1::affine_element commit_native( + const std::vector& inputs, + size_t hash_index = 0, + const std::string& domain_separator = generator_info_temp::DEFAULT_DOMAIN_SEPARATOR); + +grumpkin::g1::affine_element commit_native( + const std::vector& inputs, + size_t hash_index = 0, + const std::string& domain_separator = generator_info_temp::DEFAULT_DOMAIN_SEPARATOR); + +// grumpkin::fq compress_native(const std::vector& inputs, +// size_t hash_index = 0, +// const std::vector& domain_separator = {}); + +// grumpkin::fq compress_native(const std::vector& input, +// size_t hash_index = 0, +// const std::vector& domain_separator = {}); + +// template +// grumpkin::fq compress_native(const std::array& inputs, +// const size_t hash_index = 0, +// const std::vector& domain_separator = {}) +// { +// std::vector converted(inputs.begin(), inputs.end()); +// return compress_native(converted, hash_index, domain_separator); +// } + +} // namespace crypto::pedersen_commitment_refactor diff --git 
a/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen.hpp index 4cd2b095cfd..028ae82e6f9 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen.hpp @@ -8,9 +8,9 @@ namespace crypto::pedersen_hash { struct generator_info { inline static const grumpkin::g1::affine_element lhs_generator = - grumpkin::g1::get_secure_generator_from_index(0, "pedersen_hash_generator"); + grumpkin::g1::get_secure_generator_from_index(0, "default_domain_separator"); inline static const grumpkin::g1::affine_element rhs_generator = - grumpkin::g1::get_secure_generator_from_index(1, "pedersen_hash_generator"); + grumpkin::g1::get_secure_generator_from_index(1, "default_domain_separator"); static grumpkin::g1::affine_element get_lhs_generator(); static grumpkin::g1::affine_element get_rhs_generator(); diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen_refactor.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen_refactor.cpp new file mode 100644 index 00000000000..d861bd9a861 --- /dev/null +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen_refactor.cpp @@ -0,0 +1,30 @@ +#include "./pedersen_refactor.hpp" +#include +#ifndef NO_OMP_MULTITHREADING +#include +#endif + +namespace crypto::pedersen_hash { + +using namespace generators; + +/** + * Given a vector of fields, generate a pedersen hash using the indexed generators. 
+ */ +grumpkin::fq hash_multiple(const std::vector& inputs, + const size_t hash_index, + const std::string& domain_separator) +{ + const auto length_generator = pedersen_commitment_refactor::generator_info_temp::get_length_generator(); + const auto base_points = + pedersen_commitment_refactor::generator_info_temp::get_generators(inputs.size(), hash_index, domain_separator); + + grumpkin::g1::element result = length_generator * grumpkin::fr(inputs.size()); + + for (size_t i = 0; i < inputs.size(); ++i) { + result += base_points[i] * grumpkin::fr(static_cast(inputs[i])); + } + return result.x; +} + +} // namespace crypto::pedersen_hash \ No newline at end of file diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen_refactor.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen_refactor.hpp new file mode 100644 index 00000000000..2f3bb99a824 --- /dev/null +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen_refactor.hpp @@ -0,0 +1,23 @@ +#pragma once +#include "../generators/fixed_base_scalar_mul.hpp" +#include "../generators/generator_data.hpp" +#include "../pedersen_commitment/pedersen_refactor.hpp" +#include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp" +#include + +namespace crypto::pedersen_hash_refactor { + +grumpkin::fq hash_multiple( + const std::vector& inputs, + size_t hash_index = 0, + const std::string& domain_separator = pedersen_commitment_refactor::generator_info_temp::DEFAULT_DOMAIN_SEPARATOR); + +inline grumpkin::fq hash( + const std::vector& inputs, + size_t hash_index = 0, + const std::string& domain_separator = pedersen_commitment_refactor::generator_info_temp::DEFAULT_DOMAIN_SEPARATOR) +{ + return hash_multiple(inputs, hash_index, domain_separator); +} + +} // namespace crypto::pedersen_hash_refactor diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/groups/group.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/groups/group.hpp 
index 6d656141520..46943bcc737 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/groups/group.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/groups/group.hpp @@ -94,7 +94,8 @@ template */ static std::vector derive_generators_secure(const std::vector& domain_separator, - const size_t num_generators) + const size_t num_generators, + const size_t starting_index = 0) { std::vector result; std::array domain_hash = sha256::sha256(domain_separator); @@ -104,7 +105,7 @@ template (i); uint32_t mask = 0xff; generator_preimage[32] = static_cast(generator_index >> 24); diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_refactor.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_refactor.cpp new file mode 100644 index 00000000000..661406b5028 --- /dev/null +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_refactor.cpp @@ -0,0 +1,38 @@ +#include "pedersen_refactor.hpp" +#include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp" +#include "barretenberg/stdlib/primitives/group/cycle_group.hpp" +namespace proof_system::plonk::stdlib { + +using namespace barretenberg; +using namespace crypto::generators; +using namespace proof_system; + +template +field_t pedersen_hash_refactor::hash_multiple(const std::vector& inputs, + const size_t hash_index, + const std::string& domain_separator, + const bool /*unused*/) +{ + + using cycle_group = cycle_group; + using cycle_scalar = typename cycle_group::cycle_scalar; + using affine_element = typename cycle_group::G1::affine_element; + std::vector base_points = { + crypto::pedersen_commitment_refactor::generator_info_temp::get_length_generator() + }; + auto _base_points = + crypto::pedersen_commitment_refactor::generator_info_temp::get_generators(hash_index, 0, domain_separator); + std::copy(_base_points.begin(), _base_points.end(), std::back_inserter(base_points)); + std::vector scalars; + 
scalars.emplace_back(field_t(inputs.size())); + for (const auto& in : inputs) { + scalars.emplace_back(in); + } + + auto result = cycle_group::fixed_base_batch_mul(scalars, base_points); + return result.x; +} + +INSTANTIATE_STDLIB_TYPE(pedersen_hash_refactor); + +} // namespace proof_system::plonk::stdlib diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_refactor.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_refactor.hpp new file mode 100644 index 00000000000..1c18c274468 --- /dev/null +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_refactor.hpp @@ -0,0 +1,37 @@ +#pragma once +#include "../../primitives/circuit_builders/circuit_builders_fwd.hpp" +#include "../../primitives/field/field.hpp" +#include "../../primitives/point/point.hpp" +#include "barretenberg/crypto/pedersen_commitment/pedersen_refactor.hpp" + +namespace proof_system::plonk::stdlib { + +using namespace barretenberg; +template class pedersen_hash_refactor { + + private: + using field_t = stdlib::field_t; + using point = stdlib::point; + using bool_t = stdlib::bool_t; + + public: + static field_t hash_multiple( + const std::vector& in, + size_t hash_index = 0, + const std::string& domain_separator = + crypto::pedersen_commitment_refactor::generator_info_temp::DEFAULT_DOMAIN_SEPARATOR, + bool validate_inputs_in_field = true); + + static field_t hash(const std::vector& in, + size_t hash_index = 0, + const std::string& domain_separator = + crypto::pedersen_commitment_refactor::generator_info_temp::DEFAULT_DOMAIN_SEPARATOR, + bool validate_inputs_in_field = true) + { + return hash_multiple(in, hash_index, domain_separator, validate_inputs_in_field); + } +}; + +EXTERN_STDLIB_TYPE(pedersen_hash_refactor); + +} // namespace proof_system::plonk::stdlib diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp 
b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp index 374aca3cd63..b093783da15 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp @@ -552,17 +552,21 @@ cycle_group cycle_group::straus_lookup_table::read(const fie return cycle_group(_context, x, y, false); } -// for fixed base batch mul... -// 1. take each generator point and split into table_bits chunks -// 2. precompute multiples of each generator point and store in lookup table -// 3. template -cycle_group cycle_group::variable_base_batch_mul(const std::vector& _scalars, - const std::vector& _base_points) +typename cycle_group::batch_mul_internal_output cycle_group::_batch_mul_internal( + const std::vector& _scalars, + const std::vector& _base_points, + const bool unconditional_add) { ASSERT(_scalars.size() == _base_points.size()); Composer* context = nullptr; + for (auto& scalar : _scalars) { + if (scalar.lo.get_context() != nullptr) { + context = scalar.get_context(); + break; + } + } for (auto& point : _base_points) { if (point.get_context() != nullptr) { context = point.get_context(); @@ -586,7 +590,7 @@ cycle_group cycle_group::variable_base_batch_mul(const std:: } } if (!has_non_constant_component) { - return cycle_group(constant_component); + return { cycle_group(constant_component), G1::affine_point_at_infinity }; } // core algorithm // define a `table_bits` size lookup table @@ -601,41 +605,50 @@ cycle_group cycle_group::variable_base_batch_mul(const std:: straus_lookup_table(context, base_points[i], generators.generators[i + 1], table_bits)); } - element debug_acc = G1::point_at_infinity; - uint256_t debug_scalar = uint256_t(scalars[0].lo.get_value()) + - (uint256_t(scalars[0].hi.get_value()) * (uint256_t(1) << (cycle_scalar::LO_BITS))); - element offset_generator_accumulator = generators.generators[0]; cycle_group 
accumulator = generators.generators[0]; + for (size_t i = 0; i < num_rounds; ++i) { if (i != 0) { - // NOTE: IN FIXED BASE MODE WE CAN DOUBLE THE TABLES INSTEAD OF THE POINTS (if not using plookup) for (size_t j = 0; j < table_bits; ++j) { accumulator = accumulator.dbl(); offset_generator_accumulator = offset_generator_accumulator.dbl(); - debug_acc = debug_acc.dbl(); } } for (size_t j = 0; j < num_points; ++j) { const field_t scalar_slice = scalar_slices[j].read(num_rounds - i - 1); const cycle_group point = point_tables[j].read(scalar_slice); - accumulator = accumulator.constrained_unconditional_add(point); + accumulator = unconditional_add ? accumulator.unconditional_add(point) + : accumulator.constrained_unconditional_add(point); offset_generator_accumulator = offset_generator_accumulator + element(generators.generators[j + 1]); } } - // NOTE: should this be a general addition? - // e.g. x.[P] + -x.[P] . We want to be able to support this :/ if (has_constant_component) { // we subtract off the offset_generator_accumulator, so subtract constant component from the accumulator! offset_generator_accumulator -= constant_component; } - cycle_group offset_generator_delta(affine_element(-offset_generator_accumulator)); + // cycle_group offset_generator_delta(affine_element(-offset_generator_accumulator)); + // // use a full conditional add here in case we end with a point at infinity or a point doubling. + // // e.g. x[P] + x[P], or x[P] + -x[P] + // accumulator = accumulator + offset_generator_delta; + + return { accumulator, affine_element(-offset_generator_accumulator) }; +} + +template +cycle_group cycle_group::variable_base_batch_mul(const std::vector& _scalars, + const std::vector& _base_points) +{ + ASSERT(_scalars.size() == _base_points.size()); + + auto [accumulator, offset_generator_accumulator] = _batch_mul_internal(_scalars, _base_points, false); // use a full conditional add here in case we end with a point at infinity or a point doubling. // e.g. 
x[P] + x[P], or x[P] + -x[P] - accumulator = accumulator + offset_generator_delta; - + if (!offset_generator_accumulator.is_point_at_infinity()) { + return accumulator + (cycle_group(offset_generator_accumulator)); + } return accumulator; } @@ -660,7 +673,7 @@ cycle_group cycle_group::fixed_base_batch_mul( bool has_non_constant_component = false; element constant_component = G1::point_at_infinity; for (size_t i = 0; i < _scalars.size(); ++i) { - if (_scalars[i].is_constant()) { + if (_scalars[i].is_constant() && (uint256_t(_scalars[i].get_value()) != 0)) { has_constant_component = true; constant_component += _base_points[i] * _scalars[i].get_value(); } else { @@ -681,7 +694,7 @@ cycle_group cycle_group::fixed_base_batch_mul( std::vector plookup_base_points; std::vector plookup_scalars; std::vector leftover_scalars; - std::vector leftover_base_points; + std::vector leftover_base_points; for (size_t i = 0; i < num_points; ++i) { std::optional> table_id = @@ -719,44 +732,15 @@ cycle_group cycle_group::fixed_base_batch_mul( offset_generator_accumulator += offset_1.value(); } cycle_group accumulator; - const size_t leftover_points = leftover_scalars.size(); - if (leftover_points > 0) { - - auto generators = offset_generators(leftover_points); - std::vector scalar_slices; - std::vector point_tables; - for (size_t i = 0; i < leftover_points; ++i) { - scalar_slices.emplace_back(straus_scalar_slice(context, leftover_scalars[i], table_bits)); - point_tables.emplace_back(straus_lookup_table( - context, cycle_group(leftover_base_points[i]), generators.generators[i + 1], table_bits)); - } - - element debug_acc = G1::point_at_infinity; - uint256_t debug_scalar = - uint256_t(leftover_scalars[0].lo.get_value()) + - (uint256_t(leftover_scalars[0].hi.get_value()) * (uint256_t(1) << (cycle_scalar::LO_BITS))); - - offset_generator_accumulator += generators.generators[0]; - accumulator = generators.generators[0]; - for (size_t i = 0; i < num_rounds; ++i) { - if (i != 0) { - - for 
(size_t j = 0; j < table_bits; ++j) { - accumulator = accumulator.dbl(); - offset_generator_accumulator = offset_generator_accumulator.dbl(); - debug_acc = debug_acc.dbl(); - } - } - - for (size_t j = 0; j < leftover_points; ++j) { - const field_t scalar_slice = scalar_slices[j].read(num_rounds - i - 1); - const cycle_group point = point_tables[j].read(scalar_slice); - accumulator = accumulator.constrained_unconditional_add(point); - offset_generator_accumulator = offset_generator_accumulator + element(generators.generators[j + 1]); - } + if (!leftover_scalars.empty()) { + auto [var_accumulator, var_offset_generator] = + _batch_mul_internal(leftover_scalars, leftover_base_points, true); + accumulator = var_accumulator; + // todo explain subtract + if (!var_offset_generator.is_point_at_infinity()) { + offset_generator_accumulator -= var_offset_generator; } } - // cycle_group accumulator = lookup_points[0]; for (size_t i = 0; i < lookup_points.size(); ++i) { if (i == 0) { if (leftover_scalars.empty()) { @@ -772,9 +756,12 @@ cycle_group cycle_group::fixed_base_batch_mul( if (has_constant_component) { // we subtract off the offset_generator_accumulator, so subtract constant component from the accumulator! 
offset_generator_accumulator -= constant_component; + cycle_group offset_generator_delta(affine_element(-offset_generator_accumulator)); + // Assuming independent generators, existence of constant component = cannot hit edge cases + return accumulator.unconditional_add(offset_generator_delta); } cycle_group offset_generator_delta(affine_element(-offset_generator_accumulator)); - accumulator = accumulator.unconditional_add(offset_generator_delta); + accumulator = accumulator + (offset_generator_delta); return accumulator; } @@ -823,11 +810,6 @@ cycle_group cycle_group::fixed_base_batch_mul( using straus_round_tables = std::vector; std::vector point_tables(num_points); - // for (size_t i = 0; i < num_points; ++i) { - // scalar_slices.emplace_back(straus_scalar_slice(context, scalars[i], table_bits)); - // point_tables.emplace_back( - // straus_lookup_table(context, cycle_group(base_points[i]), generators.generators[i + 1], table_bits)); - // } // creating these point tables should cost 0 constraints if base points are constant for (size_t i = 0; i < num_points; ++i) { @@ -843,7 +825,7 @@ cycle_group cycle_group::fixed_base_batch_mul( element::batch_normalize(&round_offset_generators[0], num_rounds); point_tables[i].resize(num_rounds); for (size_t j = 0; j < num_rounds; ++j) { - point_tables[i][num_rounds - j - 1] = straus_lookup_table( + point_tables[i][j] = straus_lookup_table( context, cycle_group(round_points[j]), cycle_group(round_offset_generators[j]), FIXED_BASE_TABLE_BITS); } } @@ -862,7 +844,7 @@ cycle_group cycle_group::fixed_base_batch_mul( for (size_t j = 0; j < num_points; ++j) { auto& point_table = point_tables[j][i]; - const field_t scalar_slice = scalar_slices[j].read(num_rounds - i - 1); + const field_t scalar_slice = scalar_slices[j].read(i); const cycle_group point = point_table.read(scalar_slice); accumulator = accumulator.unconditional_add(point); diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp 
b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp index 6b319ede051..b72300c3364 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp @@ -169,6 +169,14 @@ template class cycle_group { size_t rom_id = 0; }; + struct batch_mul_internal_output { + cycle_group accumulator; + affine_element offset_generator_delta; + }; + static batch_mul_internal_output _batch_mul_internal(const std::vector& scalars, + const std::vector& base_points, + bool unconditional_add); + static cycle_group fixed_base_batch_mul( const std::vector& _scalars, const std::vector& _base_points) requires SupportsLookupTables; diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp index 123cd1cf235..9b4ea4665c4 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp @@ -1,3 +1,4 @@ +#include "barretenberg/crypto/pedersen_commitment/pedersen_refactor.hpp" #include "barretenberg/crypto/pedersen_hash/pedersen.hpp" #include "barretenberg/numeric/random/engine.hpp" #include "barretenberg/stdlib/primitives/field/field.hpp" @@ -482,6 +483,7 @@ TYPED_TEST(CycleGroupTest, TestFixedBaseBatchMul) { std::vector points; std::vector scalars; + std::vector scalars_native; for (size_t i = 0; i < num_muls; ++i) { auto element = crypto::pedersen_hash::generator_info::get_lhs_generator(); @@ -490,17 +492,45 @@ TYPED_TEST(CycleGroupTest, TestFixedBaseBatchMul) // 1: add entry where point is constant, scalar is witness expected += (element * scalar); points.emplace_back((element)); + std::cout << "test base point[0] = " << element << std::endl; 
scalars.emplace_back(cycle_group_ct::cycle_scalar::from_witness(&composer, scalar)); + scalars_native.emplace_back(scalar); // 2: add entry where point is constant, scalar is constant + element = crypto::pedersen_hash::generator_info::get_rhs_generator(); expected += (element * scalar); points.emplace_back((element)); + std::cout << "test base point[1] = " << element << std::endl; scalars.emplace_back(typename cycle_group_ct::cycle_scalar(scalar)); + scalars_native.emplace_back(scalar); } auto result = cycle_group_ct::fixed_base_batch_mul(scalars, points); EXPECT_EQ(result.get_value(), affine_element(expected)); + EXPECT_EQ(result.get_value(), crypto::pedersen_commitment_refactor::commit_native(scalars_native)); } + // case 2, MSM where input scalars are 0 + { + std::vector points; + std::vector scalars; + + for (size_t i = 0; i < num_muls; ++i) { + auto element = crypto::pedersen_hash::generator_info::get_lhs_generator(); + typename G1::subgroup_field scalar = 0; + + // 1: add entry where point is constant, scalar is witness + expected += (element * scalar); + points.emplace_back((element)); + scalars.emplace_back(cycle_group_ct::cycle_scalar::from_witness(&composer, scalar)); + + // // 2: add entry where point is constant, scalar is constant + expected += (element * scalar); + points.emplace_back((element)); + scalars.emplace_back(typename cycle_group_ct::cycle_scalar(scalar)); + } + auto result = cycle_group_ct::fixed_base_batch_mul(scalars, points); + EXPECT_EQ(result.is_point_at_infinity().get_value(), true); + } bool proof_result = composer.check_circuit(); EXPECT_EQ(proof_result, true); } From 36624f33eefaf6907446f6c12bd26f5c14b5e1a7 Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Sat, 2 Sep 2023 15:13:11 +0000 Subject: [PATCH 10/50] fixed, tidy up, comments --- .../pedersen_commitment/pedersen_refactor.cpp | 111 +-- .../pedersen_commitment/pedersen_refactor.hpp | 148 ++-- .../crypto/pedersen_hash/c_bind_refactor.cpp | 58 ++ 
.../crypto/pedersen_hash/pedersen.cpp | 13 - .../crypto/pedersen_hash/pedersen.hpp | 10 - .../pedersen_hash/pedersen_refactor.cpp | 45 +- .../pedersen_hash/pedersen_refactor.hpp | 78 +- .../ecc/curves/grumpkin/grumpkin.cpp | 4 - .../ecc/curves/grumpkin/grumpkin.hpp | 2 - .../ecc/curves/secp256k1/secp256k1.cpp | 4 - .../ecc/curves/secp256k1/secp256k1.hpp | 1 - .../ecc/curves/secp256r1/secp256r1.cpp | 4 - .../ecc/curves/secp256r1/secp256r1.hpp | 1 - .../cpp/src/barretenberg/ecc/groups/group.hpp | 9 +- .../circuit_builder/circuit_builder_base.hpp | 2 +- .../circuit_builder/ultra_circuit_builder.cpp | 4 +- .../plookup_tables/fixed_base/fixed_base.cpp | 142 +++- .../plookup_tables/fixed_base/fixed_base.hpp | 36 +- .../fixed_base/fixed_base_params.hpp | 42 +- .../stdlib/hash/pedersen/pedersen.test.cpp | 54 ++ .../hash/pedersen/pedersen_refactor.cpp | 29 +- .../hash/pedersen/pedersen_refactor.hpp | 26 +- .../stdlib/primitives/group/cycle_group.cpp | 754 ++++++++++++------ .../stdlib/primitives/group/cycle_group.hpp | 259 +++--- .../primitives/group/cycle_group.test.cpp | 265 +++--- 25 files changed, 1361 insertions(+), 740 deletions(-) create mode 100644 circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/c_bind_refactor.cpp create mode 100644 circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen.test.cpp diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_refactor.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_refactor.cpp index 71add33ec44..487fc0d7f37 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_refactor.cpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_refactor.cpp @@ -7,94 +7,55 @@ #include #endif -using namespace crypto::generators; +namespace crypto { -namespace crypto::pedersen_commitment_refactor { - -std::vector 
generator_info_temp::get_default_generators() -{ - const std::vector default_domain_separator(DEFAULT_DOMAIN_SEPARATOR.begin(), - DEFAULT_DOMAIN_SEPARATOR.end()); - - return grumpkin::g1::derive_generators_secure(default_domain_separator, DEFAULT_NUM_GENERATORS); -} - -std::vector generator_info_temp::get_generators(size_t num_generators, - size_t starting_index, - const std::string& domain_separator) -{ - std::vector result; - size_t start = starting_index; - size_t number = num_generators; - if (domain_separator == DEFAULT_DOMAIN_SEPARATOR && starting_index <= DEFAULT_NUM_GENERATORS) { - const size_t default_end = starting_index + num_generators < DEFAULT_NUM_GENERATORS - ? starting_index + num_generators - : DEFAULT_NUM_GENERATORS; - std::copy(default_generators.begin() + static_cast(starting_index), - default_generators.begin() + static_cast(default_end), - std::back_inserter(result)); - if (default_end == starting_index + num_generators) { - return result; - } - start = default_end; - number = num_generators - (default_end - starting_index); - } - - const std::vector domain_separator_bytes(domain_separator.begin(), domain_separator.end()); - - auto remainder = grumpkin::g1::derive_generators_secure(domain_separator_bytes, number, start); - - std::copy(remainder.begin(), remainder.end(), std::back_inserter(result)); - return result; -} - -grumpkin::g1::affine_element generator_info_temp::get_generator(size_t generator_index, - const std::string& domain_separator) -{ - return grumpkin::g1::get_secure_generator_from_index(generator_index, domain_separator); -} - -grumpkin::g1::affine_element generator_info_temp::get_lhs_generator() -{ - return lhs_generator; -} -grumpkin::g1::affine_element generator_info_temp::get_rhs_generator() -{ - return rhs_generator; -} -grumpkin::g1::affine_element generator_info_temp::get_length_generator() -{ - return length_generator; -} /** - * Given a vector of fields, generate a pedersen commitment using the indexed generators. 
+ * @brief Given a vector of fields, generate a pedersen commitment using the indexed generators. + * + * @details This method uses `Curve::BaseField` members as inputs. This aligns with what we expect when creating + * grumpkin commitments to field elements inside a BN254 SNARK circuit. + * @param inputs + * @param hash_index + * @param generator_context + * @return Curve::AffineElement */ -grumpkin::g1::affine_element commit_native(const std::vector& inputs, - const size_t hash_index, - const std::string& domain_separator) +template +typename Curve::AffineElement pedersen_commitment_refactor::commit_native( + const std::vector& inputs, const size_t hash_index, const generator_data* const generator_context) { - const auto base_points = generator_info_temp::get_generators(inputs.size(), hash_index, domain_separator); - - grumpkin::g1::element result = grumpkin::g1::point_at_infinity; + const auto generators = generator_context->conditional_extend(inputs.size() + hash_index); + Element result = Group::point_at_infinity; for (size_t i = 0; i < inputs.size(); ++i) { - result += grumpkin::g1::element(base_points[i]) * static_cast(inputs[i]); + result += Element(generators.get(i, hash_index)) * static_cast(inputs[i]); } return result; } -grumpkin::g1::affine_element commit_native(const std::vector& inputs, - const size_t hash_index, - const std::string& domain_separator) +/** + * @brief Given a vector of fields, generate a pedersen commitment using the indexed generators. + * + * @details This method uses `ScalarField` members as inputs. This aligns with what we expect for a "canonical" + * elliptic curve commitment function. However, when creating grumpkin commitments inside a BN254 SNARK crcuit it is not + * efficient to pack data into grumpkin::fr elements, as grumpkin::fq is the native field of BN254 circuits. + * + * @note This method is used currently for tests. If we find no downstream use for it by Jan 2024, delete! 
+ * @param inputs + * @param hash_index + * @param generator_context + * @return Curve::AffineElement + */ +template +typename Curve::AffineElement pedersen_commitment_refactor::commit_native( + const std::vector& inputs, const size_t hash_index, const generator_data* const generator_context) { - const auto base_points = generator_info_temp::get_generators(inputs.size(), hash_index, domain_separator); - - grumpkin::g1::element result = grumpkin::g1::point_at_infinity; + const auto generators = generator_context->conditional_extend(inputs.size() + hash_index); + Element result = Group::point_at_infinity; for (size_t i = 0; i < inputs.size(); ++i) { - std::cout << "base point[" << i << "] = " << base_points[i] << std::endl; - result += grumpkin::g1::element(base_points[i]) * (inputs[i]); + result += Element(generators.get(i, hash_index)) * (inputs[i]); } return result; } -} // namespace crypto::pedersen_commitment_refactor +template class pedersen_commitment_refactor; +} // namespace crypto diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_refactor.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_refactor.hpp index 9d61993c4b8..dabb481d5e1 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_refactor.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_refactor.hpp @@ -1,62 +1,112 @@ #pragma once #include "../generators/fixed_base_scalar_mul.hpp" #include "../generators/generator_data.hpp" +#include "barretenberg/ecc/curves/bn254/bn254.hpp" #include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp" #include -namespace crypto::pedersen_commitment_refactor { +namespace crypto { -struct generator_info_temp { - inline static constexpr size_t DEFAULT_NUM_GENERATORS = 32; - inline static const std::string DEFAULT_DOMAIN_SEPARATOR = "default_domain_separator"; - inline static const grumpkin::g1::affine_element 
lhs_generator = - grumpkin::g1::get_secure_generator_from_index(0, DEFAULT_DOMAIN_SEPARATOR); - inline static const grumpkin::g1::affine_element rhs_generator = - grumpkin::g1::get_secure_generator_from_index(1, DEFAULT_DOMAIN_SEPARATOR); +/** + * @brief Contains a vector of precomputed generator points. + * Generators are defined via a domain separator. + * Number of generators in generator_data is fixed for a given object instance. + * + * @details generator_data is used to precompute short lists of commonly used generators, + * (e.g. static inline const default_generators = generator_data()). + * If an algorithm requires more than `_size_ generators, + * the `conditional_extend` method can be called to return a new `generator_data` object. + * N.B. we explicitly do not support mutating an existing `generator_data` object to increase the size of + * its `std::vector generators` member variable. + * This is because this class is intended to be used as a `static` member of other classes to provide lists + * of precomputed generators. Mutating static member variables is *not* thread safe! 
+ */ +template class generator_data { + public: + using Group = typename Curve::Group; + using AffineElement = typename Curve::AffineElement; + static inline constexpr size_t DEFAULT_NUM_GENERATORS = 32; + static inline const std::string DEFAULT_DOMAIN_SEPARATOR = "default_domain_separator"; + inline generator_data(const size_t num_generators = DEFAULT_NUM_GENERATORS, + const std::string& domain_separator = DEFAULT_DOMAIN_SEPARATOR) + : _domain_separator(domain_separator) + , _domain_separator_bytes(domain_separator.begin(), domain_separator.end()) + , _size(num_generators){}; - inline static const grumpkin::g1::affine_element length_generator = - grumpkin::g1::get_secure_generator_from_index(0, "pedersen_hash_length"); + [[nodiscard]] inline std::string domain_separator() const { return _domain_separator; } + [[nodiscard]] inline size_t size() const { return _size; } + [[nodiscard]] inline AffineElement get(const size_t index, const size_t offset = 0) const + { + ASSERT(index + offset <= _size); + return generators[index + offset]; + } - static std::vector get_default_generators(); + /** + * @brief If more generators than `_size` are required, this method will return a new `generator_data` object + * with the required generators. + * + * @note Question: is this a good pattern to support? Ideally downstream code would ensure their + * `generator_data` object is sufficiently large to cover potential needs. + * But if we did not support this pattern, it would make downstream code more complex as each method that + * uses `generator_data` would have to perform this accounting logic. 
+ * + * @param target_num_generators + * @return generator_data + */ + [[nodiscard]] inline generator_data conditional_extend(const size_t target_num_generators) const + { + if (target_num_generators <= _size) { + return *this; + } + return { target_num_generators, _domain_separator }; + } - static std::vector get_generators( - size_t num_generators, - size_t starting_index = 0, - const std::string& domain_separator = DEFAULT_DOMAIN_SEPARATOR); - static grumpkin::g1::affine_element get_generator(size_t generator_index, - const std::string& domain_separator = DEFAULT_DOMAIN_SEPARATOR); - inline static const std::vector default_generators = get_default_generators(); + private: + std::string _domain_separator; + std::vector _domain_separator_bytes; + size_t _size; + // ordering of static variable initialization is undefined, so we make `default_generators` private + // and only accessible via `get_default_generators()`, which ensures var will be initialized at the cost of some + // small runtime checks + inline static const generator_data default_generators = + generator_data(generator_data::DEFAULT_NUM_GENERATORS, generator_data::DEFAULT_DOMAIN_SEPARATOR); - static grumpkin::g1::affine_element get_lhs_generator(); - static grumpkin::g1::affine_element get_rhs_generator(); - static grumpkin::g1::affine_element get_length_generator(); + public: + inline static const generator_data* get_default_generators() { return &default_generators; } + const std::vector generators = (Group::derive_generators_secure(_domain_separator_bytes, _size)); }; -grumpkin::g1::affine_element commit_native( - const std::vector& inputs, - size_t hash_index = 0, - const std::string& domain_separator = generator_info_temp::DEFAULT_DOMAIN_SEPARATOR); - -grumpkin::g1::affine_element commit_native( - const std::vector& inputs, - size_t hash_index = 0, - const std::string& domain_separator = generator_info_temp::DEFAULT_DOMAIN_SEPARATOR); - -// grumpkin::fq compress_native(const std::vector& inputs, 
-// size_t hash_index = 0, -// const std::vector& domain_separator = {}); - -// grumpkin::fq compress_native(const std::vector& input, -// size_t hash_index = 0, -// const std::vector& domain_separator = {}); - -// template -// grumpkin::fq compress_native(const std::array& inputs, -// const size_t hash_index = 0, -// const std::vector& domain_separator = {}) -// { -// std::vector converted(inputs.begin(), inputs.end()); -// return compress_native(converted, hash_index, domain_separator); -// } - -} // namespace crypto::pedersen_commitment_refactor +template class generator_data; + +/** + * @brief Performs pedersen commitments! + * + * To commit to a size-n list of field elements `x`, a commitment is defined as: + * + * Commit(x) = x[0].g[0] + x[1].g[1] + ... + x[n-1].g[n-1] + * + * Where `g` is a list of generator points defined by `generator_data` + * + */ +template class pedersen_commitment_refactor { + public: + using AffineElement = typename Curve::AffineElement; + using Element = typename Curve::Element; + using Fr = typename Curve::ScalarField; + using Fq = typename Curve::BaseField; + using Group = typename Curve::Group; + using generator_data = generator_data; + + static AffineElement commit_native( + const std::vector& inputs, + size_t hash_index = 0, + const generator_data* generator_context = generator_data::get_default_generators()); + + static AffineElement commit_native( + const std::vector& inputs, + size_t hash_index = 0, + const generator_data* generator_context = generator_data::get_default_generators()); +}; + +extern template class pedersen_commitment_refactor; +} // namespace crypto diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/c_bind_refactor.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/c_bind_refactor.cpp new file mode 100644 index 00000000000..496fe633d89 --- /dev/null +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/c_bind_refactor.cpp @@ -0,0 +1,58 @@ 
+#include "barretenberg/common/mem.hpp" +#include "barretenberg/common/serialize.hpp" +#include "c_bind.hpp" +#include "pedersen_refactor.hpp" + +extern "C" { + +WASM_EXPORT void pedersen_hash_pair(uint8_t const* left, uint8_t const* right, uint8_t* result) +{ + auto lhs = barretenberg::fr::serialize_from_buffer(left); + auto rhs = barretenberg::fr::serialize_from_buffer(right); + auto r = crypto::pedersen_hash_refactor::hash_multiple({ lhs, rhs }); + barretenberg::fr::serialize_to_buffer(r, result); +} + +WASM_EXPORT void pedersen_hash_multiple(uint8_t const* inputs_buffer, uint8_t* output) +{ + std::vector to_compress; + read(inputs_buffer, to_compress); + auto r = crypto::pedersen_hash_refactor::hash_multiple(to_compress); + barretenberg::fr::serialize_to_buffer(r, output); +} + +WASM_EXPORT void pedersen_hash_multiple_with_hash_index(uint8_t const* inputs_buffer, + uint32_t const* hash_index, + uint8_t* output) +{ + std::vector to_compress; + read(inputs_buffer, to_compress); + auto r = crypto::pedersen_hash_refactor::hash_multiple(to_compress, ntohl(*hash_index)); + barretenberg::fr::serialize_to_buffer(r, output); +} + +/** + * Given a buffer containing 32 byte pedersen leaves, return a new buffer containing the leaves and all pairs of + * nodes that define a merkle tree. + * e.g. + * input: [1][2][3][4] + * output: [1][2][3][4][compress(1,2)][compress(3,4)][compress(5,6)] + */ +WASM_EXPORT void pedersen_hash_to_tree(fr::vec_in_buf data, fr::vec_out_buf out) +{ + auto fields = from_buffer>(data); + auto num_outputs = fields.size() * 2 - 1; + fields.reserve(num_outputs); + + for (size_t i = 0; fields.size() < num_outputs; i += 2) { + fields.push_back(crypto::pedersen_hash_refactor::hash_multiple({ fields[i], fields[i + 1] })); + } + + auto buf_size = 4 + num_outputs * sizeof(grumpkin::fq); + // TODO(@charlielye) Can we get rid of cppcoreguidelines-owning-memory warning here? 
+ // NOLINTNEXTLINE(cppcoreguidelines-owning-memory, cppcoreguidelines-no-malloc) + *out = static_cast(malloc(buf_size)); + auto* dst = *out; + write(dst, fields); +} +} \ No newline at end of file diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen.cpp index 32b28862fe6..70d64fb6cb0 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen.cpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen.cpp @@ -8,15 +8,6 @@ namespace crypto::pedersen_hash { using namespace generators; -grumpkin::g1::affine_element generator_info::get_lhs_generator() -{ - return lhs_generator; -} -grumpkin::g1::affine_element generator_info::get_rhs_generator() -{ - return rhs_generator; -} - grumpkin::g1::element hash_single(const barretenberg::fr& in, generator_index_t const& index) { auto gen_data = get_generator_data(index); @@ -75,8 +66,4 @@ grumpkin::fq hash_multiple(const std::vector& inputs, const size_t r.is_point_at_infinity() ? 
grumpkin::g1::affine_element(0, 0) : static_cast(r); return result.x; } - -struct foo; -struct generator_info; -// class grumpkin::g1::affine_element generator_info::get_rhs_generator(); } // namespace crypto::pedersen_hash \ No newline at end of file diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen.hpp index 028ae82e6f9..5219b31c31c 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen.hpp @@ -6,16 +6,6 @@ namespace crypto::pedersen_hash { -struct generator_info { - inline static const grumpkin::g1::affine_element lhs_generator = - grumpkin::g1::get_secure_generator_from_index(0, "default_domain_separator"); - inline static const grumpkin::g1::affine_element rhs_generator = - grumpkin::g1::get_secure_generator_from_index(1, "default_domain_separator"); - - static grumpkin::g1::affine_element get_lhs_generator(); - static grumpkin::g1::affine_element get_rhs_generator(); -}; - grumpkin::g1::element hash_single(const barretenberg::fr& in, generators::generator_index_t const& index); grumpkin::fq hash_multiple(const std::vector& inputs, size_t hash_index = 0); diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen_refactor.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen_refactor.cpp index d861bd9a861..1c55ba37207 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen_refactor.cpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen_refactor.cpp @@ -4,27 +4,52 @@ #include #endif -namespace crypto::pedersen_hash { +namespace crypto { using namespace generators; /** * Given a vector of fields, generate a pedersen hash using the indexed generators. 
*/ -grumpkin::fq hash_multiple(const std::vector& inputs, - const size_t hash_index, - const std::string& domain_separator) + +/** + * @brief Given a vector of fields, generate a pedersen hash using generators from `generator_context`. + * + * @details `hash_index` is used to access offset elements of `generator_context` if required. + * e.g. if one desires to compute + * `inputs[0] * [generators[hash_index]] + `inputs[1] * [generators[hash_index + 1]]` + ... etc + * Potentially useful to ensure multiple hashes with the same domain separator cannot collide. + * + * TODO(@suyash67) can we change downstream code so that `hash_index` is no longer required? Now we have a proper + * domain_separator parameter, we no longer need to specify different generator indices to ensure hashes cannot collide. + * @param inputs what are we hashing? + * @param hash_index Describes an offset into the list of generators, if required + * @param generator_context + * @return Fq (i.e. SNARK circuit scalar field, when hashing using a curve defined over the SNARK circuit scalar field) + */ +template +typename Curve::BaseField pedersen_hash_refactor::hash_multiple(const std::vector& inputs, + const size_t hash_index, + const generator_data* const generator_context) { - const auto length_generator = pedersen_commitment_refactor::generator_info_temp::get_length_generator(); - const auto base_points = - pedersen_commitment_refactor::generator_info_temp::get_generators(inputs.size(), hash_index, domain_separator); + const auto generators = generator_context->conditional_extend(inputs.size() + hash_index); - grumpkin::g1::element result = length_generator * grumpkin::fr(inputs.size()); + Element result = get_length_generator() * Fr(inputs.size()); for (size_t i = 0; i < inputs.size(); ++i) { - result += base_points[i] * grumpkin::fr(static_cast(inputs[i])); + result += generators.get(i, hash_index) * Fr(static_cast(inputs[i])); } + result = result.normalize(); return result.x; } -} // namespace 
crypto::pedersen_hash \ No newline at end of file +template +typename Curve::BaseField pedersen_hash_refactor::hash(const std::vector& inputs, + size_t hash_index, + const generator_data* const generator_context) +{ + return hash_multiple(inputs, hash_index, generator_context); +} + +template class pedersen_hash_refactor; +} // namespace crypto \ No newline at end of file diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen_refactor.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen_refactor.hpp index 2f3bb99a824..646e52abfa4 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen_refactor.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen_refactor.hpp @@ -5,19 +5,71 @@ #include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp" #include -namespace crypto::pedersen_hash_refactor { +namespace crypto { -grumpkin::fq hash_multiple( - const std::vector& inputs, - size_t hash_index = 0, - const std::string& domain_separator = pedersen_commitment_refactor::generator_info_temp::DEFAULT_DOMAIN_SEPARATOR); +/** + * @brief Performs pedersen hashes! + * + * To hash to a size-n list of field elements `x`, we return the X-coordinate of: + * + * Hash(x) = n.[h] + Commit(x) + * + * Where `g` is a list of generator points defined by `generator_data` + * And `h` is a unique generator whose domain separator is the string `pedersen_hash_length`. + * + * The addition of `n.[h]` into the hash is to prevent length-extension attacks. + * It also ensures that the hash output is never the point at infinity. + * + * It is neccessary that all generator points are linearly independent of one another, + * so that finding collisions is equivalent to solving the discrete logarithm problem. 
+ * This is ensured via the generator derivation algorithm in `generator_data` + */ +template class pedersen_hash_refactor { + public: + using AffineElement = typename Curve::AffineElement; + using Element = typename Curve::Element; + using Fr = typename Curve::ScalarField; + using Fq = typename Curve::BaseField; + using Group = typename Curve::Group; + using generator_data = typename crypto::generator_data; -inline grumpkin::fq hash( - const std::vector& inputs, - size_t hash_index = 0, - const std::string& domain_separator = pedersen_commitment_refactor::generator_info_temp::DEFAULT_DOMAIN_SEPARATOR) -{ - return hash_multiple(inputs, hash_index, domain_separator); -} + /** + * @brief lhs_generator is an alias for the first element in `default_generators`. + * i.e. the 1st generator point in a size-2 pedersen hash + * + * @details Short story: don't make global static member variables publicly accessible. + * Ordering of global static variable initialization is not defined. + * Consider a scenario where this class has `inline static const AffineElement lhs_generator;` + * If another static variable's init function accesses `pedersen_hash_refactor::lhs_generator`, + * there is a chance that `lhs_generator` is not yet initialized due to undefined init order. + * This creates merry havoc due to assertions triggering during runtime initialization of global statics. + * So...don't do that. Wrap your statics. + */ + inline static AffineElement get_lhs_generator() { return generator_data::get_default_generators()->get(0); } + /** + * @brief rhs_generator is an alias for the second element in `default_generators`. + * i.e. 
the 2nd generator point in a size-2 pedersen hash + */ + inline static AffineElement get_rhs_generator() { return generator_data::get_default_generators()->get(1); } + /** + * @brief length_generator is used to ensure pedersen hash is not vulnerable to length-exstension attacks + */ + inline static AffineElement get_length_generator() + { + static const AffineElement length_generator = Group::get_secure_generator_from_index(0, "pedersen_hash_length"); + return length_generator; + } -} // namespace crypto::pedersen_hash_refactor + // TODO(@suyash67) as part of refactor project, can we remove this and replace with `hash` + // (i.e. simplify the name as we no longer have a need for `hash_single`) + static Fq hash_multiple(const std::vector& inputs, + size_t hash_index = 0, + const generator_data* generator_context = nullptr); + + static Fq hash(const std::vector& inputs, + size_t hash_index = 0, + const generator_data* generator_context = nullptr); +}; + +extern template class pedersen_hash_refactor; +} // namespace crypto diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/curves/grumpkin/grumpkin.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/curves/grumpkin/grumpkin.cpp index fdb0348b877..167d0fec0c7 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/curves/grumpkin/grumpkin.cpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/curves/grumpkin/grumpkin.cpp @@ -17,8 +17,4 @@ namespace { // ASSERT(generator_index < max_num_generators); // return generators[generator_index]; // } -g1::affine_element get_generator(const size_t generator_index) -{ - return g1::get_secure_generator_from_index(generator_index, "grumpkin_default_generator"); -} } // namespace grumpkin \ No newline at end of file diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/curves/grumpkin/grumpkin.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/curves/grumpkin/grumpkin.hpp index fc799acce55..ddcc276b196 100644 --- 
a/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/curves/grumpkin/grumpkin.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/curves/grumpkin/grumpkin.hpp @@ -30,8 +30,6 @@ struct GrumpkinG1Params { }; typedef barretenberg::group g1; -g1::affine_element get_generator(const size_t generator_index); - }; // namespace grumpkin namespace curve { diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/curves/secp256k1/secp256k1.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/curves/secp256k1/secp256k1.cpp index 3d958a8d729..b2f5fa4f782 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/curves/secp256k1/secp256k1.cpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/curves/secp256k1/secp256k1.cpp @@ -13,8 +13,4 @@ the maximum possible, as the y coordinate in that case is determined by the x-co // ASSERT(generator_index < max_num_generators); // return generators[generator_index]; // } -g1::affine_element get_generator(const size_t generator_index) -{ - return g1::get_secure_generator_from_index(generator_index, "secp256k1_default_generator"); -} } // namespace secp256k1 \ No newline at end of file diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/curves/secp256k1/secp256k1.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/curves/secp256k1/secp256k1.hpp index 9761389b33c..2a0aa0838ef 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/curves/secp256k1/secp256k1.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/curves/secp256k1/secp256k1.hpp @@ -120,7 +120,6 @@ struct Secp256k1G1Params { typedef barretenberg:: group, barretenberg::field, Secp256k1G1Params> g1; -g1::affine_element get_generator(const size_t generator_index); } // namespace secp256k1 namespace curve { diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/curves/secp256r1/secp256r1.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/curves/secp256r1/secp256r1.cpp index c3057b7d476..46875462194 100644 
--- a/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/curves/secp256r1/secp256r1.cpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/curves/secp256r1/secp256r1.cpp @@ -4,8 +4,4 @@ namespace secp256r1 { /* In case where prime bit length is 256, the method produces a generator, but only with one less bit of randomness than the maximum possible, as the y coordinate in that case is determined by the x-coordinate. */ -g1::affine_element get_generator(const size_t generator_index) -{ - return g1::get_secure_generator_from_index(generator_index, "secp256r1_default_generator"); -} } // namespace secp256r1 \ No newline at end of file diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/curves/secp256r1/secp256r1.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/curves/secp256r1/secp256r1.hpp index ef6b431ab48..7f2acf595e6 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/curves/secp256r1/secp256r1.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/curves/secp256r1/secp256r1.hpp @@ -107,7 +107,6 @@ struct Secp256r1G1Params { typedef barretenberg:: group, barretenberg::field, Secp256r1G1Params> g1; -g1::affine_element get_generator(const size_t generator_index); } // namespace secp256r1 namespace curve { diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/groups/group.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/groups/group.hpp index 46943bcc737..7cad218cf13 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/groups/group.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/groups/group.hpp @@ -93,9 +93,9 @@ template */ - static std::vector derive_generators_secure(const std::vector& domain_separator, - const size_t num_generators, - const size_t starting_index = 0) + inline static std::vector derive_generators_secure(const std::vector& domain_separator, + const size_t num_generators, + const size_t starting_index = 0) { std::vector result; std::array domain_hash = 
sha256::sha256(domain_separator); @@ -117,7 +117,8 @@ template domain_hash = sha256::sha256(domain_separator); std::vector generator_preimage; diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/circuit_builder_base.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/circuit_builder_base.hpp index a9d3f83d614..b440b2085f8 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/circuit_builder_base.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/circuit_builder_base.hpp @@ -13,7 +13,7 @@ template class CircuitBuilderBase { public: using FF = typename Arithmetization::FF; using EmbeddedCurve = - std::conditional_t, barretenberg::g1, grumpkin::g1>; + std::conditional_t, curve::BN254, curve::Grumpkin>; static constexpr size_t NUM_WIRES = Arithmetization::NUM_WIRES; // Keeping NUM_WIRES, at least temporarily, for backward compatibility diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.cpp index f0acddfd040..466249c4029 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.cpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.cpp @@ -3431,8 +3431,8 @@ template inline FF UltraCircuitBuilder_::compute_elliptic_double_identity( FF q_elliptic_double_value, FF w_1_value, FF w_2_value, FF w_3_value, FF w_4_value, FF alpha_base, FF alpha) const { - constexpr FF curve_b = CircuitBuilderBase>::EmbeddedCurve::curve_b; - static_assert(CircuitBuilderBase>::EmbeddedCurve::curve_a == 0); + constexpr FF curve_b = CircuitBuilderBase>::EmbeddedCurve::Group::curve_b; + static_assert(CircuitBuilderBase>::EmbeddedCurve::Group::curve_a == 0); const auto x1 = w_1_value; const auto y1 = 
w_4_value; const auto x3 = w_2_value; diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/fixed_base/fixed_base.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/fixed_base/fixed_base.cpp index 98a6b0c60b7..f58e10ff50a 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/fixed_base/fixed_base.cpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/fixed_base/fixed_base.cpp @@ -2,12 +2,22 @@ #include "./fixed_base.hpp" #include "barretenberg/common/constexpr_utils.hpp" +#include "barretenberg/crypto/pedersen_hash/pedersen_refactor.hpp" #include "barretenberg/numeric/bitop/pow.hpp" #include "barretenberg/numeric/bitop/rotate.hpp" #include "barretenberg/numeric/bitop/sparse_form.hpp" - namespace plookup::fixed_base { +/** + * @brief Given a base_point [P] and an offset_generator [G], compute a lookup table of MAX_TABLE_SIZE that contains the + * following terms: + * + * { [G] + 0.[P] , [G] + 1.[P], ..., [G] + (MAX_TABLE_SIZE - 1).[P] } + * + * @param base_point + * @param offset_generator + * @return table::single_lookup_table + */ table::single_lookup_table table::generate_single_lookup_table(const affine_element& base_point, const affine_element& offset_generator) { @@ -26,6 +36,18 @@ table::single_lookup_table table::generate_single_lookup_table(const affine_elem return table; } +/** + * @brief For a given base point [P], compute the lookup tables required to traverse a `num_bits` sized lookup + * + * i.e. 
call `generate_single_lookup_table` for the following base points: + * + * { [P], [P] * (1 << BITS_PER_TABLE), [P] * (1 << BITS_PER_TABLE * 2), ..., [P] * (1 << BITS_PER_TABLE * (NUM_TABLES - + * 1)) } + * + * @tparam num_bits + * @param input + * @return table::fixed_base_scalar_mul_tables + */ template table::fixed_base_scalar_mul_tables table::generate_tables(const affine_element& input) { constexpr size_t NUM_TABLES = get_num_tables_per_multi_table(); @@ -47,6 +69,18 @@ template table::fixed_base_scalar_mul_tables table::generate_t return result; } +/** + * @brief For a fixed-base lookup of size `num_table_bits` and an input base point `input`, + * return the total contrbution in the scalar multiplication output from the offset generators in the lookup + * tables. + * + * @note We need the base point as an input parameter because we derive the offset generator using our hash-to-curve + * algorithm, where the base point is used as the domain separator. Ensures generator points cannot collide with base + * points w/o solving the dlog problem + * @tparam num_table_bits + * @param input + * @return grumpkin::g1::affine_element + */ template grumpkin::g1::affine_element table::generate_generator_offset(const grumpkin::g1::affine_element& input) { @@ -61,24 +95,46 @@ grumpkin::g1::affine_element table::generate_generator_offset(const grumpkin::g1 } return acc; } + +/** + * @brief Given a point, do we have a precomputed lookup table for this point? 
+ * + * @param input + * @return true + * @return false + */ bool table::lookup_table_exists_for_point(const grumpkin::g1::affine_element& input) { - return (input == crypto::pedersen_hash::generator_info::get_lhs_generator() || - input == crypto::pedersen_hash::generator_info::get_rhs_generator()); + return (input == native_pedersen::get_lhs_generator() || input == native_pedersen::get_rhs_generator()); } +/** + * @brief Given a point, return (if it exists) the 2 MultiTableId's that correspond to the LO_SCALAR, HI_SCALAR + * MultiTables + * + * @param input + * @return std::optional> + */ std::optional> table::get_lookup_table_ids_for_point( const grumpkin::g1::affine_element& input) { - if (input == crypto::pedersen_hash::generator_info::get_lhs_generator()) { + if (input == native_pedersen::get_lhs_generator()) { return { { FIXED_BASE_LEFT_LO, FIXED_BASE_LEFT_HI } }; } - if (input == crypto::pedersen_hash::generator_info::get_lhs_generator()) { + if (input == native_pedersen::get_rhs_generator()) { return { { FIXED_BASE_RIGHT_LO, FIXED_BASE_RIGHT_HI } }; } return {}; } +/** + * @brief Given a table id, return the offset generator term that will be present in the final scalar mul output. + * + * Return value is std::optional in case the table_id is not a fixed-base table. + * + * @param table_id + * @return std::optional + */ std::optional table::get_generator_offset_for_table_id(const MultiTableId table_id) { if (table_id == FIXED_BASE_LEFT_LO) { @@ -95,6 +151,37 @@ std::optional table::get_generator_offset_for_tabl } return {}; } + +using function_ptr = std::array (*)(const std::array); +using function_ptr_table = + std::array, table::NUM_FIXED_BASE_MULTI_TABLES>; +/** + * @brief create a compile-time static 2D array of all our required `get_basic_fixed_base_table_values` function + * pointers, so that we can specify the function pointer required for this method call using runtime variables + * `multitable_index`, `table_index`. 
(downstream code becomes a lot simpler if `table_index` is not compile time, + * particularly the init code in `plookup_tables.cpp`) + * @return constexpr function_ptr_table + */ +constexpr function_ptr_table make_function_pointer_table() +{ + function_ptr_table table; + barretenberg::constexpr_for<0, table::NUM_FIXED_BASE_MULTI_TABLES, 1>([&]() { + barretenberg::constexpr_for<0, table::MAX_NUM_TABLES_IN_MULTITABLE, 1>( + [&]() { table[i][j] = &table::get_basic_fixed_base_table_values; }); + }); + return table; +}; + +/** + * @brief Generate a single fixed-base-scalar-mul plookup table + * + * @tparam multitable_index , which of our 4 multitables is this basic table a part of? + * @param id the BasicTableId + * @param basic_table_index plookup table index + * @param table_index This index describes which bit-slice the basic table corresponds to. i.e. table_index = 0 maps to + * the least significant bit slice + * @return BasicTable + */ template BasicTable table::generate_basic_fixed_base_table(BasicTableId id, size_t basic_table_index, size_t table_index) { @@ -122,13 +209,9 @@ BasicTable table::generate_basic_fixed_base_table(BasicTableId id, size_t basic_ } table.get_values_from_key = nullptr; - // this needs to be a compile-time loop so we can convert `table_index, multitable_index` into a template parameter. 
- // prevents us having to make `table_index` a template parameter of this method, which simplifies upstream code - barretenberg::constexpr_for<0, MAX_NUM_TABLES_IN_MULTITABLE, 1>([&]() { - if (i == table_index) { - table.get_values_from_key = &get_basic_fixed_base_table_values; - } - }); + constexpr function_ptr_table get_values_from_key_table = make_function_pointer_table(); + table.get_values_from_key = get_values_from_key_table[multitable_index][table_index]; + ASSERT(table.get_values_from_key != nullptr); table.column_1_step_size = table.size; table.column_2_step_size = 0; @@ -137,32 +220,37 @@ BasicTable table::generate_basic_fixed_base_table(BasicTableId id, size_t basic_ return table; } +/** + * @brief Generate a multi-table that describes the lookups required to cover a fixed-base-scalar-mul of `num_bits` + * + * @tparam multitable_index , which one of our 4 multitables are we generating? + * @tparam num_bits , this will be either `BITS_PER_LO_SCALAR` or `BITS_PER_HI_SCALAR` + * @param id + * @return MultiTable + */ template MultiTable table::get_fixed_base_table(const MultiTableId id) { + static_assert(num_bits == BITS_PER_LO_SCALAR || num_bits == BITS_PER_HI_SCALAR); constexpr size_t NUM_TABLES = get_num_tables_per_multi_table(); + constexpr std::array basic_table_ids{ + FIXED_BASE_0_0, + FIXED_BASE_1_0, + FIXED_BASE_2_0, + FIXED_BASE_3_0, + }; + constexpr function_ptr_table get_values_from_key_table = make_function_pointer_table(); MultiTable table(MAX_TABLE_SIZE, 0, 0, NUM_TABLES); - table.id = id; table.get_table_values.resize(NUM_TABLES); table.lookup_ids.resize(NUM_TABLES); - - barretenberg::constexpr_for<0, NUM_TABLES, 1>([&]() { + for (size_t i = 0; i < NUM_TABLES; ++i) { table.slice_sizes.emplace_back(MAX_TABLE_SIZE); - table.get_table_values[i] = &get_basic_fixed_base_table_values; - size_t idx = i; - if constexpr (multitable_index == 0) { - idx += static_cast(FIXED_BASE_0_0); - } else if constexpr (multitable_index == 1) { - idx += 
static_cast(FIXED_BASE_1_0); - } else if constexpr (multitable_index == 2) { - idx += static_cast(FIXED_BASE_2_0); - } else if constexpr (multitable_index == 3) { - idx += static_cast(FIXED_BASE_3_0); - } + table.get_table_values[i] = get_values_from_key_table[multitable_index][i]; static_assert(multitable_index < NUM_FIXED_BASE_MULTI_TABLES); + size_t idx = i + static_cast(basic_table_ids[multitable_index]); table.lookup_ids[i] = static_cast(idx); - }); + } return table; } diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/fixed_base/fixed_base.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/fixed_base/fixed_base.hpp index 3a3c3031782..010f222074d 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/fixed_base/fixed_base.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/fixed_base/fixed_base.hpp @@ -2,19 +2,23 @@ #include "../types.hpp" #include "./fixed_base_params.hpp" -#include "barretenberg/crypto/pedersen_hash/pedersen.hpp" +#include "barretenberg/crypto/pedersen_hash/pedersen_refactor.hpp" #include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp" namespace plookup::fixed_base { +/** + * @brief Generates plookup tables required to perform fixed-base scalar multiplication over a fixed number of points. 
+ * + */ class table : public FixedBaseParams { public: using affine_element = grumpkin::g1::affine_element; using element = grumpkin::g1::element; - using single_lookup_table = std::vector; using fixed_base_scalar_mul_tables = std::vector; using all_multi_tables = std::array; + using native_pedersen = crypto::pedersen_hash_refactor; static inline single_lookup_table generate_single_lookup_table(const affine_element& base_point, const affine_element& offset_generator); @@ -23,10 +27,19 @@ class table : public FixedBaseParams { template static affine_element generate_generator_offset(const affine_element& input); static constexpr uint256_t MAX_LO_SCALAR = uint256_t(1) << BITS_PER_LO_SCALAR; - inline static const affine_element lhs_base_point_lo = crypto::pedersen_hash::generator_info::get_lhs_generator(); + // We split each scalar mulitplier into BITS_PER_LO_SCALAR, BITS_PER_HI_SCALAR chunks and perform 2 scalar muls of + // size BITS_PER_LO_SCALAR, BITS_PER_HI_SCALAR (see fixed_base_params.hpp for more details) + // i.e. we treat 1 scalar mul as two independent scalar muls over (roughly) half-width input scalars. + // The base_point members describe the fixed-base points that correspond to the two independent scalar muls, + // for our two supported points + inline static const affine_element lhs_base_point_lo = native_pedersen::get_lhs_generator(); inline static const affine_element lhs_base_point_hi = element(lhs_base_point_lo) * MAX_LO_SCALAR; - inline static const affine_element rhs_base_point_lo = crypto::pedersen_hash::generator_info::get_rhs_generator(); + inline static const affine_element rhs_base_point_lo = native_pedersen::get_rhs_generator(); inline static const affine_element rhs_base_point_hi = element(rhs_base_point_lo) * MAX_LO_SCALAR; + + // fixed_base_tables = lookup tables of precomputed base points required for our lookup arguments. + // N.B. these "tables" are not plookup tables, just regular ol' software lookup tables. 
+ // Used to build the proper plookup table and in the `BasicTable::get_values_from_key` method inline static const all_multi_tables fixed_base_tables = { table::generate_tables(lhs_base_point_lo), table::generate_tables(lhs_base_point_hi), @@ -34,6 +47,21 @@ class table : public FixedBaseParams { table::generate_tables(rhs_base_point_hi), }; + /** + * @brief offset generators! + * + * We add a unique "offset generator" into each lookup table to ensure that we never trigger + * incomplete addition formulae for short Weierstrass curves. + * The offset generators are linearly independent from the fixed-base points we're multiplying, ensuring that a + * collision is as likely as solving the discrete logarithm problem. + * For example, imagine a 2-bit lookup table of a point [P]. The table would normally contain { + * 0.[P], 1.[P], 2.[P], 3.[P]}. But, we dont want to have to handle points at infinity and we also don't want to + * deal with windowed-non-adjacent-form complexities. Instead, we derive offset generator [G] and make the table + * equal to { [G] + 0.[P], [G] + 1.[P], [G] + 2.[P], [G] + 3.[P]}. Each table uses a unique offset generator to + * prevent collisions. + * The final scalar multiplication output will have a precisely-known contribution from the offset generators, + * which can then be subtracted off with a single point subtraction. 
+ **/ inline static const std::array fixed_base_table_offset_generators = { table::generate_generator_offset(lhs_base_point_lo), diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/fixed_base/fixed_base_params.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/fixed_base/fixed_base_params.hpp index f5ecfa32cde..b70233de735 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/fixed_base/fixed_base_params.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/fixed_base/fixed_base_params.hpp @@ -1,32 +1,70 @@ #pragma once +#include #include #include namespace plookup { +/** + * @brief Parameters definitions for our fixed-base-scalar-multiplication lookup tables + * + */ struct FixedBaseParams { static constexpr size_t BITS_PER_TABLE = 9; static constexpr size_t BITS_ON_CURVE = 254; + + // We split 1 254-bit scalar mul into two scalar muls of size BITS_PER_LO_SCALAR, BITS_PER_HI_SCALAR. + // This enables us to efficiently decompose our input scalar multiplier into two chunks of a known size. + // (i.e. we get free BITS_PER_LO_SCALAR, BITS_PER_HI_SCALAR range checks as part of the lookup table subroutine) + // This in turn allows us to perform a primality test more efficiently. + // i.e. check that input scalar < prime modulus when evaluated over the integers + // (the primality check requires us to split the input into high / low bit chunks so getting this for free as part + // of the lookup algorithm is nice!) static constexpr size_t BITS_PER_LO_SCALAR = 128; static constexpr size_t BITS_PER_HI_SCALAR = BITS_ON_CURVE - BITS_PER_LO_SCALAR; + // max table size because the last lookup table might be smaller (BITS_PER_TABLE does not neatly divide + // BITS_PER_LO_SCALAR) static constexpr size_t MAX_TABLE_SIZE = (1UL) << BITS_PER_TABLE; + // how many BITS_PER_TABLE lookup tables do we need to traverse BITS_PER_LO_SCALAR-amount of bits? 
+ // (we implicitly assume BITS_PER_LO_SCALAR > BITS_PER_HI_SCALAR) static constexpr size_t MAX_NUM_TABLES_IN_MULTITABLE = (BITS_PER_LO_SCALAR / BITS_PER_TABLE) + (BITS_PER_LO_SCALAR % BITS_PER_TABLE == 0 ? 0 : 1); - static constexpr size_t NUM_FIXED_BASE_MULTI_TABLES = 4; + static constexpr size_t NUM_POINTS = 2; + // how many multitables are we creating? It's 4 because we want enough lookup tables to cover two field elements, + // two field elements = 2 scalar muls = 4 scalar mul hi/lo slices = 4 multitables + static constexpr size_t NUM_FIXED_BASE_MULTI_TABLES = NUM_POINTS * 2; static constexpr size_t NUM_TABLES_PER_LO_MULTITABLE = (BITS_PER_LO_SCALAR / BITS_PER_TABLE) + ((BITS_PER_LO_SCALAR % BITS_PER_TABLE == 0) ? 0 : 1); static constexpr size_t NUM_TABLES_PER_HI_MULTITABLE = (BITS_PER_LO_SCALAR / BITS_PER_TABLE) + ((BITS_PER_LO_SCALAR % BITS_PER_TABLE == 0) ? 0 : 1); + // how many lookups are required to perform a scalar mul of a field element with a base point? static constexpr size_t NUM_BASIC_TABLES_PER_BASE_POINT = (NUM_TABLES_PER_LO_MULTITABLE + NUM_TABLES_PER_HI_MULTITABLE); - static constexpr size_t NUM_FIXED_BASE_BASIC_TABLES = NUM_BASIC_TABLES_PER_BASE_POINT * 2; + // how many basic lookup tables are we creating in total to support fixed-base-scalar-muls over two precomputed base + // points. + static constexpr size_t NUM_FIXED_BASE_BASIC_TABLES = NUM_BASIC_TABLES_PER_BASE_POINT * NUM_POINTS; + /** + * @brief For a scalar multiplication table that covers input scalars up to `(1 << num_bits) - 1`, + * how many individual lookup tables of max size BITS_PER_TABLE do we need? + * (e.g. if BITS_PER_TABLE = 9, for `num_bits = 126` it's 14. For `num_bits = 128` it's 15) + * @tparam num_bits + * @return constexpr size_t + */ template inline static constexpr size_t get_num_tables_per_multi_table() noexcept { return (num_bits / BITS_PER_TABLE) + ((num_bits % BITS_PER_TABLE == 0) ? 
0 : 1); } + /** + * @brief For a given multitable index, how many scalar mul bits are we traversing with our multitable? + * + * @param multitable_index Ranges from 0 to NUM_FIXED_BASE_MULTI_TABLES - 1 + * @return constexpr size_t + */ static constexpr size_t get_num_bits_of_multi_table(const size_t multitable_index) { + ASSERT(multitable_index < NUM_FIXED_BASE_MULTI_TABLES); + // This...is very hacky. const bool is_lo_multi_table = (multitable_index & 1) == 0; return is_lo_multi_table ? BITS_PER_LO_SCALAR : BITS_PER_HI_SCALAR; } diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen.test.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen.test.cpp new file mode 100644 index 00000000000..2598d3849cb --- /dev/null +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen.test.cpp @@ -0,0 +1,54 @@ +#include "../../primitives/circuit_builders/circuit_builders.hpp" +#include "./pedersen_refactor.hpp" +#include "barretenberg/crypto/pedersen_hash/pedersen_refactor.hpp" +#include "barretenberg/numeric/random/engine.hpp" +#include + +#define STDLIB_TYPE_ALIASES using Composer = TypeParam; + +namespace stdlib_pedersen_tests { +using namespace barretenberg; +using namespace proof_system::plonk; + +namespace { +auto& engine = numeric::random::get_debug_engine(); +} + +template class PedersenTest : public ::testing::Test { + public: + static void SetUpTestSuite(){ + + }; +}; + +using CircuitTypes = ::testing:: + Types; +TYPED_TEST_SUITE(PedersenTest, CircuitTypes); + +TYPED_TEST(PedersenTest, TestHash) +{ + STDLIB_TYPE_ALIASES; + using field_ct = stdlib::field_t; + using witness_ct = stdlib::witness_t; + auto composer = Composer(); + + const size_t num_inputs = 10; + + std::vector inputs; + std::vector inputs_native; + + for (size_t i = 0; i < num_inputs; ++i) { + const auto element = fr::random_element(&engine); + inputs_native.emplace_back(element); + 
inputs.emplace_back(field_ct(witness_ct(&composer, element))); + } + + auto result = stdlib::pedersen_hash_refactor::hash(inputs); + auto expected = crypto::pedersen_hash_refactor::hash(inputs_native); + + EXPECT_EQ(result.get_value(), expected); + + bool proof_result = composer.check_circuit(); + EXPECT_EQ(proof_result, true); +} +} // namespace stdlib_pedersen_tests \ No newline at end of file diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_refactor.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_refactor.cpp index 661406b5028..c5412ab19d1 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_refactor.cpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_refactor.cpp @@ -16,23 +16,32 @@ field_t pedersen_hash_refactor::hash_multiple(const std::vector& using cycle_group = cycle_group; using cycle_scalar = typename cycle_group::cycle_scalar; - using affine_element = typename cycle_group::G1::affine_element; - std::vector base_points = { - crypto::pedersen_commitment_refactor::generator_info_temp::get_length_generator() - }; - auto _base_points = - crypto::pedersen_commitment_refactor::generator_info_temp::get_generators(hash_index, 0, domain_separator); - std::copy(_base_points.begin(), _base_points.end(), std::back_inserter(base_points)); + using Curve = typename C::EmbeddedCurve; + + auto base_points = grumpkin::g1::get_generators(inputs.size(), hash_index, domain_separator); + std::vector scalars; + std::vector points; scalars.emplace_back(field_t(inputs.size())); - for (const auto& in : inputs) { - scalars.emplace_back(in); + points.emplace_back(crypto::pedersen_hash_refactor::get_length_generator()); + for (size_t i = 0; i < inputs.size(); ++i) { + scalars.emplace_back(inputs[i]); + // constructs constant cycle_group objects (non-witness) + points.emplace_back(base_points[i]); } - auto result = 
cycle_group::fixed_base_batch_mul(scalars, base_points); + auto result = cycle_group::batch_mul(scalars, points); return result.x; } +template +field_t pedersen_hash_refactor::hash(const std::vector& in, + size_t hash_index, + const std::string& domain_separator, + bool validate_inputs_in_field) +{ + return hash_multiple(in, hash_index, domain_separator, validate_inputs_in_field); +} INSTANTIATE_STDLIB_TYPE(pedersen_hash_refactor); } // namespace proof_system::plonk::stdlib diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_refactor.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_refactor.hpp index 1c18c274468..046bdf6b3f2 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_refactor.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_refactor.hpp @@ -7,6 +7,12 @@ namespace proof_system::plonk::stdlib { using namespace barretenberg; +/** + * @brief stdlib class that evaluates in-circuit pedersen hashes, consistent with behavior in + * crypto::pedersen_hash_refactor + * + * @tparam ComposerContext + */ template class pedersen_hash_refactor { private: @@ -15,21 +21,17 @@ template class pedersen_hash_refactor { using bool_t = stdlib::bool_t; public: - static field_t hash_multiple( - const std::vector& in, - size_t hash_index = 0, - const std::string& domain_separator = - crypto::pedersen_commitment_refactor::generator_info_temp::DEFAULT_DOMAIN_SEPARATOR, - bool validate_inputs_in_field = true); + // TODO(@suyash67) as part of refactor project, can we remove this and replace with `hash` + // (i.e. 
simplify the name as we no longer have a need for `hash_single`) + static field_t hash_multiple(const std::vector& in, + size_t hash_index = 0, + const std::string& domain_separator = grumpkin::g1::DEFAULT_DOMAIN_SEPARATOR, + bool validate_inputs_in_field = true); static field_t hash(const std::vector& in, size_t hash_index = 0, - const std::string& domain_separator = - crypto::pedersen_commitment_refactor::generator_info_temp::DEFAULT_DOMAIN_SEPARATOR, - bool validate_inputs_in_field = true) - { - return hash_multiple(in, hash_index, domain_separator, validate_inputs_in_field); - } + const std::string& domain_separator = grumpkin::g1::DEFAULT_DOMAIN_SEPARATOR, + bool validate_inputs_in_field = true); }; EXTERN_STDLIB_TYPE(pedersen_hash_refactor); diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp index b093783da15..29d8936ad7e 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp @@ -10,6 +10,127 @@ #include "barretenberg/stdlib/primitives/plookup/plookup.hpp" namespace proof_system::plonk::stdlib { +template +cycle_group::cycle_group(Composer* _context) + : context(_context) + , x(0) + , y(0) + , _is_infinity(true) + , _is_constant(true) +{} + +/** + * @brief Construct a new cycle group::cycle group object + * + * @param _x + * @param _y + * @param is_infinity + */ +template +cycle_group::cycle_group(field_t _x, field_t _y, bool_t is_infinity) + : context(_x.get_context() == nullptr + ? _y.get_context() == nullptr + ? is_infinity.get_context() == nullptr ? 
nullptr : is_infinity.get_context() + : _y.get_context() + : _x.get_context()) + , x(_x.normalize()) + , y(_y.normalize()) + , _is_infinity(is_infinity) + , _is_constant(_x.is_constant() && _y.is_constant() && is_infinity.is_constant()) +{} + +/** + * @brief Construct a new cycle group::cycle group object + * + * @details is_infinity is a circuit constant. We EXPLICITLY require that whether this point is infinity/not infinity is + * known at circuit-construction time *and* we know this point is on the curve. These checks are not constrained. + * Use from_witness if these conditions are not met. + * Examples of when conditions are met: point is a derived from a point that is on the curve + not at infinity. + * e.g. output of a doubling operation + * @tparam Composer + * @param _x + * @param _y + * @param is_infinity + */ +template +cycle_group::cycle_group(const FF& _x, const FF& _y, bool is_infinity) + : context(nullptr) + , x(_x) + , y(_y) + , _is_infinity(is_infinity) + , _is_constant(true) +{ + ASSERT(get_value().on_curve()); +} + +/** + * @brief Construct a cycle_group object out of an AffineElement object + * + * @note This produces a circuit-constant object i.e. known at compile-time, no constraints. + * If `_in` is not fixed for a given circuit, use `from_witness` instead + * + * @tparam Composer + * @param _in + */ +template +cycle_group::cycle_group(const AffineElement& _in) + : context(nullptr) + , x(_in.x) + , y(_in.y) + , _is_infinity(_in.is_point_at_infinity()) + , _is_constant(true) +{} + +/** + * @brief Converts an AffineElement into a circuit witness. + * + * @details Somewhat expensive as we do an on-curve check and `_is_infiity` is a witness and not a constant. + * If an element is being converted where it is known the element is on the curve and/or cannot be point at + * infinity, it is best to use other methods (e.g. 
direct conversion of field_t coordinates) + * + * @tparam Composer + * @param _context + * @param _in + * @return cycle_group + */ +template +cycle_group cycle_group::from_witness(Composer* _context, const AffineElement& _in) +{ + cycle_group result(_context); + result.x = field_t(witness_t(_context, _in.x)); + result.y = field_t(witness_t(_context, _in.y)); + result._is_infinity = bool_t(witness_t(_context, _in.is_point_at_infinity())); + result._is_constant = false; + result.validate_is_on_curve(); + return result; +} + +/** + * @brief Converts a native AffineElement into a witness, but constrains the witness values to be known constants. + * + * @details When performing group operations where one operand is a witness and one is a constant, + * it can be more efficient to convert the constant element into a witness. This is because we have custom gates + * that evaluate additions in one constraint, but only if both operands are witnesses. + * + * @tparam Composer + * @param _context + * @param _in + * @return cycle_group + */ +template +cycle_group cycle_group::from_constant_witness(Composer* _context, const AffineElement& _in) +{ + cycle_group result(_context); + result.x = field_t(witness_t(_context, _in.x)); + result.y = field_t(witness_t(_context, _in.y)); + result.x.assert_equal(_in.x); + result.y.assert_equal(_in.y); + // point at infinity is circuit constant + result._is_infinity = _in.is_point_at_infinity(); + result._is_constant = false; + return result; +} + template Composer* cycle_group::get_context(const cycle_group& other) const { if (get_context() != nullptr) { @@ -21,6 +142,31 @@ template Composer* cycle_group::get_context(const return nullptr; } +template typename cycle_group::AffineElement cycle_group::get_value() const +{ + AffineElement result(x.get_value(), y.get_value()); + if (is_point_at_infinity().get_value()) { + result.self_set_infinity(); + } + return result; +} + +/** + * @brief On-curve check. 
+ * + * @tparam Composer + */ +template void cycle_group::validate_is_on_curve() const +{ + // This class is for short Weierstrass curves only! + static_assert(Group::curve_a == 0); + auto xx = x * x; + auto xxx = xx * x; + auto res = y.madd(y, -xxx - Group::curve_b); + res *= is_point_at_infinity(); + res.assert_is_zero(); +} + /** * @brief Evaluates a doubling * @@ -36,16 +182,18 @@ template cycle_group cycle_group::dbl() auto lambda = (x1 * x1 * 3) / (y1 + y1); auto x3 = lambda * lambda - x1 - x1; auto y3 = lambda * (x1 - x3) - y1; - affine_element p3(x3, y3); + AffineElement p3(x3, y3); if (is_constant()) { - return cycle_group(p3); + return cycle_group(p3); } auto context = get_context(); - cycle_group result = cycle_group::from_witness(context, p3); - result.is_infinity = is_point_at_infinity(); + field_t r_x(witness_t(context, p3.x)); + field_t r_y(witness_t(context, p3.y)); + cycle_group result = cycle_group(r_x, r_y, false); + result.set_point_at_infinity(is_point_at_infinity()); proof_system::ecc_dbl_gate_ dbl_gate{ .x1 = x.get_witness_index(), .y1 = y.get_witness_index(), @@ -74,21 +222,23 @@ cycle_group cycle_group::unconditional_add(const cycle_group const bool lhs_constant = is_constant(); const bool rhs_constant = other.is_constant(); if (lhs_constant && !rhs_constant) { - auto lhs = cycle_group::from_witness(context, get_value()); + auto lhs = cycle_group::from_constant_witness(context, get_value()); return lhs.unconditional_add(other); } if (!lhs_constant && rhs_constant) { - auto rhs = cycle_group::from_witness(context, other.get_value()); + auto rhs = cycle_group::from_constant_witness(context, other.get_value()); return unconditional_add(rhs); } const auto p1 = get_value(); const auto p2 = other.get_value(); - affine_element p3(element(p1) + element(p2)); + AffineElement p3(Element(p1) + Element(p2)); if (lhs_constant && rhs_constant) { return cycle_group(p3); } - cycle_group result = cycle_group::from_witness(context, p3); + field_t 
r_x(witness_t(context, p3.x)); + field_t r_y(witness_t(context, p3.y)); + cycle_group result(r_x, r_y, false); proof_system::ecc_add_gate_ add_gate{ .x1 = x.get_witness_index(), @@ -123,20 +273,22 @@ cycle_group cycle_group::unconditional_subtract(const cycle_ const bool rhs_constant = other.is_constant(); if (lhs_constant && !rhs_constant) { - auto lhs = cycle_group::from_witness(context, get_value()); + auto lhs = cycle_group::from_constant_witness(context, get_value()); return lhs.unconditional_subtract(other); } if (!lhs_constant && rhs_constant) { - auto rhs = cycle_group::from_witness(context, other.get_value()); + auto rhs = cycle_group::from_constant_witness(context, other.get_value()); return unconditional_subtract(rhs); } auto p1 = get_value(); auto p2 = other.get_value(); - affine_element p3(element(p1) - element(p2)); + AffineElement p3(Element(p1) - Element(p2)); if (lhs_constant && rhs_constant) { return cycle_group(p3); } - cycle_group result = cycle_group::from_witness(context, p3); + field_t r_x(witness_t(context, p3.x)); + field_t r_y(witness_t(context, p3.y)); + cycle_group result(r_x, r_y, false); proof_system::ecc_add_gate_ add_gate{ .x1 = x.get_witness_index(), @@ -219,7 +371,7 @@ template cycle_group cycle_group::operat auto lambda = (y2 - y1) / x_diff; auto x3 = lambda.madd(lambda, -(x2 + x1)); auto y3 = lambda.madd(x1 - x3, -y1); - cycle_group add_result(context, x3, y3, x_coordinates_match); + cycle_group add_result(x3, y3, x_coordinates_match); auto dbl_result = dbl(); @@ -245,7 +397,7 @@ template cycle_group cycle_group::operat // n.b. 
can likely optimise this bool_t result_is_infinity = infinity_predicate && (!lhs_infinity && !rhs_infinity); result_is_infinity = result_is_infinity || (lhs_infinity && rhs_infinity); - result.is_infinity = result_is_infinity; + result.set_point_at_infinity(result_is_infinity); return result; } @@ -261,7 +413,6 @@ template cycle_group cycle_group::operat */ template cycle_group cycle_group::operator-(const cycle_group& other) const { - Composer* context = get_context(other); const bool_t x_coordinates_match = (x == other.x); const bool_t y_coordinates_match = (y == other.y); @@ -276,7 +427,7 @@ template cycle_group cycle_group::operat auto lambda = (-y2 - y1) / x_diff; auto x3 = lambda.madd(lambda, -(x2 + x1)); auto y3 = lambda.madd(x1 - x3, -y1); - cycle_group add_result(context, x3, y3, x_coordinates_match); + cycle_group add_result(x3, y3, x_coordinates_match); auto dbl_result = dbl(); @@ -302,7 +453,7 @@ template cycle_group cycle_group::operat // n.b. can likely optimise this bool_t result_is_infinity = infinity_predicate && (!lhs_infinity && !rhs_infinity); result_is_infinity = result_is_infinity || (lhs_infinity && rhs_infinity); - result.is_infinity = result_is_infinity; + result.set_point_at_infinity(result_is_infinity); return result; } @@ -319,16 +470,6 @@ template cycle_group& cycle_group::opera return *this; } -template cycle_group::offset_generators::offset_generators(size_t num_points) -{ - auto generator_temp = G1::template derive_generators<300>(); // hmm bad - const size_t num_generators = num_points + 1; - for (size_t i = 0; i < num_generators; ++i) { - generators.emplace_back(generator_temp[i]); - } - - auto init_generator = generators[0]; -} template cycle_group::cycle_scalar::cycle_scalar(const field_t& _lo, const field_t& _hi) : lo(_lo) @@ -385,6 +526,19 @@ typename cycle_group::cycle_scalar::ScalarField cycle_group: return ScalarField(lo_v + (hi_v << LO_BITS)); } +/** + * @brief Construct a new cycle group::straus scalar slice::straus scalar 
slice object + * + * @details As part of slicing algoirthm, we also perform a primality test on the inut scalar. + * + * TODO(@zac-williamson) make the primality test configurable. + * We may want to validate the input < BN254::Fr OR input < Grumpkin::Fr depending on context! + * + * @tparam Composer + * @param context + * @param scalar + * @param table_bits + */ template cycle_group::straus_scalar_slice::straus_scalar_slice(Composer* context, const cycle_scalar& scalar, @@ -473,24 +627,46 @@ cycle_group::straus_scalar_slice::straus_scalar_slice(Composer* contex std::copy(hi_slices.begin(), hi_slices.end(), std::back_inserter(slices)); } +/** + * @brief Return a bit-slice associated with round `index`. + * + * @details In Straus algorithm, `index` is a known parameter, so no need for expensive lookup tables + * + * @tparam Composer + * @param index + * @return field_t + */ template field_t cycle_group::straus_scalar_slice::read(size_t index) { ASSERT(slices.size() > index); return slices[index]; } +/** + * @brief Construct a new cycle group::straus lookup table::straus lookup table object + * + * @details Constructs a `table_bits` lookup table. + * + * If Composer is not ULTRA, `table_bits = 1` + * If Composer is ULTRA, ROM table is used as lookup table + * + * @tparam Composer + * @param context + * @param base_point + * @param offset_generator + * @param table_bits + */ template cycle_group::straus_lookup_table::straus_lookup_table(Composer* context, const cycle_group& base_point, - const cycle_group& generator_point, + const cycle_group& offset_generator, size_t table_bits) : _table_bits(table_bits) , _context(context) { const size_t table_size = 1UL << table_bits; - point_table.resize(table_size); - point_table[0] = generator_point; + point_table[0] = offset_generator; // We want to support the case where input points are points at infinity. // If base point is at infinity, we want every point in the table to just be `generator_point`. 
@@ -500,24 +676,22 @@ cycle_group::straus_lookup_table::straus_lookup_table(Composer* contex // x-coordinate collisions in honest case) 3: When assigning to the point table, we conditionally assign either // the output of the point addition (if not at infinity) or the generator point (if at infinity) // Note: if `base_point.is_point_at_infinity()` is constant, these conditional assigns produce zero gate overhead - cycle_group fallback_point(G1::affine_one); + cycle_group fallback_point(Group::affine_one); field_t modded_x = field_t::conditional_assign(base_point.is_point_at_infinity(), fallback_point.x, base_point.x); field_t modded_y = field_t::conditional_assign(base_point.is_point_at_infinity(), fallback_point.y, base_point.y); - cycle_group modded_base_point(context, modded_x, modded_y, false); + cycle_group modded_base_point(modded_x, modded_y, false); for (size_t i = 1; i < table_size; ++i) { - auto add_output = point_table[i - 1].constrained_unconditional_add(modded_base_point); - field_t x = field_t::conditional_assign(base_point.is_point_at_infinity(), generator_point.x, add_output.x); - field_t y = field_t::conditional_assign(base_point.is_point_at_infinity(), generator_point.y, add_output.y); - point_table[i] = cycle_group(context, x, y, false); + field_t x = field_t::conditional_assign(base_point.is_point_at_infinity(), offset_generator.x, add_output.x); + field_t y = field_t::conditional_assign(base_point.is_point_at_infinity(), offset_generator.y, add_output.y); + point_table[i] = cycle_group(x, y, false); } - if constexpr (IS_ULTRA) { rom_id = context->create_ROM_array(table_size); for (size_t i = 0; i < table_size; ++i) { if (point_table[i].is_constant()) { auto element = point_table[i].get_value(); - point_table[i] = cycle_group::from_witness(_context, element); + point_table[i] = cycle_group::from_constant_witness(_context, element); point_table[i].x.assert_equal(element.x); point_table[i].y.assert_equal(element.y); } @@ -531,6 +705,13 @@ 
cycle_group::straus_lookup_table::straus_lookup_table(Composer* contex } } +/** + * @brief Given an `_index` witness, return `straus_lookup_table[index]` + * + * @tparam Composer + * @param _index + * @return cycle_group + */ template cycle_group cycle_group::straus_lookup_table::read(const field_t& _index) { @@ -543,325 +724,428 @@ cycle_group cycle_group::straus_lookup_table::read(const fie auto output_indices = _context->read_ROM_array_pair(rom_id, index.get_witness_index()); field_t x = field_t::from_witness_index(_context, output_indices[0]); field_t y = field_t::from_witness_index(_context, output_indices[1]); - return cycle_group(_context, x, y, false); + return cycle_group(x, y, false); } - // idx * point_table[1] + (1 - idx) * point_table[0] - // idx (point_table[1] - point_table[0]) + point_table[0] field_t x = _index * (point_table[1].x - point_table[0].x) + point_table[0].x; field_t y = _index * (point_table[1].y - point_table[0].y) + point_table[0].y; - return cycle_group(_context, x, y, false); + return cycle_group(x, y, false); } +/** + * @brief Internal algorithm to perform a variable-base batch mul. + * + * @note Explicit assumption that all base_points are witnesses and not constants! + * Constant points must be filtered out by `batch_mul` before calling this. + * + * @details batch mul performed via the Straus multiscalar multiplication algorithm + * (optimal for MSMs where num points <128-ish). + * If Composer is not ULTRA, number of bits per Straus round = 1, + * which reduces to the basic double-and-add algorithm + * + * @details If `unconditional_add = true`, we use `::unconditional_add` instead of `::constrained_unconditional_add`. + * Use with caution! Only should be `true` if we're doing an ULTRA fixed-base MSM so we know the points cannot + * collide with the offset generators. 
+ * + * @note ULTRA Composer will call `_variable_base_batch_mul_internal` to evaluate fixed-base MSMs over points that do + * not exist in our precomputed plookup tables. This is a comprimise between maximising circuit efficiency and + * minimising the blowup size of our precomputed table polynomials. variable-base mul uses small ROM lookup tables + * which are witness-defined and not part of the plookup protocol. + * @tparam Composer + * @param scalars + * @param base_points + * @param offset_generators + * @param unconditional_add + * @return cycle_group::batch_mul_internal_output + */ template -typename cycle_group::batch_mul_internal_output cycle_group::_batch_mul_internal( - const std::vector& _scalars, - const std::vector& _base_points, +typename cycle_group::batch_mul_internal_output cycle_group::_variable_base_batch_mul_internal( + const std::span scalars, + const std::span base_points, + const std::span offset_generators, const bool unconditional_add) { - ASSERT(_scalars.size() == _base_points.size()); + ASSERT(scalars.size() == base_points.size()); Composer* context = nullptr; - for (auto& scalar : _scalars) { + for (auto& scalar : scalars) { if (scalar.lo.get_context() != nullptr) { context = scalar.get_context(); break; } } - for (auto& point : _base_points) { + for (auto& point : base_points) { if (point.get_context() != nullptr) { context = point.get_context(); break; } } - std::vector scalars; - std::vector base_points; - bool has_constant_component = false; - bool has_non_constant_component = false; - element constant_component = G1::point_at_infinity; - for (size_t i = 0; i < _scalars.size(); ++i) { - if (_scalars[i].is_constant() && _base_points[i].is_constant()) { - has_constant_component = true; - constant_component += _base_points[i].get_value() * _scalars[i].get_value(); - } else { - has_non_constant_component = true; - scalars.emplace_back(_scalars[i]); - base_points.emplace_back(_base_points[i]); - } - } - if (!has_non_constant_component) { 
- return { cycle_group(constant_component), G1::affine_point_at_infinity }; - } - // core algorithm - // define a `table_bits` size lookup table const size_t num_points = scalars.size(); - auto generators = offset_generators(num_points); std::vector scalar_slices; std::vector point_tables; for (size_t i = 0; i < num_points; ++i) { - scalar_slices.emplace_back(straus_scalar_slice(context, scalars[i], table_bits)); - point_tables.emplace_back( - straus_lookup_table(context, base_points[i], generators.generators[i + 1], table_bits)); + scalar_slices.emplace_back(straus_scalar_slice(context, scalars[i], TABLE_BITS)); + point_tables.emplace_back(straus_lookup_table(context, base_points[i], offset_generators[i + 1], TABLE_BITS)); } - element offset_generator_accumulator = generators.generators[0]; - cycle_group accumulator = generators.generators[0]; + Element offset_generator_accumulator = offset_generators[0]; + cycle_group accumulator = offset_generators[0]; - for (size_t i = 0; i < num_rounds; ++i) { + for (size_t i = 0; i < NUM_ROUNDS; ++i) { if (i != 0) { - for (size_t j = 0; j < table_bits; ++j) { + for (size_t j = 0; j < TABLE_BITS; ++j) { + // offset_generator_accuulator is a regular Element, so dbl() won't add constraints accumulator = accumulator.dbl(); offset_generator_accumulator = offset_generator_accumulator.dbl(); } } for (size_t j = 0; j < num_points; ++j) { - const field_t scalar_slice = scalar_slices[j].read(num_rounds - i - 1); + const field_t scalar_slice = scalar_slices[j].read(NUM_ROUNDS - i - 1); const cycle_group point = point_tables[j].read(scalar_slice); accumulator = unconditional_add ? 
accumulator.unconditional_add(point) : accumulator.constrained_unconditional_add(point); - offset_generator_accumulator = offset_generator_accumulator + element(generators.generators[j + 1]); + offset_generator_accumulator = offset_generator_accumulator + Element(offset_generators[j + 1]); } } - if (has_constant_component) { - // we subtract off the offset_generator_accumulator, so subtract constant component from the accumulator! - offset_generator_accumulator -= constant_component; - } - // cycle_group offset_generator_delta(affine_element(-offset_generator_accumulator)); - // // use a full conditional add here in case we end with a point at infinity or a point doubling. - // // e.g. x[P] + x[P], or x[P] + -x[P] - // accumulator = accumulator + offset_generator_delta; - - return { accumulator, affine_element(-offset_generator_accumulator) }; -} - -template -cycle_group cycle_group::variable_base_batch_mul(const std::vector& _scalars, - const std::vector& _base_points) -{ - ASSERT(_scalars.size() == _base_points.size()); - - auto [accumulator, offset_generator_accumulator] = _batch_mul_internal(_scalars, _base_points, false); - // use a full conditional add here in case we end with a point at infinity or a point doubling. - // e.g. x[P] + x[P], or x[P] + -x[P] - if (!offset_generator_accumulator.is_point_at_infinity()) { - return accumulator + (cycle_group(offset_generator_accumulator)); - } - return accumulator; + /** + * offset_generator_accumulator represents the sum of all the offset generator terms present in `accumulator`. + * We don't subtract off yet, as we may be able to combine `offset_generator_accumulator` with other constant terms + * in `batch_mul` before performing the subtraction. 
+ */ + return { accumulator, AffineElement(offset_generator_accumulator) }; } +/** + * @brief Internal algorithm to perform a fixed-base batch mul for ULTRA Composer + * + * @details Uses plookup tables which contain lookups for precomputed multiples of the input base points. + * Means we can avoid all point doublings and reduce one scalar mul to ~29 lookups + 29 ecc addition gates + * + * @tparam Composer + * @param scalars + * @param base_points + * @param off + * @return cycle_group::batch_mul_internal_output + */ template -cycle_group cycle_group::fixed_base_batch_mul( - const std::vector& _scalars, - const std::vector& _base_points) requires SupportsLookupTables +typename cycle_group::batch_mul_internal_output cycle_group::_fixed_base_batch_mul_internal( + const std::span scalars, + const std::span base_points, + [[maybe_unused]] const std::span off) requires SupportsLookupTables { - ASSERT(_scalars.size() == _base_points.size()); + ASSERT(scalars.size() == base_points.size()); Composer* context = nullptr; - for (auto& scalar : _scalars) { + for (auto& scalar : scalars) { if (scalar.get_context() != nullptr) { context = scalar.get_context(); break; } } - std::vector scalars; - std::vector base_points; - bool has_constant_component = false; - bool has_non_constant_component = false; - element constant_component = G1::point_at_infinity; - for (size_t i = 0; i < _scalars.size(); ++i) { - if (_scalars[i].is_constant() && (uint256_t(_scalars[i].get_value()) != 0)) { - has_constant_component = true; - constant_component += _base_points[i] * _scalars[i].get_value(); - } else { - has_non_constant_component = true; - scalars.emplace_back(_scalars[i]); - base_points.emplace_back(_base_points[i]); - } - } - if (!has_non_constant_component) { - return cycle_group(constant_component); - } - const size_t num_points = base_points.size(); using MultiTableId = plookup::MultiTableId; using ColumnIdx = plookup::ColumnIdx; std::vector plookup_table_ids; - std::vector 
plookup_base_points; + std::vector plookup_base_points; std::vector plookup_scalars; - std::vector leftover_scalars; - std::vector leftover_base_points; for (size_t i = 0; i < num_points; ++i) { std::optional> table_id = plookup::fixed_base::table::get_lookup_table_ids_for_point(base_points[i]); - if (table_id.has_value()) { - plookup_table_ids.emplace_back(table_id.value()[0]); - plookup_table_ids.emplace_back(table_id.value()[1]); - plookup_base_points.emplace_back(base_points[i]); - plookup_base_points.emplace_back(element(base_points[i]) * (uint256_t(1) << 128)); - plookup_scalars.emplace_back(scalars[i].lo); - plookup_scalars.emplace_back(scalars[i].hi); - - } else { - leftover_base_points.emplace_back(base_points[i]); - leftover_scalars.emplace_back(scalars[i]); - } ASSERT(table_id.has_value()); + plookup_table_ids.emplace_back(table_id.value()[0]); + plookup_table_ids.emplace_back(table_id.value()[1]); + plookup_base_points.emplace_back(base_points[i]); + plookup_base_points.emplace_back(Element(base_points[i]) * (uint256_t(1) << cycle_scalar::LO_BITS)); + plookup_scalars.emplace_back(scalars[i].lo); + plookup_scalars.emplace_back(scalars[i].hi); } std::vector lookup_points; - element offset_generator_accumulator = G1::point_at_infinity; + Element offset_generator_accumulator = Group::point_at_infinity; for (size_t i = 0; i < plookup_scalars.size(); ++i) { plookup::ReadData lookup_data = plookup_read::get_lookup_accumulators(plookup_table_ids[i], plookup_scalars[i]); for (size_t j = 0; j < lookup_data[ColumnIdx::C2].size(); ++j) { const auto x = lookup_data[ColumnIdx::C2][j]; const auto y = lookup_data[ColumnIdx::C3][j]; - lookup_points.emplace_back(cycle_group(context, x, y, false)); + lookup_points.emplace_back(cycle_group(x, y, false)); } - std::optional offset_1 = + std::optional offset_1 = plookup::fixed_base::table::get_generator_offset_for_table_id(plookup_table_ids[i]); ASSERT(offset_1.has_value()); offset_generator_accumulator += offset_1.value(); } 
- cycle_group accumulator; - if (!leftover_scalars.empty()) { - auto [var_accumulator, var_offset_generator] = - _batch_mul_internal(leftover_scalars, leftover_base_points, true); - accumulator = var_accumulator; - // todo explain subtract - if (!var_offset_generator.is_point_at_infinity()) { - offset_generator_accumulator -= var_offset_generator; - } - } - for (size_t i = 0; i < lookup_points.size(); ++i) { - if (i == 0) { - if (leftover_scalars.empty()) { - accumulator = lookup_points[i]; - } else { - accumulator = accumulator.unconditional_add(lookup_points[i]); - } - } else { - accumulator = accumulator.unconditional_add(lookup_points[i]); - } + cycle_group accumulator = lookup_points[0]; + // Perform all point additions sequentially. The Ultra ecc_addition relation costs 1 gate iff additions are chained + // and output point of previous addition = input point of current addition. + // If this condition is not met, the addition relation costs 2 gates. So it's good to do these sequentially! + for (size_t i = 1; i < lookup_points.size(); ++i) { + accumulator = accumulator.unconditional_add(lookup_points[i]); } - - if (has_constant_component) { - // we subtract off the offset_generator_accumulator, so subtract constant component from the accumulator! - offset_generator_accumulator -= constant_component; - cycle_group offset_generator_delta(affine_element(-offset_generator_accumulator)); - // Assuming independent generators, existence of constant component = cannot hit edge cases - return accumulator.unconditional_add(offset_generator_delta); - } - cycle_group offset_generator_delta(affine_element(-offset_generator_accumulator)); - accumulator = accumulator + (offset_generator_delta); - return accumulator; + /** + * offset_generator_accumulator represents the sum of all the offset generator terms present in `accumulator`. 
+ * We don't subtract off yet, as we may be able to combine `offset_generator_accumulator` with other constant terms + * in `batch_mul` before performing the subtraction. + */ + return { accumulator, offset_generator_accumulator }; } +/** + * @brief Internal algorithm to perform a fixed-base batch mul for Non-ULTRA Composers + * + * @details Multiples of the base point are precomputed, which avoids us having to add ecc doubling gates. + * More efficient than variable-base version. + * + * @tparam Composer + * @param scalars + * @param base_points + * @param off + * @return cycle_group::batch_mul_internal_output + */ template -cycle_group cycle_group::fixed_base_batch_mul( - const std::vector& _scalars, - const std::vector& _base_points) requires DoesNotSupportLookupTables +typename cycle_group::batch_mul_internal_output cycle_group::_fixed_base_batch_mul_internal( + const std::span scalars, + const std::span base_points, + const std::span offset_generators) requires DoesNotSupportLookupTables { - ASSERT(_scalars.size() == _base_points.size()); - static constexpr size_t FIXED_BASE_TABLE_BITS = 1; + ASSERT(scalars.size() == base_points.size()); + static_assert(TABLE_BITS == 1); Composer* context = nullptr; - for (auto& scalar : _scalars) { + for (auto& scalar : scalars) { if (scalar.get_context() != nullptr) { context = scalar.get_context(); break; } } - std::vector scalars; - std::vector base_points; - bool has_constant_component = false; - bool has_non_constant_component = false; - element constant_component = G1::point_at_infinity; - for (size_t i = 0; i < _scalars.size(); ++i) { - if (_scalars[i].is_constant()) { - has_constant_component = true; - constant_component += _base_points[i] * _scalars[i].get_value(); - } else { - has_non_constant_component = true; - scalars.emplace_back(_scalars[i]); - base_points.emplace_back(_base_points[i]); - } - } - if (!has_non_constant_component) { - return cycle_group(constant_component); - } // core algorithm // define a 
`table_bits` size lookup table const size_t num_points = scalars.size(); + using straus_round_tables = std::vector; - auto generators = offset_generators(num_points); std::vector scalar_slices; - // std::vector point_tables; - - using straus_round_tables = std::vector; std::vector point_tables(num_points); // creating these point tables should cost 0 constraints if base points are constant for (size_t i = 0; i < num_points; ++i) { - std::vector round_points(num_rounds); - std::vector round_offset_generators(num_rounds); + std::vector round_points(NUM_ROUNDS); + std::vector round_offset_generators(NUM_ROUNDS); round_points[0] = base_points[i]; - round_offset_generators[0] = generators.generators[i + 1]; - for (size_t j = 1; j < num_rounds; ++j) { + round_offset_generators[0] = offset_generators[i + 1]; + for (size_t j = 1; j < NUM_ROUNDS; ++j) { round_points[j] = round_points[j - 1].dbl(); round_offset_generators[j] = round_offset_generators[j - 1].dbl(); } - element::batch_normalize(&round_points[0], num_rounds); - element::batch_normalize(&round_offset_generators[0], num_rounds); - point_tables[i].resize(num_rounds); - for (size_t j = 0; j < num_rounds; ++j) { + Element::batch_normalize(&round_points[0], NUM_ROUNDS); + Element::batch_normalize(&round_offset_generators[0], NUM_ROUNDS); + point_tables[i].resize(NUM_ROUNDS); + for (size_t j = 0; j < NUM_ROUNDS; ++j) { point_tables[i][j] = straus_lookup_table( - context, cycle_group(round_points[j]), cycle_group(round_offset_generators[j]), FIXED_BASE_TABLE_BITS); + context, cycle_group(round_points[j]), cycle_group(round_offset_generators[j]), TABLE_BITS); } + scalar_slices.emplace_back(straus_scalar_slice(context, scalars[i], TABLE_BITS)); } - - for (size_t i = 0; i < num_points; ++i) { - scalar_slices.emplace_back(straus_scalar_slice(context, scalars[i], FIXED_BASE_TABLE_BITS)); + Element offset_generator_accumulator = offset_generators[0]; + cycle_group accumulator = cycle_group(Element(offset_generators[0]) * 
(uint256_t(1) << (NUM_ROUNDS - 1))); + for (size_t i = 0; i < NUM_ROUNDS; ++i) { + offset_generator_accumulator = (i > 0) ? offset_generator_accumulator.dbl() : offset_generator_accumulator; + for (size_t j = 0; j < num_points; ++j) { + auto& point_table = point_tables[j][i]; + const field_t scalar_slice = scalar_slices[j].read(i); + const cycle_group point = point_table.read(scalar_slice); + accumulator = accumulator.unconditional_add(point); + offset_generator_accumulator = offset_generator_accumulator + Element(offset_generators[j + 1]); + } } - element offset_generator_accumulator = generators.generators[0]; - cycle_group accumulator = cycle_group(element(generators.generators[0]) * (uint256_t(1) << 253)); - for (size_t i = 0; i < num_rounds; ++i) { + /** + * offset_generator_accumulator represents the sum of all the offset generator terms present in `accumulator`. + * We don't subtract off yet, as we may be able to combine `offset_generator_accumulator` with other constant terms + * in `batch_mul` before performing the subtraction. + */ + return { accumulator, offset_generator_accumulator }; +} + +/** + * @brief Multiscalar multiplication algorithm. + * + * @details Uses the Straus MSM algorithm. `batch_mul` splits inputs into three categories: + * 1. point and scalar multiplier are both constant + * 2. point is constant, scalar multiplier is a witness + * 3. point is a witness, scalar multiplier can be witness or constant + * + * For Category 1, the scalar mul can be precomuted without constraints + * For Category 2, we use a fixed-base variant of Straus (with plookup tables if available). + * For Category 3, we use standard Straus. + * The results from all 3 categories are combined and returned as an output point. + * + * @note batch_mul can handle all known cases of trigger incomplete addition formula exceptions and other weirdness: + * 1. some/all of the input points are points at infinity + * 2. some/all of the input scalars are 0 + * 3. 
some/all input points are equal to each other + * 4. output is the point at infinity + * 5. input vectors are empty + * + * @note offset_generator_data is a pointer to precomputed offset generator list. + * There is a default parameter point that poitns to a list with DEFAULT_NUM_GENERATORS generator points (32) + * If more offset generators are required, they will be derived in-place which can be expensive. + * (num required offset generators is either num input points + 1 or num input points + 2, + * depends on if one or both of _fixed_base_batch_mul_internal, _variable_base_batch_mul_internal are called) + * If you're calling this function repeatedly and you KNOW you need >32 offset generators, + * it's faster to create a `generator_data` object with the required size and pass it in as a parameter. + * @tparam Composer + * @param scalars + * @param base_points + * @param offset_generator_data + * @return cycle_group + */ +template +cycle_group cycle_group::batch_mul(const std::vector& scalars, + const std::vector& base_points, + const generator_data* const offset_generator_data) +{ + ASSERT(scalars.size() == base_points.size()); + + std::vector variable_base_scalars; + std::vector variable_base_points; + std::vector fixed_base_scalars; + std::vector fixed_base_points; - if (i > 0) { - offset_generator_accumulator = offset_generator_accumulator.dbl(); + // When calling `_variable_base_batch_mul_internal`, we can unconditionally add iff all of the input points + // are fixed-base points + // (i.e. 
we are ULTRA Composer and we are doing fixed-base mul over points not present in our plookup tables) + bool can_unconditional_add = true; + bool has_non_constant_component = false; + Element constant_acc = Group::point_at_infinity; + for (size_t i = 0; i < scalars.size(); ++i) { + bool scalar_constant = scalars[i].is_constant(); + bool point_constant = base_points[i].is_constant(); + if (scalar_constant && point_constant) { + constant_acc += (base_points[i].get_value()) * (scalars[i].get_value()); + } else if (!scalar_constant && point_constant) { + if (base_points[i].get_value().is_point_at_infinity()) { + // oi mate, why are you creating a circuit that multiplies a known point at infinity? + continue; + } + if constexpr (IS_ULTRA) { + if (plookup::fixed_base::table::lookup_table_exists_for_point(base_points[i].get_value())) { + fixed_base_scalars.push_back(scalars[i]); + fixed_base_points.push_back(base_points[i].get_value()); + } else { + // womp womp. We have lookup tables at home. ROM tables. + variable_base_scalars.push_back(scalars[i]); + variable_base_points.push_back(base_points[i]); + } + } else { + fixed_base_scalars.push_back(scalars[i]); + fixed_base_points.push_back(base_points[i].get_value()); + } + has_non_constant_component = true; + } else { + variable_base_scalars.push_back(scalars[i]); + variable_base_points.push_back(base_points[i]); + can_unconditional_add = false; + has_non_constant_component = true; + // variable base } - for (size_t j = 0; j < num_points; ++j) { - auto& point_table = point_tables[j][i]; + } - const field_t scalar_slice = scalar_slices[j].read(i); + // If all inputs are constant, return the computed constant component and call it a day. 
+ if (!has_non_constant_component) { + return cycle_group(constant_acc); + } - const cycle_group point = point_table.read(scalar_slice); - accumulator = accumulator.unconditional_add(point); - offset_generator_accumulator = offset_generator_accumulator + element(generators.generators[j + 1]); + // add the constant component into our offset accumulator + // (we'll subtract `offset_accumulator` from the MSM output i.e. we negate here to counter the future negation) + Element offset_accumulator = -constant_acc; + const bool has_variable_points = !variable_base_points.empty(); + const bool has_fixed_points = !fixed_base_points.empty(); + + // Compute all required offset generators. + const size_t num_offset_generators = + variable_base_points.size() + fixed_base_points.size() + has_variable_points + has_fixed_points; + std::vector offset_generators = + offset_generator_data->conditional_extend(num_offset_generators).generators; + + cycle_group result; + if (has_fixed_points) { + const auto [fixed_accumulator, offset_generator_delta] = + _fixed_base_batch_mul_internal(fixed_base_scalars, fixed_base_points, offset_generators); + offset_accumulator += offset_generator_delta; + result = fixed_accumulator; + } + + if (has_variable_points) { + std::span offset_generators_for_variable_base_batch_mul{ + offset_generators.data() + fixed_base_points.size(), offset_generators.size() - fixed_base_points.size() + }; + const auto [variable_accumulator, offset_generator_delta] = + _variable_base_batch_mul_internal(variable_base_scalars, + variable_base_points, + offset_generators_for_variable_base_batch_mul, + can_unconditional_add); + offset_accumulator += offset_generator_delta; + if (has_fixed_points) { + result = can_unconditional_add ? 
result.unconditional_add(variable_accumulator) + : result.constrained_unconditional_add(variable_accumulator); + } else { + result = variable_accumulator; } } - if (has_constant_component) { - // we subtract off the offset_generator_accumulator, so subtract constant component from the accumulator! - offset_generator_accumulator -= constant_component; + // Update `result` to remove the offset generator terms, and add in any constant terms from `constant_acc`. + // We have two potential modes here: + // 1. All inputs are fixed-base and we constant_acc is not the point at infinity + // 2. Everything else. + // Case 1 is a special case, as we *know* we cannot hit incomplete addition edge cases, + // under the assumption that all input points are linearly independent of one another. + // Because constant_acc is not the point at infnity we know that at least 1 input scalar was not zero, + // i.e. the output will not be the point at infinity. We also know under case 1, we won't trigger the + // doubling formula either, as every point is lienarly independent of every other point (including offset + // generators). + if (!constant_acc.is_point_at_infinity() && can_unconditional_add) { + result = result.unconditional_add(AffineElement(-offset_accumulator)); + } else { + // For case 2, we must use a full subtraction operation that handles all possible edge cases, as the output + // point may be the point at infinity. + // TODO(@zac-williamson) We can probably optimise this a bit actually. We might hit the point at infinity, + // but an honest prover won't trigger the doubling edge case. + // (doubling edge case implies input points are also the offset generator points, + // which we can assume an honest Prover will not do if we make this case produce unsatisfiable constraints) + // We could do the following: + // 1. If x-coords match, assert y-coords do not match + // 2. If x-coords match, return point at infinity, else return result - offset_accumulator. 
+ // This would be slightly cheaper than operator- as we do not have to evaluate the double edge case. + result = result - AffineElement(offset_accumulator); } - cycle_group offset_generator_delta(affine_element(-offset_generator_accumulator)); - // use a full conditional add here in case we end with a point at infinity or a point doubling. - // e.g. x[P] + x[P], or x[P] + -x[P] - accumulator = accumulator + offset_generator_delta; + return result; +} + +template cycle_group cycle_group::operator*(const cycle_scalar& scalar) const +{ + return batch_mul({ scalar }, { *this }); +} - return accumulator; +template cycle_group& cycle_group::operator*=(const cycle_scalar& scalar) +{ + *this = operator*(scalar); + return *this; +} + +template +cycle_group cycle_group::operator/(const cycle_scalar& /*unused*/) const +{ + throw_or_abort("Implementation under construction..."); } INSTANTIATE_STDLIB_TYPE(cycle_group); diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp index b72300c3364..a29fcac60be 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp @@ -10,14 +10,11 @@ namespace proof_system::plonk::stdlib { -using namespace barretenberg; -using namespace crypto::generators; - template concept SupportsLookupTables = (Composer::CIRCUIT_TYPE == CircuitType::ULTRA); template concept DoesNotSupportLookupTables = (Composer::CIRCUIT_TYPE != CircuitType::ULTRA); /** - * @brief cycle_group represents a group element of the proving system's embedded curve + * @brief cycle_group represents a group Element of the proving system's embedded curve * i.e. 
a curve with a cofactor 1 defined over a field equal to the circuit's native field Composer::FF * * (todo @zac-williamson) once the pedersen refactor project is finished, this class will supercede @@ -31,136 +28,99 @@ template class cycle_group { using bool_t = bool_t; using witness_t = witness_t; using FF = typename Composer::FF; - using G1 = typename Composer::EmbeddedCurve; - using element = typename G1::element; - using affine_element = typename G1::affine_element; - + using Curve = typename Composer::EmbeddedCurve; + using Group = typename Curve::Group; + using Element = typename Curve::Element; + using AffineElement = typename Curve::AffineElement; + using generator_data = crypto::generator_data; + + static constexpr size_t STANDARD_NUM_TABLE_BITS = 1; + static constexpr size_t ULTRA_NUM_TABLE_BITS = 4; static constexpr bool IS_ULTRA = Composer::CIRCUIT_TYPE == CircuitType::ULTRA; - static constexpr size_t table_bits = IS_ULTRA ? 4 : 1; - static constexpr size_t num_bits = FF::modulus.get_msb() + 1; - static constexpr size_t num_rounds = (num_bits + table_bits - 1) / table_bits; + static constexpr size_t TABLE_BITS = IS_ULTRA ? 
ULTRA_NUM_TABLE_BITS : STANDARD_NUM_TABLE_BITS; + static constexpr size_t NUM_BITS = FF::modulus.get_msb() + 1; + static constexpr size_t NUM_ROUNDS = (NUM_BITS + TABLE_BITS - 1) / TABLE_BITS; + inline static const std::string OFFSET_GENERATOR_DOMAIN_SEPARATOR = "cycle_group_offset_generator"; - Composer* get_context(const cycle_group& other) const; - - cycle_group(Composer* _context = nullptr) - : context(_context) - , x(0) - , y(0) - , is_infinity(true) - , _is_constant(true) - {} - - cycle_group(Composer* _context, field_t _x, field_t _y, bool_t _is_infinity) - : context(_context) - , x(_x.normalize()) - , y(_y.normalize()) - , is_infinity(_is_infinity) - , _is_constant(_x.is_constant() && _y.is_constant() && _is_infinity.is_constant()) - {} - - cycle_group(const FF& _x, const FF& _y, bool _is_infinity) - : context(nullptr) - , x(_x) - , y(_y) - , is_infinity(_is_infinity) - , _is_constant(true) - {} - - cycle_group(const affine_element& _in) - : context(nullptr) - , x(_in.x) - , y(_in.y) - , is_infinity(_in.is_point_at_infinity()) - , _is_constant(true) - {} + private: + inline static const generator_data default_offset_generators = + generator_data(generator_data::DEFAULT_NUM_GENERATORS, OFFSET_GENERATOR_DOMAIN_SEPARATOR); + public: /** - * @brief + * @brief cycle_scalar represents a member of the cycle curve SCALAR FIELD. + * This is NOT the native circuit field type. + * i.e. for a BN254 circuit, cycle_group will be Grumpkin and cycle_scalar will be Grumpkin::ScalarField + * (BN254 native field is BN254::ScalarField == Grumpkin::BaseField) * - * N.B. make sure _in is not the point at infinity! - * (todo: shoul we validate on curve?) - * @param _context - * @param _in - * @return cycle_group + * @details We convert scalar multiplication inputs into cycle_scalars to enable scalar multiplication to be + * *complete* i.e. 
Grumpkin points multiplied by BN254 scalars does not produce a cyclic group + * as BN254::ScalarField < Grumpkin::ScalarField + * This complexity *should* not leak outside the cycle_group / cycle_scalar implementations, as cycle_scalar + * performs all required conversions if the input scalars are stdlib::field_t elements + * + * @note We opted to create a new class to represent `cycle_scalar` instead of using `bigfield`, + * as `bigfield` is inefficient in this context. All required range checks for `cycle_scalar` can be obtained for + * free from the `batch_mul` algorithm, making the range checks performed by `bigfield` largely redundant. */ - static cycle_group from_witness(Composer* _context, const affine_element& _in) - { - cycle_group result(_context); - result.x = field_t(witness_t(_context, _in.x)); - result.y = field_t(witness_t(_context, _in.y)); - result.is_infinity = false; - result._is_constant = false; - return result; - } - - Composer* get_context() const { return context; } - [[nodiscard]] bool is_constant() const { return _is_constant; } - - affine_element get_value() const - { - affine_element result(x.get_value(), y.get_value()); - if (is_infinity.get_value()) { - result.self_set_infinity(); - } - return result; - } - - bool_t is_point_at_infinity() const { return is_infinity; } - void validate_is_on_curve() const - { - auto xx = x * x; - auto xxx = xx * x; - auto res = y.madd(y, -xxx - G1::curve_b); - res *= is_point_at_infinity(); - res.assert_is_zero(); - } - cycle_group dbl() const; - cycle_group unconditional_add(const cycle_group& other) const; - cycle_group constrained_unconditional_add(const cycle_group& other) const; - cycle_group conditional_add(const cycle_group& other) const; - cycle_group operator+(const cycle_group& other) const; - cycle_group unconditional_subtract(const cycle_group& other) const; - cycle_group constrained_unconditional_subtract(const cycle_group& other) const; - cycle_group operator-(const cycle_group& other) 
const; - cycle_group& operator+=(const cycle_group& other); - cycle_group& operator-=(const cycle_group& other); - - class offset_generators { - public: - offset_generators(size_t num_points); - // cycle_group get_generator(size_t generator_idx); - // cycle_group get_final_generator_offset(); - std::vector generators; - }; - struct cycle_scalar { - using ScalarField = typename G1::subgroup_field; + using ScalarField = typename Curve::ScalarField; static constexpr size_t LO_BITS = 128; static constexpr size_t HI_BITS = ScalarField::modulus.get_msb() + 1 - LO_BITS; - static cycle_scalar from_witness(Composer* context, const ScalarField& value); - cycle_scalar(const ScalarField& _in); + cycle_scalar(const ScalarField& _in = 0); cycle_scalar(const field_t& _lo, const field_t& _hi); cycle_scalar(const field_t& _in); + static cycle_scalar from_witness(Composer* context, const ScalarField& value); [[nodiscard]] bool is_constant() const; ScalarField get_value() const; + Composer* get_context() const { return lo.get_context() != nullptr ? lo.get_context() : hi.get_context(); } field_t lo; field_t hi; - - Composer* get_context() const { return lo.get_context() != nullptr ? lo.get_context() : hi.get_context(); } }; - class straus_scalar_slice { - public: + + /** + * @brief straus_scalar_slice decomposes an input scalar into `table_bits` bit-slices. + * Used in `batch_mul`, which ses the Straus multiscalar multiplication algorithm. 
+ * + */ + struct straus_scalar_slice { straus_scalar_slice(Composer* context, const cycle_scalar& scalars, size_t table_bits); field_t read(size_t index); size_t _table_bits; std::vector slices; }; - class straus_lookup_table { + + /** + * @brief straus_lookup_table computes a lookup table of size 1 << table_bits + * + * @details for an input base_point [P] and offset_generator point [G], where N = 1 << table_bits, the following is + * computed: + * + * { [G] + 0.[P], [G] + 1.[P], ..., [G] + (N - 1).[P] } + * + * The point [G] is used to ensure that we do not have to handle the point at infinity associated with 0.[P]. + * + * For an HONEST Prover, the probability of [G] and [P] colliding is equivalent to solving the dlog problem. + * This allows us to partially ignore the incomplete addition formula edge-cases for short Weierstrass curves. + * + * When adding group elements in `batch_mul`, we can constrain+assert the x-coordinates of the operand points do not + * match. An honest prover will never trigger the case where x-coordinates match due to the above. Validating + * x-coordinates do not match is much cheaper than evaluating the full complete addition formulae for short + * Weierstrass curves. + * + * @note For the case of fixed-base scalar multipliation, all input points are defined at circuit compile. + * We can ensure that all Provers cannot create point collisions between the base points and offset generators. + * For this restricted case we can skip the x-coordiante collision checks when performing group operations. + * + * @note straus_lookup_table uses UltraPlonk ROM tables if available. If not, we use simple conditional assignment + * constraints and restrict the table size to be 1 bit. 
+ */ + struct straus_lookup_table { public: straus_lookup_table() = default; straus_lookup_table(Composer* context, const cycle_group& base_point, - const cycle_group& generator_point, + const cycle_group& offset_generator, size_t table_bits); cycle_group read(const field_t& index); size_t _table_bits; @@ -169,46 +129,67 @@ template class cycle_group { size_t rom_id = 0; }; + private: + /** + * @brief Stores temporary variables produced by internal multiplication algorithms + * + */ struct batch_mul_internal_output { cycle_group accumulator; - affine_element offset_generator_delta; + AffineElement offset_generator_delta; }; - static batch_mul_internal_output _batch_mul_internal(const std::vector& scalars, - const std::vector& base_points, - bool unconditional_add); - - static cycle_group fixed_base_batch_mul( - const std::vector& _scalars, - const std::vector& _base_points) requires SupportsLookupTables; - - static cycle_group fixed_base_batch_mul( - const std::vector& _scalars, - const std::vector& _base_points) requires DoesNotSupportLookupTables; - - // static cycle_group fixed_base_batch_mul(const std::vector& scalars, - // const std::vector& base_points) - // requires(!cycle_group::IS_ULTRA); - - static cycle_group variable_base_batch_mul(const std::vector& scalars, - const std::vector& base_points); + public: + cycle_group(Composer* _context = nullptr); + cycle_group(field_t _x, field_t _y, bool_t _is_infinity); + cycle_group(const FF& _x, const FF& _y, bool _is_infinity); + cycle_group(const AffineElement& _in); + static cycle_group from_witness(Composer* _context, const AffineElement& _in); + static cycle_group from_constant_witness(Composer* _context, const AffineElement& _in); + Composer* get_context(const cycle_group& other) const; + Composer* get_context() const { return context; } + AffineElement get_value() const; + [[nodiscard]] bool is_constant() const { return _is_constant; } + bool_t is_point_at_infinity() const { return _is_infinity; } + void 
set_point_at_infinity(const bool_t& is_infinity) { _is_infinity = is_infinity; } + void validate_is_on_curve() const; + cycle_group dbl() const; + cycle_group unconditional_add(const cycle_group& other) const; + cycle_group unconditional_subtract(const cycle_group& other) const; + cycle_group constrained_unconditional_add(const cycle_group& other) const; + cycle_group constrained_unconditional_subtract(const cycle_group& other) const; + cycle_group operator+(const cycle_group& other) const; + cycle_group operator-(const cycle_group& other) const; + cycle_group& operator+=(const cycle_group& other); + cycle_group& operator-=(const cycle_group& other); + static cycle_group batch_mul(const std::vector& scalars, + const std::vector& base_points, + const generator_data* offset_generator_data = &default_offset_generators); + cycle_group operator*(const cycle_scalar& scalar) const; + cycle_group& operator*=(const cycle_scalar& scalar); + cycle_group operator/(const cycle_scalar& scalar) const; Composer* context; field_t x; field_t y; - bool_t is_infinity; + + private: + bool_t _is_infinity; bool _is_constant; + static batch_mul_internal_output _variable_base_batch_mul_internal(std::span scalars, + std::span base_points, + std::span offset_generators, + bool unconditional_add); + + static batch_mul_internal_output _fixed_base_batch_mul_internal( + std::span scalars, + std::span base_points, + std::span offset_generators) requires SupportsLookupTables; + static batch_mul_internal_output _fixed_base_batch_mul_internal( + std::span scalars, + std::span base_points, + std::span offset_generators) requires DoesNotSupportLookupTables; }; -// template -// requires(cycle_group::IS_ULTRA) -// class cycle_group_upper : public cycle_group { -// using cycle_scalar = typename cycle_group::cycle_scalar; -// using affine_element = typename cycle_group::affine_element; - -// static cycle_group fixed_base_batch_mul(const std::vector& _scalars, -// const std::vector& _base_points); -// }; - 
template inline std::ostream& operator<<(std::ostream& os, cycle_group const& v) { diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp index 9b4ea4665c4..b89cadb4378 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp @@ -9,9 +9,12 @@ #define STDLIB_TYPE_ALIASES \ using Composer = TypeParam; \ using cycle_group_ct = stdlib::cycle_group; \ - using G1 = typename Composer::EmbeddedCurve; \ - using element = typename G1::element; \ - using affine_element = typename G1::affine_element; + using Curve = typename Composer::EmbeddedCurve; \ + using Element = typename Curve::Element; \ + using AffineElement = typename Curve::AffineElement; \ + using Group = typename Curve::Group; \ + using bool_ct = stdlib::bool_t; \ + using witness_ct = stdlib::witness_t; namespace stdlib_cycle_group_tests { using namespace barretenberg; @@ -20,22 +23,25 @@ using namespace proof_system::plonk; namespace { auto& engine = numeric::random::get_debug_engine(); } +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-local-typedef" template class CycleGroupTest : public ::testing::Test { public: - using G1 = typename Composer::EmbeddedCurve; - using FF = typename G1::subgroup_field; + using Curve = typename Composer::EmbeddedCurve; + using Group = typename Composer::EmbeddedCurve::Group; - using element = typename G1::element; - using affine_element = typename G1::affine_element; + using Element = typename Curve::Element; + using AffineElement = typename Curve::AffineElement; static constexpr size_t num_generators = 110; - static inline std::array generators{}; + static inline std::array generators{}; static void SetUpTestSuite() { + for (size_t i = 0; i < num_generators; ++i) { - 
generators[i] = G1::one * FF::random_element(); + generators[i] = Group::one * Curve::ScalarField::random_element(&engine); } }; }; @@ -52,8 +58,8 @@ TYPED_TEST(CycleGroupTest, TestDbl) auto lhs = TestFixture::generators[0]; cycle_group_ct a = cycle_group_ct::from_witness(&composer, lhs); cycle_group_ct c = a.dbl(); - affine_element expected(element(lhs).dbl()); - affine_element result = c.get_value(); + AffineElement expected(Element(lhs).dbl()); + AffineElement result = c.get_value(); EXPECT_EQ(result, expected); bool proof_result = composer.check_circuit(); @@ -66,12 +72,12 @@ TYPED_TEST(CycleGroupTest, TestUnconditionalAdd) auto composer = Composer(); auto add = - [&](const affine_element& lhs, const affine_element& rhs, const bool lhs_constant, const bool rhs_constant) { + [&](const AffineElement& lhs, const AffineElement& rhs, const bool lhs_constant, const bool rhs_constant) { cycle_group_ct a = lhs_constant ? cycle_group_ct(lhs) : cycle_group_ct::from_witness(&composer, lhs); cycle_group_ct b = rhs_constant ? 
cycle_group_ct(rhs) : cycle_group_ct::from_witness(&composer, rhs); cycle_group_ct c = a.unconditional_add(b); - affine_element expected(element(lhs) + element(rhs)); - affine_element result = c.get_value(); + AffineElement expected(Element(lhs) + Element(rhs)); + AffineElement result = c.get_value(); EXPECT_EQ(result, expected); }; @@ -96,8 +102,8 @@ TYPED_TEST(CycleGroupTest, TestConstrainedUnconditionalAddSucceed) cycle_group_ct a = cycle_group_ct::from_witness(&composer, lhs); cycle_group_ct b = cycle_group_ct::from_witness(&composer, rhs); cycle_group_ct c = a.constrained_unconditional_add(b); - affine_element expected(element(lhs) + element(rhs)); - affine_element result = c.get_value(); + AffineElement expected(Element(lhs) + Element(rhs)); + AffineElement result = c.get_value(); EXPECT_EQ(result, expected); bool proof_result = composer.check_circuit(); @@ -106,8 +112,7 @@ TYPED_TEST(CycleGroupTest, TestConstrainedUnconditionalAddSucceed) TYPED_TEST(CycleGroupTest, TestConstrainedUnconditionalAddFail) { - using Composer = TypeParam; - using cycle_group_ct = stdlib::cycle_group; + STDLIB_TYPE_ALIASES; auto composer = Composer(); auto lhs = TestFixture::generators[0]; @@ -127,23 +132,21 @@ TYPED_TEST(CycleGroupTest, TestConstrainedUnconditionalAddFail) TYPED_TEST(CycleGroupTest, TestAdd) { STDLIB_TYPE_ALIASES; - using bool_ct = stdlib::bool_t; - using witness_ct = stdlib::witness_t; auto composer = Composer(); auto lhs = TestFixture::generators[0]; auto rhs = -TestFixture::generators[1]; cycle_group_ct point_at_infinity = cycle_group_ct::from_witness(&composer, rhs); - point_at_infinity.is_infinity = bool_ct(witness_ct(&composer, true)); + point_at_infinity.set_point_at_infinity(bool_ct(witness_ct(&composer, true))); // case 1. 
no edge-cases triggered { cycle_group_ct a = cycle_group_ct::from_witness(&composer, lhs); cycle_group_ct b = cycle_group_ct::from_witness(&composer, rhs); cycle_group_ct c = a + b; - affine_element expected(element(lhs) + element(rhs)); - affine_element result = c.get_value(); + AffineElement expected(Element(lhs) + Element(rhs)); + AffineElement result = c.get_value(); EXPECT_EQ(result, expected); } @@ -152,7 +155,7 @@ TYPED_TEST(CycleGroupTest, TestAdd) cycle_group_ct a = point_at_infinity; cycle_group_ct b = cycle_group_ct::from_witness(&composer, rhs); cycle_group_ct c = a + b; - affine_element result = c.get_value(); + AffineElement result = c.get_value(); EXPECT_EQ(result, rhs); } @@ -161,7 +164,7 @@ TYPED_TEST(CycleGroupTest, TestAdd) cycle_group_ct a = cycle_group_ct::from_witness(&composer, lhs); cycle_group_ct b = point_at_infinity; cycle_group_ct c = a + b; - affine_element result = c.get_value(); + AffineElement result = c.get_value(); EXPECT_EQ(result, lhs); } @@ -170,7 +173,7 @@ TYPED_TEST(CycleGroupTest, TestAdd) cycle_group_ct a = point_at_infinity; cycle_group_ct b = point_at_infinity; cycle_group_ct c = a + b; - EXPECT_TRUE(c.is_infinity.get_value()); + EXPECT_TRUE(c.is_point_at_infinity().get_value()); EXPECT_TRUE(c.get_value().is_point_at_infinity()); } @@ -179,7 +182,7 @@ TYPED_TEST(CycleGroupTest, TestAdd) cycle_group_ct a = cycle_group_ct::from_witness(&composer, lhs); cycle_group_ct b = cycle_group_ct::from_witness(&composer, -lhs); cycle_group_ct c = a + b; - EXPECT_TRUE(c.is_infinity.get_value()); + EXPECT_TRUE(c.is_point_at_infinity().get_value()); EXPECT_TRUE(c.get_value().is_point_at_infinity()); } @@ -188,8 +191,8 @@ TYPED_TEST(CycleGroupTest, TestAdd) cycle_group_ct a = cycle_group_ct::from_witness(&composer, lhs); cycle_group_ct b = cycle_group_ct::from_witness(&composer, lhs); cycle_group_ct c = a + b; - affine_element expected((element(lhs)).dbl()); - affine_element result = c.get_value(); + AffineElement 
expected((Element(lhs)).dbl()); + AffineElement result = c.get_value(); EXPECT_EQ(result, expected); } @@ -203,12 +206,12 @@ TYPED_TEST(CycleGroupTest, TestUnconditionalSubtract) auto composer = Composer(); auto add = - [&](const affine_element& lhs, const affine_element& rhs, const bool lhs_constant, const bool rhs_constant) { + [&](const AffineElement& lhs, const AffineElement& rhs, const bool lhs_constant, const bool rhs_constant) { cycle_group_ct a = lhs_constant ? cycle_group_ct(lhs) : cycle_group_ct::from_witness(&composer, lhs); cycle_group_ct b = rhs_constant ? cycle_group_ct(rhs) : cycle_group_ct::from_witness(&composer, rhs); cycle_group_ct c = a.unconditional_subtract(b); - affine_element expected(element(lhs) - element(rhs)); - affine_element result = c.get_value(); + AffineElement expected(Element(lhs) - Element(rhs)); + AffineElement result = c.get_value(); EXPECT_EQ(result, expected); }; @@ -233,8 +236,8 @@ TYPED_TEST(CycleGroupTest, TestConstrainedUnconditionalSubtractSucceed) cycle_group_ct a = cycle_group_ct::from_witness(&composer, lhs); cycle_group_ct b = cycle_group_ct::from_witness(&composer, rhs); cycle_group_ct c = a.constrained_unconditional_subtract(b); - affine_element expected(element(lhs) - element(rhs)); - affine_element result = c.get_value(); + AffineElement expected(Element(lhs) - Element(rhs)); + AffineElement result = c.get_value(); EXPECT_EQ(result, expected); bool proof_result = composer.check_circuit(); @@ -243,8 +246,7 @@ TYPED_TEST(CycleGroupTest, TestConstrainedUnconditionalSubtractSucceed) TYPED_TEST(CycleGroupTest, TestConstrainedUnconditionalSubtractFail) { - using Composer = TypeParam; - using cycle_group_ct = stdlib::cycle_group; + STDLIB_TYPE_ALIASES; auto composer = Composer(); auto lhs = TestFixture::generators[0]; @@ -272,15 +274,15 @@ TYPED_TEST(CycleGroupTest, TestSubtract) auto rhs = -TestFixture::generators[1]; cycle_group_ct point_at_infinity = cycle_group_ct::from_witness(&composer, rhs); - 
point_at_infinity.is_infinity = bool_ct(witness_ct(&composer, true)); + point_at_infinity.set_point_at_infinity(bool_ct(witness_ct(&composer, true))); // case 1. no edge-cases triggered { cycle_group_ct a = cycle_group_ct::from_witness(&composer, lhs); cycle_group_ct b = cycle_group_ct::from_witness(&composer, rhs); cycle_group_ct c = a - b; - affine_element expected(element(lhs) - element(rhs)); - affine_element result = c.get_value(); + AffineElement expected(Element(lhs) - Element(rhs)); + AffineElement result = c.get_value(); EXPECT_EQ(result, expected); } @@ -289,7 +291,7 @@ TYPED_TEST(CycleGroupTest, TestSubtract) cycle_group_ct a = point_at_infinity; cycle_group_ct b = cycle_group_ct::from_witness(&composer, rhs); cycle_group_ct c = a - b; - affine_element result = c.get_value(); + AffineElement result = c.get_value(); EXPECT_EQ(result, -rhs); } @@ -298,7 +300,7 @@ TYPED_TEST(CycleGroupTest, TestSubtract) cycle_group_ct a = cycle_group_ct::from_witness(&composer, lhs); cycle_group_ct b = point_at_infinity; cycle_group_ct c = a - b; - affine_element result = c.get_value(); + AffineElement result = c.get_value(); EXPECT_EQ(result, lhs); } @@ -307,7 +309,7 @@ TYPED_TEST(CycleGroupTest, TestSubtract) cycle_group_ct a = point_at_infinity; cycle_group_ct b = point_at_infinity; cycle_group_ct c = a - b; - EXPECT_TRUE(c.is_infinity.get_value()); + EXPECT_TRUE(c.is_point_at_infinity().get_value()); EXPECT_TRUE(c.get_value().is_point_at_infinity()); } @@ -316,8 +318,8 @@ TYPED_TEST(CycleGroupTest, TestSubtract) cycle_group_ct a = cycle_group_ct::from_witness(&composer, lhs); cycle_group_ct b = cycle_group_ct::from_witness(&composer, -lhs); cycle_group_ct c = a - b; - affine_element expected((element(lhs)).dbl()); - affine_element result = c.get_value(); + AffineElement expected((Element(lhs)).dbl()); + AffineElement result = c.get_value(); EXPECT_EQ(result, expected); } @@ -326,7 +328,7 @@ TYPED_TEST(CycleGroupTest, TestSubtract) cycle_group_ct a = 
cycle_group_ct::from_witness(&composer, lhs); cycle_group_ct b = cycle_group_ct::from_witness(&composer, lhs); cycle_group_ct c = a - b; - EXPECT_TRUE(c.is_infinity.get_value()); + EXPECT_TRUE(c.is_point_at_infinity().get_value()); EXPECT_TRUE(c.get_value().is_point_at_infinity()); } @@ -334,24 +336,22 @@ TYPED_TEST(CycleGroupTest, TestSubtract) EXPECT_EQ(proof_result, true); } -TYPED_TEST(CycleGroupTest, TestVariableBaseBatchMul) +TYPED_TEST(CycleGroupTest, TestBatchMul) { STDLIB_TYPE_ALIASES; - using witness_ct = stdlib::witness_t; auto composer = Composer(); const size_t num_muls = 1; - element expected = G1::point_at_infinity; - // case 1, general MSM with inputs that are combinations of constant and witnesses { std::vector points; std::vector scalars; + Element expected = Group::point_at_infinity; for (size_t i = 0; i < num_muls; ++i) { auto element = TestFixture::generators[i]; - typename G1::subgroup_field scalar = G1::subgroup_field::random_element(); + typename Group::subgroup_field scalar = Group::subgroup_field::random_element(&engine); // 1: add entry where point, scalar are witnesses expected += (element * scalar); @@ -373,8 +373,8 @@ TYPED_TEST(CycleGroupTest, TestVariableBaseBatchMul) points.emplace_back(cycle_group_ct(element)); scalars.emplace_back(typename cycle_group_ct::cycle_scalar(scalar)); } - auto result = cycle_group_ct::variable_base_batch_mul(scalars, points); - EXPECT_EQ(result.get_value(), affine_element(expected)); + auto result = cycle_group_ct::batch_mul(scalars, points); + EXPECT_EQ(result.get_value(), AffineElement(expected)); } // case 2, MSM that produces point at infinity @@ -383,14 +383,14 @@ TYPED_TEST(CycleGroupTest, TestVariableBaseBatchMul) std::vector scalars; auto element = TestFixture::generators[0]; - typename G1::subgroup_field scalar = G1::subgroup_field::random_element(); + typename Group::subgroup_field scalar = Group::subgroup_field::random_element(&engine); 
points.emplace_back(cycle_group_ct::from_witness(&composer, element)); scalars.emplace_back(cycle_group_ct::cycle_scalar::from_witness(&composer, scalar)); points.emplace_back(cycle_group_ct::from_witness(&composer, element)); scalars.emplace_back(cycle_group_ct::cycle_scalar::from_witness(&composer, -scalar)); - auto result = cycle_group_ct::variable_base_batch_mul(scalars, points); + auto result = cycle_group_ct::batch_mul(scalars, points); EXPECT_TRUE(result.is_point_at_infinity().get_value()); } @@ -400,10 +400,10 @@ TYPED_TEST(CycleGroupTest, TestVariableBaseBatchMul) std::vector scalars; auto element = TestFixture::generators[0]; - typename G1::subgroup_field scalar = 0; + typename Group::subgroup_field scalar = 0; points.emplace_back(cycle_group_ct::from_witness(&composer, element)); scalars.emplace_back(cycle_group_ct::cycle_scalar::from_witness(&composer, scalar)); - auto result = cycle_group_ct::variable_base_batch_mul(scalars, points); + auto result = cycle_group_ct::batch_mul(scalars, points); EXPECT_TRUE(result.is_point_at_infinity().get_value()); } @@ -413,126 +413,155 @@ TYPED_TEST(CycleGroupTest, TestVariableBaseBatchMul) std::vector scalars; auto element = TestFixture::generators[0]; - typename G1::subgroup_field scalar = G1::subgroup_field::random_element(); + typename Group::subgroup_field scalar = Group::subgroup_field::random_element(&engine); // is_infinity = witness { cycle_group_ct point = cycle_group_ct::from_witness(&composer, element); - point.is_infinity = witness_ct(&composer, true); + point.set_point_at_infinity(witness_ct(&composer, true)); points.emplace_back(point); scalars.emplace_back(cycle_group_ct::cycle_scalar::from_witness(&composer, scalar)); } // is_infinity = constant { cycle_group_ct point = cycle_group_ct::from_witness(&composer, element); - point.is_infinity = true; + point.set_point_at_infinity(true); points.emplace_back(point); scalars.emplace_back(cycle_group_ct::cycle_scalar::from_witness(&composer, scalar)); } - 
auto result = cycle_group_ct::variable_base_batch_mul(scalars, points); + auto result = cycle_group_ct::batch_mul(scalars, points); EXPECT_TRUE(result.is_point_at_infinity().get_value()); } - bool proof_result = composer.check_circuit(); - EXPECT_EQ(proof_result, true); -} - -TYPED_TEST(CycleGroupTest, ProfileVariableBaseBatcMul) -{ - STDLIB_TYPE_ALIASES; - auto composer = Composer(); - - const size_t num_muls = 2; - - element expected = G1::point_at_infinity; - - // case 1, general MSM with inputs that are combinations of constant and witnesses + // case 5, fixed-base MSM with inputs that are combinations of constant and witnesses (group elements are in lookup + // table) { std::vector points; std::vector scalars; - + std::vector scalars_native; + Element expected = Group::point_at_infinity; for (size_t i = 0; i < num_muls; ++i) { - auto element = TestFixture::generators[i]; - typename G1::subgroup_field scalar = G1::subgroup_field::random_element(); + auto element = crypto::pedersen_hash_refactor::get_lhs_generator(); + typename Group::subgroup_field scalar = Group::subgroup_field::random_element(&engine); - // 1: add entry where point, scalar are witnesses + // 1: add entry where point is constant, scalar is witness expected += (element * scalar); - points.emplace_back(cycle_group_ct::from_witness(&composer, element)); + points.emplace_back(element); scalars.emplace_back(cycle_group_ct::cycle_scalar::from_witness(&composer, scalar)); + scalars_native.emplace_back(scalar); + + // 2: add entry where point is constant, scalar is constant + element = crypto::pedersen_hash_refactor::get_rhs_generator(); + expected += (element * scalar); + points.emplace_back(element); + scalars.emplace_back(typename cycle_group_ct::cycle_scalar(scalar)); + scalars_native.emplace_back(scalar); } - auto result = cycle_group_ct::variable_base_batch_mul(scalars, points); - EXPECT_EQ(result.get_value(), affine_element(expected)); + auto result = cycle_group_ct::batch_mul(scalars, points); 
+ EXPECT_EQ(result.get_value(), AffineElement(expected)); + EXPECT_EQ(result.get_value(), crypto::pedersen_commitment_refactor::commit_native(scalars_native)); } - std::cout << "composer gates = " << composer.get_num_gates() << std::endl; - bool proof_result = composer.check_circuit(); - EXPECT_EQ(proof_result, true); -} - -TYPED_TEST(CycleGroupTest, TestFixedBaseBatchMul) -{ - STDLIB_TYPE_ALIASES; - // using witness_ct = stdlib::witness_t; - auto composer = Composer(); - - const size_t num_muls = 1; - - element expected = G1::point_at_infinity; - - // case 1, general MSM with inputs that are combinations of constant and witnesses + // case 6, fixed-base MSM with inputs that are combinations of constant and witnesses (some group elements are in + // lookup table) { - std::vector points; + std::vector points; std::vector scalars; - std::vector scalars_native; - + std::vector scalars_native; + Element expected = Group::point_at_infinity; for (size_t i = 0; i < num_muls; ++i) { - auto element = crypto::pedersen_hash::generator_info::get_lhs_generator(); - typename G1::subgroup_field scalar = G1::subgroup_field::random_element(); + auto element = crypto::pedersen_hash_refactor::get_lhs_generator(); + typename Group::subgroup_field scalar = Group::subgroup_field::random_element(&engine); // 1: add entry where point is constant, scalar is witness expected += (element * scalar); - points.emplace_back((element)); - std::cout << "test base point[0] = " << element << std::endl; + points.emplace_back(element); scalars.emplace_back(cycle_group_ct::cycle_scalar::from_witness(&composer, scalar)); scalars_native.emplace_back(scalar); // 2: add entry where point is constant, scalar is constant - element = crypto::pedersen_hash::generator_info::get_rhs_generator(); + element = crypto::pedersen_hash_refactor::get_rhs_generator(); expected += (element * scalar); - points.emplace_back((element)); - std::cout << "test base point[1] = " << element << std::endl; + 
points.emplace_back(element); scalars.emplace_back(typename cycle_group_ct::cycle_scalar(scalar)); scalars_native.emplace_back(scalar); + + // // 3: add entry where point is constant, scalar is witness + scalar = Group::subgroup_field::random_element(&engine); + element = Group::one * Group::subgroup_field::random_element(&engine); + expected += (element * scalar); + points.emplace_back(element); + scalars.emplace_back(cycle_group_ct::cycle_scalar::from_witness(&composer, scalar)); + scalars_native.emplace_back(scalar); } - auto result = cycle_group_ct::fixed_base_batch_mul(scalars, points); - EXPECT_EQ(result.get_value(), affine_element(expected)); - EXPECT_EQ(result.get_value(), crypto::pedersen_commitment_refactor::commit_native(scalars_native)); + auto result = cycle_group_ct::batch_mul(scalars, points); + EXPECT_EQ(result.get_value(), AffineElement(expected)); } - // case 2, MSM where input scalars are 0 + // case 7, Fixed-base MSM where input scalars are 0 { - std::vector points; + std::vector points; std::vector scalars; for (size_t i = 0; i < num_muls; ++i) { - auto element = crypto::pedersen_hash::generator_info::get_lhs_generator(); - typename G1::subgroup_field scalar = 0; + auto element = crypto::pedersen_hash_refactor::get_lhs_generator(); + typename Group::subgroup_field scalar = 0; // 1: add entry where point is constant, scalar is witness - expected += (element * scalar); points.emplace_back((element)); scalars.emplace_back(cycle_group_ct::cycle_scalar::from_witness(&composer, scalar)); // // 2: add entry where point is constant, scalar is constant - expected += (element * scalar); points.emplace_back((element)); scalars.emplace_back(typename cycle_group_ct::cycle_scalar(scalar)); } - auto result = cycle_group_ct::fixed_base_batch_mul(scalars, points); + auto result = cycle_group_ct::batch_mul(scalars, points); EXPECT_EQ(result.is_point_at_infinity().get_value(), true); } + + bool proof_result = composer.check_circuit(); + EXPECT_EQ(proof_result, 
true); +} + +TYPED_TEST(CycleGroupTest, TestMul) +{ + STDLIB_TYPE_ALIASES + auto composer = Composer(); + + const size_t num_muls = 5; + + // case 1, general MSM with inputs that are combinations of constant and witnesses + { + cycle_group_ct point; + typename cycle_group_ct::cycle_scalar scalar; + for (size_t i = 0; i < num_muls; ++i) { + auto element = TestFixture::generators[i]; + typename Group::subgroup_field native_scalar = Group::subgroup_field::random_element(&engine); + + // 1: add entry where point, scalar are witnesses + point = (cycle_group_ct::from_witness(&composer, element)); + scalar = (cycle_group_ct::cycle_scalar::from_witness(&composer, native_scalar)); + EXPECT_EQ((point * scalar).get_value(), (element * native_scalar)); + + // 2: add entry where point is constant, scalar is witness + point = (cycle_group_ct(element)); + scalar = (cycle_group_ct::cycle_scalar::from_witness(&composer, native_scalar)); + + EXPECT_EQ((point * scalar).get_value(), (element * native_scalar)); + + // 3: add entry where point is witness, scalar is constant + point = (cycle_group_ct::from_witness(&composer, element)); + EXPECT_EQ((point * scalar).get_value(), (element * native_scalar)); + + // 4: add entry where point is constant, scalar is constant + point = (cycle_group_ct(element)); + EXPECT_EQ((point * scalar).get_value(), (element * native_scalar)); + } + } + bool proof_result = composer.check_circuit(); EXPECT_EQ(proof_result, true); } +#pragma GCC diagnostic pop -} // namespace stdlib_cycle_group_tests \ No newline at end of file +} // namespace stdlib_cycle_group_tests From 0608a65220efe8ebb0831f0c80a9e4f0edc963d6 Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Sun, 10 Sep 2023 12:38:43 +0000 Subject: [PATCH 11/50] wip --- .../src/barretenberg/stdlib/hash/pedersen/pedersen_refactor.hpp | 1 + 1 file changed, 1 insertion(+) diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_refactor.hpp 
b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_refactor.hpp index 046bdf6b3f2..fb73174761e 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_refactor.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_refactor.hpp @@ -23,6 +23,7 @@ template class pedersen_hash_refactor { public: // TODO(@suyash67) as part of refactor project, can we remove this and replace with `hash` // (i.e. simplify the name as we no longer have a need for `hash_single`) + // TODO update to new interface static field_t hash_multiple(const std::vector& in, size_t hash_index = 0, const std::string& domain_separator = grumpkin::g1::DEFAULT_DOMAIN_SEPARATOR, From afad6f03443f49cdcbc5f3d8bbbacdf2c7919ce8 Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Fri, 15 Sep 2023 17:28:08 +0000 Subject: [PATCH 12/50] completed merge of master --- .../pedersen_hash/pedersen_refactor.hpp | 4 +- .../ecc/curves/grumpkin/grumpkin.cpp | 24 +- .../ecc/curves/grumpkin/grumpkin.hpp | 3 - .../ecc/curves/secp256k1/secp256k1.cpp | 12 - .../ecc/curves/secp256k1/secp256k1.hpp | 6 - .../ecc/curves/secp256r1/secp256r1.cpp | 12 - .../ecc/curves/secp256r1/secp256r1.hpp | 6 - .../ecc/groups/affine_element.hpp | 6 - .../ecc/groups/affine_element_impl.hpp | 15 +- .../barretenberg/ecc/groups/element_impl.hpp | 1525 ++++++++--------- .../hash/pedersen/pedersen_refactor.cpp | 10 +- .../hash/pedersen/pedersen_refactor.hpp | 14 +- 12 files changed, 786 insertions(+), 851 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen_refactor.hpp b/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen_refactor.hpp index 646e52abfa4..06c94ee6264 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen_refactor.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen_refactor.hpp @@ -64,11 +64,11 @@ template class pedersen_hash_refactor { // (i.e. 
simplify the name as we no longer have a need for `hash_single`) static Fq hash_multiple(const std::vector& inputs, size_t hash_index = 0, - const generator_data* generator_context = nullptr); + const generator_data* generator_context = generator_data::get_default_generators()); static Fq hash(const std::vector& inputs, size_t hash_index = 0, - const generator_data* generator_context = nullptr); + const generator_data* generator_context = generator_data::get_default_generators()); }; extern template class pedersen_hash_refactor; diff --git a/barretenberg/cpp/src/barretenberg/ecc/curves/grumpkin/grumpkin.cpp b/barretenberg/cpp/src/barretenberg/ecc/curves/grumpkin/grumpkin.cpp index 130822772f1..caa7f871fbc 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/curves/grumpkin/grumpkin.cpp +++ b/barretenberg/cpp/src/barretenberg/ecc/curves/grumpkin/grumpkin.cpp @@ -3,26 +3,20 @@ namespace grumpkin { namespace { -<<<<<<< HEAD:circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/curves/grumpkin/grumpkin.cpp -// constexpr size_t max_num_generators = 1 << 10; -// static std::array generators; -// static bool init_generators = false; -======= constexpr size_t max_num_generators = 1 << 10; // NOLINTNEXTLINE TODO(@zac-williamson) #1806 get rid of need for these static variables in Pedersen refactor! static std::array generators; // NOLINTNEXTLINE TODO(@zac-williamson) #1806 get rid of need for these static variables in Pedersen refactor! 
static bool init_generators = false; ->>>>>>> origin/master:barretenberg/cpp/src/barretenberg/ecc/curves/grumpkin/grumpkin.cpp } // namespace -// g1::affine_element get_generator(const size_t generator_index) -// { -// if (!init_generators) { -// generators = g1::derive_generators(); -// init_generators = true; -// } -// ASSERT(generator_index < max_num_generators); -// return generators[generator_index]; -// } +g1::affine_element get_generator(const size_t generator_index) +{ + if (!init_generators) { + generators = g1::derive_generators(); + init_generators = true; + } + ASSERT(generator_index < max_num_generators); + return generators[generator_index]; +} } // namespace grumpkin \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/ecc/curves/grumpkin/grumpkin.hpp b/barretenberg/cpp/src/barretenberg/ecc/curves/grumpkin/grumpkin.hpp index 3598c36f795..0bad58a8d51 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/curves/grumpkin/grumpkin.hpp +++ b/barretenberg/cpp/src/barretenberg/ecc/curves/grumpkin/grumpkin.hpp @@ -30,11 +30,8 @@ struct GrumpkinG1Params { }; using g1 = barretenberg::group; -<<<<<<< HEAD:circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/curves/grumpkin/grumpkin.hpp -======= g1::affine_element get_generator(size_t generator_index); ->>>>>>> origin/master:barretenberg/cpp/src/barretenberg/ecc/curves/grumpkin/grumpkin.hpp }; // namespace grumpkin namespace curve { diff --git a/barretenberg/cpp/src/barretenberg/ecc/curves/secp256k1/secp256k1.cpp b/barretenberg/cpp/src/barretenberg/ecc/curves/secp256k1/secp256k1.cpp index eb09bbb0356..b2f5fa4f782 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/curves/secp256k1/secp256k1.cpp +++ b/barretenberg/cpp/src/barretenberg/ecc/curves/secp256k1/secp256k1.cpp @@ -1,18 +1,6 @@ #include "./secp256k1.hpp" namespace secp256k1 { -<<<<<<< HEAD:circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/curves/secp256k1/secp256k1.cpp -======= -namespace { - -constexpr size_t max_num_generators = 1 << 
10; -// NOLINTNEXTLINE TODO(@zac-williamson) #1806 get rid of need for these static variables in Pedersen refactor! -static std::array generators; -// NOLINTNEXTLINE TODO(@zac-williamson) #1806 get rid of need for these static variables in Pedersen refactor! -static bool init_generators = false; - -} // namespace ->>>>>>> origin/master:barretenberg/cpp/src/barretenberg/ecc/curves/secp256k1/secp256k1.cpp /* In case where prime bit length is 256, the method produces a generator, but only with one less bit of randomness than the maximum possible, as the y coordinate in that case is determined by the x-coordinate. */ diff --git a/barretenberg/cpp/src/barretenberg/ecc/curves/secp256k1/secp256k1.hpp b/barretenberg/cpp/src/barretenberg/ecc/curves/secp256k1/secp256k1.hpp index 5639b1ed13e..a2de49cd4c9 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/curves/secp256k1/secp256k1.hpp +++ b/barretenberg/cpp/src/barretenberg/ecc/curves/secp256k1/secp256k1.hpp @@ -118,15 +118,9 @@ struct Secp256k1G1Params { fq(0x9C47D08FFB10D4B8UL, 0xFD17B448A6855419UL, 0x5DA4FBFC0E1108A8UL, 0x483ADA7726A3C465UL).to_montgomery_form(); }; -<<<<<<< HEAD:circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/curves/secp256k1/secp256k1.hpp -typedef barretenberg:: - group, barretenberg::field, Secp256k1G1Params> - g1; -======= using g1 = barretenberg:: group, barretenberg::field, Secp256k1G1Params>; g1::affine_element get_generator(size_t generator_index); ->>>>>>> origin/master:barretenberg/cpp/src/barretenberg/ecc/curves/secp256k1/secp256k1.hpp } // namespace secp256k1 namespace curve { diff --git a/barretenberg/cpp/src/barretenberg/ecc/curves/secp256r1/secp256r1.cpp b/barretenberg/cpp/src/barretenberg/ecc/curves/secp256r1/secp256r1.cpp index b3143fbe001..46875462194 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/curves/secp256r1/secp256r1.cpp +++ b/barretenberg/cpp/src/barretenberg/ecc/curves/secp256r1/secp256r1.cpp @@ -1,18 +1,6 @@ #include "./secp256r1.hpp" namespace secp256r1 { -<<<<<<< 
HEAD:circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/curves/secp256r1/secp256r1.cpp -======= -namespace { - -constexpr size_t max_num_generators = 1 << 10; -// NOLINTNEXTLINE TODO(@zac-williamson) #1806 get rid of need for these static variables in Pedersen refactor! -static std::array generators; -// NOLINTNEXTLINE TODO(@zac-williamson) #1806 get rid of need for these static variables in Pedersen refactor! -static bool init_generators = false; - -} // namespace ->>>>>>> origin/master:barretenberg/cpp/src/barretenberg/ecc/curves/secp256r1/secp256r1.cpp /* In case where prime bit length is 256, the method produces a generator, but only with one less bit of randomness than the maximum possible, as the y coordinate in that case is determined by the x-coordinate. */ diff --git a/barretenberg/cpp/src/barretenberg/ecc/curves/secp256r1/secp256r1.hpp b/barretenberg/cpp/src/barretenberg/ecc/curves/secp256r1/secp256r1.hpp index 11dd1a6da79..e7bf6422c95 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/curves/secp256r1/secp256r1.hpp +++ b/barretenberg/cpp/src/barretenberg/ecc/curves/secp256r1/secp256r1.hpp @@ -104,15 +104,9 @@ struct Secp256r1G1Params { fq(0xCBB6406837BF51F5, 0x2BCE33576B315ECE, 0x8EE7EB4A7C0F9E16, 0x4FE342E2FE1A7F9B).to_montgomery_form(); }; -<<<<<<< HEAD:circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/curves/secp256r1/secp256r1.hpp -typedef barretenberg:: - group, barretenberg::field, Secp256r1G1Params> - g1; -======= using g1 = barretenberg:: group, barretenberg::field, Secp256r1G1Params>; g1::affine_element get_generator(size_t generator_index); ->>>>>>> origin/master:barretenberg/cpp/src/barretenberg/ecc/curves/secp256r1/secp256r1.hpp } // namespace secp256r1 namespace curve { diff --git a/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element.hpp b/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element.hpp index 8b525064567..9deba2d3569 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element.hpp +++ 
b/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element.hpp @@ -77,21 +77,15 @@ template class alignas(64) affine_el */ static affine_element random_element(numeric::random::Engine* engine = nullptr) noexcept; - static std::optional derive_from_x_coordinate(const Fq& x, bool sign_bit) noexcept; - /** * @brief Hash a seed value to curve. * * @return A point on the curve corresponding to the given seed */ -<<<<<<< HEAD:circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element.hpp - static affine_element hash_to_curve(const uint64_t seed) noexcept; -======= template > static affine_element hash_to_curve(uint64_t seed) noexcept; template > ->>>>>>> origin/master:barretenberg/cpp/src/barretenberg/ecc/groups/affine_element.hpp static affine_element hash_to_curve(const std::vector& seed) noexcept; constexpr bool operator==(const affine_element& other) const noexcept; diff --git a/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element_impl.hpp b/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element_impl.hpp index 951e5e7a7de..095b30b3d82 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element_impl.hpp +++ b/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element_impl.hpp @@ -201,7 +201,6 @@ constexpr bool affine_element::operator>(const affine_element& other) } template -<<<<<<< HEAD:circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element_impl.hpp constexpr std::optional> affine_element::derive_from_x_coordinate( const Fq& x, bool sign_bit) noexcept { @@ -221,13 +220,9 @@ constexpr std::optional> affine_element::de } template -affine_element affine_element::hash_to_curve(uint64_t seed) noexcept -{ -======= template affine_element affine_element::hash_to_curve(uint64_t seed) noexcept { ->>>>>>> origin/master:barretenberg/cpp/src/barretenberg/ecc/groups/affine_element_impl.hpp static_assert(static_cast(T::can_hash_to_curve)); Fq input(seed, 0, 0, 0); @@ -251,6 +246,7 @@ affine_element 
affine_element::hash_to_curve(uint64_t seed } template +template affine_element affine_element::hash_to_curve(const std::vector& seed) noexcept { std::vector target_seed(seed); @@ -302,28 +298,21 @@ affine_element affine_element::random_element(numeric::ran engine = &numeric::random::get_engine(); } - Fq yy; Fq x; Fq y; while (true) { // Sample a random x-coordinate and check if it satisfies curve equation. x = Fq::random_element(engine); // Negate the y-coordinate based on a randomly sampled bit. -<<<<<<< HEAD:circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element_impl.hpp bool sign_bit = (engine->get_random_uint8() & 1) != 0; std::optional result = derive_from_x_coordinate(x, sign_bit); -======= - bool random_bit = (engine->get_random_uint8() & 1) != 0; - if (random_bit) { - y = -y; - } ->>>>>>> origin/master:barretenberg/cpp/src/barretenberg/ecc/groups/affine_element_impl.hpp if (result.has_value()) { return result.value(); } } + throw_or_abort("affine_element::random_element error"); return affine_element(x, y); } diff --git a/barretenberg/cpp/src/barretenberg/ecc/groups/element_impl.hpp b/barretenberg/cpp/src/barretenberg/ecc/groups/element_impl.hpp index cda68514434..2deae832391 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/groups/element_impl.hpp +++ b/barretenberg/cpp/src/barretenberg/ecc/groups/element_impl.hpp @@ -1,953 +1,944 @@ #pragma once #include "barretenberg/ecc/groups/element.hpp" - -<<<<<<< HEAD:circuits/cpp/barretenberg/cpp/src/barretenberg/ecc/groups/element_impl.hpp #include "element.hpp" - namespace barretenberg + +// NOLINTBEGIN(readability-implicit-bool-conversion, cppcoreguidelines-avoid-c-arrays) +namespace barretenberg::group_elements { +template +constexpr element::element(const Fq& a, const Fq& b, const Fq& c) noexcept + : x(a) + , y(b) + , z(c) +{} + +template +constexpr element::element(const element& other) noexcept + : x(other.x) + , y(other.y) + , z(other.z) +{} + +template +constexpr 
element::element(element&& other) noexcept + : x(other.x) + , y(other.y) + , z(other.z) +{} + +template +constexpr element::element(const affine_element& other) noexcept + : x(other.x) + , y(other.y) + , z(Fq::one()) +{} + +template +constexpr element& element::operator=(const element& other) noexcept { - namespace group_elements { -======= - // NOLINTBEGIN(readability-implicit-bool-conversion, cppcoreguidelines-avoid-c-arrays) - namespace barretenberg::group_elements { ->>>>>>> origin/master:barretenberg/cpp/src/barretenberg/ecc/groups/element_impl.hpp - template - constexpr element::element(const Fq& a, const Fq& b, const Fq& c) noexcept - : x(a) - , y(b) - , z(c) - {} - - template - constexpr element::element(const element& other) noexcept - : x(other.x) - , y(other.y) - , z(other.z) - {} - - template - constexpr element::element(element&& other) noexcept - : x(other.x) - , y(other.y) - , z(other.z) - {} - - template - constexpr element::element(const affine_element& other) noexcept - : x(other.x) - , y(other.y) - , z(Fq::one()) - {} - - template - constexpr element& element::operator=(const element& other) noexcept - { - if (this == &other) { - return *this; - } - x = other.x; - y = other.y; - z = other.z; + if (this == &other) { return *this; } + x = other.x; + y = other.y; + z = other.z; + return *this; +} + +template +constexpr element& element::operator=(element&& other) noexcept +{ + x = other.x; + y = other.y; + z = other.z; + return *this; +} - template - constexpr element& element::operator=(element&& other) noexcept - { - x = other.x; - y = other.y; - z = other.z; - return *this; +template constexpr element::operator affine_element() const noexcept +{ + if (is_point_at_infinity()) { + affine_element result; + result.x = Fq(0); + result.y = Fq(0); + result.self_set_infinity(); + return result; + } + Fq z_inv = z.invert(); + Fq zz_inv = z_inv.sqr(); + Fq zzz_inv = zz_inv * z_inv; + affine_element result(x * zz_inv, y * zzz_inv); + if 
(is_point_at_infinity()) { + result.self_set_infinity(); } + return result; +} - template - constexpr element::operator affine_element() const noexcept - { +template constexpr void element::self_dbl() noexcept +{ + if constexpr (Fq::modulus.data[3] >= 0x4000000000000000ULL) { if (is_point_at_infinity()) { - affine_element result; - result.x = Fq(0); - result.y = Fq(0); - result.self_set_infinity(); - return result; + return; } - Fq z_inv = z.invert(); - Fq zz_inv = z_inv.sqr(); - Fq zzz_inv = zz_inv * z_inv; - affine_element result(x * zz_inv, y * zzz_inv); - if (is_point_at_infinity()) { - result.self_set_infinity(); + } else { + if (x.is_msb_set_word()) { + return; } - return result; } - template constexpr void element::self_dbl() noexcept - { - if constexpr (Fq::modulus.data[3] >= 0x4000000000000000ULL) { - if (is_point_at_infinity()) { - return; - } - } else { - if (x.is_msb_set_word()) { - return; - } - } - - // T0 = x*x - Fq T0 = x.sqr(); - - // T1 = y*y - Fq T1 = y.sqr(); + // T0 = x*x + Fq T0 = x.sqr(); - // T2 = T2*T1 = y*y*y*y - Fq T2 = T1.sqr(); + // T1 = y*y + Fq T1 = y.sqr(); - // T1 = T1 + x = x + y*y - T1 += x; + // T2 = T2*T1 = y*y*y*y + Fq T2 = T1.sqr(); - // T1 = T1 * T1 - T1.self_sqr(); + // T1 = T1 + x = x + y*y + T1 += x; - // T3 = T0 + T2 = xx + y*y*y*y - Fq T3 = T0 + T2; + // T1 = T1 * T1 + T1.self_sqr(); - // T1 = T1 - T3 = x*x + y*y*y*y + 2*x*x*y*y*y*y - x*x - y*y*y*y = 2*x*x*y*y*y*y = 2*S - T1 -= T3; + // T3 = T0 + T2 = xx + y*y*y*y + Fq T3 = T0 + T2; - // T1 = 2T1 = 4*S - T1 += T1; + // T1 = T1 - T3 = x*x + y*y*y*y + 2*x*x*y*y*y*y - x*x - y*y*y*y = 2*x*x*y*y*y*y = 2*S + T1 -= T3; - // T3 = 3T0 - T3 = T0 + T0; - T3 += T0; - if constexpr (T::has_a) { - T3 += (T::a * z.sqr().sqr()); - } + // T1 = 2T1 = 4*S + T1 += T1; - // z2 = 2*y*z - z += z; - z *= y; + // T3 = 3T0 + T3 = T0 + T0; + T3 += T0; + if constexpr (T::has_a) { + T3 += (T::a * z.sqr().sqr()); + } - // T0 = 2T1 - T0 = T1 + T1; + // z2 = 2*y*z + z += z; + z *= y; - // x2 = T3*T3 - x 
= T3.sqr(); + // T0 = 2T1 + T0 = T1 + T1; - // x2 = x2 - 2T1 - x -= T0; + // x2 = T3*T3 + x = T3.sqr(); - // T2 = 8T2 - T2 += T2; - T2 += T2; - T2 += T2; + // x2 = x2 - 2T1 + x -= T0; - // y2 = T1 - x2 - y = T1 - x; + // T2 = 8T2 + T2 += T2; + T2 += T2; + T2 += T2; - // y2 = y2 * T3 - T2 - y *= T3; - y -= T2; - } + // y2 = T1 - x2 + y = T1 - x; - template constexpr element element::dbl() const noexcept - { - element result(*this); - result.self_dbl(); - return result; - } + // y2 = y2 * T3 - T2 + y *= T3; + y -= T2; +} - template - constexpr void element::self_mixed_add_or_sub(const affine_element& other, - const uint64_t predicate) noexcept - { - if constexpr (Fq::modulus.data[3] >= 0x4000000000000000ULL) { - if (is_point_at_infinity()) { +template constexpr element element::dbl() const noexcept +{ + element result(*this); + result.self_dbl(); + return result; +} + +template +constexpr void element::self_mixed_add_or_sub(const affine_element& other, + const uint64_t predicate) noexcept +{ + if constexpr (Fq::modulus.data[3] >= 0x4000000000000000ULL) { + if (is_point_at_infinity()) { + conditional_negate_affine(other, *(affine_element*)this, predicate); // NOLINT + z = Fq::one(); + return; + } + } else { + const bool edge_case_trigger = x.is_msb_set() || other.x.is_msb_set(); + if (edge_case_trigger) { + if (x.is_msb_set()) { conditional_negate_affine(other, *(affine_element*)this, predicate); // NOLINT z = Fq::one(); - return; - } - } else { - const bool edge_case_trigger = x.is_msb_set() || other.x.is_msb_set(); - if (edge_case_trigger) { - if (x.is_msb_set()) { - conditional_negate_affine(other, *(affine_element*)this, predicate); // NOLINT - z = Fq::one(); - } - return; } + return; } + } - // T0 = z1.z1 - Fq T0 = z.sqr(); - - // T1 = x2.t0 - x1 = x2.z1.z1 - x1 - Fq T1 = other.x * T0; - T1 -= x; - - // T2 = T0.z1 = z1.z1.z1 - // T2 = T2.y2 - y1 = y2.z1.z1.z1 - y1 - Fq T2 = z * T0; - T2 *= other.y; - T2.self_conditional_negate(predicate); - T2 -= y; - - if 
(__builtin_expect(T1.is_zero(), 0)) { - if (T2.is_zero()) { - // y2 equals y1, x2 equals x1, double x1 - self_dbl(); - return; - } - self_set_infinity(); + // T0 = z1.z1 + Fq T0 = z.sqr(); + + // T1 = x2.t0 - x1 = x2.z1.z1 - x1 + Fq T1 = other.x * T0; + T1 -= x; + + // T2 = T0.z1 = z1.z1.z1 + // T2 = T2.y2 - y1 = y2.z1.z1.z1 - y1 + Fq T2 = z * T0; + T2 *= other.y; + T2.self_conditional_negate(predicate); + T2 -= y; + + if (__builtin_expect(T1.is_zero(), 0)) { + if (T2.is_zero()) { + // y2 equals y1, x2 equals x1, double x1 + self_dbl(); return; } + self_set_infinity(); + return; + } - // T2 = 2T2 = 2(y2.z1.z1.z1 - y1) = R - // z3 = z1 + H - T2 += T2; - z += T1; + // T2 = 2T2 = 2(y2.z1.z1.z1 - y1) = R + // z3 = z1 + H + T2 += T2; + z += T1; - // T3 = T1*T1 = HH - Fq T3 = T1.sqr(); + // T3 = T1*T1 = HH + Fq T3 = T1.sqr(); - // z3 = z3 - z1z1 - HH - T0 += T3; + // z3 = z3 - z1z1 - HH + T0 += T3; - // z3 = (z1 + H)*(z1 + H) - z.self_sqr(); - z -= T0; + // z3 = (z1 + H)*(z1 + H) + z.self_sqr(); + z -= T0; - // T3 = 4HH - T3 += T3; - T3 += T3; + // T3 = 4HH + T3 += T3; + T3 += T3; - // T1 = T1*T3 = 4HHH - T1 *= T3; + // T1 = T1*T3 = 4HHH + T1 *= T3; - // T3 = T3 * x1 = 4HH*x1 - T3 *= x; + // T3 = T3 * x1 = 4HH*x1 + T3 *= x; - // T0 = 2T3 - T0 = T3 + T3; + // T0 = 2T3 + T0 = T3 + T3; - // T0 = T0 + T1 = 2(4HH*x1) + 4HHH - T0 += T1; - x = T2.sqr(); + // T0 = T0 + T1 = 2(4HH*x1) + 4HHH + T0 += T1; + x = T2.sqr(); - // x3 = x3 - T0 = R*R - 8HH*x1 -4HHH - x -= T0; + // x3 = x3 - T0 = R*R - 8HH*x1 -4HHH + x -= T0; - // T3 = T3 - x3 = 4HH*x1 - x3 - T3 -= x; + // T3 = T3 - x3 = 4HH*x1 - x3 + T3 -= x; - T1 *= y; - T1 += T1; + T1 *= y; + T1 += T1; - // T3 = T2 * T3 = R*(4HH*x1 - x3) - T3 *= T2; + // T3 = T2 * T3 = R*(4HH*x1 - x3) + T3 *= T2; - // y3 = T3 - T1 - y = T3 - T1; - } + // y3 = T3 - T1 + y = T3 - T1; +} - template - constexpr element element::operator+=(const affine_element& other) noexcept - { - if constexpr (Fq::modulus.data[3] >= 0x4000000000000000ULL) { - if 
(is_point_at_infinity()) { +template +constexpr element element::operator+=(const affine_element& other) noexcept +{ + if constexpr (Fq::modulus.data[3] >= 0x4000000000000000ULL) { + if (is_point_at_infinity()) { + *this = { other.x, other.y, Fq::one() }; + return *this; + } + } else { + const bool edge_case_trigger = x.is_msb_set() || other.x.is_msb_set(); + if (edge_case_trigger) { + if (x.is_msb_set()) { *this = { other.x, other.y, Fq::one() }; - return *this; - } - } else { - const bool edge_case_trigger = x.is_msb_set() || other.x.is_msb_set(); - if (edge_case_trigger) { - if (x.is_msb_set()) { - *this = { other.x, other.y, Fq::one() }; - } - return *this; } + return *this; } + } - // T0 = z1.z1 - Fq T0 = z.sqr(); + // T0 = z1.z1 + Fq T0 = z.sqr(); - // T1 = x2.t0 - x1 = x2.z1.z1 - x1 - Fq T1 = other.x * T0; - T1 -= x; + // T1 = x2.t0 - x1 = x2.z1.z1 - x1 + Fq T1 = other.x * T0; + T1 -= x; - // T2 = T0.z1 = z1.z1.z1 - // T2 = T2.y2 - y1 = y2.z1.z1.z1 - y1 - Fq T2 = z * T0; - T2 *= other.y; - T2 -= y; + // T2 = T0.z1 = z1.z1.z1 + // T2 = T2.y2 - y1 = y2.z1.z1.z1 - y1 + Fq T2 = z * T0; + T2 *= other.y; + T2 -= y; - if (__builtin_expect(T1.is_zero(), 0)) { - if (T2.is_zero()) { - self_dbl(); - return *this; - } - self_set_infinity(); + if (__builtin_expect(T1.is_zero(), 0)) { + if (T2.is_zero()) { + self_dbl(); return *this; } + self_set_infinity(); + return *this; + } - // T2 = 2T2 = 2(y2.z1.z1.z1 - y1) = R - // z3 = z1 + H - T2 += T2; - z += T1; + // T2 = 2T2 = 2(y2.z1.z1.z1 - y1) = R + // z3 = z1 + H + T2 += T2; + z += T1; - // T3 = T1*T1 = HH - Fq T3 = T1.sqr(); + // T3 = T1*T1 = HH + Fq T3 = T1.sqr(); - // z3 = z3 - z1z1 - HH - T0 += T3; + // z3 = z3 - z1z1 - HH + T0 += T3; - // z3 = (z1 + H)*(z1 + H) - z.self_sqr(); - z -= T0; + // z3 = (z1 + H)*(z1 + H) + z.self_sqr(); + z -= T0; - // T3 = 4HH - T3 += T3; - T3 += T3; + // T3 = 4HH + T3 += T3; + T3 += T3; - // T1 = T1*T3 = 4HHH - T1 *= T3; + // T1 = T1*T3 = 4HHH + T1 *= T3; - // T3 = T3 * x1 = 4HH*x1 - T3 
*= x; + // T3 = T3 * x1 = 4HH*x1 + T3 *= x; - // T0 = 2T3 - T0 = T3 + T3; + // T0 = 2T3 + T0 = T3 + T3; - // T0 = T0 + T1 = 2(4HH*x1) + 4HHH - T0 += T1; - x = T2.sqr(); + // T0 = T0 + T1 = 2(4HH*x1) + 4HHH + T0 += T1; + x = T2.sqr(); - // x3 = x3 - T0 = R*R - 8HH*x1 -4HHH - x -= T0; + // x3 = x3 - T0 = R*R - 8HH*x1 -4HHH + x -= T0; - // T3 = T3 - x3 = 4HH*x1 - x3 - T3 -= x; + // T3 = T3 - x3 = 4HH*x1 - x3 + T3 -= x; - T1 *= y; - T1 += T1; + T1 *= y; + T1 += T1; - // T3 = T2 * T3 = R*(4HH*x1 - x3) - T3 *= T2; + // T3 = T2 * T3 = R*(4HH*x1 - x3) + T3 *= T2; - // y3 = T3 - T1 - y = T3 - T1; - return *this; - } + // y3 = T3 - T1 + y = T3 - T1; + return *this; +} + +template +constexpr element element::operator+(const affine_element& other) const noexcept +{ + element result(*this); + return (result += other); +} - template - constexpr element element::operator+(const affine_element& other) const noexcept - { - element result(*this); - return (result += other); - } - - template - constexpr element element::operator-=(const affine_element& other) noexcept - { - const affine_element to_add{ other.x, -other.y }; - return operator+=(to_add); - } - - template - constexpr element element::operator-(const affine_element& other) const noexcept - { - element result(*this); - return (result -= other); - } - - template - constexpr element element::operator+=(const element& other) noexcept - { - if constexpr (Fq::modulus.data[3] >= 0x4000000000000000ULL) { - bool p1_zero = is_point_at_infinity(); - bool p2_zero = other.is_point_at_infinity(); - if (__builtin_expect((p1_zero || p2_zero), 0)) { - if (p1_zero && !p2_zero) { - *this = other; - return *this; - } - if (p2_zero && !p1_zero) { - return *this; - } - self_set_infinity(); +template +constexpr element element::operator-=(const affine_element& other) noexcept +{ + const affine_element to_add{ other.x, -other.y }; + return operator+=(to_add); +} + +template +constexpr element element::operator-(const affine_element& other) const 
noexcept +{ + element result(*this); + return (result -= other); +} + +template +constexpr element element::operator+=(const element& other) noexcept +{ + if constexpr (Fq::modulus.data[3] >= 0x4000000000000000ULL) { + bool p1_zero = is_point_at_infinity(); + bool p2_zero = other.is_point_at_infinity(); + if (__builtin_expect((p1_zero || p2_zero), 0)) { + if (p1_zero && !p2_zero) { + *this = other; return *this; } - } else { - bool p1_zero = x.is_msb_set(); - bool p2_zero = other.x.is_msb_set(); - if (__builtin_expect((p1_zero || p2_zero), 0)) { - if (p1_zero && !p2_zero) { - *this = other; - return *this; - } - if (p2_zero && !p1_zero) { - return *this; - } - self_set_infinity(); + if (p2_zero && !p1_zero) { return *this; } + self_set_infinity(); + return *this; } - Fq Z1Z1(z.sqr()); - Fq Z2Z2(other.z.sqr()); - Fq S2(Z1Z1 * z); - Fq U2(Z1Z1 * other.x); - S2 *= other.y; - Fq U1(Z2Z2 * x); - Fq S1(Z2Z2 * other.z); - S1 *= y; - - Fq F(S2 - S1); - - Fq H(U2 - U1); - - if (__builtin_expect(H.is_zero(), 0)) { - if (F.is_zero()) { - self_dbl(); + } else { + bool p1_zero = x.is_msb_set(); + bool p2_zero = other.x.is_msb_set(); + if (__builtin_expect((p1_zero || p2_zero), 0)) { + if (p1_zero && !p2_zero) { + *this = other; + return *this; + } + if (p2_zero && !p1_zero) { return *this; } self_set_infinity(); return *this; } + } + Fq Z1Z1(z.sqr()); + Fq Z2Z2(other.z.sqr()); + Fq S2(Z1Z1 * z); + Fq U2(Z1Z1 * other.x); + S2 *= other.y; + Fq U1(Z2Z2 * x); + Fq S1(Z2Z2 * other.z); + S1 *= y; + + Fq F(S2 - S1); + + Fq H(U2 - U1); + + if (__builtin_expect(H.is_zero(), 0)) { + if (F.is_zero()) { + self_dbl(); + return *this; + } + self_set_infinity(); + return *this; + } - F += F; + F += F; - Fq I(H + H); - I.self_sqr(); + Fq I(H + H); + I.self_sqr(); - Fq J(H * I); + Fq J(H * I); - U1 *= I; + U1 *= I; - U2 = U1 + U1; - U2 += J; + U2 = U1 + U1; + U2 += J; - x = F.sqr(); + x = F.sqr(); - x -= U2; + x -= U2; - J *= S1; - J += J; + J *= S1; + J += J; - y = U1 - x; + y = U1 - x; - y *= 
F; + y *= F; - y -= J; + y -= J; - z += other.z; + z += other.z; - Z1Z1 += Z2Z2; + Z1Z1 += Z2Z2; - z.self_sqr(); - z -= Z1Z1; - z *= H; - return *this; - } + z.self_sqr(); + z -= Z1Z1; + z *= H; + return *this; +} - template - constexpr element element::operator+(const element& other) const noexcept - { - element result(*this); - return (result += other); - } +template +constexpr element element::operator+(const element& other) const noexcept +{ + element result(*this); + return (result += other); +} - template - constexpr element element::operator-=(const element& other) noexcept - { - const element to_add{ other.x, -other.y, other.z }; - return operator+=(to_add); - } +template +constexpr element element::operator-=(const element& other) noexcept +{ + const element to_add{ other.x, -other.y, other.z }; + return operator+=(to_add); +} - template - constexpr element element::operator-(const element& other) const noexcept - { - element result(*this); - return (result -= other); - } +template +constexpr element element::operator-(const element& other) const noexcept +{ + element result(*this); + return (result -= other); +} - template constexpr element element::operator-() const noexcept - { - return { x, -y, z }; - } +template constexpr element element::operator-() const noexcept +{ + return { x, -y, z }; +} - template - element element::operator*(const Fr& exponent) const noexcept - { - if constexpr (T::USE_ENDOMORPHISM) { - return mul_with_endomorphism(exponent); - } - return mul_without_endomorphism(exponent); +template +element element::operator*(const Fr& exponent) const noexcept +{ + if constexpr (T::USE_ENDOMORPHISM) { + return mul_with_endomorphism(exponent); } + return mul_without_endomorphism(exponent); +} - template - element element::operator*=(const Fr& exponent) noexcept - { - *this = operator*(exponent); - return *this; - } +template element element::operator*=(const Fr& exponent) noexcept +{ + *this = operator*(exponent); + return *this; +} - 
template constexpr element element::normalize() const noexcept - { - const affine_element converted = *this; - return element(converted); - } +template constexpr element element::normalize() const noexcept +{ + const affine_element converted = *this; + return element(converted); +} - template element element::infinity() - { - element e; - e.self_set_infinity(); - return e; - } +template element element::infinity() +{ + element e; + e.self_set_infinity(); + return e; +} - template - constexpr element element::set_infinity() const noexcept - { - element result(*this); - result.self_set_infinity(); - return result; - } +template constexpr element element::set_infinity() const noexcept +{ + element result(*this); + result.self_set_infinity(); + return result; +} - template constexpr void element::self_set_infinity() noexcept - { - if constexpr (Fq::modulus.data[3] >= 0x4000000000000000ULL) { - // We set the value of x equal to modulus to represent inifinty - x.data[0] = Fq::modulus.data[0]; - x.data[1] = Fq::modulus.data[1]; - x.data[2] = Fq::modulus.data[2]; - x.data[3] = Fq::modulus.data[3]; - } else { - x.self_set_msb(); - } +template constexpr void element::self_set_infinity() noexcept +{ + if constexpr (Fq::modulus.data[3] >= 0x4000000000000000ULL) { + // We set the value of x equal to modulus to represent inifinty + x.data[0] = Fq::modulus.data[0]; + x.data[1] = Fq::modulus.data[1]; + x.data[2] = Fq::modulus.data[2]; + x.data[3] = Fq::modulus.data[3]; + } else { + x.self_set_msb(); } +} - template constexpr bool element::is_point_at_infinity() const noexcept - { - if constexpr (Fq::modulus.data[3] >= 0x4000000000000000ULL) { - // We check if the value of x is equal to modulus to represent inifinty - return ((x.data[0] ^ Fq::modulus.data[0]) | (x.data[1] ^ Fq::modulus.data[1]) | - (x.data[2] ^ Fq::modulus.data[2]) | (x.data[3] ^ Fq::modulus.data[3])) == 0; - } else { - return (x.is_msb_set()); - } +template constexpr bool element::is_point_at_infinity() const 
noexcept +{ + if constexpr (Fq::modulus.data[3] >= 0x4000000000000000ULL) { + // We check if the value of x is equal to modulus to represent inifinty + return ((x.data[0] ^ Fq::modulus.data[0]) | (x.data[1] ^ Fq::modulus.data[1]) | + (x.data[2] ^ Fq::modulus.data[2]) | (x.data[3] ^ Fq::modulus.data[3])) == 0; + } else { + return (x.is_msb_set()); } +} - template constexpr bool element::on_curve() const noexcept - { - if (is_point_at_infinity()) { - return true; - } - // We specify the point at inifinity not by (0 \lambda 0), so z should not be 0 - if (z.is_zero()) { - return false; - } - Fq zz = z.sqr(); - Fq zzzz = zz.sqr(); - Fq bz_6 = zzzz * zz * T::b; - if constexpr (T::has_a) { - bz_6 += (x * T::a) * zzzz; - } - Fq xxx = x.sqr() * x + bz_6; - Fq yy = y.sqr(); - return (xxx == yy); +template constexpr bool element::on_curve() const noexcept +{ + if (is_point_at_infinity()) { + return true; + } + // We specify the point at inifinity not by (0 \lambda 0), so z should not be 0 + if (z.is_zero()) { + return false; + } + Fq zz = z.sqr(); + Fq zzzz = zz.sqr(); + Fq bz_6 = zzzz * zz * T::b; + if constexpr (T::has_a) { + bz_6 += (x * T::a) * zzzz; } + Fq xxx = x.sqr() * x + bz_6; + Fq yy = y.sqr(); + return (xxx == yy); +} - template - constexpr bool element::operator==(const element& other) const noexcept - { - // If one of points is not on curve, we have no business comparing them. - if ((!on_curve()) || (!other.on_curve())) { - return false; - } - bool am_infinity = is_point_at_infinity(); - bool is_infinity = other.is_point_at_infinity(); - bool both_infinity = am_infinity && is_infinity; - // If just one is infinity, then they are obviously not equal. 
- if ((!both_infinity) && (am_infinity || is_infinity)) { - return false; - } - const Fq lhs_zz = z.sqr(); - const Fq lhs_zzz = lhs_zz * z; - const Fq rhs_zz = other.z.sqr(); - const Fq rhs_zzz = rhs_zz * other.z; - - const Fq lhs_x = x * rhs_zz; - const Fq lhs_y = y * rhs_zzz; - - const Fq rhs_x = other.x * lhs_zz; - const Fq rhs_y = other.y * lhs_zzz; - return both_infinity || ((lhs_x == rhs_x) && (lhs_y == rhs_y)); - } - - template - element element::random_element(numeric::random::Engine* engine) noexcept - { - if constexpr (T::can_hash_to_curve) { - element result = random_coordinates_on_curve(engine); - result.z = Fq::random_element(engine); - Fq zz = result.z.sqr(); - Fq zzz = zz * result.z; - result.x *= zz; - result.y *= zzz; - return result; - } else { - Fr scalar = Fr::random_element(engine); - return (element{ T::one_x, T::one_y, Fq::one() } * scalar); - } +template +constexpr bool element::operator==(const element& other) const noexcept +{ + // If one of points is not on curve, we have no business comparing them. + if ((!on_curve()) || (!other.on_curve())) { + return false; + } + bool am_infinity = is_point_at_infinity(); + bool is_infinity = other.is_point_at_infinity(); + bool both_infinity = am_infinity && is_infinity; + // If just one is infinity, then they are obviously not equal. 
+ if ((!both_infinity) && (am_infinity || is_infinity)) { + return false; } + const Fq lhs_zz = z.sqr(); + const Fq lhs_zzz = lhs_zz * z; + const Fq rhs_zz = other.z.sqr(); + const Fq rhs_zzz = rhs_zz * other.z; - template - element element::mul_without_endomorphism(const Fr& exponent) const noexcept - { - const uint256_t converted_scalar(exponent); + const Fq lhs_x = x * rhs_zz; + const Fq lhs_y = y * rhs_zzz; - if (converted_scalar == 0) { - element result{ Fq::zero(), Fq::zero(), Fq::zero() }; - result.self_set_infinity(); - return result; - } + const Fq rhs_x = other.x * lhs_zz; + const Fq rhs_y = other.y * lhs_zzz; + return both_infinity || ((lhs_x == rhs_x) && (lhs_y == rhs_y)); +} - element work_element(*this); - const uint64_t maximum_set_bit = converted_scalar.get_msb(); - // This is simpler and doublings of infinity should be fast. We should think if we want to defend against the - // timing leak here (if used with ECDSA it can sometimes lead to private key compromise) - for (uint64_t i = maximum_set_bit - 1; i < maximum_set_bit; --i) { - work_element.self_dbl(); - if (converted_scalar.get_bit(i)) { - work_element += *this; - } - } - return work_element; +template +element element::random_element(numeric::random::Engine* engine) noexcept +{ + if constexpr (T::can_hash_to_curve) { + element result = random_coordinates_on_curve(engine); + result.z = Fq::random_element(engine); + Fq zz = result.z.sqr(); + Fq zzz = zz * result.z; + result.x *= zz; + result.y *= zzz; + return result; + } else { + Fr scalar = Fr::random_element(engine); + return (element{ T::one_x, T::one_y, Fq::one() } * scalar); } +} + +template +element element::mul_without_endomorphism(const Fr& exponent) const noexcept +{ + const uint256_t converted_scalar(exponent); - template - element element::mul_with_endomorphism(const Fr& exponent) const noexcept - { - const Fr converted_scalar = exponent.from_montgomery_form(); + if (converted_scalar == 0) { + element result{ Fq::zero(), Fq::zero(), 
Fq::zero() }; + result.self_set_infinity(); + return result; + } - if (converted_scalar.is_zero()) { - element result{ Fq::zero(), Fq::zero(), Fq::zero() }; - result.self_set_infinity(); - return result; + element work_element(*this); + const uint64_t maximum_set_bit = converted_scalar.get_msb(); + // This is simpler and doublings of infinity should be fast. We should think if we want to defend against the + // timing leak here (if used with ECDSA it can sometimes lead to private key compromise) + for (uint64_t i = maximum_set_bit - 1; i < maximum_set_bit; --i) { + work_element.self_dbl(); + if (converted_scalar.get_bit(i)) { + work_element += *this; } + } + return work_element; +} - constexpr size_t lookup_size = 8; - constexpr size_t num_rounds = 32; - constexpr size_t num_wnaf_bits = 4; - std::array lookup_table; +template +element element::mul_with_endomorphism(const Fr& exponent) const noexcept +{ + const Fr converted_scalar = exponent.from_montgomery_form(); - element d2 = element(*this); - d2.self_dbl(); - lookup_table[0] = element(*this); - for (size_t i = 1; i < lookup_size; ++i) { - lookup_table[i] = lookup_table[i - 1] + d2; - } + if (converted_scalar.is_zero()) { + element result{ Fq::zero(), Fq::zero(), Fq::zero() }; + result.self_set_infinity(); + return result; + } - uint64_t wnaf_table[num_rounds * 2]; - Fr endo_scalar; - Fr::split_into_endomorphism_scalars(converted_scalar, endo_scalar, *(Fr*)&endo_scalar.data[2]); // NOLINT + constexpr size_t lookup_size = 8; + constexpr size_t num_rounds = 32; + constexpr size_t num_wnaf_bits = 4; + std::array lookup_table; - bool skew = false; - bool endo_skew = false; + element d2 = element(*this); + d2.self_dbl(); + lookup_table[0] = element(*this); + for (size_t i = 1; i < lookup_size; ++i) { + lookup_table[i] = lookup_table[i - 1] + d2; + } - wnaf::fixed_wnaf(&endo_scalar.data[0], &wnaf_table[0], skew, 0, 2, num_wnaf_bits); - wnaf::fixed_wnaf(&endo_scalar.data[2], &wnaf_table[1], endo_skew, 0, 2, 
num_wnaf_bits); + uint64_t wnaf_table[num_rounds * 2]; + Fr endo_scalar; + Fr::split_into_endomorphism_scalars(converted_scalar, endo_scalar, *(Fr*)&endo_scalar.data[2]); // NOLINT - element work_element{ T::one_x, T::one_y, Fq::one() }; - work_element.self_set_infinity(); + bool skew = false; + bool endo_skew = false; - uint64_t wnaf_entry = 0; - uint64_t index = 0; - bool sign = false; - Fq beta = Fq::cube_root_of_unity(); + wnaf::fixed_wnaf(&endo_scalar.data[0], &wnaf_table[0], skew, 0, 2, num_wnaf_bits); + wnaf::fixed_wnaf(&endo_scalar.data[2], &wnaf_table[1], endo_skew, 0, 2, num_wnaf_bits); - for (size_t i = 0; i < num_rounds * 2; ++i) { - wnaf_entry = wnaf_table[i]; - index = wnaf_entry & 0x0fffffffU; - sign = static_cast((wnaf_entry >> 31) & 1); - const bool is_odd = ((i & 1) == 1); - auto to_add = lookup_table[static_cast(index)]; - to_add.y.self_conditional_negate(sign ^ is_odd); - if (is_odd) { - to_add.x *= beta; - } - work_element += to_add; + element work_element{ T::one_x, T::one_y, Fq::one() }; + work_element.self_set_infinity(); - if (i != ((2 * num_rounds) - 1) && is_odd) { - for (size_t j = 0; j < 4; ++j) { - work_element.self_dbl(); - } - } + uint64_t wnaf_entry = 0; + uint64_t index = 0; + bool sign = false; + Fq beta = Fq::cube_root_of_unity(); + + for (size_t i = 0; i < num_rounds * 2; ++i) { + wnaf_entry = wnaf_table[i]; + index = wnaf_entry & 0x0fffffffU; + sign = static_cast((wnaf_entry >> 31) & 1); + const bool is_odd = ((i & 1) == 1); + auto to_add = lookup_table[static_cast(index)]; + to_add.y.self_conditional_negate(sign ^ is_odd); + if (is_odd) { + to_add.x *= beta; } + work_element += to_add; - auto temporary = -lookup_table[0]; - if (skew) { - work_element += temporary; + if (i != ((2 * num_rounds) - 1) && is_odd) { + for (size_t j = 0; j < 4; ++j) { + work_element.self_dbl(); + } } + } - temporary = { lookup_table[0].x * beta, lookup_table[0].y, lookup_table[0].z }; + auto temporary = -lookup_table[0]; + if (skew) { + work_element 
+= temporary; + } - if (endo_skew) { - work_element += temporary; - } + temporary = { lookup_table[0].x * beta, lookup_table[0].y, lookup_table[0].z }; - return work_element; + if (endo_skew) { + work_element += temporary; } - template - std::vector> element::batch_mul_with_endomorphism( - const std::vector>& points, const Fr& exponent) noexcept - { - typedef affine_element affine_element; - const size_t num_points = points.size(); - std::vector scratch_space(num_points); + return work_element; +} - // we can mutate rhs but NOT lhs! - // output is stored in rhs - const auto batch_affine_add = [num_points, &scratch_space](const affine_element* lhs, affine_element* rhs) { - Fq batch_inversion_accumulator = Fq::one(); +template +std::vector> element::batch_mul_with_endomorphism( + const std::vector>& points, const Fr& exponent) noexcept +{ + typedef affine_element affine_element; + const size_t num_points = points.size(); + std::vector scratch_space(num_points); - for (size_t i = 0; i < num_points; i += 1) { - scratch_space[i] = lhs[i].x + rhs[i].x; // x2 + x1 - rhs[i].x -= lhs[i].x; // x2 - x1 - rhs[i].y -= lhs[i].y; // y2 - y1 - rhs[i].y *= batch_inversion_accumulator; // (y2 - y1)*accumulator_old - batch_inversion_accumulator *= (rhs[i].x); - } - batch_inversion_accumulator = batch_inversion_accumulator.invert(); - - for (size_t i = (num_points)-1; i < num_points; i -= 1) { - rhs[i].y *= batch_inversion_accumulator; // update accumulator - batch_inversion_accumulator *= rhs[i].x; - rhs[i].x = rhs[i].y.sqr(); - rhs[i].x = rhs[i].x - (scratch_space[i]); // x3 = lambda_squared - x2 - // - x1 - scratch_space[i] = lhs[i].x - rhs[i].x; - scratch_space[i] *= rhs[i].y; - rhs[i].y = scratch_space[i] - lhs[i].y; - } - }; + // we can mutate rhs but NOT lhs! 
+ // output is stored in rhs + const auto batch_affine_add = [num_points, &scratch_space](const affine_element* lhs, affine_element* rhs) { + Fq batch_inversion_accumulator = Fq::one(); - // double the elements in lhs - const auto batch_affine_double = [num_points, &scratch_space](affine_element* lhs) { - Fq batch_inversion_accumulator = Fq::one(); + for (size_t i = 0; i < num_points; i += 1) { + scratch_space[i] = lhs[i].x + rhs[i].x; // x2 + x1 + rhs[i].x -= lhs[i].x; // x2 - x1 + rhs[i].y -= lhs[i].y; // y2 - y1 + rhs[i].y *= batch_inversion_accumulator; // (y2 - y1)*accumulator_old + batch_inversion_accumulator *= (rhs[i].x); + } + batch_inversion_accumulator = batch_inversion_accumulator.invert(); - for (size_t i = 0; i < num_points; i += 1) { + for (size_t i = (num_points)-1; i < num_points; i -= 1) { + rhs[i].y *= batch_inversion_accumulator; // update accumulator + batch_inversion_accumulator *= rhs[i].x; + rhs[i].x = rhs[i].y.sqr(); + rhs[i].x = rhs[i].x - (scratch_space[i]); // x3 = lambda_squared - x2 + // - x1 + scratch_space[i] = lhs[i].x - rhs[i].x; + scratch_space[i] *= rhs[i].y; + rhs[i].y = scratch_space[i] - lhs[i].y; + } + }; - scratch_space[i] = lhs[i].x.sqr(); - scratch_space[i] = scratch_space[i] + scratch_space[i] + scratch_space[i]; + // double the elements in lhs + const auto batch_affine_double = [num_points, &scratch_space](affine_element* lhs) { + Fq batch_inversion_accumulator = Fq::one(); - scratch_space[i] *= batch_inversion_accumulator; + for (size_t i = 0; i < num_points; i += 1) { - batch_inversion_accumulator *= (lhs[i].y + lhs[i].y); - } - batch_inversion_accumulator = batch_inversion_accumulator.invert(); + scratch_space[i] = lhs[i].x.sqr(); + scratch_space[i] = scratch_space[i] + scratch_space[i] + scratch_space[i]; - Fq temp; - for (size_t i = (num_points)-1; i < num_points; i -= 1) { + scratch_space[i] *= batch_inversion_accumulator; - scratch_space[i] *= batch_inversion_accumulator; - batch_inversion_accumulator *= (lhs[i].y 
+ lhs[i].y); + batch_inversion_accumulator *= (lhs[i].y + lhs[i].y); + } + batch_inversion_accumulator = batch_inversion_accumulator.invert(); - temp = lhs[i].x; - lhs[i].x = scratch_space[i].sqr() - (lhs[i].x + lhs[i].x); - lhs[i].y = scratch_space[i] * (temp - lhs[i].x) - lhs[i].y; - } - }; + Fq temp; + for (size_t i = (num_points)-1; i < num_points; i -= 1) { - // Compute wnaf for scalar - const Fr converted_scalar = exponent.from_montgomery_form(); + scratch_space[i] *= batch_inversion_accumulator; + batch_inversion_accumulator *= (lhs[i].y + lhs[i].y); - if (converted_scalar.is_zero()) { - affine_element result{ Fq::zero(), Fq::zero() }; - result.self_set_infinity(); - std::vector results; - for (size_t i = 0; i < num_points; ++i) { - results.emplace_back(result); - } - return results; + temp = lhs[i].x; + lhs[i].x = scratch_space[i].sqr() - (lhs[i].x + lhs[i].x); + lhs[i].y = scratch_space[i] * (temp - lhs[i].x) - lhs[i].y; } + }; - constexpr size_t lookup_size = 8; - constexpr size_t num_rounds = 32; - constexpr size_t num_wnaf_bits = 4; - std::array, lookup_size> lookup_table; - for (auto& table : lookup_table) { - table.resize(num_points); - } - std::vector temp_point_vector(num_points); + // Compute wnaf for scalar + const Fr converted_scalar = exponent.from_montgomery_form(); + + if (converted_scalar.is_zero()) { + affine_element result{ Fq::zero(), Fq::zero() }; + result.self_set_infinity(); + std::vector results; for (size_t i = 0; i < num_points; ++i) { - temp_point_vector[i] = points[i]; - lookup_table[0][i] = points[i]; + results.emplace_back(result); } - batch_affine_double(&temp_point_vector[0]); - for (size_t j = 1; j < lookup_size; ++j) { + return results; + } - for (size_t i = 0; i < num_points; ++i) { - lookup_table[j][i] = lookup_table[j - 1][i]; - } - batch_affine_add(&temp_point_vector[0], &lookup_table[j][0]); - } + constexpr size_t lookup_size = 8; + constexpr size_t num_rounds = 32; + constexpr size_t num_wnaf_bits = 4; + std::array, 
lookup_size> lookup_table; + for (auto& table : lookup_table) { + table.resize(num_points); + } + std::vector temp_point_vector(num_points); + for (size_t i = 0; i < num_points; ++i) { + temp_point_vector[i] = points[i]; + lookup_table[0][i] = points[i]; + } + batch_affine_double(&temp_point_vector[0]); + for (size_t j = 1; j < lookup_size; ++j) { - uint64_t wnaf_table[num_rounds * 2]; - Fr endo_scalar; - Fr::split_into_endomorphism_scalars(converted_scalar, endo_scalar, *(Fr*)&endo_scalar.data[2]); // NOLINT - - bool skew = false; - bool endo_skew = false; - - wnaf::fixed_wnaf(&endo_scalar.data[0], &wnaf_table[0], skew, 0, 2, num_wnaf_bits); - wnaf::fixed_wnaf(&endo_scalar.data[2], &wnaf_table[1], endo_skew, 0, 2, num_wnaf_bits); - - std::vector work_elements(num_points); - - uint64_t wnaf_entry = 0; - uint64_t index = 0; - bool sign = 0; - Fq beta = Fq::cube_root_of_unity(); - - for (size_t i = 0; i < 2; ++i) { - for (size_t j = 0; j < num_points; ++j) { - wnaf_entry = wnaf_table[i]; - index = wnaf_entry & 0x0fffffffU; - sign = static_cast((wnaf_entry >> 31) & 1); - const bool is_odd = ((i & 1) == 1); - auto to_add = lookup_table[static_cast(index)][j]; - to_add.y.self_conditional_negate(sign ^ is_odd); - if (is_odd) { - to_add.x *= beta; - } - if (i == 0) { - work_elements[j] = to_add; - } else { - temp_point_vector[j] = to_add; - } - } + for (size_t i = 0; i < num_points; ++i) { + lookup_table[j][i] = lookup_table[j - 1][i]; } - batch_affine_add(&temp_point_vector[0], &work_elements[0]); + batch_affine_add(&temp_point_vector[0], &lookup_table[j][0]); + } + + uint64_t wnaf_table[num_rounds * 2]; + Fr endo_scalar; + Fr::split_into_endomorphism_scalars(converted_scalar, endo_scalar, *(Fr*)&endo_scalar.data[2]); // NOLINT - for (size_t i = 2; i < num_rounds * 2; ++i) { + bool skew = false; + bool endo_skew = false; + + wnaf::fixed_wnaf(&endo_scalar.data[0], &wnaf_table[0], skew, 0, 2, num_wnaf_bits); + wnaf::fixed_wnaf(&endo_scalar.data[2], &wnaf_table[1], 
endo_skew, 0, 2, num_wnaf_bits); + + std::vector work_elements(num_points); + + uint64_t wnaf_entry = 0; + uint64_t index = 0; + bool sign = 0; + Fq beta = Fq::cube_root_of_unity(); + + for (size_t i = 0; i < 2; ++i) { + for (size_t j = 0; j < num_points; ++j) { wnaf_entry = wnaf_table[i]; index = wnaf_entry & 0x0fffffffU; sign = static_cast((wnaf_entry >> 31) & 1); const bool is_odd = ((i & 1) == 1); - if (!is_odd) { - for (size_t k = 0; k < 4; ++k) { - batch_affine_double(&work_elements[0]); - } + auto to_add = lookup_table[static_cast(index)][j]; + to_add.y.self_conditional_negate(sign ^ is_odd); + if (is_odd) { + to_add.x *= beta; } - for (size_t j = 0; j < num_points; ++j) { - auto to_add = lookup_table[static_cast(index)][j]; - to_add.y.self_conditional_negate(sign ^ is_odd); - if (is_odd) { - to_add.x *= beta; - } + if (i == 0) { + work_elements[j] = to_add; + } else { temp_point_vector[j] = to_add; } - batch_affine_add(&temp_point_vector[0], &work_elements[0]); } - - if (skew) { - for (size_t j = 0; j < num_points; ++j) { - temp_point_vector[j] = -lookup_table[0][j]; + } + batch_affine_add(&temp_point_vector[0], &work_elements[0]); + + for (size_t i = 2; i < num_rounds * 2; ++i) { + wnaf_entry = wnaf_table[i]; + index = wnaf_entry & 0x0fffffffU; + sign = static_cast((wnaf_entry >> 31) & 1); + const bool is_odd = ((i & 1) == 1); + if (!is_odd) { + for (size_t k = 0; k < 4; ++k) { + batch_affine_double(&work_elements[0]); } - batch_affine_add(&temp_point_vector[0], &work_elements[0]); } - - if (endo_skew) { - for (size_t j = 0; j < num_points; ++j) { - temp_point_vector[j] = lookup_table[0][j]; - temp_point_vector[j].x *= beta; + for (size_t j = 0; j < num_points; ++j) { + auto to_add = lookup_table[static_cast(index)][j]; + to_add.y.self_conditional_negate(sign ^ is_odd); + if (is_odd) { + to_add.x *= beta; } - batch_affine_add(&temp_point_vector[0], &work_elements[0]); + temp_point_vector[j] = to_add; } + batch_affine_add(&temp_point_vector[0], 
&work_elements[0]); + } - return work_elements; + if (skew) { + for (size_t j = 0; j < num_points; ++j) { + temp_point_vector[j] = -lookup_table[0][j]; + } + batch_affine_add(&temp_point_vector[0], &work_elements[0]); } - template - void element::conditional_negate_affine(const affine_element& in, - affine_element& out, - const uint64_t predicate) noexcept - { - out = { in.x, predicate ? -in.y : in.y }; + if (endo_skew) { + for (size_t j = 0; j < num_points; ++j) { + temp_point_vector[j] = lookup_table[0][j]; + temp_point_vector[j].x *= beta; + } + batch_affine_add(&temp_point_vector[0], &work_elements[0]); } - template - void element::batch_normalize(element* elements, const size_t num_elements) noexcept - { - std::vector temporaries; - temporaries.reserve(num_elements * 2); - Fq accumulator = Fq::one(); + return work_elements; +} - // Iterate over the points, computing the product of their z-coordinates. - // At each iteration, store the currently-accumulated z-coordinate in `temporaries` - for (size_t i = 0; i < num_elements; ++i) { - temporaries.emplace_back(accumulator); - if (!elements[i].is_point_at_infinity()) { - accumulator *= elements[i].z; - } - } - // For the rest of this method we refer to the product of all z-coordinates as the 'global' z-coordinate - // Invert the global z-coordinate and store in `accumulator` - accumulator = accumulator.invert(); - - /** - * We now proceed to iterate back down the array of points. - * At each iteration we update the accumulator to contain the z-coordinate of the currently worked-upon - *z-coordinate. We can then multiply this accumulator with `temporaries`, to get a scalar that is equal to the - *inverse of the z-coordinate of the point at the next iteration cycle e.g. 
Imagine we have 4 points, such that: - * - * accumulator = 1 / z.data[0]*z.data[1]*z.data[2]*z.data[3] - * temporaries[3] = z.data[0]*z.data[1]*z.data[2] - * temporaries[2] = z.data[0]*z.data[1] - * temporaries[1] = z.data[0] - * temporaries[0] = 1 - * - * At the first iteration, accumulator * temporaries[3] = z.data[0]*z.data[1]*z.data[2] / - *z.data[0]*z.data[1]*z.data[2]*z.data[3] = (1 / z.data[3]) We then update accumulator, such that: - * - * accumulator = accumulator * z.data[3] = 1 / z.data[0]*z.data[1]*z.data[2] - * - * At the second iteration, accumulator * temporaries[2] = z.data[0]*z.data[1] / z.data[0]*z.data[1]*z.data[2] = - *(1 z.data[2]) And so on, until we have computed every z-inverse! - * - * We can then convert out of Jacobian form (x = X / Z^2, y = Y / Z^3) with 4 muls and 1 square. - **/ - for (size_t i = num_elements - 1; i < num_elements; --i) { - if (!elements[i].is_point_at_infinity()) { - Fq z_inv = accumulator * temporaries[i]; - Fq zz_inv = z_inv.sqr(); - elements[i].x *= zz_inv; - elements[i].y *= (zz_inv * z_inv); - accumulator *= elements[i].z; - } - elements[i].z = Fq::one(); +template +void element::conditional_negate_affine(const affine_element& in, + affine_element& out, + const uint64_t predicate) noexcept +{ + out = { in.x, predicate ? -in.y : in.y }; +} + +template +void element::batch_normalize(element* elements, const size_t num_elements) noexcept +{ + std::vector temporaries; + temporaries.reserve(num_elements * 2); + Fq accumulator = Fq::one(); + + // Iterate over the points, computing the product of their z-coordinates. 
+ // At each iteration, store the currently-accumulated z-coordinate in `temporaries` + for (size_t i = 0; i < num_elements; ++i) { + temporaries.emplace_back(accumulator); + if (!elements[i].is_point_at_infinity()) { + accumulator *= elements[i].z; } } + // For the rest of this method we refer to the product of all z-coordinates as the 'global' z-coordinate + // Invert the global z-coordinate and store in `accumulator` + accumulator = accumulator.invert(); + + /** + * We now proceed to iterate back down the array of points. + * At each iteration we update the accumulator to contain the z-coordinate of the currently worked-upon + *z-coordinate. We can then multiply this accumulator with `temporaries`, to get a scalar that is equal to the + *inverse of the z-coordinate of the point at the next iteration cycle e.g. Imagine we have 4 points, such that: + * + * accumulator = 1 / z.data[0]*z.data[1]*z.data[2]*z.data[3] + * temporaries[3] = z.data[0]*z.data[1]*z.data[2] + * temporaries[2] = z.data[0]*z.data[1] + * temporaries[1] = z.data[0] + * temporaries[0] = 1 + * + * At the first iteration, accumulator * temporaries[3] = z.data[0]*z.data[1]*z.data[2] / + *z.data[0]*z.data[1]*z.data[2]*z.data[3] = (1 / z.data[3]) We then update accumulator, such that: + * + * accumulator = accumulator * z.data[3] = 1 / z.data[0]*z.data[1]*z.data[2] + * + * At the second iteration, accumulator * temporaries[2] = z.data[0]*z.data[1] / z.data[0]*z.data[1]*z.data[2] = + *(1 z.data[2]) And so on, until we have computed every z-inverse! + * + * We can then convert out of Jacobian form (x = X / Z^2, y = Y / Z^3) with 4 muls and 1 square. 
+ **/ + for (size_t i = num_elements - 1; i < num_elements; --i) { + if (!elements[i].is_point_at_infinity()) { + Fq z_inv = accumulator * temporaries[i]; + Fq zz_inv = z_inv.sqr(); + elements[i].x *= zz_inv; + elements[i].y *= (zz_inv * z_inv); + accumulator *= elements[i].z; + } + elements[i].z = Fq::one(); + } +} - template - template - element element::random_coordinates_on_curve(numeric::random::Engine* engine) noexcept - { - bool found_one = false; - Fq yy; - Fq x; - Fq y; - while (!found_one) { - x = Fq::random_element(engine); - yy = x.sqr() * x + T::b; - if constexpr (T::has_a) { - yy += (x * T::a); - } - auto [found_root, y1] = yy.sqrt(); - y = y1; - found_one = found_root; +template +template +element element::random_coordinates_on_curve(numeric::random::Engine* engine) noexcept +{ + bool found_one = false; + Fq yy; + Fq x; + Fq y; + while (!found_one) { + x = Fq::random_element(engine); + yy = x.sqr() * x + T::b; + if constexpr (T::has_a) { + yy += (x * T::a); } - return { x, y, Fq::one() }; + auto [found_root, y1] = yy.sqrt(); + y = y1; + found_one = found_root; } + return { x, y, Fq::one() }; +} - } // namespace barretenberg::group_elements - // NOLINTEND(readability-implicit-bool-conversion, cppcoreguidelines-avoid-c-arrays) +} // namespace barretenberg::group_elements +// NOLINTEND(readability-implicit-bool-conversion, cppcoreguidelines-avoid-c-arrays) diff --git a/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_refactor.cpp b/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_refactor.cpp index c5412ab19d1..000ce66d3ad 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_refactor.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_refactor.cpp @@ -10,7 +10,7 @@ using namespace proof_system; template field_t pedersen_hash_refactor::hash_multiple(const std::vector& inputs, const size_t hash_index, - const std::string& domain_separator, + const generator_data* 
generator_context, const bool /*unused*/) { @@ -18,7 +18,7 @@ field_t pedersen_hash_refactor::hash_multiple(const std::vector& using cycle_scalar = typename cycle_group::cycle_scalar; using Curve = typename C::EmbeddedCurve; - auto base_points = grumpkin::g1::get_generators(inputs.size(), hash_index, domain_separator); + auto base_points = generator_context->conditional_extend(inputs.size() + hash_index).generators; std::vector scalars; std::vector points; @@ -27,7 +27,7 @@ field_t pedersen_hash_refactor::hash_multiple(const std::vector& for (size_t i = 0; i < inputs.size(); ++i) { scalars.emplace_back(inputs[i]); // constructs constant cycle_group objects (non-witness) - points.emplace_back(base_points[i]); + points.emplace_back(base_points[i + hash_index]); } auto result = cycle_group::batch_mul(scalars, points); @@ -37,10 +37,10 @@ field_t pedersen_hash_refactor::hash_multiple(const std::vector& template field_t pedersen_hash_refactor::hash(const std::vector& in, size_t hash_index, - const std::string& domain_separator, + const generator_data* generator_context, bool validate_inputs_in_field) { - return hash_multiple(in, hash_index, domain_separator, validate_inputs_in_field); + return hash_multiple(in, hash_index, generator_context, validate_inputs_in_field); } INSTANTIATE_STDLIB_TYPE(pedersen_hash_refactor); diff --git a/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_refactor.hpp b/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_refactor.hpp index fb73174761e..d9c28f3e64a 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_refactor.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_refactor.hpp @@ -1,8 +1,9 @@ #pragma once -#include "../../primitives/circuit_builders/circuit_builders_fwd.hpp" #include "../../primitives/field/field.hpp" #include "../../primitives/point/point.hpp" -#include "barretenberg/crypto/pedersen_commitment/pedersen_refactor.hpp" +#include 
"barretenberg/ecc/curves/grumpkin/grumpkin.hpp" + +#include "../../primitives/circuit_builders/circuit_builders.hpp" namespace proof_system::plonk::stdlib { @@ -19,6 +20,11 @@ template class pedersen_hash_refactor { using field_t = stdlib::field_t; using point = stdlib::point; using bool_t = stdlib::bool_t; + using EmbeddedCurve = typename ComposerContext::EmbeddedCurve; + // template + // using EmbeddedCurve = + // std::conditional_t, curve::BN254, curve::Grumpkin>; + using generator_data = crypto::generator_data; public: // TODO(@suyash67) as part of refactor project, can we remove this and replace with `hash` @@ -26,12 +32,12 @@ template class pedersen_hash_refactor { // TODO update to new interface static field_t hash_multiple(const std::vector& in, size_t hash_index = 0, - const std::string& domain_separator = grumpkin::g1::DEFAULT_DOMAIN_SEPARATOR, + const generator_data* generator_context = generator_data::get_default_generators(), bool validate_inputs_in_field = true); static field_t hash(const std::vector& in, size_t hash_index = 0, - const std::string& domain_separator = grumpkin::g1::DEFAULT_DOMAIN_SEPARATOR, + const generator_data* generator_context = generator_data::get_default_generators(), bool validate_inputs_in_field = true); }; From 43086faa93989efe9594ca8f661aa9d53cda4804 Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Fri, 15 Sep 2023 17:43:55 +0000 Subject: [PATCH 13/50] revert formatting --- .../proof_system/plookup_tables/aes128.hpp | 49 ++++++++------- .../proof_system/plookup_tables/blake2s.hpp | 13 ++-- .../proof_system/plookup_tables/dummy.hpp | 6 +- .../plookup_tables/keccak/keccak_chi.hpp | 8 ++- .../plookup_tables/keccak/keccak_input.hpp | 12 ++-- .../plookup_tables/keccak/keccak_output.hpp | 11 ++-- .../plookup_tables/keccak/keccak_rho.hpp | 38 +++++------ .../plookup_tables/keccak/keccak_theta.hpp | 8 ++- .../non_native_group_generator.cpp | 50 ++++++++------- .../non_native_group_generator.hpp | 63 +++++++++---------- 
.../proof_system/plookup_tables/sha256.hpp | 15 ++--- .../proof_system/plookup_tables/sparse.hpp | 18 +++--- .../proof_system/plookup_tables/types.hpp | 40 +++++------- .../proof_system/plookup_tables/uint.hpp | 6 +- 14 files changed, 169 insertions(+), 168 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/aes128.hpp b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/aes128.hpp index bd05f6f8345..7c714effd43 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/aes128.hpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/aes128.hpp @@ -8,15 +8,16 @@ #include "sparse.hpp" #include "types.hpp" -namespace plookup::aes128_tables { +namespace plookup { +namespace aes128_tables { static constexpr uint64_t AES_BASE = 9; -static constexpr std::array aes_normalization_table{ +static constexpr uint64_t aes_normalization_table[AES_BASE]{ 1, 0, 0, 0, 0, 0, 0, 0, 0, }; inline std::array get_aes_sparse_values_from_key(const std::array key) { - const auto sparse = numeric::map_into_sparse_form(static_cast(key[0])); + const auto sparse = numeric::map_into_sparse_form(uint64_t(key[0])); return { barretenberg::fr(sparse), barretenberg::fr(0) }; } @@ -29,10 +30,10 @@ inline BasicTable generate_aes_sparse_table(BasicTableId id, const size_t table_ table.use_twin_keys = true; for (uint64_t i = 0; i < table.size; ++i) { uint64_t left = i; - const auto right = numeric::map_into_sparse_form(static_cast(i)); - table.column_1.emplace_back(left); - table.column_2.emplace_back(0); - table.column_3.emplace_back(right); + const auto right = numeric::map_into_sparse_form((uint8_t)i); + table.column_1.emplace_back(barretenberg::fr(left)); + table.column_2.emplace_back(barretenberg::fr(0)); + table.column_3.emplace_back(barretenberg::fr(right)); } table.get_values_from_key = &get_aes_sparse_values_from_key; @@ -55,21 +56,21 @@ inline BasicTable generate_aes_sparse_normalization_table(BasicTableId id, const 
table.table_index = table_index; for (uint64_t i = 0; i < AES_BASE; ++i) { uint64_t i_raw = i * AES_BASE * AES_BASE * AES_BASE; - uint64_t i_normalized = static_cast((i & 1UL) == 1UL) * AES_BASE * AES_BASE * AES_BASE; + uint64_t i_normalized = ((i & 1UL) == 1UL) * AES_BASE * AES_BASE * AES_BASE; for (uint64_t j = 0; j < AES_BASE; ++j) { uint64_t j_raw = j * AES_BASE * AES_BASE; - uint64_t j_normalized = static_cast((j & 1UL) == 1UL) * AES_BASE * AES_BASE; + uint64_t j_normalized = ((j & 1UL) == 1UL) * AES_BASE * AES_BASE; for (uint64_t k = 0; k < AES_BASE; ++k) { uint64_t k_raw = k * AES_BASE; - uint64_t k_normalized = static_cast((k & 1UL) == 1UL) * AES_BASE; + uint64_t k_normalized = ((k & 1UL) == 1UL) * AES_BASE; for (uint64_t m = 0; m < AES_BASE; ++m) { uint64_t m_raw = m; - auto m_normalized = static_cast((m & 1UL) == 1UL); + uint64_t m_normalized = ((m & 1UL) == 1UL); uint64_t left = i_raw + j_raw + k_raw + m_raw; uint64_t right = i_normalized + j_normalized + k_normalized + m_normalized; table.column_1.emplace_back(left); table.column_2.emplace_back(right); - table.column_3.emplace_back(0); + table.column_3.emplace_back(barretenberg::fr(0)); } } } @@ -126,10 +127,10 @@ inline MultiTable get_aes_input_table(const MultiTableId id = AES_INPUT) inline std::array get_aes_sbox_values_from_key(const std::array key) { const auto byte = numeric::map_from_sparse_form(key[0]); - uint8_t sbox_value = crypto::aes128::sbox[static_cast(byte)]; - uint8_t swizzled = (static_cast(sbox_value << 1) ^ static_cast(((sbox_value >> 7) & 1) * 0x1b)); + uint8_t sbox_value = crypto::aes128::sbox[(uint8_t)byte]; + uint8_t swizzled = ((uint8_t)(sbox_value << 1) ^ (uint8_t)(((sbox_value >> 7) & 1) * 0x1b)); return { barretenberg::fr(numeric::map_into_sparse_form(sbox_value)), - barretenberg::fr(numeric::map_into_sparse_form(static_cast(sbox_value ^ swizzled))) }; + barretenberg::fr(numeric::map_into_sparse_form((uint8_t)(sbox_value ^ swizzled))) }; } inline BasicTable 
generate_aes_sbox_table(BasicTableId id, const size_t table_index) @@ -140,16 +141,15 @@ inline BasicTable generate_aes_sbox_table(BasicTableId id, const size_t table_in table.size = 256; table.use_twin_keys = false; for (uint64_t i = 0; i < table.size; ++i) { - const auto first = numeric::map_into_sparse_form(static_cast(i)); - uint8_t sbox_value = crypto::aes128::sbox[static_cast(i)]; - uint8_t swizzled = - (static_cast(sbox_value << 1) ^ static_cast(((sbox_value >> 7) & 1) * 0x1b)); + const auto first = numeric::map_into_sparse_form((uint8_t)i); + uint8_t sbox_value = crypto::aes128::sbox[(uint8_t)i]; + uint8_t swizzled = ((uint8_t)(sbox_value << 1) ^ (uint8_t)(((sbox_value >> 7) & 1) * 0x1b)); const auto second = numeric::map_into_sparse_form(sbox_value); - const auto third = numeric::map_into_sparse_form(static_cast(sbox_value ^ swizzled)); + const auto third = numeric::map_into_sparse_form((uint8_t)(sbox_value ^ swizzled)); - table.column_1.emplace_back(first); - table.column_2.emplace_back(second); - table.column_3.emplace_back(third); + table.column_1.emplace_back(barretenberg::fr(first)); + table.column_2.emplace_back(barretenberg::fr(second)); + table.column_3.emplace_back(barretenberg::fr(third)); } table.get_values_from_key = get_aes_sbox_values_from_key; @@ -173,4 +173,5 @@ inline MultiTable get_aes_sbox_table(const MultiTableId id = AES_SBOX) } return table; } -} // namespace plookup::aes128_tables +} // namespace aes128_tables +} // namespace plookup diff --git a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/blake2s.hpp b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/blake2s.hpp index 159be79de08..39df081ca01 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/blake2s.hpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/blake2s.hpp @@ -5,7 +5,8 @@ #include "sparse.hpp" #include "types.hpp" -namespace plookup::blake2s_tables { +namespace plookup { +namespace blake2s_tables { 
static constexpr size_t BITS_IN_LAST_SLICE = 5UL; static constexpr size_t SIZE_OF_LAST_SLICE = (1UL << BITS_IN_LAST_SLICE); @@ -20,8 +21,8 @@ inline std::array get_xor_rotate_values_from_key(const std: { uint64_t filtered_key0 = filter ? key[0] & 3ULL : key[0]; uint64_t filtered_key1 = filter ? key[1] & 3ULL : key[1]; - return { uint256_t{ numeric::rotate32(static_cast(filtered_key0) ^ static_cast(filtered_key1), - static_cast(num_rotated_output_bits)) }, + return { uint256_t(numeric::rotate32(uint32_t(filtered_key0) ^ uint32_t(filtered_key1), + uint32_t(num_rotated_output_bits))), 0ULL }; } @@ -49,8 +50,7 @@ inline BasicTable generate_xor_rotate_table(BasicTableId id, const size_t table_ j_copy &= 3ULL; } table.column_3.emplace_back( - uint256_t{ numeric::rotate32(static_cast(i_copy) ^ static_cast(j_copy), - static_cast(num_rotated_output_bits)) }); + uint256_t(numeric::rotate32(uint32_t(i_copy) ^ uint32_t(j_copy), uint32_t(num_rotated_output_bits)))); } } @@ -215,4 +215,5 @@ inline MultiTable get_blake2s_xor_rotate_7_table(const MultiTableId id = BLAKE_X return table; } -} // namespace plookup::blake2s_tables +} // namespace blake2s_tables +} // namespace plookup diff --git a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/dummy.hpp b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/dummy.hpp index 81c491f6e7c..e44ea72e8fb 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/dummy.hpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/dummy.hpp @@ -9,7 +9,8 @@ #include "types.hpp" -namespace plookup::dummy_tables { +namespace plookup { +namespace dummy_tables { /** * @brief Lookup the value corresponding to a specific key @@ -92,4 +93,5 @@ inline MultiTable get_honk_dummy_multitable() table.get_table_values.emplace_back(&get_value_from_key); return table; } -} // namespace plookup::dummy_tables \ No newline at end of file +} // namespace dummy_tables +} // namespace plookup \ No newline at end of 
file diff --git a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_chi.hpp b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_chi.hpp index 396d4acbae0..5db06fc8e78 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_chi.hpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_chi.hpp @@ -4,7 +4,8 @@ #include "barretenberg/common/constexpr_utils.hpp" #include "barretenberg/numeric/bitop/pow.hpp" -namespace plookup::keccak_tables { +namespace plookup { +namespace keccak_tables { /** * @brief Generates plookup tables required for CHI round of Keccak hash function @@ -58,7 +59,7 @@ namespace plookup::keccak_tables { class Chi { public: // 1 + 2a - b + c => a xor (~b & c) - static constexpr std::array CHI_NORMALIZATION_TABLE{ + static constexpr uint64_t CHI_NORMALIZATION_TABLE[5]{ 0, 0, 1, 1, 0, }; @@ -248,4 +249,5 @@ class Chi { return table; } }; -} // namespace plookup::keccak_tables +} // namespace keccak_tables +} // namespace plookup diff --git a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_input.hpp b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_input.hpp index 57ea1b06bd3..ce161d24ebf 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_input.hpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_input.hpp @@ -5,7 +5,8 @@ #include "barretenberg/numeric/bitop/pow.hpp" #include "barretenberg/numeric/bitop/sparse_form.hpp" -namespace plookup::keccak_tables { +namespace plookup { +namespace keccak_tables { /** * @brief Generates plookup tables used convert 64-bit integers into a sparse representation used for Keccak hash @@ -64,9 +65,9 @@ class KeccakInput { for (uint64_t i = 0; i < table.size; ++i) { const uint64_t source = i; const auto target = numeric::map_into_sparse_form(source); - 
table.column_1.emplace_back(source); - table.column_2.emplace_back(target); - table.column_3.emplace_back(source >> msb_shift); + table.column_1.emplace_back(barretenberg::fr(source)); + table.column_2.emplace_back(barretenberg::fr(target)); + table.column_3.emplace_back(barretenberg::fr(source >> msb_shift)); } table.get_values_from_key = &get_keccak_input_values; @@ -139,4 +140,5 @@ class KeccakInput { } }; -} // namespace plookup::keccak_tables +} // namespace keccak_tables +} // namespace plookup diff --git a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_output.hpp b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_output.hpp index 8a5483ad489..3aaebfcdc35 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_output.hpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_output.hpp @@ -7,7 +7,8 @@ #include "../sparse.hpp" #include "../types.hpp" -namespace plookup::keccak_tables { +namespace plookup { +namespace keccak_tables { /** * @brief Converts a base-11 sparse integer representation into a regular base-2 binary integer. 
@@ -24,9 +25,6 @@ class KeccakOutput { static constexpr uint64_t EFFECTIVE_BASE = 2; static constexpr size_t TABLE_BITS = 8; - // We're doing some degenerate compile-time work with this C-array that can't be done with std::array, - // We pass it as a uint64_t* template parameter, no easy way to do that with std::array - // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays) static constexpr uint64_t OUTPUT_NORMALIZATION_TABLE[2]{ 0, 1 }; /** @@ -109,7 +107,7 @@ class KeccakOutput { table.get_values_from_key = &sparse_tables::get_sparse_normalization_values; table.column_1_step_size = barretenberg::fr(numeric::pow64(static_cast(BASE), TABLE_BITS)); - table.column_2_step_size = barretenberg::fr((static_cast(1) << TABLE_BITS)); + table.column_2_step_size = barretenberg::fr(((uint64_t)1 << TABLE_BITS)); table.column_3_step_size = 0; return table; } @@ -173,4 +171,5 @@ class KeccakOutput { } }; -} // namespace plookup::keccak_tables +} // namespace keccak_tables +} // namespace plookup diff --git a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_rho.hpp b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_rho.hpp index 70d56a0dd66..d29f4009b05 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_rho.hpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_rho.hpp @@ -4,7 +4,8 @@ #include "barretenberg/common/constexpr_utils.hpp" #include "barretenberg/numeric/bitop/pow.hpp" -namespace plookup::keccak_tables { +namespace plookup { +namespace keccak_tables { /** * @brief Generate the plookup tables used for the RHO round of the Keccak hash algorithm @@ -70,7 +71,7 @@ template class Rho { 0, 1, 62, 28, 27, 36, 44, 6, 55, 20, 3, 10, 43, 25, 39, 41, 45, 15, 21, 8, 18, 2, 61, 56, 14, }; - static constexpr std::array RHO_NORMALIZATION_TABLE{ + static constexpr uint64_t RHO_NORMALIZATION_TABLE[3]{ 0, 1, 0, @@ -99,7 +100,7 @@ template class Rho { base_shift *= BASE; 
} - return { barretenberg::fr(accumulator), barretenberg::fr{ accumulator / divisor } }; + return { barretenberg::fr(accumulator), barretenberg::fr(accumulator / divisor) }; } /** @@ -240,9 +241,9 @@ template class Rho { MultiTable table; table.id = id; - table.column_1_step_sizes.emplace_back(1); - table.column_2_step_sizes.emplace_back(1); - table.column_3_step_sizes.emplace_back(1); + table.column_1_step_sizes.push_back(1); + table.column_2_step_sizes.push_back(1); + table.column_3_step_sizes.push_back(1); // generate table selector values for the 'right' slice barretenberg::constexpr_for<0, num_right_tables, 1>([&] { @@ -253,18 +254,18 @@ template class Rho { constexpr uint64_t scaled_base = numeric::pow64(BASE, bit_slice); if (i == num_right_tables - 1) { - table.column_1_step_sizes.emplace_back(scaled_base); - table.column_2_step_sizes.emplace_back(0); - table.column_3_step_sizes.emplace_back(0); + table.column_1_step_sizes.push_back(scaled_base); + table.column_2_step_sizes.push_back(0); + table.column_3_step_sizes.push_back(0); } else { - table.column_1_step_sizes.emplace_back(scaled_base); - table.column_2_step_sizes.emplace_back(scaled_base); - table.column_3_step_sizes.emplace_back(0); + table.column_1_step_sizes.push_back(scaled_base); + table.column_2_step_sizes.push_back(scaled_base); + table.column_3_step_sizes.push_back(0); } table.slice_sizes.push_back(scaled_base); table.get_table_values.emplace_back(&get_rho_renormalization_values); - table.lookup_ids.push_back(static_cast(static_cast(KECCAK_RHO_1) + (bit_slice - 1))); + table.lookup_ids.push_back((BasicTableId)((size_t)KECCAK_RHO_1 + (bit_slice - 1))); }); // generate table selector values for the 'left' slice @@ -277,18 +278,19 @@ template class Rho { constexpr uint64_t scaled_base = numeric::pow64(BASE, bit_slice); if (i != num_left_tables - 1) { - table.column_1_step_sizes.emplace_back(scaled_base); - table.column_2_step_sizes.emplace_back(scaled_base); - 
table.column_3_step_sizes.emplace_back(0); + table.column_1_step_sizes.push_back(scaled_base); + table.column_2_step_sizes.push_back(scaled_base); + table.column_3_step_sizes.push_back(0); } table.slice_sizes.push_back(scaled_base); table.get_table_values.emplace_back(&get_rho_renormalization_values); - table.lookup_ids.push_back(static_cast(static_cast(KECCAK_RHO_1) + (bit_slice - 1))); + table.lookup_ids.push_back((BasicTableId)((size_t)KECCAK_RHO_1 + (bit_slice - 1))); }); return table; } }; -} // namespace plookup::keccak_tables +} // namespace keccak_tables +} // namespace plookup diff --git a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_theta.hpp b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_theta.hpp index e93c8bf726e..f7ed27f908a 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_theta.hpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_theta.hpp @@ -4,7 +4,8 @@ #include "barretenberg/common/constexpr_utils.hpp" #include "barretenberg/numeric/bitop/pow.hpp" -namespace plookup::keccak_tables { +namespace plookup { +namespace keccak_tables { /** * @brief Generates plookup tables required for THETA round of Keccak hash function @@ -54,7 +55,7 @@ class Theta { static constexpr size_t TABLE_BITS = 4; static constexpr uint64_t BASE = 11; - static constexpr std::array THETA_NORMALIZATION_TABLE{ + static constexpr uint64_t THETA_NORMALIZATION_TABLE[11]{ 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, }; @@ -250,4 +251,5 @@ class Theta { return table; } }; -} // namespace plookup::keccak_tables +} // namespace keccak_tables +} // namespace plookup diff --git a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/non_native_group_generator.cpp b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/non_native_group_generator.cpp index cb105b80dc3..5a70295deaa 100644 --- 
a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/non_native_group_generator.cpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/non_native_group_generator.cpp @@ -1,6 +1,7 @@ #include "non_native_group_generator.hpp" -namespace plookup::ecc_generator_tables { +namespace plookup { +namespace ecc_generator_tables { /** * Init 8-bit generator lookup tables @@ -30,9 +31,9 @@ template void ecc_generator_table::init_generator_tables() auto beta = G1::Fq::cube_root_of_unity(); for (size_t i = 0; i < 256; ++i) { - auto endo_x = static_cast(point_table[i].x * beta); - auto x = static_cast(point_table[i].x); - auto y = static_cast(point_table[i].y); + uint256_t endo_x = static_cast(point_table[i].x * beta); + uint256_t x = static_cast(point_table[i].x); + uint256_t y = static_cast(point_table[i].y); const uint256_t SHIFT = uint256_t(1) << 68; const uint256_t MASK = SHIFT - 1; @@ -59,16 +60,18 @@ template void ecc_generator_table::init_generator_tables() uint256_t y2 = y & MASK; y = y >> 68; uint256_t y3 = y & MASK; - ecc_generator_table::generator_xlo_table[i] = std::make_pair(x0, x1); - ecc_generator_table::generator_xhi_table[i] = std::make_pair(x2, x3); - ecc_generator_table::generator_endo_xlo_table[i] = std::make_pair(endox0, endox1); - ecc_generator_table::generator_endo_xhi_table[i] = std::make_pair(endox2, endox3); - ecc_generator_table::generator_ylo_table[i] = std::make_pair(y0, y1); - ecc_generator_table::generator_yhi_table[i] = std::make_pair(y2, y3); - ecc_generator_table::generator_xyprime_table[i] = std::make_pair( - barretenberg::fr{ uint256_t(point_table[i].x) }, barretenberg::fr{ uint256_t(point_table[i].y) }); - ecc_generator_table::generator_endo_xyprime_table[i] = std::make_pair( - barretenberg::fr{ uint256_t(point_table[i].x * beta) }, barretenberg::fr{ uint256_t(point_table[i].y) }); + ecc_generator_table::generator_xlo_table[i] = std::make_pair(x0, x1); + ecc_generator_table::generator_xhi_table[i] = 
std::make_pair(x2, x3); + ecc_generator_table::generator_endo_xlo_table[i] = + std::make_pair(endox0, endox1); + ecc_generator_table::generator_endo_xhi_table[i] = + std::make_pair(endox2, endox3); + ecc_generator_table::generator_ylo_table[i] = std::make_pair(y0, y1); + ecc_generator_table::generator_yhi_table[i] = std::make_pair(y2, y3); + ecc_generator_table::generator_xyprime_table[i] = std::make_pair( + barretenberg::fr(uint256_t(point_table[i].x)), barretenberg::fr(uint256_t(point_table[i].y))); + ecc_generator_table::generator_endo_xyprime_table[i] = std::make_pair( + barretenberg::fr(uint256_t(point_table[i].x * beta)), barretenberg::fr(uint256_t(point_table[i].y))); } init = true; } @@ -92,7 +95,7 @@ template std::array ecc_generator_table::get_xlo_values(const std::array key) { init_generator_tables(); - const auto index = static_cast(key[0]); + const size_t index = static_cast(key[0]); return { ecc_generator_table::generator_xlo_table[index].first, ecc_generator_table::generator_xlo_table[index].second }; } @@ -104,7 +107,7 @@ template std::array ecc_generator_table::get_xhi_values(const std::array key) { init_generator_tables(); - const auto index = static_cast(key[0]); + const size_t index = static_cast(key[0]); return { ecc_generator_table::generator_xhi_table[index].first, ecc_generator_table::generator_xhi_table[index].second }; } @@ -116,7 +119,7 @@ template std::array ecc_generator_table::get_xlo_endo_values(const std::array key) { init_generator_tables(); - const auto index = static_cast(key[0]); + const size_t index = static_cast(key[0]); return { ecc_generator_table::generator_endo_xlo_table[index].first, ecc_generator_table::generator_endo_xlo_table[index].second }; } @@ -128,7 +131,7 @@ template std::array ecc_generator_table::get_xhi_endo_values(const std::array key) { init_generator_tables(); - const auto index = static_cast(key[0]); + const size_t index = static_cast(key[0]); return { 
ecc_generator_table::generator_endo_xhi_table[index].first, ecc_generator_table::generator_endo_xhi_table[index].second }; } @@ -140,7 +143,7 @@ template std::array ecc_generator_table::get_ylo_values(const std::array key) { init_generator_tables(); - const auto index = static_cast(key[0]); + const size_t index = static_cast(key[0]); return { ecc_generator_table::generator_ylo_table[index].first, ecc_generator_table::generator_ylo_table[index].second }; } @@ -152,7 +155,7 @@ template std::array ecc_generator_table::get_yhi_values(const std::array key) { init_generator_tables(); - const auto index = static_cast(key[0]); + const size_t index = static_cast(key[0]); return { ecc_generator_table::generator_yhi_table[index].first, ecc_generator_table::generator_yhi_table[index].second }; } @@ -164,7 +167,7 @@ template std::array ecc_generator_table::get_xyprime_values(const std::array key) { init_generator_tables(); - const auto index = static_cast(key[0]); + const size_t index = static_cast(key[0]); return { ecc_generator_table::generator_xyprime_table[index].first, ecc_generator_table::generator_xyprime_table[index].second }; } @@ -176,7 +179,7 @@ template std::array ecc_generator_table::get_xyprime_endo_values(const std::array key) { init_generator_tables(); - const auto index = static_cast(key[0]); + const size_t index = static_cast(key[0]); return { ecc_generator_table::generator_endo_xyprime_table[index].first, ecc_generator_table::generator_endo_xyprime_table[index].second }; } @@ -491,4 +494,5 @@ MultiTable ecc_generator_table::get_xyprime_endo_table(const MultiTableId id template class ecc_generator_table; template class ecc_generator_table; -} // namespace plookup::ecc_generator_tables \ No newline at end of file +} // namespace ecc_generator_tables +} // namespace plookup \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/non_native_group_generator.hpp 
b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/non_native_group_generator.hpp index 79eed4ef66f..b579d770264 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/non_native_group_generator.hpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/non_native_group_generator.hpp @@ -6,17 +6,16 @@ #include "barretenberg/ecc/curves/secp256k1/secp256k1.hpp" #include -namespace plookup::ecc_generator_tables { +namespace plookup { +namespace ecc_generator_tables { template class ecc_generator_table { public: - using element = typename G1::element; + typedef typename G1::element element; /** * Store arrays of precomputed 8-bit lookup tables for generator point coordinates (and their endomorphism *equivalents) **/ - // TODO(@zac-williamson) convert these into static const class members - // NOLINTBEGIN(cppcoreguidelines-avoid-non-const-global-variables) inline static std::array, 256> generator_endo_xlo_table; inline static std::array, 256> generator_endo_xhi_table; inline static std::array, 256> generator_xlo_table; @@ -26,39 +25,39 @@ template class ecc_generator_table { inline static std::array, 256> generator_xyprime_table; inline static std::array, 256> generator_endo_xyprime_table; inline static bool init = false; - // NOLINTEND(cppcoreguidelines-avoid-non-const-global-variables) static void init_generator_tables(); - static size_t convert_position_to_shifted_naf(size_t position); - static size_t convert_shifted_naf_to_position(size_t shifted_naf); - static std::array get_xlo_endo_values(std::array key); - static std::array get_xhi_endo_values(std::array key); - static std::array get_xlo_values(std::array key); - static std::array get_xhi_values(std::array key); - static std::array get_ylo_values(std::array key); - static std::array get_yhi_values(std::array key); - static std::array get_xyprime_values(std::array key); - static std::array get_xyprime_endo_values(std::array key); - static BasicTable 
generate_xlo_table(BasicTableId id, size_t table_index); - static BasicTable generate_xhi_table(BasicTableId id, size_t table_index); - static BasicTable generate_xlo_endo_table(BasicTableId id, size_t table_index); - static BasicTable generate_xhi_endo_table(BasicTableId id, size_t table_index); - static BasicTable generate_ylo_table(BasicTableId id, size_t table_index); - static BasicTable generate_yhi_table(BasicTableId id, size_t table_index); - static BasicTable generate_xyprime_table(BasicTableId id, size_t table_index); - static BasicTable generate_xyprime_endo_table(BasicTableId id, size_t table_index); - static MultiTable get_xlo_table(MultiTableId id, BasicTableId basic_id); - static MultiTable get_xhi_table(MultiTableId id, BasicTableId basic_id); - static MultiTable get_xlo_endo_table(MultiTableId id, BasicTableId basic_id); - static MultiTable get_xhi_endo_table(MultiTableId id, BasicTableId basic_id); - static MultiTable get_ylo_table(MultiTableId id, BasicTableId basic_id); - static MultiTable get_yhi_table(MultiTableId id, BasicTableId basic_id); - static MultiTable get_xyprime_table(MultiTableId id, BasicTableId basic_id); - static MultiTable get_xyprime_endo_table(MultiTableId id, BasicTableId basic_id); + static size_t convert_position_to_shifted_naf(const size_t position); + static size_t convert_shifted_naf_to_position(const size_t shifted_naf); + static std::array get_xlo_endo_values(const std::array key); + static std::array get_xhi_endo_values(const std::array key); + static std::array get_xlo_values(const std::array key); + static std::array get_xhi_values(const std::array key); + static std::array get_ylo_values(const std::array key); + static std::array get_yhi_values(const std::array key); + static std::array get_xyprime_values(const std::array key); + static std::array get_xyprime_endo_values(const std::array key); + static BasicTable generate_xlo_table(BasicTableId id, const size_t table_index); + static BasicTable 
generate_xhi_table(BasicTableId id, const size_t table_index); + static BasicTable generate_xlo_endo_table(BasicTableId id, const size_t table_index); + static BasicTable generate_xhi_endo_table(BasicTableId id, const size_t table_index); + static BasicTable generate_ylo_table(BasicTableId id, const size_t table_index); + static BasicTable generate_yhi_table(BasicTableId id, const size_t table_index); + static BasicTable generate_xyprime_table(BasicTableId id, const size_t table_index); + static BasicTable generate_xyprime_endo_table(BasicTableId id, const size_t table_index); + static MultiTable get_xlo_table(const MultiTableId id, const BasicTableId basic_id); + static MultiTable get_xhi_table(const MultiTableId id, const BasicTableId basic_id); + static MultiTable get_xlo_endo_table(const MultiTableId id, const BasicTableId basic_id); + static MultiTable get_xhi_endo_table(const MultiTableId id, const BasicTableId basic_id); + static MultiTable get_ylo_table(const MultiTableId id, const BasicTableId basic_id); + static MultiTable get_yhi_table(const MultiTableId id, const BasicTableId basic_id); + static MultiTable get_xyprime_table(const MultiTableId id, const BasicTableId basic_id); + static MultiTable get_xyprime_endo_table(const MultiTableId id, const BasicTableId basic_id); }; extern template class ecc_generator_table; extern template class ecc_generator_table; -} // namespace plookup::ecc_generator_tables +} // namespace ecc_generator_tables +} // namespace plookup diff --git a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/sha256.hpp b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/sha256.hpp index 33e3d9c57fc..b738571db15 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/sha256.hpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/sha256.hpp @@ -8,11 +8,9 @@ #include "sparse.hpp" #include "types.hpp" -namespace plookup::sha256_tables { +namespace plookup { +namespace sha256_tables { 
-// We're doing some degenerate compile-time work with this C-array that can't be done with std::array, -// We pass it as a uint64_t* template parameter, no easy way to do that with std::array -// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays) static constexpr uint64_t choose_normalization_table[28]{ /* xor result = 0 */ 0, // e + 2f + 3g = 0 => e = 0, f = 0, g = 0 => t = 0 @@ -48,9 +46,6 @@ static constexpr uint64_t choose_normalization_table[28]{ 2, // e + 2f + 3g = 6 => e = 1, f = 1, g = 1 => t = 1 }; -// We're doing some degenerate compile-time work with this C-array that can't be done with std::array, -// We pass it as a uint64_t* template parameter, no easy way to do that with std::array -// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays) static constexpr uint64_t majority_normalization_table[16]{ /* xor result = 0 */ 0, // a + b + c = 0 => (a & b) ^ (a & c) ^ (b & c) = 0 @@ -74,9 +69,6 @@ static constexpr uint64_t majority_normalization_table[16]{ 2, }; -// We're doing some degenerate compile-time work with this C-array that can't be done with std::array, -// We pass it as a uint64_t* template parameter, no easy way to do that with std::array -// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays) static constexpr uint64_t witness_extension_normalization_table[16]{ /* xor result = 0 */ 0, @@ -431,4 +423,5 @@ inline MultiTable get_majority_input_table(const MultiTableId id = SHA256_MAJ_IN return table; } -} // namespace plookup::sha256_tables +} // namespace sha256_tables +} // namespace plookup diff --git a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/sparse.hpp b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/sparse.hpp index 6a9cf3a12a9..006f9a3c7b0 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/sparse.hpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/sparse.hpp @@ -7,7 +7,8 @@ #include "barretenberg/numeric/bitop/rotate.hpp" #include 
"barretenberg/numeric/bitop/sparse_form.hpp" -namespace plookup::sparse_tables { +namespace plookup { +namespace sparse_tables { template inline std::array get_sparse_table_with_rotation_values(const std::array key) @@ -15,7 +16,7 @@ inline std::array get_sparse_table_with_rotation_values(con const auto t0 = numeric::map_into_sparse_form(key[0]); barretenberg::fr t1; if constexpr (num_rotated_bits > 0) { - t1 = numeric::map_into_sparse_form(numeric::rotate32(static_cast(key[0]), num_rotated_bits)); + t1 = numeric::map_into_sparse_form(numeric::rotate32((uint32_t)key[0], num_rotated_bits)); } else { t1 = t0; } @@ -34,12 +35,12 @@ inline BasicTable generate_sparse_table_with_rotation(BasicTableId id, const siz for (uint64_t i = 0; i < table.size; ++i) { const uint64_t source = i; const auto target = numeric::map_into_sparse_form(source); - table.column_1.emplace_back(source); + table.column_1.emplace_back(barretenberg::fr(source)); table.column_2.emplace_back(barretenberg::fr(target)); if constexpr (num_rotated_bits > 0) { const auto rotated = - numeric::map_into_sparse_form(numeric::rotate32(static_cast(source), num_rotated_bits)); + numeric::map_into_sparse_form(numeric::rotate32((uint32_t)source, num_rotated_bits)); table.column_3.emplace_back(barretenberg::fr(rotated)); } else { table.column_3.emplace_back(barretenberg::fr(target)); @@ -97,21 +98,22 @@ inline BasicTable generate_sparse_normalization_table(BasicTableId id, const siz const auto& limbs = accumulator.get_limbs(); uint64_t key = 0; for (size_t j = 0; j < num_bits; ++j) { - const auto table_idx = static_cast(limbs[j]); + const size_t table_idx = static_cast(limbs[j]); key += ((base_table[table_idx]) << static_cast(j)); } table.column_1.emplace_back(accumulator.get_sparse_value()); table.column_2.emplace_back(key); - table.column_3.emplace_back(0); + table.column_3.emplace_back(barretenberg::fr(0)); accumulator += to_add; } table.get_values_from_key = &get_sparse_normalization_values; 
table.column_1_step_size = barretenberg::fr(table.size); - table.column_2_step_size = barretenberg::fr{ (static_cast(1) << num_bits) }; + table.column_2_step_size = barretenberg::fr(((uint64_t)1 << num_bits)); table.column_3_step_size = barretenberg::fr(0); return table; } -} // namespace plookup::sparse_tables +} // namespace sparse_tables +} // namespace plookup diff --git a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/types.hpp b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/types.hpp index d17aa27b810..e7fd4e400ef 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/types.hpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/types.hpp @@ -1,11 +1,10 @@ #pragma once #include -#include #include -#include "./fixed_base/fixed_base_params.hpp" #include "barretenberg/ecc/curves/bn254/fr.hpp" + namespace plookup { enum BasicTableId { @@ -53,11 +52,7 @@ enum BasicTableId { BLAKE_XOR_ROTATE1, BLAKE_XOR_ROTATE2, BLAKE_XOR_ROTATE4, - FIXED_BASE_0_0, - FIXED_BASE_1_0 = FIXED_BASE_0_0 + FixedBaseParams::NUM_TABLES_PER_LO_MULTITABLE, - FIXED_BASE_2_0 = FIXED_BASE_1_0 + FixedBaseParams::NUM_TABLES_PER_HI_MULTITABLE, - FIXED_BASE_3_0 = FIXED_BASE_2_0 + FixedBaseParams::NUM_TABLES_PER_LO_MULTITABLE, - PEDERSEN_29_SMALL = FIXED_BASE_3_0 + FixedBaseParams::NUM_TABLES_PER_HI_MULTITABLE, + PEDERSEN_29_SMALL, PEDERSEN_28, PEDERSEN_27, PEDERSEN_26, @@ -120,10 +115,6 @@ enum MultiTableId { PEDERSEN_LEFT_LO, PEDERSEN_RIGHT_HI, PEDERSEN_RIGHT_LO, - FIXED_BASE_LEFT_LO, - FIXED_BASE_LEFT_HI, - FIXED_BASE_RIGHT_LO, - FIXED_BASE_RIGHT_HI, UINT32_XOR, UINT32_AND, BN254_XLO, @@ -157,7 +148,6 @@ enum MultiTableId { }; struct MultiTable { - ~MultiTable() = default; // Coefficients are accumulated products of corresponding step sizes until that point std::vector column_1_coefficients; std::vector column_2_coefficients; @@ -168,17 +158,17 @@ struct MultiTable { std::vector column_1_step_sizes; std::vector 
column_2_step_sizes; std::vector column_3_step_sizes; - using table_out = std::array; - using table_in = std::array; + typedef std::array table_out; + typedef std::array table_in; std::vector get_table_values; private: void init_step_sizes() { const size_t num_lookups = column_1_coefficients.size(); - column_1_step_sizes.emplace_back(1); - column_2_step_sizes.emplace_back(1); - column_3_step_sizes.emplace_back(1); + column_1_step_sizes.emplace_back(barretenberg::fr(1)); + column_2_step_sizes.emplace_back(barretenberg::fr(1)); + column_3_step_sizes.emplace_back(barretenberg::fr(1)); std::vector coefficient_inverses(column_1_coefficients.begin(), column_1_coefficients.end()); std::copy(column_2_coefficients.begin(), column_2_coefficients.end(), std::back_inserter(coefficient_inverses)); @@ -210,17 +200,17 @@ struct MultiTable { } init_step_sizes(); } - MultiTable(std::vector col_1_coeffs, - std::vector col_2_coeffs, - std::vector col_3_coeffs) - : column_1_coefficients(std::move(col_1_coeffs)) - , column_2_coefficients(std::move(col_2_coeffs)) - , column_3_coefficients(std::move(col_3_coeffs)) + MultiTable(const std::vector& col_1_coeffs, + const std::vector& col_2_coeffs, + const std::vector& col_3_coeffs) + : column_1_coefficients(col_1_coeffs) + , column_2_coefficients(col_2_coeffs) + , column_3_coefficients(col_3_coeffs) { init_step_sizes(); } - MultiTable() = default; + MultiTable(){}; MultiTable(const MultiTable& other) = default; MultiTable(MultiTable&& other) = default; @@ -305,7 +295,7 @@ struct BasicTable { return key[0] < other.key[0] || ((key[0] == other.key[0]) && key[1] < other.key[1]); } - [[nodiscard]] std::array to_sorted_list_components(const bool use_two_keys) const + std::array to_sorted_list_components(const bool use_two_keys) const { return { barretenberg::fr(key[0]), diff --git a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/uint.hpp b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/uint.hpp index 
9c059e4c3f8..05ccb4275e6 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/uint.hpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/uint.hpp @@ -4,7 +4,8 @@ #include "barretenberg/numeric/bitop/rotate.hpp" -namespace plookup::uint_tables { +namespace plookup { +namespace uint_tables { template inline std::array get_xor_rotate_values_from_key(const std::array key) @@ -102,4 +103,5 @@ inline MultiTable get_uint32_and_table(const MultiTableId id = UINT32_AND) return table; } -} // namespace plookup::uint_tables +} // namespace uint_tables +} // namespace plookup From 918410c3529de04e49f20733fbbb97053ab2a77e Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Fri, 15 Sep 2023 18:22:25 +0000 Subject: [PATCH 14/50] removed extra ecc gate methods from standard/turbo circuit builder --- .../crypto/pedersen_commitment/pedersen.hpp | 6 +- .../crypto/pedersen_hash/pedersen.cpp | 11 +- .../crypto/pedersen_hash/pedersen.hpp | 8 +- .../ecc/curves/secp256k1/secp256k1.cpp | 27 +- .../ecc/curves/secp256r1/secp256r1.cpp | 18 ++ .../arithmetization/gate_data.hpp | 1 - .../circuit_builder/circuit_builder_base.hpp | 3 - .../standard_circuit_builder.cpp | 231 ------------------ .../standard_circuit_builder.hpp | 2 - .../circuit_builder/turbo_circuit_builder.cpp | 230 ----------------- .../circuit_builder/turbo_circuit_builder.hpp | 13 +- .../circuit_builder/ultra_circuit_builder.hpp | 4 +- .../proof_system/plookup_tables/types.hpp | 11 +- .../stdlib/primitives/field/field.hpp | 2 - .../stdlib/primitives/group/cycle_group.cpp | 141 ++++++++--- .../stdlib/primitives/group/cycle_group.hpp | 20 +- 16 files changed, 185 insertions(+), 543 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen.hpp b/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen.hpp index 80add981996..3571016ebd7 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen.hpp +++ 
b/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen.hpp @@ -4,7 +4,8 @@ #include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp" #include -namespace crypto::pedersen_commitment { +namespace crypto { +namespace pedersen_commitment { grumpkin::g1::element commit_single(const barretenberg::fr& in, generators::generator_index_t const& index); @@ -25,4 +26,5 @@ grumpkin::fq compress_native(const std::vector& input, const size_t has grumpkin::fq compress_native(const std::vector>& input_pairs); -} // namespace crypto::pedersen_commitment \ No newline at end of file +} // namespace pedersen_commitment +} // namespace crypto diff --git a/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen.cpp b/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen.cpp index 70d64fb6cb0..6debd1b9ff3 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen.cpp @@ -4,7 +4,8 @@ #include #endif -namespace crypto::pedersen_hash { +namespace crypto { +namespace pedersen_hash { using namespace generators; @@ -20,7 +21,7 @@ grumpkin::g1::element hash_single(const barretenberg::fr& in, generator_index_t const fixed_base_ladder* ladder = gen_data.get_hash_ladder(num_bits); - std::array wnaf_entries = { 0 }; + uint64_t wnaf_entries[num_quads + 2] = { 0 }; bool skew = false; barretenberg::wnaf::fixed_wnaf(&scalar_multiplier.data[0], &wnaf_entries[0], skew, 0); @@ -63,7 +64,9 @@ grumpkin::fq hash_multiple(const std::vector& inputs, const size_t r = out[i] + r; } grumpkin::g1::affine_element result = - r.is_point_at_infinity() ? grumpkin::g1::affine_element(0, 0) : static_cast(r); + r.is_point_at_infinity() ? 
grumpkin::g1::affine_element(0, 0) : grumpkin::g1::affine_element(r); return result.x; } -} // namespace crypto::pedersen_hash \ No newline at end of file + +} // namespace pedersen_hash +} // namespace crypto \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen.hpp b/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen.hpp index 5219b31c31c..40bdfc7ff8d 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen.hpp @@ -4,10 +4,12 @@ #include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp" #include -namespace crypto::pedersen_hash { +namespace crypto { +namespace pedersen_hash { grumpkin::g1::element hash_single(const barretenberg::fr& in, generators::generator_index_t const& index); -grumpkin::fq hash_multiple(const std::vector& inputs, size_t hash_index = 0); +grumpkin::fq hash_multiple(const std::vector& inputs, const size_t hash_index = 0); -} // namespace crypto::pedersen_hash +} // namespace pedersen_hash +} // namespace crypto diff --git a/barretenberg/cpp/src/barretenberg/ecc/curves/secp256k1/secp256k1.cpp b/barretenberg/cpp/src/barretenberg/ecc/curves/secp256k1/secp256k1.cpp index b2f5fa4f782..b199208cec9 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/curves/secp256k1/secp256k1.cpp +++ b/barretenberg/cpp/src/barretenberg/ecc/curves/secp256k1/secp256k1.cpp @@ -1,16 +1,25 @@ #include "./secp256k1.hpp" namespace secp256k1 { +namespace { + +constexpr size_t max_num_generators = 1 << 10; +// NOLINTNEXTLINE TODO(@zac-williamson) #1806 get rid of need for these static variables in Pedersen refactor! +static std::array generators; +// NOLINTNEXTLINE TODO(@zac-williamson) #1806 get rid of need for these static variables in Pedersen refactor! 
+static bool init_generators = false; + +} // namespace /* In case where prime bit length is 256, the method produces a generator, but only with one less bit of randomness than the maximum possible, as the y coordinate in that case is determined by the x-coordinate. */ -// g1::affine_element get_generator(const size_t generator_index) -// { -// if (!init_generators) { -// generators = g1::derive_generators(); -// init_generators = true; -// } -// ASSERT(generator_index < max_num_generators); -// return generators[generator_index]; -// } +g1::affine_element get_generator(const size_t generator_index) +{ + if (!init_generators) { + generators = g1::derive_generators(); + init_generators = true; + } + ASSERT(generator_index < max_num_generators); + return generators[generator_index]; +} } // namespace secp256k1 \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/ecc/curves/secp256r1/secp256r1.cpp b/barretenberg/cpp/src/barretenberg/ecc/curves/secp256r1/secp256r1.cpp index 46875462194..061bbd2c2fd 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/curves/secp256r1/secp256r1.cpp +++ b/barretenberg/cpp/src/barretenberg/ecc/curves/secp256r1/secp256r1.cpp @@ -1,7 +1,25 @@ #include "./secp256r1.hpp" namespace secp256r1 { +namespace { + +constexpr size_t max_num_generators = 1 << 10; +// NOLINTNEXTLINE TODO(@zac-williamson) #1806 get rid of need for these static variables in Pedersen refactor! +static std::array generators; +// NOLINTNEXTLINE TODO(@zac-williamson) #1806 get rid of need for these static variables in Pedersen refactor! +static bool init_generators = false; + +} // namespace /* In case where prime bit length is 256, the method produces a generator, but only with one less bit of randomness than the maximum possible, as the y coordinate in that case is determined by the x-coordinate. 
*/ +g1::affine_element get_generator(const size_t generator_index) +{ + if (!init_generators) { + generators = g1::derive_generators(); + init_generators = true; + } + ASSERT(generator_index < max_num_generators); + return generators[generator_index]; +} } // namespace secp256r1 \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/proof_system/arithmetization/gate_data.hpp b/barretenberg/cpp/src/barretenberg/proof_system/arithmetization/gate_data.hpp index 8e96dd04b6a..80909226332 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/arithmetization/gate_data.hpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/arithmetization/gate_data.hpp @@ -127,7 +127,6 @@ template struct ecc_add_gate_ { FF endomorphism_coefficient; FF sign_coefficient; }; - template struct ecc_dbl_gate_ { uint32_t x1; uint32_t y1; diff --git a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/circuit_builder_base.hpp b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/circuit_builder_base.hpp index a37148a5116..cb226fc5255 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/circuit_builder_base.hpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/circuit_builder_base.hpp @@ -96,9 +96,6 @@ template class CircuitBuilderBase { virtual void create_mul_gate(const mul_triple_& in) = 0; virtual void create_bool_gate(const uint32_t a) = 0; virtual void create_poly_gate(const poly_triple_& in) = 0; - virtual void create_ecc_add_gate(const ecc_add_gate_& in) = 0; - virtual void create_ecc_dbl_gate(const ecc_dbl_gate_& in) = 0; - virtual size_t get_num_constant_gates() const = 0; /** diff --git a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/standard_circuit_builder.cpp b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/standard_circuit_builder.cpp index 5865c588cbe..c97775f9021 100644 --- 
a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/standard_circuit_builder.cpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/standard_circuit_builder.cpp @@ -243,237 +243,6 @@ template void StandardCircuitBuilder_::create_poly_gate(const ++this->num_gates; } -/** - * @brief Create a gate where we validate an elliptic curve point addition - * (x1, y1) + (x2, y2) = (x3, y3) - * N.B. uses incomplete addition formula. Use with caution - * @tparam FF - * @param in - */ -template void StandardCircuitBuilder_::create_ecc_add_gate(const ecc_add_gate_& in) -{ - const auto sign_coefficient = in.sign_coefficient; - const auto x1 = this->get_variable(in.x1); - const auto x2 = this->get_variable(in.x2); - const auto x3 = this->get_variable(in.x3); - const auto y1 = this->get_variable(in.y1); - const auto y2 = sign_coefficient * this->get_variable(in.y2); - - bool collision = x2 == x1; - if (collision) { - this->failure("create_ecc_add_gate incomplete formula collision"); - } - const auto lambda_v = collision ? 
0 : (y2 - y1) / (x2 - x1); - const auto lambda = this->add_variable(lambda_v); - - // (x2 - x1) * lambda - y2 + y1 = 0 - const auto x2_minus_x1_v = x2 - x1; - const auto x2_minus_x1 = this->add_variable(x2_minus_x1_v); - create_poly_gate({ - .a = in.x2, - .b = in.x1, - .c = x2_minus_x1, - .q_m = 0, - .q_l = 1, - .q_r = -1, - .q_o = -1, - .q_c = 0, - }); - const auto t2_v = lambda_v * x2_minus_x1_v; - const auto t2 = this->add_variable(t2_v); - create_poly_gate({ - .a = lambda, - .b = x2_minus_x1, - .c = t2, - .q_m = 1, - .q_l = 0, - .q_r = 0, - .q_o = -1, - .q_c = 0, - }); - create_poly_gate({ - .a = t2, - .b = in.y2, - .c = in.y1, - .q_m = 0, - .q_l = 1, - .q_r = -sign_coefficient, - .q_o = 1, - .q_c = 0, - }); - - // lambda * lambda - x2 - x1 = x3 - const auto x2_plus_x1_v = x2 + x1; - const auto x2_plus_x1 = this->add_variable(x2_plus_x1_v); - create_poly_gate({ - .a = in.x2, - .b = in.x1, - .c = x2_plus_x1, - .q_m = 0, - .q_l = 1, - .q_r = 1, - .q_o = -1, - .q_c = 0, - }); - const auto lambda_sqr_v = lambda_v * lambda_v; - const auto lambda_sqr = this->add_variable(lambda_sqr_v); - create_poly_gate({ - .a = lambda, - .b = lambda, - .c = lambda_sqr, - .q_m = 1, - .q_l = 0, - .q_r = 0, - .q_o = -1, - .q_c = 0, - }); - create_poly_gate({ - .a = lambda_sqr, - .b = x2_plus_x1, - .c = in.x3, - .q_m = 0, - .q_l = 1, - .q_r = -1, - .q_o = -1, - .q_c = 0, - }); - - // lambda * (x1 - x3) - y1 - y3 = 0 - const auto x1_sub_x3_v = x1 - x3; - const auto x1_sub_x3 = this->add_variable(x1_sub_x3_v); - create_poly_gate({ - .a = in.x1, - .b = in.x3, - .c = x1_sub_x3, - .q_m = 0, - .q_l = 1, - .q_r = -1, - .q_o = -1, - .q_c = 0, - }); - const auto lambda_mul_x1_sub_x3_v = lambda_v * x1_sub_x3_v; - const auto lambda_mul_x1_sub_x3 = this->add_variable(lambda_mul_x1_sub_x3_v); - create_poly_gate({ - .a = lambda, - .b = x1_sub_x3, - .c = lambda_mul_x1_sub_x3, - .q_m = 1, - .q_l = 0, - .q_r = 0, - .q_o = -1, - .q_c = 0, - }); - create_poly_gate({ - .a = lambda_mul_x1_sub_x3, - .b = 
in.y1, - .c = in.y3, - .q_m = 0, - .q_l = 1, - .q_r = -1, - .q_o = -1, - .q_c = 0, - }); -} - -/** - * @brief Create a gate where we validate an elliptic curve point doubling - * (x1, y1) * 2 = (x3, y3) - * @tparam FF - * @param in - */ -template void StandardCircuitBuilder_::create_ecc_dbl_gate(const ecc_dbl_gate_& in) -{ - const auto x1 = this->get_variable(in.x1); - const auto x3 = this->get_variable(in.x3); - const auto y1 = this->get_variable(in.y1); - - // lambda = 3x^2 / 2y - const auto three_x1_sqr_v = x1 * x1 * 3; - const auto three_x1_sqr = this->add_variable(three_x1_sqr_v); - create_poly_gate({ - .a = in.x1, - .b = in.x1, - .c = three_x1_sqr, - .q_m = 3, - .q_l = 0, - .q_r = 0, - .q_o = -1, - .q_c = 0, - }); - const auto lambda_v = three_x1_sqr_v / (y1 + y1); - const auto lambda = this->add_variable(lambda_v); - create_poly_gate({ - .a = lambda, - .b = in.y1, - .c = three_x1_sqr, - .q_m = 2, - .q_l = 0, - .q_r = 0, - .q_o = -1, - .q_c = 0, - }); - - // lambda * lambda - 2x1 - x3 = 0 - const auto lambda_sqr_v = lambda_v * lambda_v; - const auto lambda_sqr = this->add_variable(lambda_sqr_v); - create_poly_gate({ - .a = lambda, - .b = lambda, - .c = lambda_sqr, - .q_m = 1, - .q_l = 0, - .q_r = 0, - .q_o = -1, - .q_c = 0, - }); - create_poly_gate({ - .a = lambda_sqr, - .b = in.x1, - .c = in.x3, - .q_m = 0, - .q_l = 1, - .q_r = -2, - .q_o = -1, - .q_c = 0, - }); - - // lambda * (x1 - x3) - y1 - y3 = 0 - const auto x1_sub_x3_v = x1 - x3; - const auto x1_sub_x3 = this->add_variable(x1_sub_x3_v); - create_poly_gate({ - .a = in.x1, - .b = in.x3, - .c = x1_sub_x3, - .q_m = 0, - .q_l = 1, - .q_r = -1, - .q_o = -1, - .q_c = 0, - }); - const auto lambda_mul_x1_sub_x3_v = lambda_v * x1_sub_x3_v; - const auto lambda_mul_x1_sub_x3 = this->add_variable(lambda_mul_x1_sub_x3_v); - create_poly_gate({ - .a = lambda, - .b = x1_sub_x3, - .c = lambda_mul_x1_sub_x3, - .q_m = 1, - .q_l = 0, - .q_r = 0, - .q_o = -1, - .q_c = 0, - }); - create_poly_gate({ - .a = 
lambda_mul_x1_sub_x3, - .b = in.y1, - .c = in.y3, - .q_m = 0, - .q_l = 1, - .q_r = -1, - .q_o = -1, - .q_c = 0, - }); -} - template std::vector StandardCircuitBuilder_::decompose_into_base4_accumulators(const uint32_t witness_index, const size_t num_bits, diff --git a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/standard_circuit_builder.hpp b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/standard_circuit_builder.hpp index 6e76ad74298..9c97bacd9b0 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/standard_circuit_builder.hpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/standard_circuit_builder.hpp @@ -83,8 +83,6 @@ template class StandardCircuitBuilder_ : public CircuitBuilderBase void create_fixed_group_add_gate_with_init(const fixed_group_add_quad_& in, const fixed_group_init_quad_& init); void create_fixed_group_add_gate_final(const add_quad_& in); - void create_ecc_add_gate(const ecc_add_gate_& in) override; - void create_ecc_dbl_gate(const ecc_dbl_gate_& in) override; fixed_group_add_quad_ previous_add_quad; diff --git a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/turbo_circuit_builder.cpp b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/turbo_circuit_builder.cpp index 8f7372ff727..ba7a5d213ce 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/turbo_circuit_builder.cpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/turbo_circuit_builder.cpp @@ -372,236 +372,6 @@ template void TurboCircuitBuilder_::fix_witness(const uint32_t ++this->num_gates; } -/** - * @brief Create a gate where we validate an elliptic curve point addition - * (x1, y1) + (x2, y2) = (x3, y3) - * N.B. uses incomplete addition formula. 
Use with caution - * @tparam FF - * @param in - */ -template void TurboCircuitBuilder_::create_ecc_add_gate(const ecc_add_gate_& in) -{ - const auto sign_coefficient = in.sign_coefficient; - const auto x1 = this->get_variable(in.x1); - const auto x2 = this->get_variable(in.x2); - const auto x3 = this->get_variable(in.x3); - const auto y1 = this->get_variable(in.y1); - const auto y2 = sign_coefficient * this->get_variable(in.y2); - - bool collision = x2 == x1; - if (collision) { - this->failure("create_ecc_add_gate incomplete formula collision"); - } - const auto lambda_v = collision ? 0 : (y2 - y1) / (x2 - x1); - const auto lambda = this->add_variable(lambda_v); - - // (x2 - x1) * lambda - y2 + y1 = 0 - const auto x2_minus_x1_v = x2 - x1; - const auto x2_minus_x1 = this->add_variable(x2_minus_x1_v); - create_poly_gate({ - .a = in.x2, - .b = in.x1, - .c = x2_minus_x1, - .q_m = 0, - .q_l = 1, - .q_r = -1, - .q_o = -1, - .q_c = 0, - }); - const auto t2_v = lambda_v * x2_minus_x1_v; - const auto t2 = this->add_variable(t2_v); - create_poly_gate({ - .a = lambda, - .b = x2_minus_x1, - .c = t2, - .q_m = 1, - .q_l = 0, - .q_r = 0, - .q_o = -1, - .q_c = 0, - }); - create_poly_gate({ - .a = t2, - .b = in.y2, - .c = in.y1, - .q_m = 0, - .q_l = 1, - .q_r = -sign_coefficient, - .q_o = 1, - .q_c = 0, - }); - - // lambda * lambda - x2 - x1 = x3 - const auto x2_plus_x1_v = x2 + x1; - const auto x2_plus_x1 = this->add_variable(x2_plus_x1_v); - create_poly_gate({ - .a = in.x2, - .b = in.x1, - .c = x2_plus_x1, - .q_m = 0, - .q_l = 1, - .q_r = 1, - .q_o = -1, - .q_c = 0, - }); - const auto lambda_sqr_v = lambda_v * lambda_v; - const auto lambda_sqr = this->add_variable(lambda_sqr_v); - create_poly_gate({ - .a = lambda, - .b = lambda, - .c = lambda_sqr, - .q_m = 1, - .q_l = 0, - .q_r = 0, - .q_o = -1, - .q_c = 0, - }); - create_poly_gate({ - .a = lambda_sqr, - .b = x2_plus_x1, - .c = in.x3, - .q_m = 0, - .q_l = 1, - .q_r = -1, - .q_o = -1, - .q_c = 0, - }); - - // lambda * (x1 - x3) - 
y1 - y3 = 0 - const auto x1_sub_x3_v = x1 - x3; - const auto x1_sub_x3 = this->add_variable(x1_sub_x3_v); - create_poly_gate({ - .a = in.x1, - .b = in.x3, - .c = x1_sub_x3, - .q_m = 0, - .q_l = 1, - .q_r = -1, - .q_o = -1, - .q_c = 0, - }); - const auto lambda_mul_x1_sub_x3_v = lambda_v * x1_sub_x3_v; - const auto lambda_mul_x1_sub_x3 = this->add_variable(lambda_mul_x1_sub_x3_v); - create_poly_gate({ - .a = lambda, - .b = x1_sub_x3, - .c = lambda_mul_x1_sub_x3, - .q_m = 1, - .q_l = 0, - .q_r = 0, - .q_o = -1, - .q_c = 0, - }); - create_poly_gate({ - .a = lambda_mul_x1_sub_x3, - .b = in.y1, - .c = in.y3, - .q_m = 0, - .q_l = 1, - .q_r = -1, - .q_o = -1, - .q_c = 0, - }); -} - -/** - * @brief Create a gate where we validate an elliptic curve point doubling - * (x1, y1) * 2 = (x3, y3) - * @tparam FF - * @param in - */ -template void TurboCircuitBuilder_::create_ecc_dbl_gate(const ecc_dbl_gate_& in) -{ - const auto x1 = this->get_variable(in.x1); - const auto x3 = this->get_variable(in.x3); - const auto y1 = this->get_variable(in.y1); - - // lambda = 3x^2 / 2y - const auto three_x1_sqr_v = x1 * x1 * 3; - const auto three_x1_sqr = this->add_variable(three_x1_sqr_v); - create_poly_gate({ - .a = in.x1, - .b = in.x1, - .c = three_x1_sqr, - .q_m = 3, - .q_l = 0, - .q_r = 0, - .q_o = -1, - .q_c = 0, - }); - const auto lambda_v = three_x1_sqr_v / (y1 + y1); - const auto lambda = this->add_variable(lambda_v); - create_poly_gate({ - .a = lambda, - .b = in.y1, - .c = three_x1_sqr, - .q_m = 2, - .q_l = 0, - .q_r = 0, - .q_o = -1, - .q_c = 0, - }); - - // lambda * lambda - 2x3 - x3 = 0 - const auto lambda_sqr_v = lambda_v * lambda_v; - const auto lambda_sqr = this->add_variable(lambda_sqr_v); - create_poly_gate({ - .a = lambda, - .b = lambda, - .c = lambda_sqr, - .q_m = 1, - .q_l = 0, - .q_r = 0, - .q_o = -1, - .q_c = 0, - }); - create_poly_gate({ - .a = lambda_sqr, - .b = in.x1, - .c = in.x3, - .q_m = 0, - .q_l = 1, - .q_r = -2, - .q_o = -1, - .q_c = 0, - }); - - // lambda * (x1 
- x3) - y1 = 0 - const auto x1_sub_x3_v = x1 - x3; - const auto x1_sub_x3 = this->add_variable(x1_sub_x3_v); - create_poly_gate({ - .a = in.x1, - .b = in.x3, - .c = x1_sub_x3, - .q_m = 0, - .q_l = 1, - .q_r = -1, - .q_o = -1, - .q_c = 0, - }); - const auto lambda_mul_x1_sub_x3_v = lambda_v * x1_sub_x3_v; - const auto lambda_mul_x1_sub_x3 = this->add_variable(lambda_mul_x1_sub_x3_v); - create_poly_gate({ - .a = lambda, - .b = x1_sub_x3, - .c = lambda_mul_x1_sub_x3, - .q_m = 1, - .q_l = 0, - .q_r = 0, - .q_o = -1, - .q_c = 0, - }); - create_poly_gate({ - .a = lambda_mul_x1_sub_x3, - .b = in.y1, - .c = in.y3, - .q_m = 0, - .q_l = 1, - .q_r = -1, - .q_o = -1, - .q_c = 0, - }); -} /** * Create a constraint placing the witness in 2^{num_bits} range. * diff --git a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/turbo_circuit_builder.hpp b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/turbo_circuit_builder.hpp index ca2247b3fb6..7ab81feef2d 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/turbo_circuit_builder.hpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/turbo_circuit_builder.hpp @@ -54,23 +54,20 @@ template class TurboCircuitBuilder_ : public CircuitBuilderBase& in) override; + void create_add_gate(const add_triple_& in); void create_big_add_gate(const add_quad_& in); void create_big_add_gate_with_bit_extraction(const add_quad_& in); void create_big_mul_gate(const mul_quad_& in); void create_balanced_add_gate(const add_quad_& in); - void create_mul_gate(const mul_triple_& in) override; - void create_bool_gate(const uint32_t a) override; - void create_poly_gate(const poly_triple_& in) override; + void create_mul_gate(const mul_triple_& in); + void create_bool_gate(const uint32_t a); + void create_poly_gate(const poly_triple_& in); void create_fixed_group_add_gate(const fixed_group_add_quad_& in); void create_fixed_group_add_gate_with_init(const fixed_group_add_quad_& in, const 
fixed_group_init_quad_& init); void create_fixed_group_add_gate_final(const add_quad_& in); - void create_ecc_add_gate(const ecc_add_gate_& in) override; - void create_ecc_dbl_gate(const ecc_dbl_gate_& in) override; - void fix_witness(const uint32_t witness_index, const FF& witness_value); FF arithmetic_gate_evaluation(const size_t index, const FF alpha_base); @@ -103,7 +100,7 @@ template class TurboCircuitBuilder_ : public CircuitBuilderBase class UltraCircuitBuilder_ : public CircuitBuilderBase& in) override; void create_bool_gate(const uint32_t a) override; void create_poly_gate(const poly_triple_& in) override; - void create_ecc_add_gate(const ecc_add_gate_& in) override; - void create_ecc_dbl_gate(const ecc_dbl_gate_& in) override; + void create_ecc_add_gate(const ecc_add_gate_& in); + void create_ecc_dbl_gate(const ecc_dbl_gate_& in); void fix_witness(const uint32_t witness_index, const FF& witness_value); diff --git a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/types.hpp b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/types.hpp index e7fd4e400ef..8cfbc59d31f 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/types.hpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/types.hpp @@ -3,6 +3,7 @@ #include #include +#include "./fixed_base/fixed_base_params.hpp" #include "barretenberg/ecc/curves/bn254/fr.hpp" namespace plookup { @@ -52,7 +53,11 @@ enum BasicTableId { BLAKE_XOR_ROTATE1, BLAKE_XOR_ROTATE2, BLAKE_XOR_ROTATE4, - PEDERSEN_29_SMALL, + FIXED_BASE_0_0, + FIXED_BASE_1_0 = FIXED_BASE_0_0 + FixedBaseParams::NUM_TABLES_PER_LO_MULTITABLE, + FIXED_BASE_2_0 = FIXED_BASE_1_0 + FixedBaseParams::NUM_TABLES_PER_HI_MULTITABLE, + FIXED_BASE_3_0 = FIXED_BASE_2_0 + FixedBaseParams::NUM_TABLES_PER_LO_MULTITABLE, + PEDERSEN_29_SMALL = FIXED_BASE_3_0 + FixedBaseParams::NUM_TABLES_PER_HI_MULTITABLE, PEDERSEN_28, PEDERSEN_27, PEDERSEN_26, @@ -115,6 +120,10 @@ enum MultiTableId { PEDERSEN_LEFT_LO, 
PEDERSEN_RIGHT_HI, PEDERSEN_RIGHT_LO, + FIXED_BASE_LEFT_LO, + FIXED_BASE_LEFT_HI, + FIXED_BASE_RIGHT_LO, + FIXED_BASE_RIGHT_HI, UINT32_XOR, UINT32_AND, BN254_XLO, diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.hpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.hpp index 9b53fa57788..d21c39a0519 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.hpp @@ -292,8 +292,6 @@ template class field_t { uint32_t get_witness_index() const { return witness_index; } - // std::vector> decompose_into_slices(size_t num_bits = 256, size_t slice_bits = 1) const; - std::vector> decompose_into_bits( size_t num_bits = 256, std::function(ComposerContext* ctx, uint64_t, uint256_t)> get_bit = diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp index f4a18727874..d06dba16100 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp @@ -168,15 +168,33 @@ template void cycle_group::validate_is_on_curve() } /** - * @brief Evaluates a doubling + * @brief Evaluates a doubling. Does not use Ultra double gate * * @tparam Composer * @return cycle_group */ -template cycle_group cycle_group::dbl() const +template +cycle_group cycle_group::dbl() const + requires IsNotUltraArithmetic { - // n.b. if p1 is point at infinity, calling p1.dbl() does not give us an output that satisfies the double gate :o) - // (native code just checks out of the dbl() method if point is at infinity) + auto lambda = (x * x * 3) / (y + y); + auto x3 = lambda.madd(lambda, -x - x); + auto y3 = lambda.madd(x - x3, -y); + return cycle_group(x3, y3, false); +} + +/** + * @brief Evaluates a doubling. 
Uses Ultra double gate + * + * @tparam Composer + * @return cycle_group + */ +template +cycle_group cycle_group::dbl() const + requires IsUltraArithmetic +{ + // n.b. if p1 is point at infinity, calling p1.dbl() does not give us an output that satisfies the double gate + // :o) (native code just checks out of the dbl() method if point is at infinity) auto x1 = x.get_value(); auto y1 = y.get_value(); auto lambda = (x1 * x1 * 3) / (y1 + y1); @@ -209,6 +227,32 @@ template cycle_group cycle_group::dbl() * @brief Will evaluate ECC point addition over `*this` and `other`. * Incomplete addition formula edge cases are *NOT* checked! * Only use this method if you know the x-coordinates of the operands cannot collide + * Standard version that does not use ecc group gate + * + * @tparam Composer + * @param other + * @return cycle_group + */ +template +cycle_group cycle_group::unconditional_add(const cycle_group& other) const + requires IsNotUltraArithmetic +{ + auto x_diff = other.x - x; + auto y_diff = other.y - y; + // unconditional add so do not check divisor is zero + // (this also makes it much easier to test failure cases as this does not segfault!) + auto lambda = y_diff.divide_no_zero_check(x_diff); + auto x3 = lambda.madd(lambda, -other.x - x); + auto y3 = lambda.madd(x - x3, -y); + cycle_group result(x3, y3, false); + return result; +} + +/** + * @brief Will evaluate ECC point addition over `*this` and `other`. + * Incomplete addition formula edge cases are *NOT* checked! 
+ * Only use this method if you know the x-coordinates of the operands cannot collide + * Ultra version that uses ecc group gate * * @tparam Composer * @param other @@ -216,6 +260,7 @@ template cycle_group cycle_group::dbl() */ template cycle_group cycle_group::unconditional_add(const cycle_group& other) const + requires IsUltraArithmetic { auto context = get_context(other); @@ -267,42 +312,46 @@ cycle_group cycle_group::unconditional_add(const cycle_group template cycle_group cycle_group::unconditional_subtract(const cycle_group& other) const { - auto context = get_context(other); + if constexpr (!IS_ULTRA) { + return unconditional_add(-other); + } else { + auto context = get_context(other); - const bool lhs_constant = is_constant(); - const bool rhs_constant = other.is_constant(); + const bool lhs_constant = is_constant(); + const bool rhs_constant = other.is_constant(); - if (lhs_constant && !rhs_constant) { - auto lhs = cycle_group::from_constant_witness(context, get_value()); - return lhs.unconditional_subtract(other); - } - if (!lhs_constant && rhs_constant) { - auto rhs = cycle_group::from_constant_witness(context, other.get_value()); - return unconditional_subtract(rhs); - } - auto p1 = get_value(); - auto p2 = other.get_value(); - AffineElement p3(Element(p1) - Element(p2)); - if (lhs_constant && rhs_constant) { - return cycle_group(p3); - } - field_t r_x(witness_t(context, p3.x)); - field_t r_y(witness_t(context, p3.y)); - cycle_group result(r_x, r_y, false); - - proof_system::ecc_add_gate_ add_gate{ - .x1 = x.get_witness_index(), - .y1 = y.get_witness_index(), - .x2 = other.x.get_witness_index(), - .y2 = other.y.get_witness_index(), - .x3 = result.x.get_witness_index(), - .y3 = result.y.get_witness_index(), - .endomorphism_coefficient = 1, - .sign_coefficient = -1, - }; - context->create_ecc_add_gate(add_gate); + if (lhs_constant && !rhs_constant) { + auto lhs = cycle_group::from_constant_witness(context, get_value()); + return 
lhs.unconditional_subtract(other); + } + if (!lhs_constant && rhs_constant) { + auto rhs = cycle_group::from_constant_witness(context, other.get_value()); + return unconditional_subtract(rhs); + } + auto p1 = get_value(); + auto p2 = other.get_value(); + AffineElement p3(Element(p1) - Element(p2)); + if (lhs_constant && rhs_constant) { + return cycle_group(p3); + } + field_t r_x(witness_t(context, p3.x)); + field_t r_y(witness_t(context, p3.y)); + cycle_group result(r_x, r_y, false); + + proof_system::ecc_add_gate_ add_gate{ + .x1 = x.get_witness_index(), + .y1 = y.get_witness_index(), + .x2 = other.x.get_witness_index(), + .y2 = other.y.get_witness_index(), + .x3 = result.x.get_witness_index(), + .y3 = result.y.get_witness_index(), + .endomorphism_coefficient = 1, + .sign_coefficient = -1, + }; + context->create_ecc_add_gate(add_gate); - return result; + return result; + } } /** @@ -458,6 +507,20 @@ template cycle_group cycle_group::operat return result; } +/** + * @brief Negates a point + * + * @tparam Composer + * @param other + * @return cycle_group + */ +template cycle_group cycle_group::operator-() const +{ + cycle_group result(*this); + result.y = -y; + return result; +} + template cycle_group& cycle_group::operator+=(const cycle_group& other) { *this = *this + other; @@ -835,7 +898,7 @@ typename cycle_group::batch_mul_internal_output cycle_group: const std::span scalars, const std::span base_points, [[maybe_unused]] const std::span off) - requires SupportsLookupTables + requires IsUltraArithmetic { ASSERT(scalars.size() == base_points.size()); @@ -916,7 +979,7 @@ typename cycle_group::batch_mul_internal_output cycle_group: const std::span scalars, const std::span base_points, const std::span offset_generators) - requires DoesNotSupportLookupTables + requires IsNotUltraArithmetic { ASSERT(scalars.size() == base_points.size()); diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp 
b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp index 06261048fa7..d0233650c5e 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp @@ -11,9 +11,9 @@ namespace proof_system::plonk::stdlib { template -concept SupportsLookupTables = (Composer::CIRCUIT_TYPE == CircuitType::ULTRA); +concept IsUltraArithmetic = (Composer::CIRCUIT_TYPE == CircuitType::ULTRA); template -concept DoesNotSupportLookupTables = (Composer::CIRCUIT_TYPE != CircuitType::ULTRA); +concept IsNotUltraArithmetic = (Composer::CIRCUIT_TYPE != CircuitType::ULTRA); /** * @brief cycle_group represents a group Element of the proving system's embedded curve @@ -155,13 +155,20 @@ template class cycle_group { bool_t is_point_at_infinity() const { return _is_infinity; } void set_point_at_infinity(const bool_t& is_infinity) { _is_infinity = is_infinity; } void validate_is_on_curve() const; - cycle_group dbl() const; - cycle_group unconditional_add(const cycle_group& other) const; + cycle_group dbl() const + requires IsUltraArithmetic; + cycle_group dbl() const + requires IsNotUltraArithmetic; + cycle_group unconditional_add(const cycle_group& other) const + requires IsUltraArithmetic; + cycle_group unconditional_add(const cycle_group& other) const + requires IsNotUltraArithmetic; cycle_group unconditional_subtract(const cycle_group& other) const; cycle_group constrained_unconditional_add(const cycle_group& other) const; cycle_group constrained_unconditional_subtract(const cycle_group& other) const; cycle_group operator+(const cycle_group& other) const; cycle_group operator-(const cycle_group& other) const; + cycle_group operator-() const; cycle_group& operator+=(const cycle_group& other); cycle_group& operator-=(const cycle_group& other); static cycle_group batch_mul(const std::vector& scalars, @@ -177,6 +184,7 @@ template class cycle_group { private: bool_t _is_infinity; 
bool _is_constant; + static batch_mul_internal_output _variable_base_batch_mul_internal(std::span scalars, std::span base_points, std::span offset_generators, @@ -185,11 +193,11 @@ template class cycle_group { static batch_mul_internal_output _fixed_base_batch_mul_internal(std::span scalars, std::span base_points, std::span offset_generators) - requires SupportsLookupTables; + requires IsUltraArithmetic; static batch_mul_internal_output _fixed_base_batch_mul_internal(std::span scalars, std::span base_points, std::span offset_generators) - requires DoesNotSupportLookupTables; + requires IsNotUltraArithmetic; }; template From 0db1732a2bf1d917aea3db9813282128eb70bf5a Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Fri, 15 Sep 2023 18:24:43 +0000 Subject: [PATCH 15/50] wip --- .../circuit_builder/circuit_builder_base.hpp | 4 ---- .../circuit_builder/standard_circuit_builder.cpp | 2 -- .../stdlib/hash/pedersen/pedersen_refactor.hpp | 9 +++++---- 3 files changed, 5 insertions(+), 10 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/circuit_builder_base.hpp b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/circuit_builder_base.hpp index cb226fc5255..5422cca43d3 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/circuit_builder_base.hpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/circuit_builder_base.hpp @@ -1,7 +1,6 @@ #pragma once #include "barretenberg/common/slab_allocator.hpp" #include "barretenberg/ecc/curves/bn254/fr.hpp" -#include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp" #include "barretenberg/proof_system/arithmetization/arithmetization.hpp" #include "barretenberg/proof_system/arithmetization/gate_data.hpp" #include "barretenberg/serialize/cbind.hpp" @@ -15,9 +14,6 @@ static constexpr uint32_t DUMMY_TAG = 0; template class CircuitBuilderBase { public: using FF = typename Arithmetization::FF; - using EmbeddedCurve = - std::conditional_t, curve::BN254, 
curve::Grumpkin>; - static constexpr size_t NUM_WIRES = Arithmetization::NUM_WIRES; // Keeping NUM_WIRES, at least temporarily, for backward compatibility static constexpr size_t program_width = Arithmetization::NUM_WIRES; diff --git a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/standard_circuit_builder.cpp b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/standard_circuit_builder.cpp index c97775f9021..285eed8283c 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/standard_circuit_builder.cpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/standard_circuit_builder.cpp @@ -505,8 +505,6 @@ template bool StandardCircuitBuilder_::check_circuit() gate_sum = q_m[i] * left * right + q_1[i] * left + q_2[i] * right + q_3[i] * output + q_c[i]; if (!gate_sum.is_zero()) { info("gate number", i); - info("l, r, o = ", left, ", ", right, ", ", output); - info("wl,wr,wo = ", w_l[i], ", ", w_r[i], ", ", w_o[i]); return false; } } diff --git a/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_refactor.hpp b/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_refactor.hpp index d9c28f3e64a..37c982799d6 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_refactor.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_refactor.hpp @@ -20,10 +20,11 @@ template class pedersen_hash_refactor { using field_t = stdlib::field_t; using point = stdlib::point; using bool_t = stdlib::bool_t; - using EmbeddedCurve = typename ComposerContext::EmbeddedCurve; - // template - // using EmbeddedCurve = - // std::conditional_t, curve::BN254, curve::Grumpkin>; + using EmbeddedCurve = + std::conditional_t, + curve::BN254, + curve::Grumpkin>; + using generator_data = crypto::generator_data; public: From 98e52613e0709991bf7a0c49b8b2c71eb0941981 Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Fri, 15 Sep 2023 21:12:18 +0000 Subject: [PATCH 16/50] PR changes 
Added TODOs for code that needs to be culled post-refactor removed custom MSM algorithms from schnorr.tcc + reduced constraint cost by ~50% --- .../generators/fixed_base_scalar_mul.hpp | 3 + .../crypto/generators/generator_data.cpp | 2 + .../crypto/generators/generator_data.hpp | 2 + .../crypto/generators/generator_data.test.cpp | 2 + .../crypto/pedersen_commitment/c_bind.cpp | 2 + .../crypto/pedersen_commitment/c_bind.hpp | 2 + .../crypto/pedersen_commitment/pedersen.cpp | 2 + .../crypto/pedersen_commitment/pedersen.hpp | 2 + .../pedersen_commitment/pedersen_lookup.cpp | 2 + .../pedersen_commitment/pedersen_lookup.hpp | 2 + .../pedersen_lookup.test.cpp | 2 + .../pedersen_commitment/pedersen_refactor.cpp | 2 + .../pedersen_commitment/pedersen_refactor.hpp | 3 + .../crypto/pedersen_hash/c_bind.cpp | 2 + .../crypto/pedersen_hash/c_bind.hpp | 2 + .../crypto/pedersen_hash/pedersen.cpp | 2 + .../crypto/pedersen_hash/pedersen.hpp | 3 + .../crypto/pedersen_hash/pedersen_lookup.cpp | 2 + .../crypto/pedersen_hash/pedersen_lookup.hpp | 1 + .../pedersen_hash/pedersen_refactor.cpp | 2 + .../pedersen_hash/pedersen_refactor.hpp | 3 + .../barretenberg/crypto/schnorr/schnorr.tcc | 4 +- .../ecc/curves/grumpkin/grumpkin.cpp | 2 + .../ecc/curves/grumpkin/grumpkin.hpp | 2 + .../ecc/curves/secp256k1/secp256k1.cpp | 2 + .../ecc/curves/secp256k1/secp256k1.hpp | 3 + .../ecc/curves/secp256r1/secp256r1.cpp | 2 + .../ecc/curves/secp256r1/secp256r1.hpp | 2 + .../cpp/src/barretenberg/ecc/groups/group.hpp | 2 + .../circuit_builder/circuit_builder_base.hpp | 4 + .../fixed_base/fixed_base_params.hpp | 1 + .../proof_system/plookup_tables/pedersen.hpp | 2 + .../proof_system/plookup_tables/types.hpp | 2 + .../stdlib/encryption/schnorr/schnorr.cpp | 289 ++---------------- .../stdlib/encryption/schnorr/schnorr.hpp | 21 +- .../encryption/schnorr/schnorr.test.cpp | 288 ++++++++--------- .../hash/pedersen/pedersen_refactor.cpp | 7 +- .../hash/pedersen/pedersen_refactor.hpp | 8 +- 
.../stdlib/primitives/group/cycle_group.cpp | 147 +++++++-- .../stdlib/primitives/group/cycle_group.hpp | 42 ++- .../primitives/group/cycle_group.test.cpp | 6 +- .../stdlib/primitives/group/group.hpp | 1 + .../stdlib/primitives/group/group.test.cpp | 2 + 43 files changed, 410 insertions(+), 474 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/crypto/generators/fixed_base_scalar_mul.hpp b/barretenberg/cpp/src/barretenberg/crypto/generators/fixed_base_scalar_mul.hpp index 555b8837f18..1fdd669d778 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/generators/fixed_base_scalar_mul.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/generators/fixed_base_scalar_mul.hpp @@ -1,4 +1,7 @@ #pragma once + +// TODO(@zac-williamson #2341 delete this file once we migrate to new pedersen hash standard) + #include "./generator_data.hpp" #include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp" diff --git a/barretenberg/cpp/src/barretenberg/crypto/generators/generator_data.cpp b/barretenberg/cpp/src/barretenberg/crypto/generators/generator_data.cpp index bcc500653bf..b8910ed897b 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/generators/generator_data.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/generators/generator_data.cpp @@ -1,5 +1,7 @@ #include "./generator_data.hpp" +// TODO(@zac-williamson #2341 delete this file once we migrate to new pedersen hash standard) + namespace crypto { namespace generators { namespace { diff --git a/barretenberg/cpp/src/barretenberg/crypto/generators/generator_data.hpp b/barretenberg/cpp/src/barretenberg/crypto/generators/generator_data.hpp index 34b1d107df7..999b802ccc4 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/generators/generator_data.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/generators/generator_data.hpp @@ -1,4 +1,6 @@ #pragma once + +// TODO(@zac-williamson #2341 delete this file once we migrate to new pedersen hash standard) #include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp" #include 
#include diff --git a/barretenberg/cpp/src/barretenberg/crypto/generators/generator_data.test.cpp b/barretenberg/cpp/src/barretenberg/crypto/generators/generator_data.test.cpp index 8c6257be9f0..45b5b5f461f 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/generators/generator_data.test.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/generators/generator_data.test.cpp @@ -1,3 +1,5 @@ +// TODO(@zac-williamson #2341 delete this file once we migrate to new pedersen hash standard) + #include "./generator_data.hpp" #include "./fixed_base_scalar_mul.hpp" #include "barretenberg/common/streams.hpp" diff --git a/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/c_bind.cpp b/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/c_bind.cpp index 1a174cb64fd..63b9b4834dd 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/c_bind.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/c_bind.cpp @@ -1,3 +1,5 @@ +// TODO(@zac-wiliamson #2341 delete this file and rename c_bind_new to c_bind once we have migrated to new hash standard + #include "c_bind.hpp" #include "barretenberg/common/mem.hpp" #include "barretenberg/common/serialize.hpp" diff --git a/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/c_bind.hpp b/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/c_bind.hpp index 26d5308df70..19b5de4404c 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/c_bind.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/c_bind.hpp @@ -1,3 +1,5 @@ +// TODO(@zac-wiliamson #2341 delete this file and rename c_bind_new to c_bind once we have migrated to new hash standard + #pragma once #include "barretenberg/common/mem.hpp" #include "barretenberg/common/serialize.hpp" diff --git a/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen.cpp b/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen.cpp index ae410af0197..924b3bb4b08 100644 --- 
a/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen.cpp @@ -1,3 +1,5 @@ +// TODO(@zac-wiliamson #2341 delete this file once we migrate to new hash standard + #include "./pedersen.hpp" #include "./convert_buffer_to_field.hpp" #include "barretenberg/common/throw_or_abort.hpp" diff --git a/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen.hpp b/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen.hpp index 3571016ebd7..82493dedc14 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen.hpp @@ -1,3 +1,5 @@ +// TODO(@zac-wiliamson #2341 delete this file once we migrate to new hash standard + #pragma once #include "../generators/fixed_base_scalar_mul.hpp" #include "../generators/generator_data.hpp" diff --git a/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_lookup.cpp b/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_lookup.cpp index 5e6288e8dfa..1310afe8a33 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_lookup.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_lookup.cpp @@ -1,3 +1,5 @@ +// TODO(@zac-wiliamson #2341 delete this file once we migrate to new hash standard + #include "./pedersen_lookup.hpp" #include "../pedersen_hash/pedersen_lookup.hpp" #include "./convert_buffer_to_field.hpp" diff --git a/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_lookup.hpp b/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_lookup.hpp index b77fac9688d..a0c4c50e02c 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_lookup.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_lookup.hpp @@ -1,5 +1,7 @@ #pragma once +// 
TODO(@zac-wiliamson #2341 delete this file once we migrate to new hash standard + #include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp" namespace crypto { diff --git a/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_lookup.test.cpp b/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_lookup.test.cpp index 49ca4825ab1..a83f903953d 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_lookup.test.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_lookup.test.cpp @@ -1,3 +1,5 @@ +// TODO(@zac-wiliamson #2341 delete this file once we migrate to new hash standard + #include "barretenberg/numeric/bitop/get_msb.hpp" #include "barretenberg/numeric/random/engine.hpp" #include diff --git a/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_refactor.cpp b/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_refactor.cpp index 487fc0d7f37..52021bf6e8e 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_refactor.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_refactor.cpp @@ -1,3 +1,5 @@ +// TODO(@zac-wiliamson #2341 rename to pedersen.cpp once we migrate to new hash standard) + #include "./pedersen_refactor.hpp" #include "./convert_buffer_to_field.hpp" #include "barretenberg/common/serialize.hpp" diff --git a/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_refactor.hpp b/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_refactor.hpp index dabb481d5e1..3afc8f8e06c 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_refactor.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_refactor.hpp @@ -1,4 +1,7 @@ #pragma once + +// TODO(@zac-wiliamson #2341 rename to pedersen.hpp once we migrate to new hash standard) + #include "../generators/fixed_base_scalar_mul.hpp" #include 
"../generators/generator_data.hpp" #include "barretenberg/ecc/curves/bn254/bn254.hpp" diff --git a/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/c_bind.cpp b/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/c_bind.cpp index cf6ae337e76..be902124647 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/c_bind.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/c_bind.cpp @@ -1,3 +1,5 @@ +// TODO(@zac-wiliamson #2341 delete this file and rename c_bind_new to c_bind once we have migrated to new hash standard + #include "barretenberg/common/mem.hpp" #include "barretenberg/common/serialize.hpp" #include "barretenberg/common/streams.hpp" diff --git a/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/c_bind.hpp b/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/c_bind.hpp index d9b8c8735f9..ca063950401 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/c_bind.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/c_bind.hpp @@ -1,4 +1,6 @@ #pragma once +// TODO(@zac-wiliamson #2341 delete this file and rename c_bind_new to c_bind once we have migrated to new hash standard + #include "barretenberg/common/wasm_export.hpp" #include "barretenberg/ecc/curves/bn254/fr.hpp" diff --git a/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen.cpp b/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen.cpp index 6debd1b9ff3..ca3797cc16d 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen.cpp @@ -1,3 +1,5 @@ +// TODO(@zac-wiliamson #2341 delete this file once we migrate to new hash standard + #include "./pedersen.hpp" #include #ifndef NO_OMP_MULTITHREADING diff --git a/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen.hpp b/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen.hpp index 40bdfc7ff8d..1cedec07b4a 100644 --- 
a/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen.hpp @@ -1,4 +1,7 @@ #pragma once + +// TODO(@zac-wiliamson #2341 delete this file once we migrate to new hash standard + #include "../generators/fixed_base_scalar_mul.hpp" #include "../generators/generator_data.hpp" #include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp" diff --git a/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen_lookup.cpp b/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen_lookup.cpp index 980b41a2259..3c1cc5eb835 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen_lookup.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen_lookup.cpp @@ -1,3 +1,5 @@ +// TODO(@zac-wiliamson #2341 delete this file once we migrate to new hash standard + #include "./pedersen_lookup.hpp" #include diff --git a/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen_lookup.hpp b/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen_lookup.hpp index 9a019a8547c..5e390776d90 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen_lookup.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen_lookup.hpp @@ -1,4 +1,5 @@ #pragma once +// TODO(@zac-wiliamson #2341 delete this file once we migrate to new hash standard #include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp" diff --git a/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen_refactor.cpp b/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen_refactor.cpp index 1c55ba37207..681b12f64e2 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen_refactor.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen_refactor.cpp @@ -4,6 +4,8 @@ #include #endif +// TODO(@zac-wiliamson #2341 rename to pedersen.cpp once we migrate to new hash standard) + namespace crypto { using namespace 
generators; diff --git a/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen_refactor.hpp b/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen_refactor.hpp index 06c94ee6264..0e8628d3b06 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen_refactor.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen_refactor.hpp @@ -1,4 +1,7 @@ #pragma once + +// TODO(@zac-wiliamson #2341 rename to pedersen.hpp once we migrate to new hash standard) + #include "../generators/fixed_base_scalar_mul.hpp" #include "../generators/generator_data.hpp" #include "../pedersen_commitment/pedersen_refactor.hpp" diff --git a/barretenberg/cpp/src/barretenberg/crypto/schnorr/schnorr.tcc b/barretenberg/cpp/src/barretenberg/crypto/schnorr/schnorr.tcc index 6984479398e..e8200a45e82 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/schnorr/schnorr.tcc +++ b/barretenberg/cpp/src/barretenberg/crypto/schnorr/schnorr.tcc @@ -1,7 +1,7 @@ #pragma once #include "barretenberg/crypto/hmac/hmac.hpp" -#include "barretenberg/crypto/pedersen_commitment/pedersen.hpp" +#include "barretenberg/crypto/pedersen_hash/pedersen_refactor.hpp" #include "schnorr.hpp" @@ -43,7 +43,7 @@ static auto generate_schnorr_challenge(const std::string& message, { using Fq = typename G1::coordinate_field; // create challenge message pedersen_commitment(R.x, pubkey) - Fq compressed_keys = crypto::pedersen_commitment::compress_native({ R.x, pubkey.x, pubkey.y }); + Fq compressed_keys = crypto::pedersen_hash_refactor::hash({ R.x, pubkey.x, pubkey.y }); std::vector e_buffer; write(e_buffer, compressed_keys); std::copy(message.begin(), message.end(), std::back_inserter(e_buffer)); diff --git a/barretenberg/cpp/src/barretenberg/ecc/curves/grumpkin/grumpkin.cpp b/barretenberg/cpp/src/barretenberg/ecc/curves/grumpkin/grumpkin.cpp index caa7f871fbc..d49057edda5 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/curves/grumpkin/grumpkin.cpp +++ 
b/barretenberg/cpp/src/barretenberg/ecc/curves/grumpkin/grumpkin.cpp @@ -10,6 +10,8 @@ static std::array generators; static bool init_generators = false; } // namespace +// TODO(@zac-wiliamson #2341 remove this method once we migrate to new hash standard (derive_generators_secure is +// curve-agnostic) g1::affine_element get_generator(const size_t generator_index) { if (!init_generators) { diff --git a/barretenberg/cpp/src/barretenberg/ecc/curves/grumpkin/grumpkin.hpp b/barretenberg/cpp/src/barretenberg/ecc/curves/grumpkin/grumpkin.hpp index 0bad58a8d51..b351be77359 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/curves/grumpkin/grumpkin.hpp +++ b/barretenberg/cpp/src/barretenberg/ecc/curves/grumpkin/grumpkin.hpp @@ -30,6 +30,8 @@ struct GrumpkinG1Params { }; using g1 = barretenberg::group; +// TODO(@zac-wiliamson #2341 remove this method once we migrate to new hash standard (derive_generators_secure is +// curve-agnostic) g1::affine_element get_generator(size_t generator_index); }; // namespace grumpkin diff --git a/barretenberg/cpp/src/barretenberg/ecc/curves/secp256k1/secp256k1.cpp b/barretenberg/cpp/src/barretenberg/ecc/curves/secp256k1/secp256k1.cpp index b199208cec9..6c3f7366c2d 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/curves/secp256k1/secp256k1.cpp +++ b/barretenberg/cpp/src/barretenberg/ecc/curves/secp256k1/secp256k1.cpp @@ -13,6 +13,8 @@ static bool init_generators = false; /* In case where prime bit length is 256, the method produces a generator, but only with one less bit of randomness than the maximum possible, as the y coordinate in that case is determined by the x-coordinate. 
*/ +// TODO(@zac-wiliamson #2341 remove this method once we migrate to new hash standard (derive_generators_secure is +// curve-agnostic) g1::affine_element get_generator(const size_t generator_index) { if (!init_generators) { diff --git a/barretenberg/cpp/src/barretenberg/ecc/curves/secp256k1/secp256k1.hpp b/barretenberg/cpp/src/barretenberg/ecc/curves/secp256k1/secp256k1.hpp index a2de49cd4c9..aa245e32f6e 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/curves/secp256k1/secp256k1.hpp +++ b/barretenberg/cpp/src/barretenberg/ecc/curves/secp256k1/secp256k1.hpp @@ -120,6 +120,9 @@ struct Secp256k1G1Params { using g1 = barretenberg:: group, barretenberg::field, Secp256k1G1Params>; + +// TODO(@zac-wiliamson #2341 remove this method once we migrate to new hash standard (derive_generators_secure is +// curve-agnostic) g1::affine_element get_generator(size_t generator_index); } // namespace secp256k1 diff --git a/barretenberg/cpp/src/barretenberg/ecc/curves/secp256r1/secp256r1.cpp b/barretenberg/cpp/src/barretenberg/ecc/curves/secp256r1/secp256r1.cpp index 061bbd2c2fd..f5409d30436 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/curves/secp256r1/secp256r1.cpp +++ b/barretenberg/cpp/src/barretenberg/ecc/curves/secp256r1/secp256r1.cpp @@ -13,6 +13,8 @@ static bool init_generators = false; /* In case where prime bit length is 256, the method produces a generator, but only with one less bit of randomness than the maximum possible, as the y coordinate in that case is determined by the x-coordinate. 
*/ +// TODO(@zac-wiliamson #2341 remove this method once we migrate to new hash standard (derive_generators_secure is +// curve-agnostic) g1::affine_element get_generator(const size_t generator_index) { if (!init_generators) { diff --git a/barretenberg/cpp/src/barretenberg/ecc/curves/secp256r1/secp256r1.hpp b/barretenberg/cpp/src/barretenberg/ecc/curves/secp256r1/secp256r1.hpp index e7bf6422c95..653fa457435 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/curves/secp256r1/secp256r1.hpp +++ b/barretenberg/cpp/src/barretenberg/ecc/curves/secp256r1/secp256r1.hpp @@ -106,6 +106,8 @@ struct Secp256r1G1Params { using g1 = barretenberg:: group, barretenberg::field, Secp256r1G1Params>; +// TODO(@zac-wiliamson #2341 remove this method once we migrate to new hash standard (derive_generators_secure is +// curve-agnostic) g1::affine_element get_generator(size_t generator_index); } // namespace secp256r1 diff --git a/barretenberg/cpp/src/barretenberg/ecc/groups/group.hpp b/barretenberg/cpp/src/barretenberg/ecc/groups/group.hpp index 7cad218cf13..846b2fda55d 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/groups/group.hpp +++ b/barretenberg/cpp/src/barretenberg/ecc/groups/group.hpp @@ -46,6 +46,8 @@ template static inline auto derive_generators() { std::array generators; diff --git a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/circuit_builder_base.hpp b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/circuit_builder_base.hpp index 5422cca43d3..cb226fc5255 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/circuit_builder_base.hpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/circuit_builder_base.hpp @@ -1,6 +1,7 @@ #pragma once #include "barretenberg/common/slab_allocator.hpp" #include "barretenberg/ecc/curves/bn254/fr.hpp" +#include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp" #include "barretenberg/proof_system/arithmetization/arithmetization.hpp" #include 
"barretenberg/proof_system/arithmetization/gate_data.hpp" #include "barretenberg/serialize/cbind.hpp" @@ -14,6 +15,9 @@ static constexpr uint32_t DUMMY_TAG = 0; template class CircuitBuilderBase { public: using FF = typename Arithmetization::FF; + using EmbeddedCurve = + std::conditional_t, curve::BN254, curve::Grumpkin>; + static constexpr size_t NUM_WIRES = Arithmetization::NUM_WIRES; // Keeping NUM_WIRES, at least temporarily, for backward compatibility static constexpr size_t program_width = Arithmetization::NUM_WIRES; diff --git a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/fixed_base/fixed_base_params.hpp b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/fixed_base/fixed_base_params.hpp index b70233de735..c6f44c09346 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/fixed_base/fixed_base_params.hpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/fixed_base/fixed_base_params.hpp @@ -1,5 +1,6 @@ #pragma once +#include "barretenberg/plonk/proof_system/constants.hpp" #include #include #include diff --git a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/pedersen.hpp b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/pedersen.hpp index e9f2e5490c2..0bafaead4d0 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/pedersen.hpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/pedersen.hpp @@ -1,5 +1,7 @@ #pragma once +// TODO(@zac-wiliamson #2341 delete this file once we migrate to new hash standard + #include "./types.hpp" #include "barretenberg/crypto/pedersen_hash/pedersen_lookup.hpp" diff --git a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/types.hpp b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/types.hpp index 8cfbc59d31f..514cebd6d02 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/types.hpp +++ 
b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/types.hpp @@ -57,6 +57,7 @@ enum BasicTableId { FIXED_BASE_1_0 = FIXED_BASE_0_0 + FixedBaseParams::NUM_TABLES_PER_LO_MULTITABLE, FIXED_BASE_2_0 = FIXED_BASE_1_0 + FixedBaseParams::NUM_TABLES_PER_HI_MULTITABLE, FIXED_BASE_3_0 = FIXED_BASE_2_0 + FixedBaseParams::NUM_TABLES_PER_LO_MULTITABLE, + // TODO(@zac-wiliamson #2341 remove PEDERSEN basic tables) PEDERSEN_29_SMALL = FIXED_BASE_3_0 + FixedBaseParams::NUM_TABLES_PER_HI_MULTITABLE, PEDERSEN_28, PEDERSEN_27, @@ -116,6 +117,7 @@ enum MultiTableId { AES_NORMALIZE, AES_INPUT, AES_SBOX, + // TODO(@zac-wiliamson #2341 remove PEDERSEN_LEFT/RIGHT/HI/LO) PEDERSEN_LEFT_HI, PEDERSEN_LEFT_LO, PEDERSEN_RIGHT_HI, diff --git a/barretenberg/cpp/src/barretenberg/stdlib/encryption/schnorr/schnorr.cpp b/barretenberg/cpp/src/barretenberg/stdlib/encryption/schnorr/schnorr.cpp index 52f7e698d4f..0466e0acb73 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/encryption/schnorr/schnorr.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/encryption/schnorr/schnorr.cpp @@ -1,69 +1,13 @@ #include "schnorr.hpp" #include "barretenberg/crypto/pedersen_commitment/pedersen.hpp" #include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp" -#include "barretenberg/stdlib/commitment/pedersen/pedersen.hpp" #include "barretenberg/stdlib/hash/blake2s/blake2s.hpp" +#include "barretenberg/stdlib/hash/pedersen/pedersen_refactor.hpp" +#include "barretenberg/stdlib/primitives/bigfield/bigfield.hpp" +#include "barretenberg/stdlib/primitives/group/cycle_group.hpp" #include -namespace proof_system::plonk { -namespace stdlib { -namespace schnorr { - -/** - * @brief Expand a 128-bits integer in a form amenable to doing elliptic curve arithmetic in circuits. - * - * @details The output wnaf_record records the expansion coefficients - * limb % 129 = 2^128 + 2^127 w_1 + ... + 2 w_127 + w_128 - skew - * where each w_i lies in {-1, 1} and skew is 0 or 1. 
The boolean `skew` could also be called `is_even`; the even - * 129-bit non-negative integers are those with skew == 1, while the odd ones have skew==0. - * - * @warning While it is possible to express any 129-bit value in this form, this function only works correctly - * on 128-bit values, since the same is true for fixed_wnaf<129, 1, 1>. This is illusrated in the tests. - * - * - * TurboPLONK: ~260 gates. - */ -template wnaf_record convert_field_into_wnaf(C* context, const field_t& limb) -{ - constexpr size_t num_wnaf_bits = 129; - uint256_t value = limb.get_value(); - - bool skew = false; - uint64_t wnaf_entries[129] = { 0 }; - - // compute wnaf representation of value natively - barretenberg::wnaf::fixed_wnaf(&value.data[0], &wnaf_entries[0], skew, 0); - - std::vector> wnaf_bits; - bool_t wnaf_skew(witness_t(context, skew)); - field_t two(context, 2); - field_t one(context, 1); - field_t accumulator(context, 1); - - // set accumulator = 2^{128} + \sum_{i=0}^{127} 2^i w_{128-i}, where w_i = 2 * wnaf_entries[i+1] - 1 - for (size_t i = 0; i < 128; ++i) { - // accumulator = 2 * accumulator + 1 (resp. -1) if the 32nd bit of wnaf_entries[i+1] is 0 (resp. 1). 
- - // extract sign bit of wnaf_entries[i+1] (32nd entry in list of bits) - uint64_t predicate = (wnaf_entries[i + 1] >> 31U) & 1U; - // type of !predicate below is bool - bool_t wnaf_bit = witness_t(context, !predicate); - wnaf_bits.push_back(wnaf_bit); - - // !predicate == false ~> -1; true ~> +1 - accumulator = accumulator + accumulator; - accumulator = accumulator + (field_t(wnaf_bit) * two - one); - } - - // subtract 1 from accumulator if there is skew - accumulator = accumulator - field_t(wnaf_skew); - - accumulator.assert_equal(limb); - wnaf_record result; - result.bits = wnaf_bits; - result.skew = wnaf_skew; - return result; -} +namespace proof_system::plonk::stdlib::schnorr { /** * @brief Instantiate a witness containing the signature (s, e) as a quadruple of @@ -71,205 +15,26 @@ template wnaf_record convert_field_into_wnaf(C* context, const f */ template signature_bits convert_signature(C* context, const crypto::schnorr::signature& signature) { - signature_bits sig{ - field_t(), - field_t(), - field_t(), - field_t(), - }; + using cycle_scalar = typename cycle_group::cycle_scalar; uint256_t s_bigint(0); uint256_t e_bigint(0); - - for (size_t i = 0; i < 32; ++i) { - for (size_t j = 7; j < 8; --j) { - uint8_t s_shift = static_cast(signature.s[i] >> j); - uint8_t e_shift = static_cast(signature.e[i] >> j); - bool s_bit = (s_shift & 1U) == 1U; - bool e_bit = (e_shift & 1U) == 1U; - s_bigint += s_bigint; - e_bigint += e_bigint; - - s_bigint += static_cast(s_bit); - e_bigint += static_cast(e_bit); - } - } - - sig.s_lo = witness_t(context, s_bigint.slice(0, 128)); - sig.s_hi = witness_t(context, s_bigint.slice(128, 256)); - sig.e_lo = witness_t(context, e_bigint.slice(0, 128)); - sig.e_hi = witness_t(context, e_bigint.slice(128, 256)); - + const uint8_t* s_ptr = &signature.s[0]; + const uint8_t* e_ptr = &signature.e[0]; + numeric::read(s_ptr, s_bigint); + numeric::read(e_ptr, e_bigint); + signature_bits sig{ .s = cycle_scalar::from_witness_bitstring(context, 
s_bigint, 256), + .e = cycle_scalar::from_witness_bitstring(context, e_bigint, 256) }; return sig; } -/** - * @brief Compute [(low_bits + 2^128 high_bits)]pub_key. - * - * @details This method cannot handle the case where either of low_bits, high_bits is zero. - * This assumption is backed by a constraint (see the tests for an illustration). - */ -template -point variable_base_mul(const point& pub_key, const field_t& low_bits, const field_t& high_bits) -{ - C* context = pub_key.x.context; - - // N.B. this method does not currently work if low_bits == 0 or high_bits == 0 - field_t zero_test = (low_bits * high_bits); - zero_test.assert_is_not_zero(); - - const auto low_wnaf = stdlib::schnorr::convert_field_into_wnaf(context, low_bits); - const auto high_wnaf = stdlib::schnorr::convert_field_into_wnaf(context, high_bits); - // current_accumulator is pub_key, so init is true, so high_output is [high_wnaf]pub_key - point high_output = stdlib::schnorr::variable_base_mul(pub_key, pub_key, high_wnaf); - // compute output = [low_wnaf]pub_key + [2^128]high_output. - point output = stdlib::schnorr::variable_base_mul(pub_key, high_output, low_wnaf); - return output; -} - -/** - * @brief Multiply a point of Grumpkin by a scalar described as a wnaf record, possibly offsetting by another point. - * - * @param pub_key A point of Grumpkin known to the prover in terms of the generator grumpkin::g1::one. - * @param current_accumulator A point of the curve that will remain unchanged. - * @param wnaf A wnaf_record, a collection of bool_t's typically recording an expansion of an element of - * field_t in the form 2^{128} + 2^{127} w_1 + ... + 2 w_127 + w_128 - skew. - * - * @details Let W be the scalar represented by wnaf. If pub_key = ± current_accumulator, this function returns - * [W]pub_key. Otherwise, it returns [W]pub_key + [2^128]current_accumulator. These two cases are distinguished - * between a boolean `init`. 
The idea here is that, if `pub_key==±current_accumulator`, then the function is being - * called for the first time. - * - * @warning This function should not be used on its own, as its security depends on the manner in which it is - * expected to be used. - */ -template -point variable_base_mul(const point& pub_key, const point& current_accumulator, const wnaf_record& wnaf) -{ - // Check if the pub_key is a points on the curve. - pub_key.on_curve(); - - // The account circuit constrains `pub_key` to lie on Grumpkin. Presently, the only values that are passed in the - // second argument as `current_accumulator` are `pub_key` and a point which is the output of the present function. - // We therefore assume that `current_accumulator` lies on Grumpkin as well. - grumpkin::g1::affine_element pub_key_native(pub_key.x.get_value(), pub_key.y.get_value()); - grumpkin::g1::affine_element current_accumulator_native(current_accumulator.x.get_value(), - current_accumulator.y.get_value()); - - field_t two(pub_key.x.context, 2); - - // Various elliptic curve point additions that follow assume that the two points are distinct and not mutually - // inverse. collision_offset is chosen to prevent a malicious prover from exploiting this assumption. - grumpkin::g1::affine_element collision_offset = crypto::generators::get_generator_data(DEFAULT_GEN_1).generator; - grumpkin::g1::affine_element collision_end = collision_offset * grumpkin::fr(uint256_t(1) << 129); - - const bool init = current_accumulator.x.get_value() == pub_key.x.get_value(); - - // if init == true, check pub_key != collision_offset (ruling out 3 other points at the same time), - // if init == false we assume this has already been checked in an earlier call wherein init==true. 
- if (init) { - field_t zero_test = ((pub_key.x - collision_offset.x) * (pub_key.y - collision_offset.y)); - zero_test.assert_is_not_zero("pub_key and collision_offset have a coordinate in common."); - } else { - // Check if the current_accumulator is a point on the curve only if init is false. - current_accumulator.on_curve(); - } - - point accumulator{ collision_offset.x, collision_offset.y }; - - /* - * Let w_i = 2 wnaf.bits[i-1] - 1 for i = 1, ..., 128. - * The integer represented by the digits w_i and a skew bit `skew` in {0, 1} is - * W := 2^{128} + 2^{127} w_1 + ... + 2 w_127 + w_128 - skew - * = 2^{128} + \sum_{k=0}^{127}2^{k}w_{128-k} - skew. - * When init == true, the for loop that follows sets - * accumulator = [W+skew]pub_key + [2^{129}]collision_offset - * When init == false, the for loop that follows sets - * accumulator = [W+skew]pub_key + [2^{129}]collision_offset + [2^{128}]current_accumulator. - * We describe the accumulation process in the loop. - * - * Defining w_{-1} = 0, W_{0} = 1, and W_{i+1} = 2 W_{i} + w_i for i = 1, ..., 128, we have - * W_1 = 2 + w_0 - * W_2 = 4 + 2 w_0 + w_1 - * W_i = 2^i + 2^{i-1} w_0 + ... + 2 w_{i-2} + w_{i-1} - * W_128 = W + skew - * - * Let A_0 = collision_offset. For i = 0, ..., 127, let - * A_{i+1} = 2^{i+1} collision_offset + [W_{i}]pub_key and A'_{i+1} = A_{i+1} + [2^{i}]current_accumulator. - * Suppose we are at the end of the loop with loop variable i. - * - If `init==true`, then the value of `accumulator` is A_{i+i}. - * - If `init==false`, then the value of `accumulator` is A'_{i+1}. - * In both cases, setting the final accumulator value is that claimed above. - * - * Note that all divisons are safe, i.e., failing contsraints will be imposed if any denominator is zero. - */ - for (size_t i = 0; i < 129; ++i) { - if (!init && i == 1) { - // set accumulator = accumulator + current_accumulator. 
- field_t x1 = accumulator.x; - field_t y1 = accumulator.y; - - field_t x2 = current_accumulator.x; - field_t y2 = current_accumulator.y; - - field_t lambda1 = (y2 - y1) / (x2 - x1); - field_t x3 = lambda1.madd(lambda1, -(x2 + x1)); - field_t y3 = lambda1.madd((x1 - x3), -y1); - accumulator.x = x3; - accumulator.y = y3; - } - - // if i == 0: set accumulator = [2]accumulator + pub_key - // otherwise, set accumulator = [2]accumulator + [w_i]pub_key. - - // // Set P_3 = accumulator + pub_key or P_3 = accumulator - pub_key, depending on the current wnaf bit. - - field_t x1 = accumulator.x; - field_t y1 = accumulator.y; - - field_t x2 = (i == 0) ? pub_key.x : pub_key.x; - field_t y2 = (i == 0) ? pub_key.y : pub_key.y.madd(field_t(wnaf.bits[i - 1]) * two, -pub_key.y); - field_t lambda1 = (y2 - y1) / (x2 - x1); - field_t x3 = lambda1.madd(lambda1, -(x2 + x1)); - - // // Set P_4 = P_3 + accumulator. - // // We save gates by not using the formula lambda2 = (y3 - y1) / (x3 - x1), which would require computing - // // y_3. Instead we use another formula for lambda2 derived using the substitution y3 = lambda1(x1 - x3) - y1. - field_t lambda2 = -lambda1 - (y1 * two) / (x3 - x1); - field_t x4 = lambda2.madd(lambda2, -(x3 + x1)); - field_t y4 = lambda2.madd(x1 - x4, -y1); - - accumulator.x = x4; - accumulator.y = y4; - } - - // At this point, accumulator is [W + skew]pub + [2^{129}]collision_mask. - // If wnaf_skew, subtract pub_key frorm accumulator. - field_t add_lambda = (accumulator.y + pub_key.y) / (accumulator.x - pub_key.x); - field_t x_add = add_lambda.madd(add_lambda, -(accumulator.x + pub_key.x)); - field_t y_add = add_lambda.madd((pub_key.x - x_add), pub_key.y); - bool_t add_predicate = wnaf.skew; - accumulator.x = ((x_add - accumulator.x).madd(field_t(add_predicate), accumulator.x)); - accumulator.y = ((y_add - accumulator.y).madd(field_t(add_predicate), accumulator.y)); - - // subtract [2^{129}]collision_offset from accumulator. 
- point collision_mask{ collision_end.x, -collision_end.y }; - - field_t lambda = (accumulator.y - collision_mask.y) / (accumulator.x - collision_mask.x); - field_t x3 = lambda.madd(lambda, -(collision_mask.x + accumulator.x)); - field_t y3 = lambda.madd(collision_mask.x - x3, -collision_mask.y); - - accumulator.x = x3; - accumulator.y = y3; - return accumulator; -} - /** * @brief Make the computations needed to verify a signature (s, e), i.e., compute * e' = hash(([s]g + [e]pub).x | message) and return e'. * - * @details TurboPlonk: ~10850 gates (~4k for variable_base_mul, ~6k for blake2s) for a string of length < 32. + * @details UltraPlonk: ~5318 gates, excluding gates required to init the UltraPlonk range check + * (~1.5k for fixed/variable_base_mul (hmm high), ~4k for blake2s) for a string of length = 34. */ template std::array, 2> verify_signature_internal(const byte_array& message, @@ -277,9 +42,12 @@ std::array, 2> verify_signature_internal(const byte_array& message const signature_bits& sig) { // Compute [s]g, where s = (s_lo, s_hi) and g = G1::one. 
- point R_1 = group::fixed_base_scalar_mul(sig.s_lo, sig.s_hi); + cycle_group g1(grumpkin::g1::one); + auto R_1 = g1 * sig.s; + // Compute [e]pub, where e = (e_lo, e_hi) - point R_2 = variable_base_mul(pub_key, sig.e_lo, sig.e_hi); + cycle_group key(pub_key.x, pub_key.y, false); + auto R_2 = key * sig.e; // check R_1 != R_2 (R_1.x - R_2.x).assert_is_not_zero("Cannot add points in Schnorr verification."); @@ -289,14 +57,15 @@ std::array, 2> verify_signature_internal(const byte_array& message // build input (pedersen(([s]g + [e]pub).x | pub.x | pub.y) | message) to hash function // pedersen hash ([r].x | pub.x) to make sure the size of `hash_input` is <= 64 bytes for a 32 byte message - byte_array hash_input(stdlib::pedersen_commitment::compress({ x_3, pub_key.x, pub_key.y })); + byte_array hash_input(stdlib::pedersen_hash_refactor::hash({ x_3, key.x, key.y })); hash_input.write(message); // compute e' = hash(([s]g + [e]pub).x | message) byte_array output = blake2s(hash_input); - - field_t output_hi(output.slice(0, 16)); - field_t output_lo(output.slice(16, 16)); + static constexpr size_t LO_BYTES = cycle_group::cycle_scalar::LO_BITS / 8; + static constexpr size_t HI_BYTES = 32 - LO_BYTES; + field_t output_hi(output.slice(0, LO_BYTES)); + field_t output_lo(output.slice(LO_BYTES, HI_BYTES)); return { output_lo, output_hi }; } @@ -311,8 +80,8 @@ template void verify_signature(const byte_array& message, const point& pub_key, const signature_bits& sig) { auto [output_lo, output_hi] = verify_signature_internal(message, pub_key, sig); - output_lo.assert_equal(sig.e_lo, "verify signature failed"); - output_hi.assert_equal(sig.e_hi, "verify signature failed"); + output_lo.assert_equal(sig.e.lo, "verify signature failed"); + output_hi.assert_equal(sig.e.hi, "verify signature failed"); } /** @@ -326,16 +95,12 @@ bool_t signature_verification_result(const byte_array& message, const signature_bits& sig) { auto [output_lo, output_hi] = verify_signature_internal(message, pub_key, 
sig); - bool_t valid = (output_lo == sig.e_lo) && (output_hi == sig.e_hi); + bool_t valid = (output_lo == sig.e.lo) && (output_hi == sig.e.hi); return valid; } -INSTANTIATE_STDLIB_METHOD(VARIABLE_BASE_MUL) -INSTANTIATE_STDLIB_METHOD(CONVERT_FIELD_INTO_WNAF) INSTANTIATE_STDLIB_METHOD(VERIFY_SIGNATURE_INTERNAL) INSTANTIATE_STDLIB_METHOD(VERIFY_SIGNATURE) INSTANTIATE_STDLIB_METHOD(SIGNATURE_VERIFICATION_RESULT) INSTANTIATE_STDLIB_METHOD(CONVERT_SIGNATURE) -} // namespace schnorr -} // namespace stdlib -} // namespace proof_system::plonk +} // namespace proof_system::plonk::stdlib::schnorr diff --git a/barretenberg/cpp/src/barretenberg/stdlib/encryption/schnorr/schnorr.hpp b/barretenberg/cpp/src/barretenberg/stdlib/encryption/schnorr/schnorr.hpp index 082799a8ac6..fdff62ea9c0 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/encryption/schnorr/schnorr.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/encryption/schnorr/schnorr.hpp @@ -6,30 +6,17 @@ #include "../../primitives/point/point.hpp" #include "../../primitives/witness/witness.hpp" #include "barretenberg/crypto/schnorr/schnorr.hpp" +#include "barretenberg/stdlib/primitives/group/cycle_group.hpp" namespace proof_system::plonk { namespace stdlib { namespace schnorr { template struct signature_bits { - field_t s_lo; - field_t s_hi; - field_t e_lo; - field_t e_hi; + typename cycle_group::cycle_scalar s; + typename cycle_group::cycle_scalar e; }; -template struct wnaf_record { - std::vector> bits; - bool_t skew; -}; - -template wnaf_record convert_field_into_wnaf(C* context, const field_t& limb); - -template -point variable_base_mul(const point& pub_key, const point& current_accumulator, const wnaf_record& scalar); -template -point variable_base_mul(const point& pub_key, const field_t& low_bits, const field_t& high_bits); - template signature_bits convert_signature(C* context, const crypto::schnorr::signature& sig); template @@ -68,8 +55,6 @@ bool_t signature_verification_result(const byte_array& message, 
#define CONVERT_SIGNATURE(circuit_type) \ signature_bits convert_signature(circuit_type*, const crypto::schnorr::signature&) -EXTERN_STDLIB_METHOD(VARIABLE_BASE_MUL) -EXTERN_STDLIB_METHOD(CONVERT_FIELD_INTO_WNAF) EXTERN_STDLIB_METHOD(VERIFY_SIGNATURE_INTERNAL) EXTERN_STDLIB_METHOD(VERIFY_SIGNATURE) EXTERN_STDLIB_METHOD(SIGNATURE_VERIFICATION_RESULT) diff --git a/barretenberg/cpp/src/barretenberg/stdlib/encryption/schnorr/schnorr.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/encryption/schnorr/schnorr.test.cpp index 9e7110e844c..cfd7f6ca6fc 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/encryption/schnorr/schnorr.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/encryption/schnorr/schnorr.test.cpp @@ -19,162 +19,162 @@ using field_ct = field_t; using point_ct = point; using witness_ct = witness_t; -auto run_scalar_mul_test = [](grumpkin::fr scalar_mont, bool expect_verify) { - Composer composer = Composer(); +// auto run_scalar_mul_test = [](grumpkin::fr scalar_mont, bool expect_verify) { +// Composer composer = Composer(); - grumpkin::fr scalar = scalar_mont.from_montgomery_form(); +// grumpkin::fr scalar = scalar_mont.from_montgomery_form(); - uint256_t scalar_low{ scalar.data[0], scalar.data[1], 0ULL, 0ULL }; - uint256_t scalar_high{ scalar.data[2], scalar.data[3], 0ULL, 0ULL }; +// uint256_t scalar_low{ scalar.data[0], scalar.data[1], 0ULL, 0ULL }; +// uint256_t scalar_high{ scalar.data[2], scalar.data[3], 0ULL, 0ULL }; - field_ct input_lo = witness_ct(&composer, scalar_low); - field_ct input_hi = witness_ct(&composer, scalar_high); +// field_ct input_lo = witness_ct(&composer, scalar_low); +// field_ct input_hi = witness_ct(&composer, scalar_high); - grumpkin::g1::element expected = grumpkin::g1::one * scalar_mont; - expected = expected.normalize(); - point_ct point_input{ witness_ct(&composer, grumpkin::g1::affine_one.x), - witness_ct(&composer, grumpkin::g1::affine_one.y) }; +// grumpkin::g1::element expected = grumpkin::g1::one * 
scalar_mont; +// expected = expected.normalize(); +// point_ct point_input{ witness_ct(&composer, grumpkin::g1::affine_one.x), +// witness_ct(&composer, grumpkin::g1::affine_one.y) }; - point_ct output = variable_base_mul(point_input, input_lo, input_hi); +// point_ct output = variable_base_mul(point_input, input_lo, input_hi); - if (expect_verify) { - EXPECT_EQ(output.x.get_value(), expected.x); - EXPECT_EQ(output.y.get_value(), expected.y); - }; +// if (expect_verify) { +// EXPECT_EQ(output.x.get_value(), expected.x); +// EXPECT_EQ(output.y.get_value(), expected.y); +// }; - info("composer gates = ", composer.get_num_gates()); +// info("composer gates = ", composer.get_num_gates()); - bool result = composer.check_circuit(); - EXPECT_EQ(result, expect_verify); -}; +// bool result = composer.check_circuit(); +// EXPECT_EQ(result, expect_verify); +// }; -typedef wnaf_record wnaf_record_ct; +// typedef wnaf_record wnaf_record_ct; /** * @brief Helper function to compare wnaf_records, useful since == on bool_ct's returns a bool_ct. */ -bool compare_records(wnaf_record_ct a, wnaf_record_ct b) -{ - bool result = a.skew.witness_bool == b.skew.witness_bool; - if (result) { - for (size_t i = 0; i != a.bits.size(); ++i) { - bool a_bit = a.bits[i].witness_bool; - bool b_bit = b.bits[i].witness_bool; - result = result == false ? false : a_bit == b_bit; - } - } - return result; -} - -TEST(stdlib_schnorr, convert_field_into_wnaf_special) -{ - Composer composer = Composer(); - - // the wnaf_record ((b_1, ... b_128), skew) corresponding to the 129-bit non-negative value - // is, 2^128 + 2^127 w_1 + ... + 2 w_127 + w_128 - skew, where w_i = 1 if b_i is true, else -1.. - // We make some auxiliary wnaf records that will be helpful. 
- std::vector false128(128, false); - wnaf_record_ct all_false({ .bits = false128, .skew = false }); - - std::vector true128(128, true); - wnaf_record_ct all_true({ .bits = true128, .skew = true }); - - // establish a list of special values to be converted to a wnaf_record - std::vector special_values({ 1, - 0, - (static_cast(1) << 128) - 1, - (static_cast(1) << 128) + 1, - (static_cast(1) << 128), - (static_cast(1) << 129) - 1 }); - - size_t num_special_values(special_values.size()); - - // convert these values to field elements - std::vector special_field_elts(num_special_values); - for (size_t i = 0; i != num_special_values; ++i) { - field_ct a(special_values[i]); - special_field_elts[i] = a; - }; - - // manually build the expected wnaf records - // 1 is given by ((false, ..., false), false) - auto record_1 = all_false; - - // 0 is given by ((false, ..., false), true) - auto record_0 = all_false; - record_0.skew = true; - - // 2^128 - 1 = 2^128 - 2^127 + (2^127 - 1) - 0 is given by((false, true, ..., true), false) - auto record_128_minus_1 = all_true; - record_128_minus_1.bits[0] = false; - record_128_minus_1.skew = false; - - // 2^128 + 1 = 2^128 + (2^127 - (2^127 - 1)) - 0 is given by((true, false, false, ..., false), false) - auto record_128_plus_1 = all_false; - record_128_plus_1.bits[0] = true; - - // 2^128 = 2^128 + (2^127 - (2^127 - 1)) - 1 is given by((true, false, false, ..., false), true) - auto record_128 = all_false; - record_128.bits[0] = true; - record_128.skew = true; - - // // 2^129-1 = 2^128 + 2^127 + ... + 1 - 0 should be given by ((true, true, ..., true), false). - // Note: fixed_wnaf<129, 1, 1>, used inside of convert_field_into_wnaf, incorrectly computes the the coefficient - // of - // 2^127 in the wnaf representation of to be -1. 
- auto record_max = all_true; - record_max.skew = false; - - std::vector expected_wnaf_records( - { record_1, record_0, record_128_minus_1, record_128_plus_1, record_128, record_max }); - - // integers less than 2^128 are converted correctly - for (size_t i = 0; i != num_special_values; ++i) { - field_ct elt = special_field_elts[i]; - wnaf_record_ct record = convert_field_into_wnaf(&composer, elt); - wnaf_record_ct expected_record = expected_wnaf_records[i]; - bool records_equal = compare_records(record, expected_record); - ASSERT_TRUE(records_equal); - ASSERT_FALSE(composer.failed()); - } -} - -TEST(stdlib_schnorr, convert_field_into_wnaf) -{ - Composer composer = Composer(); - - grumpkin::fq scalar_mont = grumpkin::fq::random_element(); - grumpkin::fq scalar = scalar_mont.from_montgomery_form(); - - // our wnaf records only represent 128 bits, so we test by generating a field - // element and then truncating. - scalar.data[2] = 0ULL; - scalar.data[3] = 0ULL; - - scalar = scalar.to_montgomery_form(); - - field_ct input(&composer, scalar); - convert_field_into_wnaf(&composer, input); - - info("composer gates = ", composer.get_num_gates()); - - bool result = composer.check_circuit(); - EXPECT_EQ(result, true); -} - -/** - * @brief Test variable_base_mul(const point& pub_key, - * const field_t& low_bits, - * const field_t& high_bits) - * by taking a random field Fr element s, computing the corresponding Grumpkin G1 element both natively - * and using the function in question (splitting s into 128-bit halves), then comparing the results. 
- */ -TEST(stdlib_schnorr, test_scalar_mul_low_high) -{ - run_scalar_mul_test(grumpkin::fr::random_element(), true); - run_scalar_mul_test(grumpkin::fr(static_cast(1) << 128), false); - run_scalar_mul_test(0, false); -} +// bool compare_records(wnaf_record_ct a, wnaf_record_ct b) +// { +// bool result = a.skew.witness_bool == b.skew.witness_bool; +// if (result) { +// for (size_t i = 0; i != a.bits.size(); ++i) { +// bool a_bit = a.bits[i].witness_bool; +// bool b_bit = b.bits[i].witness_bool; +// result = result == false ? false : a_bit == b_bit; +// } +// } +// return result; +// } + +// TEST(stdlib_schnorr, convert_field_into_wnaf_special) +// { +// Composer composer = Composer(); + +// // the wnaf_record ((b_1, ... b_128), skew) corresponding to the 129-bit non-negative value +// // is, 2^128 + 2^127 w_1 + ... + 2 w_127 + w_128 - skew, where w_i = 1 if b_i is true, else -1.. +// // We make some auxiliary wnaf records that will be helpful. +// std::vector false128(128, false); +// wnaf_record_ct all_false({ .bits = false128, .skew = false }); + +// std::vector true128(128, true); +// wnaf_record_ct all_true({ .bits = true128, .skew = true }); + +// // establish a list of special values to be converted to a wnaf_record +// std::vector special_values({ 1, +// 0, +// (static_cast(1) << 128) - 1, +// (static_cast(1) << 128) + 1, +// (static_cast(1) << 128), +// (static_cast(1) << 129) - 1 }); + +// size_t num_special_values(special_values.size()); + +// // convert these values to field elements +// std::vector special_field_elts(num_special_values); +// for (size_t i = 0; i != num_special_values; ++i) { +// field_ct a(special_values[i]); +// special_field_elts[i] = a; +// }; + +// // manually build the expected wnaf records +// // 1 is given by ((false, ..., false), false) +// auto record_1 = all_false; + +// // 0 is given by ((false, ..., false), true) +// auto record_0 = all_false; +// record_0.skew = true; + +// // 2^128 - 1 = 2^128 - 2^127 + (2^127 - 1) - 0 is 
given by((false, true, ..., true), false) +// auto record_128_minus_1 = all_true; +// record_128_minus_1.bits[0] = false; +// record_128_minus_1.skew = false; + +// // 2^128 + 1 = 2^128 + (2^127 - (2^127 - 1)) - 0 is given by((true, false, false, ..., false), false) +// auto record_128_plus_1 = all_false; +// record_128_plus_1.bits[0] = true; + +// // 2^128 = 2^128 + (2^127 - (2^127 - 1)) - 1 is given by((true, false, false, ..., false), true) +// auto record_128 = all_false; +// record_128.bits[0] = true; +// record_128.skew = true; + +// // // 2^129-1 = 2^128 + 2^127 + ... + 1 - 0 should be given by ((true, true, ..., true), false). +// // Note: fixed_wnaf<129, 1, 1>, used inside of convert_field_into_wnaf, incorrectly computes the the coefficient +// // of +// // 2^127 in the wnaf representation of to be -1. +// auto record_max = all_true; +// record_max.skew = false; + +// std::vector expected_wnaf_records( +// { record_1, record_0, record_128_minus_1, record_128_plus_1, record_128, record_max }); + +// // integers less than 2^128 are converted correctly +// for (size_t i = 0; i != num_special_values; ++i) { +// field_ct elt = special_field_elts[i]; +// wnaf_record_ct record = convert_field_into_wnaf(&composer, elt); +// wnaf_record_ct expected_record = expected_wnaf_records[i]; +// bool records_equal = compare_records(record, expected_record); +// ASSERT_TRUE(records_equal); +// ASSERT_FALSE(composer.failed()); +// } +// } + +// TEST(stdlib_schnorr, convert_field_into_wnaf) +// { +// Composer composer = Composer(); + +// grumpkin::fq scalar_mont = grumpkin::fq::random_element(); +// grumpkin::fq scalar = scalar_mont.from_montgomery_form(); + +// // our wnaf records only represent 128 bits, so we test by generating a field +// // element and then truncating. 
+// scalar.data[2] = 0ULL; +// scalar.data[3] = 0ULL; + +// scalar = scalar.to_montgomery_form(); + +// field_ct input(&composer, scalar); +// convert_field_into_wnaf(&composer, input); + +// info("composer gates = ", composer.get_num_gates()); + +// bool result = composer.check_circuit(); +// EXPECT_EQ(result, true); +// } + +// /** +// * @brief Test variable_base_mul(const point& pub_key, +// * const field_t& low_bits, +// * const field_t& high_bits) +// * by taking a random field Fr element s, computing the corresponding Grumpkin G1 element both natively +// * and using the function in question (splitting s into 128-bit halves), then comparing the results. +// */ +// TEST(stdlib_schnorr, test_scalar_mul_low_high) +// { +// run_scalar_mul_test(grumpkin::fr::random_element(), true); +// run_scalar_mul_test(grumpkin::fr(static_cast(1) << 128), false); +// run_scalar_mul_test(0, false); +// } /** * @test Test circuit verifying a Schnorr signature generated by \see{crypto::schnorr::verify_signature}. 
@@ -202,6 +202,8 @@ TEST(stdlib_schnorr, verify_signature) message_string, account.public_key, signature); EXPECT_EQ(first_result, true); + field_ct foo(witness_ct(&composer, 100)); + foo.create_range_constraint(256); point_ct pub_key{ witness_ct(&composer, account.public_key.x), witness_ct(&composer, account.public_key.y) }; signature_bits sig = convert_signature(&composer, signature); byte_array_ct message(&composer, message_string); diff --git a/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_refactor.cpp b/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_refactor.cpp index 000ce66d3ad..ea387251323 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_refactor.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_refactor.cpp @@ -1,6 +1,5 @@ #include "pedersen_refactor.hpp" #include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp" -#include "barretenberg/stdlib/primitives/group/cycle_group.hpp" namespace proof_system::plonk::stdlib { using namespace barretenberg; @@ -16,16 +15,16 @@ field_t pedersen_hash_refactor::hash_multiple(const std::vector& using cycle_group = cycle_group; using cycle_scalar = typename cycle_group::cycle_scalar; - using Curve = typename C::EmbeddedCurve; + using Curve = EmbeddedCurve; auto base_points = generator_context->conditional_extend(inputs.size() + hash_index).generators; std::vector scalars; std::vector points; - scalars.emplace_back(field_t(inputs.size())); + scalars.emplace_back(cycle_scalar::create_from_bn254_scalar(field_t(inputs.size()))); points.emplace_back(crypto::pedersen_hash_refactor::get_length_generator()); for (size_t i = 0; i < inputs.size(); ++i) { - scalars.emplace_back(inputs[i]); + scalars.emplace_back(cycle_scalar::create_from_bn254_scalar(inputs[i])); // constructs constant cycle_group objects (non-witness) points.emplace_back(base_points[i + hash_index]); } diff --git 
a/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_refactor.hpp b/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_refactor.hpp index 37c982799d6..30727dcee46 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_refactor.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen_refactor.hpp @@ -2,6 +2,7 @@ #include "../../primitives/field/field.hpp" #include "../../primitives/point/point.hpp" #include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp" +#include "barretenberg/stdlib/primitives/group/cycle_group.hpp" #include "../../primitives/circuit_builders/circuit_builders.hpp" @@ -20,17 +21,12 @@ template class pedersen_hash_refactor { using field_t = stdlib::field_t; using point = stdlib::point; using bool_t = stdlib::bool_t; - using EmbeddedCurve = - std::conditional_t, - curve::BN254, - curve::Grumpkin>; - + using EmbeddedCurve = typename cycle_group::Curve; using generator_data = crypto::generator_data; public: // TODO(@suyash67) as part of refactor project, can we remove this and replace with `hash` // (i.e. simplify the name as we no longer have a need for `hash_single`) - // TODO update to new interface static field_t hash_multiple(const std::vector& in, size_t hash_index = 0, const generator_data* generator_context = generator_data::get_default_generators(), diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp index d06dba16100..c0c469c6407 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp @@ -576,13 +576,64 @@ typename cycle_group::cycle_scalar cycle_group::cycle_scalar return cycle_scalar(lo, hi); } +/** + * @brief Use when we want to multiply a group element by a string of bits of known size. + * N.B. 
using this constructor method will make our scalar multiplication methods not perform primality tests. + * + * @tparam Composer + * @param context + * @param value + * @param num_bits + * @return cycle_group::cycle_scalar + */ +template +typename cycle_group::cycle_scalar cycle_group::cycle_scalar::from_witness_bitstring( + Composer* context, const uint256_t& bitstring, const size_t num_bits) +{ + ASSERT(bitstring.get_msb() < num_bits); + const uint256_t lo_v = bitstring.slice(0, LO_BITS); + const uint256_t hi_v = bitstring.slice(LO_BITS, HI_BITS); + field_t lo = witness_t(context, lo_v); + field_t hi = witness_t(context, hi_v); + cycle_scalar result{ lo, hi, num_bits, true, false }; + return result; +} + +/** + * @brief Use when we want to multiply a group element by a string of bits of known size. + * N.B. using this constructor method will make our scalar multiplication methods not perform primality tests. + * + * @tparam Composer + * @param context + * @param value + * @param num_bits + * @return cycle_group::cycle_scalar + */ +template +typename cycle_group::cycle_scalar cycle_group::cycle_scalar::create_from_bn254_scalar( + const field_t& in) +{ + const uint256_t value_u256(in.get_value()); + const uint256_t lo_v = value_u256.slice(0, LO_BITS); + const uint256_t hi_v = value_u256.slice(LO_BITS, HI_BITS); + if (in.is_constant()) { + cycle_scalar result{ field_t(lo_v), field_t(hi_v), NUM_BITS, false, true }; + return result; + } + field_t lo = witness_t(in.get_context(), lo_v); + field_t hi = witness_t(in.get_context(), hi_v); + lo.add_two(hi * (uint256_t(1) << LO_BITS), -in).assert_equal(0); + cycle_scalar result{ lo, hi, NUM_BITS, false, true }; + return result; +} + template bool cycle_group::cycle_scalar::is_constant() const { return (lo.is_constant() && hi.is_constant()); } template -typename cycle_group::cycle_scalar::ScalarField cycle_group::cycle_scalar::get_value() const +typename cycle_group::ScalarField cycle_group::cycle_scalar::get_value() const { 
uint256_t lo_v(lo.get_value()); uint256_t hi_v(hi.get_value()); @@ -612,6 +663,9 @@ cycle_group::straus_scalar_slice::straus_scalar_slice(Composer* contex // this also performs an implicit range check on the input slices const auto slice_scalar = [&](const field_t& scalar, const size_t num_bits) { std::vector result; + if (num_bits == 0) { + return result; + } if (scalar.is_constant()) { const size_t num_slices = (num_bits + table_bits - 1) / table_bits; const uint64_t table_mask = (1ULL << table_bits) - 1ULL; @@ -657,14 +711,17 @@ cycle_group::straus_scalar_slice::straus_scalar_slice(Composer* contex return result; }; - auto hi_slices = slice_scalar(scalar.hi, cycle_scalar::HI_BITS); - auto lo_slices = slice_scalar(scalar.lo, cycle_scalar::LO_BITS); + const size_t lo_bits = scalar.num_bits() > cycle_scalar::LO_BITS ? cycle_scalar::LO_BITS : scalar.num_bits(); + const size_t hi_bits = scalar.num_bits() > cycle_scalar::LO_BITS ? scalar.num_bits() - cycle_scalar::LO_BITS : 0; + auto hi_slices = slice_scalar(scalar.hi, hi_bits); + auto lo_slices = slice_scalar(scalar.lo, lo_bits); - if (!scalar.is_constant()) { + if (!scalar.is_constant() && !scalar.skip_primality_test()) { // Check that scalar.hi * 2^LO_BITS + scalar.lo < cycle_group_modulus when evaluated over the integers - constexpr uint256_t cycle_group_modulus = cycle_scalar::ScalarField::modulus; - constexpr uint256_t r_lo = cycle_group_modulus.slice(0, cycle_scalar::LO_BITS); - constexpr uint256_t r_hi = cycle_group_modulus.slice(cycle_scalar::LO_BITS, cycle_scalar::HI_BITS); + const uint256_t cycle_group_modulus = + scalar.use_bn254_scalar_field_for_primality_test() ? FF::modulus : ScalarField::modulus; + const uint256_t r_lo = cycle_group_modulus.slice(0, cycle_scalar::LO_BITS); + const uint256_t r_hi = cycle_group_modulus.slice(cycle_scalar::LO_BITS, cycle_scalar::HI_BITS); bool need_borrow = uint256_t(scalar.lo.get_value()) > r_lo; field_t borrow = scalar.lo.is_constant() ? 
need_borrow : field_t::from_witness(context, need_borrow); @@ -699,9 +756,13 @@ cycle_group::straus_scalar_slice::straus_scalar_slice(Composer* contex * @param index * @return field_t */ -template field_t cycle_group::straus_scalar_slice::read(size_t index) +template +std::optional> cycle_group::straus_scalar_slice::read(size_t index) { - ASSERT(slices.size() > index); + + if (index >= slices.size()) { + return {}; + } return slices[index]; } @@ -843,6 +904,12 @@ typename cycle_group::batch_mul_internal_output cycle_group: } } + size_t num_bits = 0; + for (auto& s : scalars) { + num_bits = std::max(num_bits, s.num_bits()); + } + size_t num_rounds = (num_bits + TABLE_BITS - 1) / TABLE_BITS; + const size_t num_points = scalars.size(); std::vector scalar_slices; @@ -855,7 +922,7 @@ typename cycle_group::batch_mul_internal_output cycle_group: Element offset_generator_accumulator = offset_generators[0]; cycle_group accumulator = offset_generators[0]; - for (size_t i = 0; i < NUM_ROUNDS; ++i) { + for (size_t i = 0; i < num_rounds; ++i) { if (i != 0) { for (size_t j = 0; j < TABLE_BITS; ++j) { // offset_generator_accuulator is a regular Element, so dbl() won't add constraints @@ -865,11 +932,15 @@ typename cycle_group::batch_mul_internal_output cycle_group: } for (size_t j = 0; j < num_points; ++j) { - const field_t scalar_slice = scalar_slices[j].read(NUM_ROUNDS - i - 1); - const cycle_group point = point_tables[j].read(scalar_slice); - accumulator = unconditional_add ? 
accumulator.unconditional_add(point) - : accumulator.constrained_unconditional_add(point); - offset_generator_accumulator = offset_generator_accumulator + Element(offset_generators[j + 1]); + const std::optional scalar_slice = scalar_slices[j].read(num_rounds - i - 1); + // if we are doing a batch mul over scalars of different bit-lengths, we may not have any scalar bits for a + // given round and a given scalar + if (scalar_slice.has_value()) { + const cycle_group point = point_tables[j].read(scalar_slice.value()); + accumulator = unconditional_add ? accumulator.unconditional_add(point) + : accumulator.constrained_unconditional_add(point); + offset_generator_accumulator = offset_generator_accumulator + Element(offset_generators[j + 1]); + } } } @@ -993,6 +1064,11 @@ typename cycle_group::batch_mul_internal_output cycle_group: } } + size_t num_bits = 0; + for (auto& s : scalars) { + num_bits = std::max(num_bits, s.num_bits()); + } + size_t num_rounds = (num_bits + TABLE_BITS - 1) / TABLE_BITS; // core algorithm // define a `table_bits` size lookup table const size_t num_points = scalars.size(); @@ -1003,33 +1079,37 @@ typename cycle_group::batch_mul_internal_output cycle_group: // creating these point tables should cost 0 constraints if base points are constant for (size_t i = 0; i < num_points; ++i) { - std::vector round_points(NUM_ROUNDS); - std::vector round_offset_generators(NUM_ROUNDS); + std::vector round_points(num_rounds); + std::vector round_offset_generators(num_rounds); round_points[0] = base_points[i]; round_offset_generators[0] = offset_generators[i + 1]; - for (size_t j = 1; j < NUM_ROUNDS; ++j) { + for (size_t j = 1; j < num_rounds; ++j) { round_points[j] = round_points[j - 1].dbl(); round_offset_generators[j] = round_offset_generators[j - 1].dbl(); } - Element::batch_normalize(&round_points[0], NUM_ROUNDS); - Element::batch_normalize(&round_offset_generators[0], NUM_ROUNDS); - point_tables[i].resize(NUM_ROUNDS); - for (size_t j = 0; j < NUM_ROUNDS; 
++j) { + Element::batch_normalize(&round_points[0], num_rounds); + Element::batch_normalize(&round_offset_generators[0], num_rounds); + point_tables[i].resize(num_rounds); + for (size_t j = 0; j < num_rounds; ++j) { point_tables[i][j] = straus_lookup_table( context, cycle_group(round_points[j]), cycle_group(round_offset_generators[j]), TABLE_BITS); } scalar_slices.emplace_back(straus_scalar_slice(context, scalars[i], TABLE_BITS)); } Element offset_generator_accumulator = offset_generators[0]; - cycle_group accumulator = cycle_group(Element(offset_generators[0]) * (uint256_t(1) << (NUM_ROUNDS - 1))); - for (size_t i = 0; i < NUM_ROUNDS; ++i) { + cycle_group accumulator = cycle_group(Element(offset_generators[0]) * (uint256_t(1) << (num_rounds - 1))); + for (size_t i = 0; i < num_rounds; ++i) { offset_generator_accumulator = (i > 0) ? offset_generator_accumulator.dbl() : offset_generator_accumulator; for (size_t j = 0; j < num_points; ++j) { auto& point_table = point_tables[j][i]; - const field_t scalar_slice = scalar_slices[j].read(i); - const cycle_group point = point_table.read(scalar_slice); - accumulator = accumulator.unconditional_add(point); - offset_generator_accumulator = offset_generator_accumulator + Element(offset_generators[j + 1]); + const std::optional scalar_slice = scalar_slices[j].read(i); + // if we are doing a batch mul over scalars of different bit-lengths, we may not have any scalar bits for a + // given round and a given scalar + if (scalar_slice.has_value()) { + const cycle_group point = point_table.read(scalar_slice.value()); + accumulator = accumulator.unconditional_add(point); + offset_generator_accumulator = offset_generator_accumulator + Element(offset_generators[j + 1]); + } } } @@ -1086,6 +1166,14 @@ cycle_group cycle_group::batch_mul(const std::vector fixed_base_scalars; std::vector fixed_base_points; + size_t num_bits = 0; + for (auto& s : scalars) { + num_bits = std::max(num_bits, s.num_bits()); + } + + // if num_bits > NUM_BITS, 
skip lookup-version of fixed-base scalar mul. too much complexity + bool num_bits_exceeds_lookup_table_size = num_bits > NUM_BITS; + // When calling `_variable_base_batch_mul_internal`, we can unconditionally add iff all of the input points // are fixed-base points // (i.e. we are ULTRA Composer and we are doing fixed-base mul over points not present in our plookup tables) @@ -1103,7 +1191,8 @@ cycle_group cycle_group::batch_mul(const std::vector namespace proof_system::plonk::stdlib { @@ -35,12 +36,13 @@ template class cycle_group { using Element = typename Curve::Element; using AffineElement = typename Curve::AffineElement; using generator_data = crypto::generator_data; + using ScalarField = typename Curve::ScalarField; static constexpr size_t STANDARD_NUM_TABLE_BITS = 1; static constexpr size_t ULTRA_NUM_TABLE_BITS = 4; static constexpr bool IS_ULTRA = Composer::CIRCUIT_TYPE == CircuitType::ULTRA; static constexpr size_t TABLE_BITS = IS_ULTRA ? ULTRA_NUM_TABLE_BITS : STANDARD_NUM_TABLE_BITS; - static constexpr size_t NUM_BITS = FF::modulus.get_msb() + 1; + static constexpr size_t NUM_BITS = ScalarField::modulus.get_msb() + 1; static constexpr size_t NUM_ROUNDS = (NUM_BITS + TABLE_BITS - 1) / TABLE_BITS; inline static const std::string OFFSET_GENERATOR_DOMAIN_SEPARATOR = "cycle_group_offset_generator"; @@ -66,18 +68,44 @@ template class cycle_group { * free from the `batch_mul` algorithm, making the range checks performed by `bigfield` largely redundant. */ struct cycle_scalar { - using ScalarField = typename Curve::ScalarField; - static constexpr size_t LO_BITS = 128; - static constexpr size_t HI_BITS = ScalarField::modulus.get_msb() + 1 - LO_BITS; + static constexpr size_t LO_BITS = plookup::FixedBaseParams::BITS_PER_LO_SCALAR; + static constexpr size_t HI_BITS = NUM_BITS - LO_BITS; + field_t lo; + field_t hi; + + private: + size_t _num_bits = NUM_BITS; + bool _skip_primality_test = false; + // if our scalar multiplier is a bn254 FF scalar (e.g. 
pedersen hash), + // we want to validate the cycle_scalar < bn254::fr::modulus *not* grumpkin::fr::modulus + bool _use_bn254_scalar_field_for_primality_test = false; + + public: + cycle_scalar(const field_t& _lo, + const field_t& _hi, + const size_t bits, + const bool skip_primality_test, + const bool use_bn254_scalar_field_for_primality_test) + : lo(_lo) + , hi(_hi) + , _num_bits(bits) + , _skip_primality_test(skip_primality_test) + , _use_bn254_scalar_field_for_primality_test(use_bn254_scalar_field_for_primality_test){}; cycle_scalar(const ScalarField& _in = 0); cycle_scalar(const field_t& _lo, const field_t& _hi); cycle_scalar(const field_t& _in); static cycle_scalar from_witness(Composer* context, const ScalarField& value); + static cycle_scalar from_witness_bitstring(Composer* context, const uint256_t& bitstring, size_t num_bits); + static cycle_scalar create_from_bn254_scalar(const field_t& _in); [[nodiscard]] bool is_constant() const; ScalarField get_value() const; Composer* get_context() const { return lo.get_context() != nullptr ? 
lo.get_context() : hi.get_context(); } - field_t lo; - field_t hi; + [[nodiscard]] size_t num_bits() const { return _num_bits; } + [[nodiscard]] bool skip_primality_test() const { return _skip_primality_test; } + [[nodiscard]] bool use_bn254_scalar_field_for_primality_test() const + { + return _use_bn254_scalar_field_for_primality_test; + } }; /** @@ -87,7 +115,7 @@ template class cycle_group { */ struct straus_scalar_slice { straus_scalar_slice(Composer* context, const cycle_scalar& scalars, size_t table_bits); - field_t read(size_t index); + std::optional read(size_t index); size_t _table_bits; std::vector slices; }; diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp index 170eb59ee99..e2da96f6c92 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp @@ -9,7 +9,7 @@ #define STDLIB_TYPE_ALIASES \ using Composer = TypeParam; \ using cycle_group_ct = stdlib::cycle_group; \ - using Curve = typename Composer::EmbeddedCurve; \ + using Curve = typename stdlib::cycle_group::Curve; \ using Element = typename Curve::Element; \ using AffineElement = typename Curve::AffineElement; \ using Group = typename Curve::Group; \ @@ -28,8 +28,8 @@ auto& engine = numeric::random::get_debug_engine(); template class CycleGroupTest : public ::testing::Test { public: - using Curve = typename Composer::EmbeddedCurve; - using Group = typename Composer::EmbeddedCurve::Group; + using Curve = typename stdlib::cycle_group::Curve; + using Group = typename Curve::Group; using Element = typename Curve::Element; using AffineElement = typename Curve::AffineElement; diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/group.hpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/group.hpp index 90b666f9572..96ad284c3f8 100644 --- 
a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/group.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/group.hpp @@ -1,5 +1,6 @@ #pragma once +// TODO(@zac-williamson #2341 delete this file and rename cycle_group to group once we migrate to new hash standard) #include "../field/field.hpp" #include "barretenberg/crypto/pedersen_commitment/pedersen.hpp" #include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp" diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/group.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/group.test.cpp index 5861877d051..58c8f41ae3c 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/group.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/group.test.cpp @@ -1,3 +1,5 @@ +// TODO(@zac-williamson #2341 delete this file and once we migrate to new hash standard) + #include "barretenberg/stdlib/primitives/group/group.hpp" #include "barretenberg/numeric/random/engine.hpp" #include "barretenberg/stdlib/primitives/field/field.hpp" From af2a6ac97bb7b7427addc560ff9714a1ceaf6012 Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Fri, 15 Sep 2023 21:56:51 +0000 Subject: [PATCH 17/50] fixed ecc_dbl gates incorrectly fusing into ecc_add gates --- .../circuit_builder/ultra_circuit_builder.cpp | 68 ++++++-- .../circuit_builder/ultra_circuit_builder.hpp | 4 +- .../stdlib/encryption/schnorr/schnorr.cpp | 20 +-- .../encryption/schnorr/schnorr.test.cpp | 159 ------------------ .../stdlib/primitives/group/cycle_group.cpp | 31 +++- 5 files changed, 89 insertions(+), 193 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.cpp b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.cpp index 9ddf2f5a6b7..85dd86a8104 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.cpp +++ 
b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.cpp @@ -420,7 +420,6 @@ template void UltraCircuitBuilder_::create_ecc_add_gate(const can_fuse_into_previous_gate = can_fuse_into_previous_gate && (q_arith[this->num_gates - 1] == 0); if (can_fuse_into_previous_gate) { - q_3[this->num_gates - 1] = in.endomorphism_coefficient; q_4[this->num_gates - 1] = in.endomorphism_coefficient.sqr(); q_1[this->num_gates - 1] = in.sign_coefficient; @@ -476,13 +475,46 @@ template void UltraCircuitBuilder_::create_ecc_add_gate(const */ template void UltraCircuitBuilder_::create_ecc_dbl_gate(const ecc_dbl_gate_& in) { + /** + * gate structure: + * | 1 | 2 | 3 | 4 | + * | - | x1 | y1 | - | + * | - | x3 | y3 | - | + * we can chain an ecc_add_gate + an ecc_dbl_gate if x3 y3 of previous add_gate equals x1 y1 of current gate + * can also chain double gates together + **/ + bool can_fuse_into_previous_gate = true; + can_fuse_into_previous_gate = can_fuse_into_previous_gate && (w_r[this->num_gates - 1] == in.x1); + can_fuse_into_previous_gate = can_fuse_into_previous_gate && (w_o[this->num_gates - 1] == in.y1); + // q_elliptic_double.emplace_back(1); - w_l.emplace_back(in.x1); - w_4.emplace_back(in.y1); + if (can_fuse_into_previous_gate) { + q_elliptic_double[this->num_gates - 1] = 1; + } else { + w_r.emplace_back(in.x1); + w_o.emplace_back(in.y1); + w_l.emplace_back(this->zero_idx); + w_4.emplace_back(this->zero_idx); + q_elliptic_double.emplace_back(1); + q_m.emplace_back(0); + q_1.emplace_back(0); + q_2.emplace_back(0); + q_3.emplace_back(0); + q_c.emplace_back(0); + q_arith.emplace_back(0); + q_4.emplace_back(0); + q_sort.emplace_back(0); + q_lookup_type.emplace_back(0); + q_elliptic.emplace_back(0); + q_aux.emplace_back(0); + ++this->num_gates; + } + w_r.emplace_back(in.x3); w_o.emplace_back(in.y3); - - q_elliptic_double.emplace_back(1); + w_l.emplace_back(this->zero_idx); + w_4.emplace_back(this->zero_idx); + q_elliptic_double.emplace_back(0); 
q_m.emplace_back(0); q_1.emplace_back(0); q_2.emplace_back(0); @@ -3402,15 +3434,20 @@ inline FF UltraCircuitBuilder_::compute_auxilary_identity(FF q_aux_value, * @return fr */ template -inline FF UltraCircuitBuilder_::compute_elliptic_double_identity( - FF q_elliptic_double_value, FF w_1_value, FF w_2_value, FF w_3_value, FF w_4_value, FF alpha_base, FF alpha) const +inline FF UltraCircuitBuilder_::compute_elliptic_double_identity(FF q_elliptic_double_value, + FF w_2_value, + FF w_3_value, + FF w_2_shifted_value, + FF w_3_shifted_value, + FF alpha_base, + FF alpha) const { constexpr FF curve_b = CircuitBuilderBase>::EmbeddedCurve::Group::curve_b; static_assert(CircuitBuilderBase>::EmbeddedCurve::Group::curve_a == 0); - const auto x1 = w_1_value; - const auto y1 = w_4_value; - const auto x3 = w_2_value; - const auto y3 = w_3_value; + const auto x1 = w_2_value; + const auto y1 = w_3_value; + const auto x3 = w_2_shifted_value; + const auto y3 = w_3_shifted_value; // x-coordinate relation // (x3 + 2x1)(4y^2) - (9x^4) = 0 @@ -3689,8 +3726,13 @@ template bool UltraCircuitBuilder_::check_circuit() break; } } - if (!compute_elliptic_double_identity( - q_elliptic_double_value, w_1_value, w_2_value, w_3_value, w_4_value, elliptic_double_base, alpha) + if (!compute_elliptic_double_identity(q_elliptic_double_value, + w_2_value, + w_3_value, + w_2_shifted_value, + w_3_shifted_value, + elliptic_double_base, + alpha) .is_zero()) { #ifndef FUZZING info("Elliptic doubling identity fails at gate ", i); diff --git a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.hpp b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.hpp index 901c108cbf8..dcede73071f 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.hpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.hpp @@ -1207,10 +1207,10 @@ template class UltraCircuitBuilder_ : 
public CircuitBuilderBase signature_bits convert_signature(C* context, const cryp * e' = hash(([s]g + [e]pub).x | message) and return e'. * - * @details UltraPlonk: ~5318 gates, excluding gates required to init the UltraPlonk range check - * (~1.5k for fixed/variable_base_mul (hmm high), ~4k for blake2s) for a string of length = 34. + * @details UltraPlonk: ~5018 gates, excluding gates required to init the UltraPlonk range check + * (~1,169k for fixed/variable_base_mul, ~4k for blake2s) for a string of length = 34. */ template std::array, 2> verify_signature_internal(const byte_array& message, const point& pub_key, const signature_bits& sig) { - // Compute [s]g, where s = (s_lo, s_hi) and g = G1::one. - cycle_group g1(grumpkin::g1::one); - auto R_1 = g1 * sig.s; - - // Compute [e]pub, where e = (e_lo, e_hi) cycle_group key(pub_key.x, pub_key.y, false); - auto R_2 = key * sig.e; - - // check R_1 != R_2 - (R_1.x - R_2.x).assert_is_not_zero("Cannot add points in Schnorr verification."); - // Compute x-coord of R_1 + R_2 = [s]g + [e]pub. 
- field_t lambda = (R_1.y - R_2.y) / (R_1.x - R_2.x); - field_t x_3 = lambda * lambda - (R_1.x + R_2.x); + cycle_group g1(grumpkin::g1::one); + // compute g1 * sig.s + key * sig,e + auto x_3 = cycle_group::batch_mul({ sig.s, sig.e }, { g1, key }).x; // build input (pedersen(([s]g + [e]pub).x | pub.x | pub.y) | message) to hash function // pedersen hash ([r].x | pub.x) to make sure the size of `hash_input` is <= 64 bytes for a 32 byte message byte_array hash_input(stdlib::pedersen_hash_refactor::hash({ x_3, key.x, key.y })); @@ -66,7 +57,6 @@ std::array, 2> verify_signature_internal(const byte_array& message static constexpr size_t HI_BYTES = 32 - LO_BYTES; field_t output_hi(output.slice(0, LO_BYTES)); field_t output_lo(output.slice(LO_BYTES, HI_BYTES)); - return { output_lo, output_hi }; } diff --git a/barretenberg/cpp/src/barretenberg/stdlib/encryption/schnorr/schnorr.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/encryption/schnorr/schnorr.test.cpp index cfd7f6ca6fc..d851b2fcd63 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/encryption/schnorr/schnorr.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/encryption/schnorr/schnorr.test.cpp @@ -19,163 +19,6 @@ using field_ct = field_t; using point_ct = point; using witness_ct = witness_t; -// auto run_scalar_mul_test = [](grumpkin::fr scalar_mont, bool expect_verify) { -// Composer composer = Composer(); - -// grumpkin::fr scalar = scalar_mont.from_montgomery_form(); - -// uint256_t scalar_low{ scalar.data[0], scalar.data[1], 0ULL, 0ULL }; -// uint256_t scalar_high{ scalar.data[2], scalar.data[3], 0ULL, 0ULL }; - -// field_ct input_lo = witness_ct(&composer, scalar_low); -// field_ct input_hi = witness_ct(&composer, scalar_high); - -// grumpkin::g1::element expected = grumpkin::g1::one * scalar_mont; -// expected = expected.normalize(); -// point_ct point_input{ witness_ct(&composer, grumpkin::g1::affine_one.x), -// witness_ct(&composer, grumpkin::g1::affine_one.y) }; - -// point_ct output = 
variable_base_mul(point_input, input_lo, input_hi); - -// if (expect_verify) { -// EXPECT_EQ(output.x.get_value(), expected.x); -// EXPECT_EQ(output.y.get_value(), expected.y); -// }; - -// info("composer gates = ", composer.get_num_gates()); - -// bool result = composer.check_circuit(); -// EXPECT_EQ(result, expect_verify); -// }; - -// typedef wnaf_record wnaf_record_ct; - -/** - * @brief Helper function to compare wnaf_records, useful since == on bool_ct's returns a bool_ct. - */ -// bool compare_records(wnaf_record_ct a, wnaf_record_ct b) -// { -// bool result = a.skew.witness_bool == b.skew.witness_bool; -// if (result) { -// for (size_t i = 0; i != a.bits.size(); ++i) { -// bool a_bit = a.bits[i].witness_bool; -// bool b_bit = b.bits[i].witness_bool; -// result = result == false ? false : a_bit == b_bit; -// } -// } -// return result; -// } - -// TEST(stdlib_schnorr, convert_field_into_wnaf_special) -// { -// Composer composer = Composer(); - -// // the wnaf_record ((b_1, ... b_128), skew) corresponding to the 129-bit non-negative value -// // is, 2^128 + 2^127 w_1 + ... + 2 w_127 + w_128 - skew, where w_i = 1 if b_i is true, else -1.. -// // We make some auxiliary wnaf records that will be helpful. 
-// std::vector false128(128, false); -// wnaf_record_ct all_false({ .bits = false128, .skew = false }); - -// std::vector true128(128, true); -// wnaf_record_ct all_true({ .bits = true128, .skew = true }); - -// // establish a list of special values to be converted to a wnaf_record -// std::vector special_values({ 1, -// 0, -// (static_cast(1) << 128) - 1, -// (static_cast(1) << 128) + 1, -// (static_cast(1) << 128), -// (static_cast(1) << 129) - 1 }); - -// size_t num_special_values(special_values.size()); - -// // convert these values to field elements -// std::vector special_field_elts(num_special_values); -// for (size_t i = 0; i != num_special_values; ++i) { -// field_ct a(special_values[i]); -// special_field_elts[i] = a; -// }; - -// // manually build the expected wnaf records -// // 1 is given by ((false, ..., false), false) -// auto record_1 = all_false; - -// // 0 is given by ((false, ..., false), true) -// auto record_0 = all_false; -// record_0.skew = true; - -// // 2^128 - 1 = 2^128 - 2^127 + (2^127 - 1) - 0 is given by((false, true, ..., true), false) -// auto record_128_minus_1 = all_true; -// record_128_minus_1.bits[0] = false; -// record_128_minus_1.skew = false; - -// // 2^128 + 1 = 2^128 + (2^127 - (2^127 - 1)) - 0 is given by((true, false, false, ..., false), false) -// auto record_128_plus_1 = all_false; -// record_128_plus_1.bits[0] = true; - -// // 2^128 = 2^128 + (2^127 - (2^127 - 1)) - 1 is given by((true, false, false, ..., false), true) -// auto record_128 = all_false; -// record_128.bits[0] = true; -// record_128.skew = true; - -// // // 2^129-1 = 2^128 + 2^127 + ... + 1 - 0 should be given by ((true, true, ..., true), false). -// // Note: fixed_wnaf<129, 1, 1>, used inside of convert_field_into_wnaf, incorrectly computes the the coefficient -// // of -// // 2^127 in the wnaf representation of to be -1. 
-// auto record_max = all_true; -// record_max.skew = false; - -// std::vector expected_wnaf_records( -// { record_1, record_0, record_128_minus_1, record_128_plus_1, record_128, record_max }); - -// // integers less than 2^128 are converted correctly -// for (size_t i = 0; i != num_special_values; ++i) { -// field_ct elt = special_field_elts[i]; -// wnaf_record_ct record = convert_field_into_wnaf(&composer, elt); -// wnaf_record_ct expected_record = expected_wnaf_records[i]; -// bool records_equal = compare_records(record, expected_record); -// ASSERT_TRUE(records_equal); -// ASSERT_FALSE(composer.failed()); -// } -// } - -// TEST(stdlib_schnorr, convert_field_into_wnaf) -// { -// Composer composer = Composer(); - -// grumpkin::fq scalar_mont = grumpkin::fq::random_element(); -// grumpkin::fq scalar = scalar_mont.from_montgomery_form(); - -// // our wnaf records only represent 128 bits, so we test by generating a field -// // element and then truncating. -// scalar.data[2] = 0ULL; -// scalar.data[3] = 0ULL; - -// scalar = scalar.to_montgomery_form(); - -// field_ct input(&composer, scalar); -// convert_field_into_wnaf(&composer, input); - -// info("composer gates = ", composer.get_num_gates()); - -// bool result = composer.check_circuit(); -// EXPECT_EQ(result, true); -// } - -// /** -// * @brief Test variable_base_mul(const point& pub_key, -// * const field_t& low_bits, -// * const field_t& high_bits) -// * by taking a random field Fr element s, computing the corresponding Grumpkin G1 element both natively -// * and using the function in question (splitting s into 128-bit halves), then comparing the results. -// */ -// TEST(stdlib_schnorr, test_scalar_mul_low_high) -// { -// run_scalar_mul_test(grumpkin::fr::random_element(), true); -// run_scalar_mul_test(grumpkin::fr(static_cast(1) << 128), false); -// run_scalar_mul_test(0, false); -// } - /** * @test Test circuit verifying a Schnorr signature generated by \see{crypto::schnorr::verify_signature}. 
* We only test: messages signed and verified using Grumpkin and the BLAKE2s hash function. We only test @@ -202,8 +45,6 @@ TEST(stdlib_schnorr, verify_signature) message_string, account.public_key, signature); EXPECT_EQ(first_result, true); - field_ct foo(witness_ct(&composer, 100)); - foo.create_range_constraint(256); point_ct pub_key{ witness_ct(&composer, account.public_key.x), witness_ct(&composer, account.public_key.y) }; signature_bits sig = convert_signature(&composer, signature); byte_array_ct message(&composer, message_string); diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp index c0c469c6407..c1bd00358f2 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp @@ -889,7 +889,6 @@ typename cycle_group::batch_mul_internal_output cycle_group: const bool unconditional_add) { ASSERT(scalars.size() == base_points.size()); - Composer* context = nullptr; for (auto& scalar : scalars) { if (scalar.lo.get_context() != nullptr) { @@ -922,6 +921,24 @@ typename cycle_group::batch_mul_internal_output cycle_group: Element offset_generator_accumulator = offset_generators[0]; cycle_group accumulator = offset_generators[0]; + // populate the set of points we are going to add into our accumulator, *before* we do any ECC operations + // this way we are able to fuse mutliple ecc add / ecc double operations and reduce total gate count. + // (ecc add/ecc double gates normally cost 2 UltraPlonk gates. 
However if we chain add->add, add->double, + // double->add, double->double, they only cost one) + std::vector points_to_add; + for (size_t i = 0; i < num_rounds; ++i) { + for (size_t j = 0; j < num_points; ++j) { + const std::optional scalar_slice = scalar_slices[j].read(num_rounds - i - 1); + // if we are doing a batch mul over scalars of different bit-lengths, we may not have any scalar bits for a + // given round and a given scalar + if (scalar_slice.has_value()) { + const cycle_group point = point_tables[j].read(scalar_slice.value()); + points_to_add.emplace_back(point); + } + } + } + std::vector> x_coordinate_checks; + size_t point_counter = 0; for (size_t i = 0; i < num_rounds; ++i) { if (i != 0) { for (size_t j = 0; j < TABLE_BITS; ++j) { @@ -936,14 +953,20 @@ typename cycle_group::batch_mul_internal_output cycle_group: // if we are doing a batch mul over scalars of different bit-lengths, we may not have any scalar bits for a // given round and a given scalar if (scalar_slice.has_value()) { - const cycle_group point = point_tables[j].read(scalar_slice.value()); - accumulator = unconditional_add ? accumulator.unconditional_add(point) - : accumulator.constrained_unconditional_add(point); + const auto& point = points_to_add[point_counter++]; + if (!unconditional_add) { + x_coordinate_checks.push_back({ accumulator.x, point.x }); + } + accumulator = accumulator.unconditional_add(point); offset_generator_accumulator = offset_generator_accumulator + Element(offset_generators[j + 1]); } } } + for (auto& [x1, x2] : x_coordinate_checks) { + auto x_diff = x2 - x1; + x_diff.assert_is_not_zero("_variable_base_batch_mul_internal x-coordinate collision"); + } /** * offset_generator_accumulator represents the sum of all the offset generator terms present in `accumulator`. 
* We don't subtract off yet, as we may be able to combine `offset_generator_accumulator` with other constant terms From 4426370b4b522b5c592e8fc978162c883495918c Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Fri, 15 Sep 2023 22:11:43 +0000 Subject: [PATCH 18/50] wip --- .../barretenberg/stdlib/primitives/group/cycle_group.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp index c1bd00358f2..b33b51819d3 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp @@ -950,7 +950,7 @@ typename cycle_group::batch_mul_internal_output cycle_group: for (size_t j = 0; j < num_points; ++j) { const std::optional scalar_slice = scalar_slices[j].read(num_rounds - i - 1); - // if we are doing a batch mul over scalars of different bit-lengths, we may not have any scalar bits for a + // if we are doing a batch mul over scalars of different bit-lengths, we may not have a bit slice for a // given round and a given scalar if (scalar_slice.has_value()) { const auto& point = points_to_add[point_counter++]; @@ -1194,8 +1194,8 @@ cycle_group cycle_group::batch_mul(const std::vector NUM_BITS, skip lookup-version of fixed-base scalar mul. too much complexity - bool num_bits_exceeds_lookup_table_size = num_bits > NUM_BITS; + // if num_bits != NUM_BITS, skip lookup-version of fixed-base scalar mul. 
too much complexity + bool num_bits_not_full_field_size = num_bits != NUM_BITS; // When calling `_variable_base_batch_mul_internal`, we can unconditionally add iff all of the input points // are fixed-base points @@ -1214,7 +1214,7 @@ cycle_group cycle_group::batch_mul(const std::vector Date: Wed, 20 Sep 2023 14:58:04 +0000 Subject: [PATCH 19/50] compiler fixes --- .../benchmark/relations_bench/CMakeLists.txt | 1 + .../crypto/pedersen_hash/c_bind_new.cpp | 23 ++++---- .../crypto/pedersen_hash/c_bind_refactor.cpp | 58 ------------------- .../cpp/src/barretenberg/ecc/CMakeLists.txt | 2 +- .../plookup_tables/fixed_base/fixed_base.cpp | 2 +- 5 files changed, 15 insertions(+), 71 deletions(-) delete mode 100644 barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/c_bind_refactor.cpp diff --git a/barretenberg/cpp/src/barretenberg/benchmark/relations_bench/CMakeLists.txt b/barretenberg/cpp/src/barretenberg/benchmark/relations_bench/CMakeLists.txt index 7fd8082895c..cb7a05e724e 100644 --- a/barretenberg/cpp/src/barretenberg/benchmark/relations_bench/CMakeLists.txt +++ b/barretenberg/cpp/src/barretenberg/benchmark/relations_bench/CMakeLists.txt @@ -7,6 +7,7 @@ relations.bench.cpp # Required libraries for benchmark suites set(LINKED_LIBRARIES polynomials + proof_system benchmark::benchmark ) diff --git a/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/c_bind_new.cpp b/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/c_bind_new.cpp index 0c0d506112c..1c75d6f6ae3 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/c_bind_new.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/c_bind_new.cpp @@ -1,23 +1,20 @@ #include "barretenberg/common/mem.hpp" #include "barretenberg/common/serialize.hpp" #include "c_bind.hpp" -#include "pedersen.hpp" -#include "pedersen_lookup.hpp" +#include "pedersen_refactor.hpp" extern "C" { WASM_EXPORT void pedersen_hash_init() { - // TODO: do we need this if we are using lookup-pedersen in merkle trees? 
- crypto::generators::init_generator_data(); - crypto::pedersen_hash::lookup::init(); + // TODO delete } WASM_EXPORT void pedersen_hash_pair(uint8_t const* left, uint8_t const* right, uint8_t* result) { auto lhs = barretenberg::fr::serialize_from_buffer(left); auto rhs = barretenberg::fr::serialize_from_buffer(right); - auto r = crypto::pedersen_hash::lookup::hash_multiple({ lhs, rhs }); + auto r = crypto::pedersen_hash_refactor::hash_multiple({ lhs, rhs }); barretenberg::fr::serialize_to_buffer(r, result); } @@ -25,7 +22,7 @@ WASM_EXPORT void pedersen_hash_multiple(uint8_t const* inputs_buffer, uint8_t* o { std::vector to_compress; read(inputs_buffer, to_compress); - auto r = crypto::pedersen_hash::lookup::hash_multiple(to_compress); + auto r = crypto::pedersen_hash_refactor::hash_multiple(to_compress); barretenberg::fr::serialize_to_buffer(r, output); } @@ -35,7 +32,7 @@ WASM_EXPORT void pedersen_hash_multiple_with_hash_index(uint8_t const* inputs_bu { std::vector to_compress; read(inputs_buffer, to_compress); - auto r = crypto::pedersen_hash::lookup::hash_multiple(to_compress, ntohl(*hash_index)); + auto r = crypto::pedersen_hash_refactor::hash_multiple(to_compress, ntohl(*hash_index)); barretenberg::fr::serialize_to_buffer(r, output); } @@ -45,7 +42,6 @@ WASM_EXPORT void pedersen_hash_multiple_with_hash_index(uint8_t const* inputs_bu * e.g. 
* input: [1][2][3][4] * output: [1][2][3][4][compress(1,2)][compress(3,4)][compress(5,6)] - * */ WASM_EXPORT void pedersen_hash_to_tree(fr::vec_in_buf data, fr::vec_out_buf out) { @@ -54,9 +50,14 @@ WASM_EXPORT void pedersen_hash_to_tree(fr::vec_in_buf data, fr::vec_out_buf out) fields.reserve(num_outputs); for (size_t i = 0; fields.size() < num_outputs; i += 2) { - fields.push_back(crypto::pedersen_hash::lookup::hash_multiple({ fields[i], fields[i + 1] })); + fields.push_back(crypto::pedersen_hash_refactor::hash_multiple({ fields[i], fields[i + 1] })); } - *out = to_heap_buffer(fields); + auto buf_size = 4 + num_outputs * sizeof(grumpkin::fq); + // TODO(@charlielye) Can we get rid of cppcoreguidelines-owning-memory warning here? + // NOLINTNEXTLINE(cppcoreguidelines-owning-memory, cppcoreguidelines-no-malloc) + *out = static_cast(malloc(buf_size)); + auto* dst = *out; + write(dst, fields); } } \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/c_bind_refactor.cpp b/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/c_bind_refactor.cpp deleted file mode 100644 index 496fe633d89..00000000000 --- a/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/c_bind_refactor.cpp +++ /dev/null @@ -1,58 +0,0 @@ -#include "barretenberg/common/mem.hpp" -#include "barretenberg/common/serialize.hpp" -#include "c_bind.hpp" -#include "pedersen_refactor.hpp" - -extern "C" { - -WASM_EXPORT void pedersen_hash_pair(uint8_t const* left, uint8_t const* right, uint8_t* result) -{ - auto lhs = barretenberg::fr::serialize_from_buffer(left); - auto rhs = barretenberg::fr::serialize_from_buffer(right); - auto r = crypto::pedersen_hash_refactor::hash_multiple({ lhs, rhs }); - barretenberg::fr::serialize_to_buffer(r, result); -} - -WASM_EXPORT void pedersen_hash_multiple(uint8_t const* inputs_buffer, uint8_t* output) -{ - std::vector to_compress; - read(inputs_buffer, to_compress); - auto r = 
crypto::pedersen_hash_refactor::hash_multiple(to_compress); - barretenberg::fr::serialize_to_buffer(r, output); -} - -WASM_EXPORT void pedersen_hash_multiple_with_hash_index(uint8_t const* inputs_buffer, - uint32_t const* hash_index, - uint8_t* output) -{ - std::vector to_compress; - read(inputs_buffer, to_compress); - auto r = crypto::pedersen_hash_refactor::hash_multiple(to_compress, ntohl(*hash_index)); - barretenberg::fr::serialize_to_buffer(r, output); -} - -/** - * Given a buffer containing 32 byte pedersen leaves, return a new buffer containing the leaves and all pairs of - * nodes that define a merkle tree. - * e.g. - * input: [1][2][3][4] - * output: [1][2][3][4][compress(1,2)][compress(3,4)][compress(5,6)] - */ -WASM_EXPORT void pedersen_hash_to_tree(fr::vec_in_buf data, fr::vec_out_buf out) -{ - auto fields = from_buffer>(data); - auto num_outputs = fields.size() * 2 - 1; - fields.reserve(num_outputs); - - for (size_t i = 0; fields.size() < num_outputs; i += 2) { - fields.push_back(crypto::pedersen_hash_refactor::hash_multiple({ fields[i], fields[i + 1] })); - } - - auto buf_size = 4 + num_outputs * sizeof(grumpkin::fq); - // TODO(@charlielye) Can we get rid of cppcoreguidelines-owning-memory warning here? 
- // NOLINTNEXTLINE(cppcoreguidelines-owning-memory, cppcoreguidelines-no-malloc) - *out = static_cast(malloc(buf_size)); - auto* dst = *out; - write(dst, fields); -} -} \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/ecc/CMakeLists.txt b/barretenberg/cpp/src/barretenberg/ecc/CMakeLists.txt index 1133de8da90..35e543283ee 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/CMakeLists.txt +++ b/barretenberg/cpp/src/barretenberg/ecc/CMakeLists.txt @@ -1,4 +1,4 @@ -barretenberg_module(ecc numeric crypto_keccak) +barretenberg_module(ecc numeric crypto_keccak crypto_sha256) if(DISABLE_ADX) message(STATUS "Disabling ADX assembly variant.") diff --git a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/fixed_base/fixed_base.cpp b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/fixed_base/fixed_base.cpp index f58e10ff50a..feae413938a 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/fixed_base/fixed_base.cpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/fixed_base/fixed_base.cpp @@ -193,7 +193,7 @@ BasicTable table::generate_basic_fixed_base_table(BasicTableId id, size_t basic_ const bool is_small_table = (multitable_bits - bits_covered_by_previous_tables_in_multitable) < BITS_PER_TABLE; const size_t table_bits = is_small_table ? 
multitable_bits - bits_covered_by_previous_tables_in_multitable : BITS_PER_TABLE; - const size_t table_size = 1ULL << table_bits; + const size_t table_size = static_cast(1ULL << table_bits); BasicTable table; table.id = id; table.table_index = basic_table_index; From a29ebb32cc197ee561d77a34832ff6db8e2ffbb1 Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Wed, 20 Sep 2023 15:03:50 +0000 Subject: [PATCH 20/50] compiler fix --- .../crypto/pedersen_commitment/pedersen_refactor.hpp | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_refactor.hpp b/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_refactor.hpp index 3afc8f8e06c..75f17d2d28f 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_refactor.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_refactor.hpp @@ -98,17 +98,16 @@ template class pedersen_commitment_refactor { using Fr = typename Curve::ScalarField; using Fq = typename Curve::BaseField; using Group = typename Curve::Group; - using generator_data = generator_data; static AffineElement commit_native( const std::vector& inputs, size_t hash_index = 0, - const generator_data* generator_context = generator_data::get_default_generators()); + const generator_data* generator_context = generator_data::get_default_generators()); static AffineElement commit_native( const std::vector& inputs, size_t hash_index = 0, - const generator_data* generator_context = generator_data::get_default_generators()); + const generator_data* generator_context = generator_data::get_default_generators()); }; extern template class pedersen_commitment_refactor; From 79b17e18863f88c77509cab5f964e55c60290bb3 Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Wed, 20 Sep 2023 15:12:09 +0000 Subject: [PATCH 21/50] compiler fix --- .../crypto/pedersen_commitment/pedersen_refactor.cpp | 4 ++-- 1 file changed, 2
insertions(+), 2 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_refactor.cpp b/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_refactor.cpp index 52021bf6e8e..e9d0bd254f3 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_refactor.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_refactor.cpp @@ -23,7 +23,7 @@ namespace crypto { */ template typename Curve::AffineElement pedersen_commitment_refactor::commit_native( - const std::vector& inputs, const size_t hash_index, const generator_data* const generator_context) + const std::vector& inputs, const size_t hash_index, const generator_data* const generator_context) { const auto generators = generator_context->conditional_extend(inputs.size() + hash_index); Element result = Group::point_at_infinity; @@ -49,7 +49,7 @@ typename Curve::AffineElement pedersen_commitment_refactor::commit_native */ template typename Curve::AffineElement pedersen_commitment_refactor::commit_native( - const std::vector& inputs, const size_t hash_index, const generator_data* const generator_context) + const std::vector& inputs, const size_t hash_index, const generator_data* const generator_context) { const auto generators = generator_context->conditional_extend(inputs.size() + hash_index); Element result = Group::point_at_infinity; From a0c9f924e97d723d4de893108f8ff8383eafb754 Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Wed, 20 Sep 2023 15:23:34 +0000 Subject: [PATCH 22/50] compiler fix --- .../barretenberg/stdlib/primitives/group/cycle_group.hpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp index 16430d917d9..4316ee76fa7 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp +++ 
b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp @@ -27,9 +27,9 @@ concept IsNotUltraArithmetic = (Composer::CIRCUIT_TYPE != CircuitType::ULTRA); */ template class cycle_group { public: - using field_t = field_t; - using bool_t = bool_t; - using witness_t = witness_t; + using field_t = stdlib::field_t; + using bool_t = stdlib::bool_t; + using witness_t = stdlib::witness_t; using FF = typename Composer::FF; using Curve = typename Composer::EmbeddedCurve; using Group = typename Curve::Group; From 5e7a4d56812a55ad639d5ae6d367cfecd2c66726 Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Wed, 20 Sep 2023 15:28:33 +0000 Subject: [PATCH 23/50] compiler fix --- .../barretenberg/stdlib/primitives/group/cycle_group.test.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp index e2da96f6c92..ae682b7e379 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp @@ -24,7 +24,7 @@ namespace { auto& engine = numeric::random::get_debug_engine(); } #pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-local-typedef" +#pragma GCC diagnostic ignored "-Wunused-local-typedefs" template class CycleGroupTest : public ::testing::Test { public: From 8b58e39d0faad284c28a52700d72cd56374c4616 Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Thu, 21 Sep 2023 10:44:24 +0000 Subject: [PATCH 24/50] reverted schnorr --- .../stdlib/encryption/schnorr/schnorr.cpp | 297 ++++++++++++++++-- .../stdlib/encryption/schnorr/schnorr.hpp | 21 +- .../encryption/schnorr/schnorr.test.cpp | 157 +++++++++ 3 files changed, 446 insertions(+), 29 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/stdlib/encryption/schnorr/schnorr.cpp 
b/barretenberg/cpp/src/barretenberg/stdlib/encryption/schnorr/schnorr.cpp index 97435809dfd..52f7e698d4f 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/encryption/schnorr/schnorr.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/encryption/schnorr/schnorr.cpp @@ -1,13 +1,69 @@ #include "schnorr.hpp" #include "barretenberg/crypto/pedersen_commitment/pedersen.hpp" #include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp" +#include "barretenberg/stdlib/commitment/pedersen/pedersen.hpp" #include "barretenberg/stdlib/hash/blake2s/blake2s.hpp" -#include "barretenberg/stdlib/hash/pedersen/pedersen_refactor.hpp" -#include "barretenberg/stdlib/primitives/bigfield/bigfield.hpp" -#include "barretenberg/stdlib/primitives/group/cycle_group.hpp" #include -namespace proof_system::plonk::stdlib::schnorr { +namespace proof_system::plonk { +namespace stdlib { +namespace schnorr { + +/** + * @brief Expand a 128-bits integer in a form amenable to doing elliptic curve arithmetic in circuits. + * + * @details The output wnaf_record records the expansion coefficients + * limb % 129 = 2^128 + 2^127 w_1 + ... + 2 w_127 + w_128 - skew + * where each w_i lies in {-1, 1} and skew is 0 or 1. The boolean `skew` could also be called `is_even`; the even + * 129-bit non-negative integers are those with skew == 1, while the odd ones have skew==0. + * + * @warning While it is possible to express any 129-bit value in this form, this function only works correctly + * on 128-bit values, since the same is true for fixed_wnaf<129, 1, 1>. This is illusrated in the tests. + * + * + * TurboPLONK: ~260 gates. 
+ */ +template wnaf_record convert_field_into_wnaf(C* context, const field_t& limb) +{ + constexpr size_t num_wnaf_bits = 129; + uint256_t value = limb.get_value(); + + bool skew = false; + uint64_t wnaf_entries[129] = { 0 }; + + // compute wnaf representation of value natively + barretenberg::wnaf::fixed_wnaf(&value.data[0], &wnaf_entries[0], skew, 0); + + std::vector> wnaf_bits; + bool_t wnaf_skew(witness_t(context, skew)); + field_t two(context, 2); + field_t one(context, 1); + field_t accumulator(context, 1); + + // set accumulator = 2^{128} + \sum_{i=0}^{127} 2^i w_{128-i}, where w_i = 2 * wnaf_entries[i+1] - 1 + for (size_t i = 0; i < 128; ++i) { + // accumulator = 2 * accumulator + 1 (resp. -1) if the 32nd bit of wnaf_entries[i+1] is 0 (resp. 1). + + // extract sign bit of wnaf_entries[i+1] (32nd entry in list of bits) + uint64_t predicate = (wnaf_entries[i + 1] >> 31U) & 1U; + // type of !predicate below is bool + bool_t wnaf_bit = witness_t(context, !predicate); + wnaf_bits.push_back(wnaf_bit); + + // !predicate == false ~> -1; true ~> +1 + accumulator = accumulator + accumulator; + accumulator = accumulator + (field_t(wnaf_bit) * two - one); + } + + // subtract 1 from accumulator if there is skew + accumulator = accumulator - field_t(wnaf_skew); + + accumulator.assert_equal(limb); + wnaf_record result; + result.bits = wnaf_bits; + result.skew = wnaf_skew; + return result; +} /** * @brief Instantiate a witness containing the signature (s, e) as a quadruple of @@ -15,48 +71,233 @@ namespace proof_system::plonk::stdlib::schnorr { */ template signature_bits convert_signature(C* context, const crypto::schnorr::signature& signature) { - using cycle_scalar = typename cycle_group::cycle_scalar; + signature_bits sig{ + field_t(), + field_t(), + field_t(), + field_t(), + }; uint256_t s_bigint(0); uint256_t e_bigint(0); - const uint8_t* s_ptr = &signature.s[0]; - const uint8_t* e_ptr = &signature.e[0]; - numeric::read(s_ptr, s_bigint); - numeric::read(e_ptr, 
e_bigint); - signature_bits sig{ .s = cycle_scalar::from_witness_bitstring(context, s_bigint, 256), - .e = cycle_scalar::from_witness_bitstring(context, e_bigint, 256) }; + + for (size_t i = 0; i < 32; ++i) { + for (size_t j = 7; j < 8; --j) { + uint8_t s_shift = static_cast(signature.s[i] >> j); + uint8_t e_shift = static_cast(signature.e[i] >> j); + bool s_bit = (s_shift & 1U) == 1U; + bool e_bit = (e_shift & 1U) == 1U; + s_bigint += s_bigint; + e_bigint += e_bigint; + + s_bigint += static_cast(s_bit); + e_bigint += static_cast(e_bit); + } + } + + sig.s_lo = witness_t(context, s_bigint.slice(0, 128)); + sig.s_hi = witness_t(context, s_bigint.slice(128, 256)); + sig.e_lo = witness_t(context, e_bigint.slice(0, 128)); + sig.e_hi = witness_t(context, e_bigint.slice(128, 256)); + return sig; } +/** + * @brief Compute [(low_bits + 2^128 high_bits)]pub_key. + * + * @details This method cannot handle the case where either of low_bits, high_bits is zero. + * This assumption is backed by a constraint (see the tests for an illustration). + */ +template +point variable_base_mul(const point& pub_key, const field_t& low_bits, const field_t& high_bits) +{ + C* context = pub_key.x.context; + + // N.B. this method does not currently work if low_bits == 0 or high_bits == 0 + field_t zero_test = (low_bits * high_bits); + zero_test.assert_is_not_zero(); + + const auto low_wnaf = stdlib::schnorr::convert_field_into_wnaf(context, low_bits); + const auto high_wnaf = stdlib::schnorr::convert_field_into_wnaf(context, high_bits); + // current_accumulator is pub_key, so init is true, so high_output is [high_wnaf]pub_key + point high_output = stdlib::schnorr::variable_base_mul(pub_key, pub_key, high_wnaf); + // compute output = [low_wnaf]pub_key + [2^128]high_output. 
+ point output = stdlib::schnorr::variable_base_mul(pub_key, high_output, low_wnaf); + return output; +} + +/** + * @brief Multiply a point of Grumpkin by a scalar described as a wnaf record, possibly offsetting by another point. + * + * @param pub_key A point of Grumpkin known to the prover in terms of the generator grumpkin::g1::one. + * @param current_accumulator A point of the curve that will remain unchanged. + * @param wnaf A wnaf_record, a collection of bool_t's typically recording an expansion of an element of + * field_t in the form 2^{128} + 2^{127} w_1 + ... + 2 w_127 + w_128 - skew. + * + * @details Let W be the scalar represented by wnaf. If pub_key = ± current_accumulator, this function returns + * [W]pub_key. Otherwise, it returns [W]pub_key + [2^128]current_accumulator. These two cases are distinguished + * between a boolean `init`. The idea here is that, if `pub_key==±current_accumulator`, then the function is being + * called for the first time. + * + * @warning This function should not be used on its own, as its security depends on the manner in which it is + * expected to be used. + */ +template +point variable_base_mul(const point& pub_key, const point& current_accumulator, const wnaf_record& wnaf) +{ + // Check if the pub_key is a points on the curve. + pub_key.on_curve(); + + // The account circuit constrains `pub_key` to lie on Grumpkin. Presently, the only values that are passed in the + // second argument as `current_accumulator` are `pub_key` and a point which is the output of the present function. + // We therefore assume that `current_accumulator` lies on Grumpkin as well. 
+ grumpkin::g1::affine_element pub_key_native(pub_key.x.get_value(), pub_key.y.get_value()); + grumpkin::g1::affine_element current_accumulator_native(current_accumulator.x.get_value(), + current_accumulator.y.get_value()); + + field_t two(pub_key.x.context, 2); + + // Various elliptic curve point additions that follow assume that the two points are distinct and not mutually + // inverse. collision_offset is chosen to prevent a malicious prover from exploiting this assumption. + grumpkin::g1::affine_element collision_offset = crypto::generators::get_generator_data(DEFAULT_GEN_1).generator; + grumpkin::g1::affine_element collision_end = collision_offset * grumpkin::fr(uint256_t(1) << 129); + + const bool init = current_accumulator.x.get_value() == pub_key.x.get_value(); + + // if init == true, check pub_key != collision_offset (ruling out 3 other points at the same time), + // if init == false we assume this has already been checked in an earlier call wherein init==true. + if (init) { + field_t zero_test = ((pub_key.x - collision_offset.x) * (pub_key.y - collision_offset.y)); + zero_test.assert_is_not_zero("pub_key and collision_offset have a coordinate in common."); + } else { + // Check if the current_accumulator is a point on the curve only if init is false. + current_accumulator.on_curve(); + } + + point accumulator{ collision_offset.x, collision_offset.y }; + + /* + * Let w_i = 2 wnaf.bits[i-1] - 1 for i = 1, ..., 128. + * The integer represented by the digits w_i and a skew bit `skew` in {0, 1} is + * W := 2^{128} + 2^{127} w_1 + ... + 2 w_127 + w_128 - skew + * = 2^{128} + \sum_{k=0}^{127}2^{k}w_{128-k} - skew. + * When init == true, the for loop that follows sets + * accumulator = [W+skew]pub_key + [2^{129}]collision_offset + * When init == false, the for loop that follows sets + * accumulator = [W+skew]pub_key + [2^{129}]collision_offset + [2^{128}]current_accumulator. + * We describe the accumulation process in the loop. 
+ * + * Defining w_{-1} = 0, W_{0} = 1, and W_{i+1} = 2 W_{i} + w_i for i = 1, ..., 128, we have + * W_1 = 2 + w_0 + * W_2 = 4 + 2 w_0 + w_1 + * W_i = 2^i + 2^{i-1} w_0 + ... + 2 w_{i-2} + w_{i-1} + * W_128 = W + skew + * + * Let A_0 = collision_offset. For i = 0, ..., 127, let + * A_{i+1} = 2^{i+1} collision_offset + [W_{i}]pub_key and A'_{i+1} = A_{i+1} + [2^{i}]current_accumulator. + * Suppose we are at the end of the loop with loop variable i. + * - If `init==true`, then the value of `accumulator` is A_{i+i}. + * - If `init==false`, then the value of `accumulator` is A'_{i+1}. + * In both cases, setting the final accumulator value is that claimed above. + * + * Note that all divisons are safe, i.e., failing contsraints will be imposed if any denominator is zero. + */ + for (size_t i = 0; i < 129; ++i) { + if (!init && i == 1) { + // set accumulator = accumulator + current_accumulator. + field_t x1 = accumulator.x; + field_t y1 = accumulator.y; + + field_t x2 = current_accumulator.x; + field_t y2 = current_accumulator.y; + + field_t lambda1 = (y2 - y1) / (x2 - x1); + field_t x3 = lambda1.madd(lambda1, -(x2 + x1)); + field_t y3 = lambda1.madd((x1 - x3), -y1); + accumulator.x = x3; + accumulator.y = y3; + } + + // if i == 0: set accumulator = [2]accumulator + pub_key + // otherwise, set accumulator = [2]accumulator + [w_i]pub_key. + + // // Set P_3 = accumulator + pub_key or P_3 = accumulator - pub_key, depending on the current wnaf bit. + + field_t x1 = accumulator.x; + field_t y1 = accumulator.y; + + field_t x2 = (i == 0) ? pub_key.x : pub_key.x; + field_t y2 = (i == 0) ? pub_key.y : pub_key.y.madd(field_t(wnaf.bits[i - 1]) * two, -pub_key.y); + field_t lambda1 = (y2 - y1) / (x2 - x1); + field_t x3 = lambda1.madd(lambda1, -(x2 + x1)); + + // // Set P_4 = P_3 + accumulator. + // // We save gates by not using the formula lambda2 = (y3 - y1) / (x3 - x1), which would require computing + // // y_3. 
Instead we use another formula for lambda2 derived using the substitution y3 = lambda1(x1 - x3) - y1. + field_t lambda2 = -lambda1 - (y1 * two) / (x3 - x1); + field_t x4 = lambda2.madd(lambda2, -(x3 + x1)); + field_t y4 = lambda2.madd(x1 - x4, -y1); + + accumulator.x = x4; + accumulator.y = y4; + } + + // At this point, accumulator is [W + skew]pub + [2^{129}]collision_mask. + // If wnaf_skew, subtract pub_key frorm accumulator. + field_t add_lambda = (accumulator.y + pub_key.y) / (accumulator.x - pub_key.x); + field_t x_add = add_lambda.madd(add_lambda, -(accumulator.x + pub_key.x)); + field_t y_add = add_lambda.madd((pub_key.x - x_add), pub_key.y); + bool_t add_predicate = wnaf.skew; + accumulator.x = ((x_add - accumulator.x).madd(field_t(add_predicate), accumulator.x)); + accumulator.y = ((y_add - accumulator.y).madd(field_t(add_predicate), accumulator.y)); + + // subtract [2^{129}]collision_offset from accumulator. + point collision_mask{ collision_end.x, -collision_end.y }; + + field_t lambda = (accumulator.y - collision_mask.y) / (accumulator.x - collision_mask.x); + field_t x3 = lambda.madd(lambda, -(collision_mask.x + accumulator.x)); + field_t y3 = lambda.madd(collision_mask.x - x3, -collision_mask.y); + + accumulator.x = x3; + accumulator.y = y3; + return accumulator; +} + /** * @brief Make the computations needed to verify a signature (s, e), i.e., compute * e' = hash(([s]g + [e]pub).x | message) and return e'. * - * @details UltraPlonk: ~5018 gates, excluding gates required to init the UltraPlonk range check - * (~1,169k for fixed/variable_base_mul, ~4k for blake2s) for a string of length = 34. + * @details TurboPlonk: ~10850 gates (~4k for variable_base_mul, ~6k for blake2s) for a string of length < 32. 
*/ template std::array, 2> verify_signature_internal(const byte_array& message, const point& pub_key, const signature_bits& sig) { - cycle_group key(pub_key.x, pub_key.y, false); - cycle_group g1(grumpkin::g1::one); - // compute g1 * sig.s + key * sig,e + // Compute [s]g, where s = (s_lo, s_hi) and g = G1::one. + point R_1 = group::fixed_base_scalar_mul(sig.s_lo, sig.s_hi); + // Compute [e]pub, where e = (e_lo, e_hi) + point R_2 = variable_base_mul(pub_key, sig.e_lo, sig.e_hi); + + // check R_1 != R_2 + (R_1.x - R_2.x).assert_is_not_zero("Cannot add points in Schnorr verification."); + // Compute x-coord of R_1 + R_2 = [s]g + [e]pub. + field_t lambda = (R_1.y - R_2.y) / (R_1.x - R_2.x); + field_t x_3 = lambda * lambda - (R_1.x + R_2.x); - auto x_3 = cycle_group::batch_mul({ sig.s, sig.e }, { g1, key }).x; // build input (pedersen(([s]g + [e]pub).x | pub.x | pub.y) | message) to hash function // pedersen hash ([r].x | pub.x) to make sure the size of `hash_input` is <= 64 bytes for a 32 byte message - byte_array hash_input(stdlib::pedersen_hash_refactor::hash({ x_3, key.x, key.y })); + byte_array hash_input(stdlib::pedersen_commitment::compress({ x_3, pub_key.x, pub_key.y })); hash_input.write(message); // compute e' = hash(([s]g + [e]pub).x | message) byte_array output = blake2s(hash_input); - static constexpr size_t LO_BYTES = cycle_group::cycle_scalar::LO_BITS / 8; - static constexpr size_t HI_BYTES = 32 - LO_BYTES; - field_t output_hi(output.slice(0, LO_BYTES)); - field_t output_lo(output.slice(LO_BYTES, HI_BYTES)); + + field_t output_hi(output.slice(0, 16)); + field_t output_lo(output.slice(16, 16)); + return { output_lo, output_hi }; } @@ -70,8 +311,8 @@ template void verify_signature(const byte_array& message, const point& pub_key, const signature_bits& sig) { auto [output_lo, output_hi] = verify_signature_internal(message, pub_key, sig); - output_lo.assert_equal(sig.e.lo, "verify signature failed"); - output_hi.assert_equal(sig.e.hi, "verify signature 
failed"); + output_lo.assert_equal(sig.e_lo, "verify signature failed"); + output_hi.assert_equal(sig.e_hi, "verify signature failed"); } /** @@ -85,12 +326,16 @@ bool_t signature_verification_result(const byte_array& message, const signature_bits& sig) { auto [output_lo, output_hi] = verify_signature_internal(message, pub_key, sig); - bool_t valid = (output_lo == sig.e.lo) && (output_hi == sig.e.hi); + bool_t valid = (output_lo == sig.e_lo) && (output_hi == sig.e_hi); return valid; } +INSTANTIATE_STDLIB_METHOD(VARIABLE_BASE_MUL) +INSTANTIATE_STDLIB_METHOD(CONVERT_FIELD_INTO_WNAF) INSTANTIATE_STDLIB_METHOD(VERIFY_SIGNATURE_INTERNAL) INSTANTIATE_STDLIB_METHOD(VERIFY_SIGNATURE) INSTANTIATE_STDLIB_METHOD(SIGNATURE_VERIFICATION_RESULT) INSTANTIATE_STDLIB_METHOD(CONVERT_SIGNATURE) -} // namespace proof_system::plonk::stdlib::schnorr +} // namespace schnorr +} // namespace stdlib +} // namespace proof_system::plonk diff --git a/barretenberg/cpp/src/barretenberg/stdlib/encryption/schnorr/schnorr.hpp b/barretenberg/cpp/src/barretenberg/stdlib/encryption/schnorr/schnorr.hpp index fdff62ea9c0..082799a8ac6 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/encryption/schnorr/schnorr.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/encryption/schnorr/schnorr.hpp @@ -6,17 +6,30 @@ #include "../../primitives/point/point.hpp" #include "../../primitives/witness/witness.hpp" #include "barretenberg/crypto/schnorr/schnorr.hpp" -#include "barretenberg/stdlib/primitives/group/cycle_group.hpp" namespace proof_system::plonk { namespace stdlib { namespace schnorr { template struct signature_bits { - typename cycle_group::cycle_scalar s; - typename cycle_group::cycle_scalar e; + field_t s_lo; + field_t s_hi; + field_t e_lo; + field_t e_hi; }; +template struct wnaf_record { + std::vector> bits; + bool_t skew; +}; + +template wnaf_record convert_field_into_wnaf(C* context, const field_t& limb); + +template +point variable_base_mul(const point& pub_key, const point& current_accumulator, 
const wnaf_record& scalar); +template +point variable_base_mul(const point& pub_key, const field_t& low_bits, const field_t& high_bits); + template signature_bits convert_signature(C* context, const crypto::schnorr::signature& sig); template @@ -55,6 +68,8 @@ bool_t signature_verification_result(const byte_array& message, #define CONVERT_SIGNATURE(circuit_type) \ signature_bits convert_signature(circuit_type*, const crypto::schnorr::signature&) +EXTERN_STDLIB_METHOD(VARIABLE_BASE_MUL) +EXTERN_STDLIB_METHOD(CONVERT_FIELD_INTO_WNAF) EXTERN_STDLIB_METHOD(VERIFY_SIGNATURE_INTERNAL) EXTERN_STDLIB_METHOD(VERIFY_SIGNATURE) EXTERN_STDLIB_METHOD(SIGNATURE_VERIFICATION_RESULT) diff --git a/barretenberg/cpp/src/barretenberg/stdlib/encryption/schnorr/schnorr.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/encryption/schnorr/schnorr.test.cpp index d851b2fcd63..9e7110e844c 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/encryption/schnorr/schnorr.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/encryption/schnorr/schnorr.test.cpp @@ -19,6 +19,163 @@ using field_ct = field_t; using point_ct = point; using witness_ct = witness_t; +auto run_scalar_mul_test = [](grumpkin::fr scalar_mont, bool expect_verify) { + Composer composer = Composer(); + + grumpkin::fr scalar = scalar_mont.from_montgomery_form(); + + uint256_t scalar_low{ scalar.data[0], scalar.data[1], 0ULL, 0ULL }; + uint256_t scalar_high{ scalar.data[2], scalar.data[3], 0ULL, 0ULL }; + + field_ct input_lo = witness_ct(&composer, scalar_low); + field_ct input_hi = witness_ct(&composer, scalar_high); + + grumpkin::g1::element expected = grumpkin::g1::one * scalar_mont; + expected = expected.normalize(); + point_ct point_input{ witness_ct(&composer, grumpkin::g1::affine_one.x), + witness_ct(&composer, grumpkin::g1::affine_one.y) }; + + point_ct output = variable_base_mul(point_input, input_lo, input_hi); + + if (expect_verify) { + EXPECT_EQ(output.x.get_value(), expected.x); + EXPECT_EQ(output.y.get_value(), 
expected.y); + }; + + info("composer gates = ", composer.get_num_gates()); + + bool result = composer.check_circuit(); + EXPECT_EQ(result, expect_verify); +}; + +typedef wnaf_record wnaf_record_ct; + +/** + * @brief Helper function to compare wnaf_records, useful since == on bool_ct's returns a bool_ct. + */ +bool compare_records(wnaf_record_ct a, wnaf_record_ct b) +{ + bool result = a.skew.witness_bool == b.skew.witness_bool; + if (result) { + for (size_t i = 0; i != a.bits.size(); ++i) { + bool a_bit = a.bits[i].witness_bool; + bool b_bit = b.bits[i].witness_bool; + result = result == false ? false : a_bit == b_bit; + } + } + return result; +} + +TEST(stdlib_schnorr, convert_field_into_wnaf_special) +{ + Composer composer = Composer(); + + // the wnaf_record ((b_1, ... b_128), skew) corresponding to the 129-bit non-negative value + // is, 2^128 + 2^127 w_1 + ... + 2 w_127 + w_128 - skew, where w_i = 1 if b_i is true, else -1.. + // We make some auxiliary wnaf records that will be helpful. 
+ std::vector false128(128, false); + wnaf_record_ct all_false({ .bits = false128, .skew = false }); + + std::vector true128(128, true); + wnaf_record_ct all_true({ .bits = true128, .skew = true }); + + // establish a list of special values to be converted to a wnaf_record + std::vector special_values({ 1, + 0, + (static_cast(1) << 128) - 1, + (static_cast(1) << 128) + 1, + (static_cast(1) << 128), + (static_cast(1) << 129) - 1 }); + + size_t num_special_values(special_values.size()); + + // convert these values to field elements + std::vector special_field_elts(num_special_values); + for (size_t i = 0; i != num_special_values; ++i) { + field_ct a(special_values[i]); + special_field_elts[i] = a; + }; + + // manually build the expected wnaf records + // 1 is given by ((false, ..., false), false) + auto record_1 = all_false; + + // 0 is given by ((false, ..., false), true) + auto record_0 = all_false; + record_0.skew = true; + + // 2^128 - 1 = 2^128 - 2^127 + (2^127 - 1) - 0 is given by((false, true, ..., true), false) + auto record_128_minus_1 = all_true; + record_128_minus_1.bits[0] = false; + record_128_minus_1.skew = false; + + // 2^128 + 1 = 2^128 + (2^127 - (2^127 - 1)) - 0 is given by((true, false, false, ..., false), false) + auto record_128_plus_1 = all_false; + record_128_plus_1.bits[0] = true; + + // 2^128 = 2^128 + (2^127 - (2^127 - 1)) - 1 is given by((true, false, false, ..., false), true) + auto record_128 = all_false; + record_128.bits[0] = true; + record_128.skew = true; + + // // 2^129-1 = 2^128 + 2^127 + ... + 1 - 0 should be given by ((true, true, ..., true), false). + // Note: fixed_wnaf<129, 1, 1>, used inside of convert_field_into_wnaf, incorrectly computes the the coefficient + // of + // 2^127 in the wnaf representation of to be -1. 
+ auto record_max = all_true; + record_max.skew = false; + + std::vector expected_wnaf_records( + { record_1, record_0, record_128_minus_1, record_128_plus_1, record_128, record_max }); + + // integers less than 2^128 are converted correctly + for (size_t i = 0; i != num_special_values; ++i) { + field_ct elt = special_field_elts[i]; + wnaf_record_ct record = convert_field_into_wnaf(&composer, elt); + wnaf_record_ct expected_record = expected_wnaf_records[i]; + bool records_equal = compare_records(record, expected_record); + ASSERT_TRUE(records_equal); + ASSERT_FALSE(composer.failed()); + } +} + +TEST(stdlib_schnorr, convert_field_into_wnaf) +{ + Composer composer = Composer(); + + grumpkin::fq scalar_mont = grumpkin::fq::random_element(); + grumpkin::fq scalar = scalar_mont.from_montgomery_form(); + + // our wnaf records only represent 128 bits, so we test by generating a field + // element and then truncating. + scalar.data[2] = 0ULL; + scalar.data[3] = 0ULL; + + scalar = scalar.to_montgomery_form(); + + field_ct input(&composer, scalar); + convert_field_into_wnaf(&composer, input); + + info("composer gates = ", composer.get_num_gates()); + + bool result = composer.check_circuit(); + EXPECT_EQ(result, true); +} + +/** + * @brief Test variable_base_mul(const point& pub_key, + * const field_t& low_bits, + * const field_t& high_bits) + * by taking a random field Fr element s, computing the corresponding Grumpkin G1 element both natively + * and using the function in question (splitting s into 128-bit halves), then comparing the results. + */ +TEST(stdlib_schnorr, test_scalar_mul_low_high) +{ + run_scalar_mul_test(grumpkin::fr::random_element(), true); + run_scalar_mul_test(grumpkin::fr(static_cast(1) << 128), false); + run_scalar_mul_test(0, false); +} + /** * @test Test circuit verifying a Schnorr signature generated by \see{crypto::schnorr::verify_signature}. * We only test: messages signed and verified using Grumpkin and the BLAKE2s hash function. 
We only test From 87ff132e520a887024067110b8467b83fe55b89c Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Thu, 21 Sep 2023 10:47:23 +0000 Subject: [PATCH 25/50] compiler fix --- .../barretenberg/stdlib/primitives/group/cycle_group.cpp | 8 -------- 1 file changed, 8 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp index b33b51819d3..5bd96646e21 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp @@ -996,14 +996,6 @@ typename cycle_group::batch_mul_internal_output cycle_group: { ASSERT(scalars.size() == base_points.size()); - Composer* context = nullptr; - for (auto& scalar : scalars) { - if (scalar.get_context() != nullptr) { - context = scalar.get_context(); - break; - } - } - const size_t num_points = base_points.size(); using MultiTableId = plookup::MultiTableId; using ColumnIdx = plookup::ColumnIdx; From 923650692f4ad66421d4983a9b8525e9ea800006 Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Thu, 21 Sep 2023 10:50:03 +0000 Subject: [PATCH 26/50] revert pedersen c_bind --- .../crypto/pedersen_hash/c_bind_new.cpp | 23 +++++++++---------- 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/c_bind_new.cpp b/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/c_bind_new.cpp index 1c75d6f6ae3..0c0d506112c 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/c_bind_new.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/c_bind_new.cpp @@ -1,20 +1,23 @@ #include "barretenberg/common/mem.hpp" #include "barretenberg/common/serialize.hpp" #include "c_bind.hpp" -#include "pedersen_refactor.hpp" +#include "pedersen.hpp" +#include "pedersen_lookup.hpp" extern "C" { WASM_EXPORT void pedersen_hash_init() { - // TODO delete + // TODO: do 
we need this if we are using lookup-pedersen in merkle trees? + crypto::generators::init_generator_data(); + crypto::pedersen_hash::lookup::init(); } WASM_EXPORT void pedersen_hash_pair(uint8_t const* left, uint8_t const* right, uint8_t* result) { auto lhs = barretenberg::fr::serialize_from_buffer(left); auto rhs = barretenberg::fr::serialize_from_buffer(right); - auto r = crypto::pedersen_hash_refactor::hash_multiple({ lhs, rhs }); + auto r = crypto::pedersen_hash::lookup::hash_multiple({ lhs, rhs }); barretenberg::fr::serialize_to_buffer(r, result); } @@ -22,7 +25,7 @@ WASM_EXPORT void pedersen_hash_multiple(uint8_t const* inputs_buffer, uint8_t* o { std::vector to_compress; read(inputs_buffer, to_compress); - auto r = crypto::pedersen_hash_refactor::hash_multiple(to_compress); + auto r = crypto::pedersen_hash::lookup::hash_multiple(to_compress); barretenberg::fr::serialize_to_buffer(r, output); } @@ -32,7 +35,7 @@ WASM_EXPORT void pedersen_hash_multiple_with_hash_index(uint8_t const* inputs_bu { std::vector to_compress; read(inputs_buffer, to_compress); - auto r = crypto::pedersen_hash_refactor::hash_multiple(to_compress, ntohl(*hash_index)); + auto r = crypto::pedersen_hash::lookup::hash_multiple(to_compress, ntohl(*hash_index)); barretenberg::fr::serialize_to_buffer(r, output); } @@ -42,6 +45,7 @@ WASM_EXPORT void pedersen_hash_multiple_with_hash_index(uint8_t const* inputs_bu * e.g. 
* input: [1][2][3][4] * output: [1][2][3][4][compress(1,2)][compress(3,4)][compress(5,6)] + * */ WASM_EXPORT void pedersen_hash_to_tree(fr::vec_in_buf data, fr::vec_out_buf out) { @@ -50,14 +54,9 @@ WASM_EXPORT void pedersen_hash_to_tree(fr::vec_in_buf data, fr::vec_out_buf out) fields.reserve(num_outputs); for (size_t i = 0; fields.size() < num_outputs; i += 2) { - fields.push_back(crypto::pedersen_hash_refactor::hash_multiple({ fields[i], fields[i + 1] })); + fields.push_back(crypto::pedersen_hash::lookup::hash_multiple({ fields[i], fields[i + 1] })); } - auto buf_size = 4 + num_outputs * sizeof(grumpkin::fq); - // TODO(@charlielye) Can we get rid of cppcoreguidelines-owning-memory warning here? - // NOLINTNEXTLINE(cppcoreguidelines-owning-memory, cppcoreguidelines-no-malloc) - *out = static_cast(malloc(buf_size)); - auto* dst = *out; - write(dst, fields); + *out = to_heap_buffer(fields); } } \ No newline at end of file From c899d900527cc59452ea3d75199c2d45e5b3a32c Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Thu, 21 Sep 2023 10:54:16 +0000 Subject: [PATCH 27/50] revert crypto/schnorr --- barretenberg/cpp/src/barretenberg/crypto/schnorr/schnorr.tcc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/crypto/schnorr/schnorr.tcc b/barretenberg/cpp/src/barretenberg/crypto/schnorr/schnorr.tcc index e8200a45e82..6984479398e 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/schnorr/schnorr.tcc +++ b/barretenberg/cpp/src/barretenberg/crypto/schnorr/schnorr.tcc @@ -1,7 +1,7 @@ #pragma once #include "barretenberg/crypto/hmac/hmac.hpp" -#include "barretenberg/crypto/pedersen_hash/pedersen_refactor.hpp" +#include "barretenberg/crypto/pedersen_commitment/pedersen.hpp" #include "schnorr.hpp" @@ -43,7 +43,7 @@ static auto generate_schnorr_challenge(const std::string& message, { using Fq = typename G1::coordinate_field; // create challenge message pedersen_commitment(R.x, pubkey) - Fq compressed_keys = 
crypto::pedersen_hash_refactor::hash({ R.x, pubkey.x, pubkey.y }); + Fq compressed_keys = crypto::pedersen_commitment::compress_native({ R.x, pubkey.x, pubkey.y }); std::vector e_buffer; write(e_buffer, compressed_keys); std::copy(message.begin(), message.end(), std::back_inserter(e_buffer)); From 6c8adcc5b236239a809e7e3f2a98a088eb522512 Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Fri, 22 Sep 2023 09:36:12 +0000 Subject: [PATCH 28/50] bugfix --- .../cpp/src/barretenberg/stdlib/primitives/field/field.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.cpp index 733b8dd2534..86dea4d25b2 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.cpp @@ -724,7 +724,8 @@ field_t field_t::conditional_assign(const bool return predicate.get_value() ? lhs : rhs; } // if lhs and rhs are the same witness, just return it! - if (lhs.get_witness_index() == rhs.get_witness_index()) { + if (lhs.get_witness_index() == rhs.get_witness_index() && (lhs.additive_constant == rhs.additive_constant) && + (lhs.multiplicative_constant == rhs.multiplicative_constant)) { return lhs; } return (lhs - rhs).madd(predicate, rhs); From f02c8ae8703cb46f1927acde00fc06bd227eaf94 Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Thu, 28 Sep 2023 11:06:27 +0000 Subject: [PATCH 29/50] Yeet. 
--- .../pedersen_commitment/pedersen_refactor.cpp | 40 +++++++------------ .../pedersen_commitment/pedersen_refactor.hpp | 5 --- 2 files changed, 14 insertions(+), 31 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_refactor.cpp b/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_refactor.cpp index e9d0bd254f3..cd044164241 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_refactor.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_refactor.cpp @@ -16,6 +16,9 @@ namespace crypto { * * @details This method uses `Curve::BaseField` members as inputs. This aligns with what we expect when creating * grumpkin commitments to field elements inside a BN254 SNARK circuit. + * + * @note Fq is the *coordinate field* of Curve. Curve itself is a SNARK-friendly curve, + * i.e. Fq represents the native field type of the SNARK circuit. * @param inputs * @param hash_index * @param generator_context @@ -28,36 +31,21 @@ typename Curve::AffineElement pedersen_commitment_refactor::commit_native const auto generators = generator_context->conditional_extend(inputs.size() + hash_index); Element result = Group::point_at_infinity; + // `Curve::Fq` represents the field that `Curve` is defined over (i.e. x/y coordinate field) and `Curve::Fr` is the + // field whose modulus = the group order of `Curve`. + // The `Curve` we're working over here is a generic SNARK-friendly curve. i.e. the SNARK circuit is defined over a + // field equivalent to `Curve::Fq`. This adds complexity when we wish to commit to SNARK circuit field elements, as + // these are members of `Fq` and *not* `Fr`. We cast to `uint256_t` in order to convert an element of `Fq` into an + // `Fr` element, which is the required type when performing scalar multiplications. + static_assert(Fr::modulus > Fq::modulus, + "pedersen_commitment::commit_native Curve subgroup field is smaller than coordinate field. 
Cannot " + "perform injective conversion"); for (size_t i = 0; i < inputs.size(); ++i) { - result += Element(generators.get(i, hash_index)) * static_cast(inputs[i]); + Fr scalar_multiplier(static_cast(inputs[i])); + result += Element(generators.get(i, hash_index)) * scalar_multiplier; } return result; } -/** - * @brief Given a vector of fields, generate a pedersen commitment using the indexed generators. - * - * @details This method uses `ScalarField` members as inputs. This aligns with what we expect for a "canonical" - * elliptic curve commitment function. However, when creating grumpkin commitments inside a BN254 SNARK crcuit it is not - * efficient to pack data into grumpkin::fr elements, as grumpkin::fq is the native field of BN254 circuits. - * - * @note This method is used currently for tests. If we find no downstream use for it by Jan 2024, delete! - * @param inputs - * @param hash_index - * @param generator_context - * @return Curve::AffineElement - */ -template -typename Curve::AffineElement pedersen_commitment_refactor::commit_native( - const std::vector& inputs, const size_t hash_index, const generator_data* const generator_context) -{ - const auto generators = generator_context->conditional_extend(inputs.size() + hash_index); - Element result = Group::point_at_infinity; - - for (size_t i = 0; i < inputs.size(); ++i) { - result += Element(generators.get(i, hash_index)) * (inputs[i]); - } - return result; -} template class pedersen_commitment_refactor; } // namespace crypto diff --git a/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_refactor.hpp b/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_refactor.hpp index 75f17d2d28f..5fec5e24186 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_refactor.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/pedersen_refactor.hpp @@ -103,11 +103,6 @@ template class pedersen_commitment_refactor { const std::vector& 
inputs, size_t hash_index = 0, const generator_data* generator_context = generator_data::get_default_generators()); - - static AffineElement commit_native( - const std::vector& inputs, - size_t hash_index = 0, - const generator_data* generator_context = generator_data::get_default_generators()); }; extern template class pedersen_commitment_refactor; From 6d7553ebb88326c328d6fe00e0503eb095d64f84 Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Thu, 28 Sep 2023 20:34:18 +0000 Subject: [PATCH 30/50] remove oof --- .../plookup_tables/fixed_base/fixed_base_params.hpp | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/fixed_base/fixed_base_params.hpp b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/fixed_base/fixed_base_params.hpp index c6f44c09346..2f69820961e 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/fixed_base/fixed_base_params.hpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/fixed_base/fixed_base_params.hpp @@ -1,6 +1,7 @@ #pragma once #include "barretenberg/plonk/proof_system/constants.hpp" +#include #include #include #include @@ -56,6 +57,7 @@ struct FixedBaseParams { { return (num_bits / BITS_PER_TABLE) + ((num_bits % BITS_PER_TABLE == 0) ? 0 : 1); } + /** * @brief For a given multitable index, how many scalar mul bits are we traversing with our multitable? * @@ -65,9 +67,10 @@ struct FixedBaseParams { static constexpr size_t get_num_bits_of_multi_table(const size_t multitable_index) { ASSERT(multitable_index < NUM_FIXED_BASE_MULTI_TABLES); - // This...is very hacky. - const bool is_lo_multi_table = (multitable_index & 1) == 0; - return is_lo_multi_table ? 
BITS_PER_LO_SCALAR : BITS_PER_HI_SCALAR; + constexpr std::array MULTI_TABLE_BIT_LENGTHS{ + BITS_PER_LO_SCALAR, BITS_PER_HI_SCALAR, BITS_PER_LO_SCALAR, BITS_PER_HI_SCALAR + }; + return MULTI_TABLE_BIT_LENGTHS[multitable_index]; } }; } // namespace plookup \ No newline at end of file From e96faf1c472cb3ccb25169f8bc69632d688b6808 Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Thu, 28 Sep 2023 20:40:46 +0000 Subject: [PATCH 31/50] comment fixes --- .../barretenberg/crypto/pedersen_hash/pedersen_refactor.hpp | 2 +- .../cpp/src/barretenberg/ecc/fields/field_declarations.hpp | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen_refactor.hpp b/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen_refactor.hpp index 0e8628d3b06..abd898cc326 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen_refactor.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/pedersen_hash/pedersen_refactor.hpp @@ -15,7 +15,7 @@ namespace crypto { * * To hash to a size-n list of field elements `x`, we return the X-coordinate of: * - * Hash(x) = n.[h] + Commit(x) + * Hash(x) = n.[h] + x_0. [g_0] + x_1 . [g_1] +... + x_n . [g_n] * * Where `g` is a list of generator points defined by `generator_data` * And `h` is a unique generator whose domain separator is the string `pedersen_hash_length`. diff --git a/barretenberg/cpp/src/barretenberg/ecc/fields/field_declarations.hpp b/barretenberg/cpp/src/barretenberg/ecc/fields/field_declarations.hpp index cde5364aa61..4e2b292fa8c 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/fields/field_declarations.hpp +++ b/barretenberg/cpp/src/barretenberg/ecc/fields/field_declarations.hpp @@ -89,6 +89,12 @@ template struct alignas(32) field { constexpr field(const uint64_t a, const uint64_t b, const uint64_t c, const uint64_t d) noexcept : data{ a, b, c, d } {}; + /** + * @brief Convert a 512-bit big integer into a field element. 
+ * + * @details Used for deriving field elements from random values. 512-bits prevents biased output as 2^512>>modulus + * + */ constexpr explicit field(const uint512_t& input) noexcept { uint256_t value = (input % modulus).lo; From 56592400eb2513f33e2fffaac462d155a14f3975 Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Thu, 28 Sep 2023 20:41:53 +0000 Subject: [PATCH 32/50] comments --- .../cpp/src/barretenberg/ecc/groups/affine_element_impl.hpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element_impl.hpp b/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element_impl.hpp index 095b30b3d82..ffb19ae8c36 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element_impl.hpp +++ b/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element_impl.hpp @@ -211,6 +211,8 @@ constexpr std::optional> affine_element::de auto [found_root, y] = yy.sqrt(); if (found_root) { + // This is for determinism; a different sqrt algorithm could give -y instead of y and so this parity check + // allows all algorithms to get the "same" y if (uint256_t(y).get_bit(0) != sign_bit) { y = -y; } From 730e7eea53f0c71aaf7e0eee230103b5ad3e74b1 Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Thu, 28 Sep 2023 20:47:53 +0000 Subject: [PATCH 33/50] fix --- .../ecc/groups/affine_element.hpp | 6 +- .../ecc/groups/affine_element_impl.hpp | 73 +++++++++---------- 2 files changed, 39 insertions(+), 40 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element.hpp b/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element.hpp index 9deba2d3569..0c7c33cb482 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element.hpp +++ b/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element.hpp @@ -6,6 +6,8 @@ #include namespace barretenberg::group_elements { +template +concept SupportsHashToCurve = T::can_hash_to_curve; template class alignas(64) affine_element { public: using in_buf = const 
uint8_t*; @@ -85,8 +87,8 @@ template class alignas(64) affine_el template > static affine_element hash_to_curve(uint64_t seed) noexcept; - template > - static affine_element hash_to_curve(const std::vector& seed) noexcept; + static affine_element hash_to_curve(const std::vector& seed, uint8_t attempt_count = 0) noexcept + requires SupportsHashToCurve; constexpr bool operator==(const affine_element& other) const noexcept; diff --git a/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element_impl.hpp b/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element_impl.hpp index ffb19ae8c36..e74938ff495 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element_impl.hpp +++ b/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element_impl.hpp @@ -248,49 +248,46 @@ affine_element affine_element::hash_to_curve(uint64_t seed } template -template -affine_element affine_element::hash_to_curve(const std::vector& seed) noexcept +affine_element affine_element::hash_to_curve(const std::vector& seed, + uint8_t attempt_count) noexcept + requires SupportsHashToCurve + { std::vector target_seed(seed); - - // expand by 33 bytes to cover incremental hash attempts + // expand by 2 bytes to cover incremental hash attempts const size_t seed_size = seed.size(); - for (size_t i = 0; i < 33; ++i) { - target_seed.emplace_back(0); + for (size_t i = 0; i < 2; ++i) { + target_seed.push_back(0); } - uint16_t attempt_counter = 0; - - while (true) { - auto hi = static_cast(attempt_counter >> static_cast(8)); - auto lo = static_cast(attempt_counter & static_cast(0xff)); - target_seed[seed_size] = hi; - target_seed[seed_size + 1] = lo; - target_seed[target_seed.size() - 1] = 0; - std::array hash_hi = sha256::sha256(target_seed); - target_seed[target_seed.size() - 1] = 1; - std::array hash_lo = sha256::sha256(target_seed); - std::vector gg(hash_hi.begin(), hash_hi.end()); - std::vector ff(hash_lo.begin(), hash_lo.end()); - uint256_t x_lo = 0; - uint256_t x_hi = 0; - // uint8_t* f = 
&hash_lo[0]; - // uint8_t* g = &hash_hi[0]; - read(ff, x_lo); - read(gg, x_hi); - // numeric::read(*f, x_lo); - // numeric::read(*g, x_hi); - uint512_t x_full(x_lo, x_hi); - Fq x(x_full); - bool sign_bit = false; - sign_bit = x_hi.get_bit(0); - std::optional result = derive_from_x_coordinate(x, sign_bit); - - if (result.has_value()) { - return result.value(); - } - attempt_counter++; + target_seed[seed_size] = attempt_count; + target_seed.back() = 0; + const auto hash_hi = sha256::sha256(target_seed); + target_seed.back() = 1; + const auto hash_lo = sha256::sha256(target_seed); + // custom serialize methods as common/serialize.hpp is not constexpr + // (next PR will make this method constexpr) + const auto read_uint256 = [](const uint8_t* in) { + const auto read_limb = [](const uint8_t* in, uint64_t& out) { + for (size_t i = 0; i < 8; ++i) { + out += static_cast(in[i]) << ((7 - i) * 8); + } + }; + uint256_t out = 0; + read_limb(&in[0], out.data[3]); + read_limb(&in[8], out.data[2]); + read_limb(&in[16], out.data[1]); + read_limb(&in[24], out.data[0]); + return out; + }; + // interpret 64 byte hash output as a uint512_t, reduce to Fq element + //(512 bits of entropy ensures result is not biased as 512 >> Fq::modulus.get_msb()) + Fq x(uint512_t(read_uint256(&hash_lo[0]), read_uint256(&hash_hi[0]))); + bool sign_bit = hash_hi[0] > 127; + std::optional result = derive_from_x_coordinate(x, sign_bit); + if (result.has_value()) { + return result.value(); } - return affine_element(0, 0); + return hash_to_curve(seed, attempt_count + 1); } template From 0c7e95589c966d8ec65a7df7abdff1c9df09263e Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Thu, 28 Sep 2023 20:52:06 +0000 Subject: [PATCH 34/50] fix --- barretenberg/cpp/src/barretenberg/ecc/groups/group.hpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/ecc/groups/group.hpp b/barretenberg/cpp/src/barretenberg/ecc/groups/group.hpp index 846b2fda55d..38286d9e805 100644 --- 
a/barretenberg/cpp/src/barretenberg/ecc/groups/group.hpp +++ b/barretenberg/cpp/src/barretenberg/ecc/groups/group.hpp @@ -136,8 +136,6 @@ template ((gen_idx >> 8) & mask); generator_preimage[35] = static_cast(gen_idx & mask); auto result = affine_element::hash_to_curve(generator_preimage); - ASSERT(result.x != 0); - ASSERT(result.y != 0); return result; } From f782338d792355b42c179501cb199e532885891b Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Thu, 28 Sep 2023 20:52:24 +0000 Subject: [PATCH 35/50] fix --- barretenberg/cpp/src/barretenberg/ecc/groups/group.hpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/ecc/groups/group.hpp b/barretenberg/cpp/src/barretenberg/ecc/groups/group.hpp index 38286d9e805..a7eb24b92ea 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/groups/group.hpp +++ b/barretenberg/cpp/src/barretenberg/ecc/groups/group.hpp @@ -135,8 +135,7 @@ template ((gen_idx >> 16) & mask); generator_preimage[34] = static_cast((gen_idx >> 8) & mask); generator_preimage[35] = static_cast(gen_idx & mask); - auto result = affine_element::hash_to_curve(generator_preimage); - return result; + return affine_element::hash_to_curve(generator_preimage); } BBERG_INLINE static void conditional_negate_affine(const affine_element* src, From 3db157697bd57d6b45d5e1f30679d96e4ee1c653 Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Thu, 28 Sep 2023 20:53:44 +0000 Subject: [PATCH 36/50] comments --- .../circuit_builder/ultra_circuit_builder.cpp | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.cpp b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.cpp index 85dd86a8104..0eb7c274e8a 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.cpp +++ 
b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.cpp @@ -464,14 +464,10 @@ template void UltraCircuitBuilder_::create_ecc_add_gate(const } /** - * @brief Create an elliptic curve addition gate + * @brief Create an elliptic curve doubling gate * - * @details x and y are defined over scalar field. Addition can handle applying the curve endomorphism to one of the - * points being summed at the time of addition. * - * @param in Elliptic curve point addition gate parameters, including the the affine coordinates of the two points being - * added, the resulting point coordinates and the selector values that describe whether the endomorphism is used on the - * second point and whether it is negated. + * @param in Elliptic curve point doubling gate parameters */ template void UltraCircuitBuilder_::create_ecc_dbl_gate(const ecc_dbl_gate_& in) { From 664c8691378864c7eb663f967b4adf9e1d2d2443 Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Thu, 28 Sep 2023 20:54:17 +0000 Subject: [PATCH 37/50] fix --- .../circuit_builder/ultra_circuit_builder.cpp | 98 ------------------- 1 file changed, 98 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.cpp b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.cpp index 0eb7c274e8a..25199d85959 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.cpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.cpp @@ -524,104 +524,6 @@ template void UltraCircuitBuilder_::create_ecc_dbl_gate(const q_aux.emplace_back(0); ++this->num_gates; } -// /** -// * @brief Create a gate where we validate an elliptic curve point doubling -// * (x1, y1) * 2 = (x3, y3) -// * @tparam FF -// * @param in -// */ -// template void UltraCircuitBuilder_::create_ecc_dbl_gate(const ecc_dbl_gate_& in) -// { -// const auto x1 = this->get_variable(in.x1); -// 
const auto x3 = this->get_variable(in.x3); -// const auto y1 = this->get_variable(in.y1); - -// // lambda = 3x^2 / 2y -// const auto three_x1_sqr_v = x1 * x1 * 3; -// const auto three_x1_sqr = this->add_variable(three_x1_sqr_v); -// create_poly_gate({ -// .a = in.x1, -// .b = in.x1, -// .c = three_x1_sqr, -// .q_m = 3, -// .q_l = 0, -// .q_r = 0, -// .q_o = -1, -// .q_c = 0, -// }); -// const auto lambda_v = three_x1_sqr_v / (y1 + y1); -// const auto lambda = this->add_variable(lambda_v); -// create_poly_gate({ -// .a = lambda, -// .b = in.y1, -// .c = three_x1_sqr, -// .q_m = 2, -// .q_l = 0, -// .q_r = 0, -// .q_o = -1, -// .q_c = 0, -// }); - -// // lambda * lambda - 2x1 - x3 = 0 -// const auto lambda_sqr_v = lambda_v * lambda_v; -// const auto lambda_sqr = this->add_variable(lambda_sqr_v); -// create_poly_gate({ -// .a = lambda, -// .b = lambda, -// .c = lambda_sqr, -// .q_m = 1, -// .q_l = 0, -// .q_r = 0, -// .q_o = -1, -// .q_c = 0, -// }); -// create_poly_gate({ -// .a = lambda_sqr, -// .b = in.x1, -// .c = in.x3, -// .q_m = 0, -// .q_l = 1, -// .q_r = -2, -// .q_o = -1, -// .q_c = 0, -// }); - -// // lambda * (x1 - x3) - y1 = 0 -// const auto x1_sub_x3_v = x1 - x3; -// const auto x1_sub_x3 = this->add_variable(x1_sub_x3_v); -// create_poly_gate({ -// .a = in.x1, -// .b = in.x3, -// .c = x1_sub_x3, -// .q_m = 0, -// .q_l = 1, -// .q_r = -1, -// .q_o = -1, -// .q_c = 0, -// }); -// const auto lambda_mul_x1_sub_x3_v = lambda_v * x1_sub_x3_v; -// const auto lambda_mul_x1_sub_x3 = this->add_variable(lambda_mul_x1_sub_x3_v); -// create_poly_gate({ -// .a = lambda, -// .b = x1_sub_x3, -// .c = lambda_mul_x1_sub_x3, -// .q_m = 1, -// .q_l = 0, -// .q_r = 0, -// .q_o = -1, -// .q_c = 0, -// }); -// create_poly_gate({ -// .a = lambda_mul_x1_sub_x3, -// .b = in.y1, -// .c = in.y3, -// .q_m = 0, -// .q_l = 1, -// .q_r = -1, -// .q_o = -1, -// .q_c = 0, -// }); -// } /** * @brief Add a gate equating a particular witness to a constant, fixing it the value From 
be67530b500da82f6744bc394136999fed1fc2da Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Thu, 28 Sep 2023 20:56:17 +0000 Subject: [PATCH 38/50] fix --- .../proof_system/circuit_builder/ultra_circuit_builder.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.cpp b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.cpp index 25199d85959..0945eb39494 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.cpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.cpp @@ -483,7 +483,6 @@ template void UltraCircuitBuilder_::create_ecc_dbl_gate(const can_fuse_into_previous_gate = can_fuse_into_previous_gate && (w_r[this->num_gates - 1] == in.x1); can_fuse_into_previous_gate = can_fuse_into_previous_gate && (w_o[this->num_gates - 1] == in.y1); - // q_elliptic_double.emplace_back(1); if (can_fuse_into_previous_gate) { q_elliptic_double[this->num_gates - 1] = 1; } else { From 9563341e86215d5677a8c1cad08e47deae72b60e Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Thu, 28 Sep 2023 20:56:51 +0000 Subject: [PATCH 39/50] fix --- .../circuit_builder/ultra_circuit_builder.test.cpp | 5 ----- 1 file changed, 5 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.test.cpp b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.test.cpp index 66718faeab0..df20fc30d19 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.test.cpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.test.cpp @@ -194,11 +194,6 @@ TEST(ultra_circuit_constructor, test_elliptic_double_gate) bool result = circuit_constructor.check_circuit(); EXPECT_EQ(result, true); - // EXPECT_TRUE(saved_state.is_same_state(circuit_constructor)); - - 
// circuit_constructor.create_ecc_dbl_gate({ x1, y1, x3 + 1, y3 }); - - // EXPECT_EQ(circuit_constructor.check_circuit(), false); } TEST(ultra_circuit_constructor, non_trivial_tag_permutation) From c0ce229740e25414e9da903db09be2b726179816 Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Thu, 28 Sep 2023 20:57:32 +0000 Subject: [PATCH 40/50] fix --- .../proof_system/plookup_tables/fixed_base/fixed_base.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/fixed_base/fixed_base.cpp b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/fixed_base/fixed_base.cpp index feae413938a..e6104b3f9d7 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/fixed_base/fixed_base.cpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/fixed_base/fixed_base.cpp @@ -124,7 +124,7 @@ std::optional> table::get_lookup_table_ids_for_point if (input == native_pedersen::get_rhs_generator()) { return { { FIXED_BASE_RIGHT_LO, FIXED_BASE_RIGHT_HI } }; } - return {}; + return std::nullopt; } /** @@ -149,7 +149,7 @@ std::optional table::get_generator_offset_for_tabl if (table_id == FIXED_BASE_RIGHT_HI) { return fixed_base_table_offset_generators[3]; } - return {}; + return std::nullopt; } using function_ptr = std::array (*)(const std::array); @@ -193,7 +193,7 @@ BasicTable table::generate_basic_fixed_base_table(BasicTableId id, size_t basic_ const bool is_small_table = (multitable_bits - bits_covered_by_previous_tables_in_multitable) < BITS_PER_TABLE; const size_t table_bits = is_small_table ? 
multitable_bits - bits_covered_by_previous_tables_in_multitable : BITS_PER_TABLE; - const size_t table_size = static_cast(1ULL << table_bits); + const auto table_size = static_cast(1ULL << table_bits); BasicTable table; table.id = id; table.table_index = basic_table_index; From e672785a4f2ee455bdac8b5739b9189c871954d3 Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Thu, 28 Sep 2023 21:03:09 +0000 Subject: [PATCH 41/50] fix --- .../stdlib/primitives/group/cycle_group.cpp | 36 +++++++++---------- .../stdlib/primitives/group/cycle_group.hpp | 4 ++- 2 files changed, 21 insertions(+), 19 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp index 5bd96646e21..94bbf34224b 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp @@ -12,11 +12,11 @@ namespace proof_system::plonk::stdlib { template cycle_group::cycle_group(Composer* _context) - : context(_context) - , x(0) + : x(0) , y(0) , _is_infinity(true) , _is_constant(true) + , context(_context) {} /** @@ -28,16 +28,19 @@ cycle_group::cycle_group(Composer* _context) */ template cycle_group::cycle_group(field_t _x, field_t _y, bool_t is_infinity) - : context(_x.get_context() == nullptr - ? _y.get_context() == nullptr - ? is_infinity.get_context() == nullptr ? 
nullptr : is_infinity.get_context() - : _y.get_context() - : _x.get_context()) - , x(_x.normalize()) + : x(_x.normalize()) , y(_y.normalize()) , _is_infinity(is_infinity) , _is_constant(_x.is_constant() && _y.is_constant() && is_infinity.is_constant()) -{} +{ + if (_x.get_context() != nullptr) { + context = _x.get_context(); + } else if (_y.get_context() != nullptr) { + context = _y.get_context(); + } else { + context = is_infinity.get_context(); + } +} /** * @brief Construct a new cycle group::cycle group object @@ -54,11 +57,11 @@ cycle_group::cycle_group(field_t _x, field_t _y, bool_t is_infinity) */ template cycle_group::cycle_group(const FF& _x, const FF& _y, bool is_infinity) - : context(nullptr) - , x(_x) + : x(_x) , y(_y) , _is_infinity(is_infinity) , _is_constant(true) + , context(nullptr) { ASSERT(get_value().on_curve()); } @@ -74,17 +77,17 @@ cycle_group::cycle_group(const FF& _x, const FF& _y, bool is_infinity) */ template cycle_group::cycle_group(const AffineElement& _in) - : context(nullptr) - , x(_in.x) + : x(_in.x) , y(_in.y) , _is_infinity(_in.is_point_at_infinity()) , _is_constant(true) + , context(nullptr) {} /** * @brief Converts an AffineElement into a circuit witness. * - * @details Somewhat expensive as we do an on-curve check and `_is_infiity` is a witness and not a constant. + * @details Somewhat expensive as we do an on-curve check and `_is_infinity` is a witness and not a constant. * If an element is being converted where it is known the element is on the curve and/or cannot be point at * infinity, it is best to use other methods (e.g. 
direct conversion of field_t coordinates) * @@ -136,10 +139,7 @@ template Composer* cycle_group::get_context(const if (get_context() != nullptr) { return get_context(); } - if (other.get_context() != nullptr) { - return other.get_context(); - } - return nullptr; + return other.get_context(); } template typename cycle_group::AffineElement cycle_group::get_value() const diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp index 4316ee76fa7..9b45df007f7 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp @@ -205,14 +205,16 @@ template class cycle_group { cycle_group operator*(const cycle_scalar& scalar) const; cycle_group& operator*=(const cycle_scalar& scalar); cycle_group operator/(const cycle_scalar& scalar) const; - Composer* context; + field_t x; field_t y; private: bool_t _is_infinity; bool _is_constant; + Composer* context; + private: static batch_mul_internal_output _variable_base_batch_mul_internal(std::span scalars, std::span base_points, std::span offset_generators, From 246bf4f2b022a3be3f816ecfc911945a6621c9dc Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Thu, 28 Sep 2023 21:03:26 +0000 Subject: [PATCH 42/50] more fix --- .../cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp | 1 - 1 file changed, 1 deletion(-) diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp index 9b45df007f7..6d07ce72eab 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp @@ -214,7 +214,6 @@ template class cycle_group { bool _is_constant; Composer* context; - private: static batch_mul_internal_output 
_variable_base_batch_mul_internal(std::span scalars, std::span base_points, std::span offset_generators, From d7c120e56a961057ec29115379590ae31ae816ce Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Thu, 28 Sep 2023 21:05:25 +0000 Subject: [PATCH 43/50] names are hard --- .../stdlib/primitives/group/cycle_group.cpp | 14 +++++++------- .../stdlib/primitives/group/cycle_group.hpp | 4 ++-- .../stdlib/primitives/group/cycle_group.test.cpp | 8 ++++---- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp index 94bbf34224b..fa8380c4819 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp @@ -367,10 +367,10 @@ cycle_group cycle_group::unconditional_subtract(const cycle_ * @return cycle_group */ template -cycle_group cycle_group::constrained_unconditional_add(const cycle_group& other) const +cycle_group cycle_group::checked_unconditional_add(const cycle_group& other) const { field_t x_delta = x - other.x; - x_delta.assert_is_not_zero("cycle_group::constrained_unconditional_add, x-coordinate collision"); + x_delta.assert_is_not_zero("cycle_group::checked_unconditional_add, x-coordinate collision"); return unconditional_add(other); } @@ -387,10 +387,10 @@ cycle_group cycle_group::constrained_unconditional_add(const * @return cycle_group */ template -cycle_group cycle_group::constrained_unconditional_subtract(const cycle_group& other) const +cycle_group cycle_group::checked_unconditional_subtract(const cycle_group& other) const { field_t x_delta = x - other.x; - x_delta.assert_is_not_zero("cycle_group::constrained_unconditional_subtract, x-coordinate collision"); + x_delta.assert_is_not_zero("cycle_group::checked_unconditional_subtract, x-coordinate collision"); return unconditional_subtract(other); } 
@@ -805,7 +805,7 @@ cycle_group::straus_lookup_table::straus_lookup_table(Composer* contex field_t modded_y = field_t::conditional_assign(base_point.is_point_at_infinity(), fallback_point.y, base_point.y); cycle_group modded_base_point(modded_x, modded_y, false); for (size_t i = 1; i < table_size; ++i) { - auto add_output = point_table[i - 1].constrained_unconditional_add(modded_base_point); + auto add_output = point_table[i - 1].checked_unconditional_add(modded_base_point); field_t x = field_t::conditional_assign(base_point.is_point_at_infinity(), offset_generator.x, add_output.x); field_t y = field_t::conditional_assign(base_point.is_point_at_infinity(), offset_generator.y, add_output.y); point_table[i] = cycle_group(x, y, false); @@ -866,7 +866,7 @@ cycle_group cycle_group::straus_lookup_table::read(const fie * If Composer is not ULTRA, number of bits per Straus round = 1, * which reduces to the basic double-and-add algorithm * - * @details If `unconditional_add = true`, we use `::unconditional_add` instead of `::constrained_unconditional_add`. + * @details If `unconditional_add = true`, we use `::unconditional_add` instead of `::checked_unconditional_add`. * Use with caution! Only should be `true` if we're doing an ULTRA fixed-base MSM so we know the points cannot * collide with the offset generators. 
* @@ -1266,7 +1266,7 @@ cycle_group cycle_group::batch_mul(const std::vector class cycle_group { cycle_group unconditional_add(const cycle_group& other) const requires IsNotUltraArithmetic; cycle_group unconditional_subtract(const cycle_group& other) const; - cycle_group constrained_unconditional_add(const cycle_group& other) const; - cycle_group constrained_unconditional_subtract(const cycle_group& other) const; + cycle_group checked_unconditional_add(const cycle_group& other) const; + cycle_group checked_unconditional_subtract(const cycle_group& other) const; cycle_group operator+(const cycle_group& other) const; cycle_group operator-(const cycle_group& other) const; cycle_group operator-() const; diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp index ae682b7e379..c2a8c1d2ec5 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp @@ -101,7 +101,7 @@ TYPED_TEST(CycleGroupTest, TestConstrainedUnconditionalAddSucceed) // case 1. valid unconditional add cycle_group_ct a = cycle_group_ct::from_witness(&composer, lhs); cycle_group_ct b = cycle_group_ct::from_witness(&composer, rhs); - cycle_group_ct c = a.constrained_unconditional_add(b); + cycle_group_ct c = a.checked_unconditional_add(b); AffineElement expected(Element(lhs) + Element(rhs)); AffineElement result = c.get_value(); EXPECT_EQ(result, expected); @@ -121,7 +121,7 @@ TYPED_TEST(CycleGroupTest, TestConstrainedUnconditionalAddFail) // case 2. 
invalid unconditional add cycle_group_ct a = cycle_group_ct::from_witness(&composer, lhs); cycle_group_ct b = cycle_group_ct::from_witness(&composer, rhs); - a.constrained_unconditional_add(b); + a.checked_unconditional_add(b); EXPECT_TRUE(composer.failed()); @@ -235,7 +235,7 @@ TYPED_TEST(CycleGroupTest, TestConstrainedUnconditionalSubtractSucceed) // case 1. valid unconditional add cycle_group_ct a = cycle_group_ct::from_witness(&composer, lhs); cycle_group_ct b = cycle_group_ct::from_witness(&composer, rhs); - cycle_group_ct c = a.constrained_unconditional_subtract(b); + cycle_group_ct c = a.checked_unconditional_subtract(b); AffineElement expected(Element(lhs) - Element(rhs)); AffineElement result = c.get_value(); EXPECT_EQ(result, expected); @@ -255,7 +255,7 @@ TYPED_TEST(CycleGroupTest, TestConstrainedUnconditionalSubtractFail) // case 2. invalid unconditional add cycle_group_ct a = cycle_group_ct::from_witness(&composer, lhs); cycle_group_ct b = cycle_group_ct::from_witness(&composer, rhs); - a.constrained_unconditional_subtract(b); + a.checked_unconditional_subtract(b); EXPECT_TRUE(composer.failed()); From c146851d47f1f91e7c949f495639919ea5253e24 Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Thu, 28 Sep 2023 21:07:54 +0000 Subject: [PATCH 44/50] fix --- .../barretenberg/stdlib/primitives/group/cycle_group.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp index fa8380c4819..7d3d0fd0bf4 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp @@ -416,7 +416,9 @@ template cycle_group cycle_group::operat auto y1 = y; auto x2 = other.x; auto y2 = other.y; - auto x_diff = x2.add_two(-x1, x_coordinates_match); // todo document this oddity + // if x_coordinates match, lambda 
triggers a divide by zero error. + // Adding in `x_coordinates_match` ensures that lambda will always be well-formed + auto x_diff = x2.add_two(-x1, x_coordinates_match); auto lambda = (y2 - y1) / x_diff; auto x3 = lambda.madd(lambda, -(x2 + x1)); auto y3 = lambda.madd(x1 - x3, -y1); @@ -759,7 +761,7 @@ cycle_group::straus_scalar_slice::straus_scalar_slice(Composer* contex template std::optional> cycle_group::straus_scalar_slice::read(size_t index) { - + return std::nullopt; if (index >= slices.size()) { return {}; } @@ -1314,6 +1316,7 @@ template cycle_group& cycle_group::opera template cycle_group cycle_group::operator/(const cycle_scalar& /*unused*/) const { + // TODO(@kevaundray): solve the discrete logarithm problem throw_or_abort("Implementation under construction..."); } From de6d232ec690e51c898dbaf71b8eeb5b64570a5f Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Thu, 28 Sep 2023 21:11:34 +0000 Subject: [PATCH 45/50] typo --- .../src/barretenberg/stdlib/primitives/group/cycle_group.cpp | 3 +-- .../src/barretenberg/stdlib/primitives/group/cycle_group.hpp | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp index 7d3d0fd0bf4..8900583391d 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp @@ -1313,8 +1313,7 @@ template cycle_group& cycle_group::opera return *this; } -template -cycle_group cycle_group::operator/(const cycle_scalar& /*unused*/) const +template cycle_group cycle_group::operator/(const cycle_group& /*unused*/) const { // TODO(@kevaundray): solve the discrete logarithm problem throw_or_abort("Implementation under construction..."); } diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp
b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp index 0a305fc5d10..c2f03df4105 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp @@ -204,7 +204,7 @@ template class cycle_group { const generator_data* offset_generator_data = &default_offset_generators); cycle_group operator*(const cycle_scalar& scalar) const; cycle_group& operator*=(const cycle_scalar& scalar); - cycle_group operator/(const cycle_scalar& scalar) const; + cycle_group operator/(const cycle_group& other) const; field_t x; field_t y; From a04b4b4b6207f811b66485740c1827a1a5a6a34e Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Fri, 29 Sep 2023 09:40:52 +0000 Subject: [PATCH 46/50] test fix --- .../stdlib/primitives/group/cycle_group.test.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp index c2a8c1d2ec5..b9a63a5c9be 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp @@ -438,7 +438,7 @@ TYPED_TEST(CycleGroupTest, TestBatchMul) { std::vector points; std::vector scalars; - std::vector scalars_native; + std::vector scalars_native; Element expected = Group::point_at_infinity; for (size_t i = 0; i < num_muls; ++i) { auto element = crypto::pedersen_hash_refactor::get_lhs_generator(); @@ -448,14 +448,14 @@ TYPED_TEST(CycleGroupTest, TestBatchMul) expected += (element * scalar); points.emplace_back(element); scalars.emplace_back(cycle_group_ct::cycle_scalar::from_witness(&composer, scalar)); - scalars_native.emplace_back(scalar); + scalars_native.emplace_back(uint256_t(scalar)); // 2: add entry where point is constant, scalar is constant element = 
crypto::pedersen_hash_refactor::get_rhs_generator(); expected += (element * scalar); points.emplace_back(element); scalars.emplace_back(typename cycle_group_ct::cycle_scalar(scalar)); - scalars_native.emplace_back(scalar); + scalars_native.emplace_back(uint256_t(scalar)); } auto result = cycle_group_ct::batch_mul(scalars, points); EXPECT_EQ(result.get_value(), AffineElement(expected)); From b79cac4edbce9313ba9b221f3bebd8da02703c3d Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Fri, 29 Sep 2023 09:46:13 +0000 Subject: [PATCH 47/50] fix --- .../proof_system/arithmetization/arithmetization.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/barretenberg/cpp/src/barretenberg/proof_system/arithmetization/arithmetization.hpp b/barretenberg/cpp/src/barretenberg/proof_system/arithmetization/arithmetization.hpp index 71bd7e367f9..969aac893fc 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/arithmetization/arithmetization.hpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/arithmetization/arithmetization.hpp @@ -125,7 +125,7 @@ template class Turbo : public Arithmetization class Ultra : public Arithmetization { +template class Ultra : public Arithmetization { public: using FF = _FF; struct Selectors : SelectorsBase { From 20bf104881fddb76ca900ab0f2747fd06bbe2c14 Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Fri, 29 Sep 2023 09:46:58 +0000 Subject: [PATCH 48/50] merge fix --- .../src/barretenberg/stdlib/hash/pedersen/pedersen.test.cpp | 3 +-- .../barretenberg/stdlib/primitives/group/cycle_group.test.cpp | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen.test.cpp index 2598d3849cb..c82041c4972 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/hash/pedersen/pedersen.test.cpp @@ -21,8 +21,7 @@ 
template class PedersenTest : public ::testing::Test { }; }; -using CircuitTypes = ::testing:: - Types; +using CircuitTypes = ::testing::Types; TYPED_TEST_SUITE(PedersenTest, CircuitTypes); TYPED_TEST(PedersenTest, TestHash) diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp index b9a63a5c9be..65b722699db 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp @@ -46,8 +46,7 @@ template class CycleGroupTest : public ::testing::Test { }; }; -using CircuitTypes = ::testing:: - Types; +using CircuitTypes = ::testing::Types; TYPED_TEST_SUITE(CycleGroupTest, CircuitTypes); TYPED_TEST(CycleGroupTest, TestDbl) From 760aba7a353d4a079a1f3a6a9552434c5090834e Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Fri, 29 Sep 2023 09:51:25 +0000 Subject: [PATCH 49/50] comments --- .../proof_system/circuit_builder/ultra_circuit_builder.test.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.test.cpp b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.test.cpp index df20fc30d19..715effcc28f 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.test.cpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.test.cpp @@ -194,6 +194,7 @@ TEST(ultra_circuit_constructor, test_elliptic_double_gate) bool result = circuit_constructor.check_circuit(); EXPECT_EQ(result, true); + EXPECT_TRUE(saved_state.is_same_state(circuit_constructor)); } TEST(ultra_circuit_constructor, non_trivial_tag_permutation) From f1b3271420a36ce030b93f7aff5776ec7e77443c Mon Sep 17 00:00:00 2001 From: zac-williamson Date: Fri, 29 Sep 2023 10:12:39 +0000 Subject: [PATCH 50/50] 
bugfix --- .../src/barretenberg/stdlib/primitives/group/cycle_group.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp index 8900583391d..806d1fa3c77 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp @@ -761,9 +761,8 @@ cycle_group::straus_scalar_slice::straus_scalar_slice(Composer* contex template std::optional> cycle_group::straus_scalar_slice::read(size_t index) { - return std::nullopt; if (index >= slices.size()) { - return {}; + return std::nullopt; } return slices[index]; }