From 839463557f0d8b0eaecf6b4cb65977168430fcef Mon Sep 17 00:00:00 2001
From: David Banks <47112877+dbanks12@users.noreply.github.com>
Date: Mon, 21 Nov 2022 05:27:56 -0500
Subject: [PATCH 1/3] Account hardener, second class txs (#1537)

* Refactored index.ts to support multiple cli commands/modes. Split out previous logic into migrate_accounts.ts and pulled some logic out of aztec_connect.ts into helper file get_rollup_blocks.ts. Started implementation of fix_accounts.ts for accounts that can have an alias reassigned to their public key
* pulling blocks into worldstate, correct root.
* account fixer now uses SDK, generates keys and aliasHash, constructs AccountTx, performs FFT and gets signing data. Rearranged fixAccounts function
* minor tweak to fix-accounts test script
* For fixAccounts function of account-migrator, refactored the initialization of modules like worker pools, fft, prover, etc to match account_prover.test.ts. This fixed a memory access out-of-bounds error in WASM. Am now computing schnorr signature properly, generating account proof, and verifying account proof
* Updates to fix-accounts-local.sh so it detects when falafel is ready, parses rollup address, runs e2e test and then runs fixAccounts test locally. Can launch full test including ganache, 2 halloumis and a falafel all with a single run of this script
* Began making shift over to Halloumi proofs while investigating why falafel is rejecting proofs generated directly by the account-migrator
* Updated barretenberg rollup_cli and account proofs to accept account txs from halloumi for proof generation. Updated falafel to accept second-class transactions that do not pay a fee (still need to remove them from get_pending_txs and enforce that they only fill up empty space in blocks). fixAccounts is now submitting account txs to halloumi for proof generation before submitting them to falafel for validation and inclusion in rollup blocks. Falafel now has an e2e 'dev' start mode that sets up for e2e but also runs in dev mode for continuous rebuilding
* some cleanup in the account fixer. Started adding code (commented out) to write/read account proofs to/from json files
* Added logic for writing/reading account proofs to json files in batches before sending to falafel. Moved createFundedWalletProvider to utility file as is done for e2e tests. General cleanup in fix_accounts.
* Moved sendSecondClassTxs into a 'limited' rollup provider instantiated in account fixer.
* submitting actual batches of 2nd class txs to falafel
* Moved sendSecondClassTxs into a new class. Moved all account fixing logic into AccountFixer class in account_fixer.ts
* log fix
* Added invertPublicKey wasm bindings. Creating new account first to then be used by many migration TXs for account fixing/hardening
* Fixed account-migrator package deps. Began changes to pull real accounts from mainnet for account hardening/fixing
* broke yarn lock file after merge
* 2nd class txs now only fill up empty slots in rollup block. account fixer force flushes the rollup when in test mode
* rename account fixer to hardener
* added validateAuth for second class txs
* fix account.cpp info text
* rename function to get rollup publish conditions in rollup coordinator. Updated core SDK's pendingAliases to use a Map so that when multiple migration txs are submitted with the same alias, it only updates the SDK with the last tx's alias/acc-public-key pair
* More private helper functions to break up long code blocks in hardener. Added some function header comments.
* fixed falafel bug and broken falafel tests
* separate modes for generation of proofs and submission of proofs
* hardener now hardens 'initial' accounts. Added 'verifyHardened' command
* minor cleanup in sdk and halloumi
* hardener readme and script update
* added pendingSecondClassTxs to status endpoint
* Added falafel-monitoring mode for hardener. Hardener now ensures a unique set of accounts before generating proofs. Other fixes and cleanup to hardener
* hardener cleanup
* debugger working properly
* Added schnorr public key inversion test to barretenberg JS. Added inversion function comments to barretenberg and bJS
* prettier for barretenberg js
* Typos in zk-money and bberg JS (intial -> initial). Split hardener commands into subcommands `harden createHardener` etc. Updated migrator README
* comments and prints cleanup
* revert falafel package.json and start_e2e.sh back to defi-bridge-project version. Commented account hardener and cleaned up harden-command modes (new full-sequence mode that performs all harden steps).
* Tweaks to falafel after code review, bug in rollup_db getTotalTxCount, revert minor/style circuit change. Large updates to Account Hardener after review. Some additional comments and TODOs
* include second class tx count (total and per type) in rollup profile print.
* comment local hardener test script
* fixed bug where second class txs could surpass block-slot limit
* added rollup coordinator tests to check that: 1. rollup does not publish when filled with second class txs, 2. second-class txs are omitted when block is full with 1st class, 3. second-class txs are included when there is empty space and a rollup is going to publish anyway, 4. second class txs are omitted if they would surpass gas/call-data limits
* prettier rollup_coordinator
* compiler warning in rollup coord test
* Hardener now always pulls ALL accounts from ALL blocks and segregates the target accounts for proof generation/verification
* hardener constants updated (batch size and max second-class txs in falafel). Format fix in rollup profile printer. Added metrics gauge for 2nd-class txs
* Was able to reduce second-class-tx-related changeset in rollup_coordinator.test.ts via use of mockResolvedValueOnce to inject second class txs into rollupDb
* improved hardener script.
  cleanup in hardener logging
* various yarn package related issues resolved after large merge
* bugs found by ci in barretenberg and falafel after large merge
* type fix in rollup coordinator test
* cleanup for db/account-hardener PR
* revert zkmoney change not appropriate in hardener PR
* minor style fix in account hardener local testing script
* changed terminology: invert -> negate (for negation of public key Y coord)
* fix account hardener after PR #1640
* simplify public key negation
* fixes after merge update
* fix account hardener after Crs->NetCrs rename
---
 src/aztec/crypto/schnorr/c_bind.cpp           | 10 +++-
 .../ecc/curves/secp256k1/secp256k1.test.cpp   |  4 +-
 .../ecc/curves/secp256r1/secp256r1.test.cpp   |  2 +-
 .../rollup/proofs/account/account_tx.cpp      | 50 -----------------
 .../rollup/proofs/account/account_tx.hpp      | 53 +++++++++++++++++--
 src/aztec/rollup/proofs/account/index.hpp     |  3 +-
 src/aztec/rollup/proofs/account/verify.cpp    | 32 +++++++++++
 src/aztec/rollup/proofs/account/verify.hpp    | 19 +++++++
 src/aztec/rollup/rollup_cli/main.cpp          | 23 +++++++-
 9 files changed, 137 insertions(+), 59 deletions(-)
 create mode 100644 src/aztec/rollup/proofs/account/verify.cpp
 create mode 100644 src/aztec/rollup/proofs/account/verify.hpp

diff --git a/src/aztec/crypto/schnorr/c_bind.cpp b/src/aztec/crypto/schnorr/c_bind.cpp
index b91140ffd98..329c91d883a 100644
--- a/src/aztec/crypto/schnorr/c_bind.cpp
+++ b/src/aztec/crypto/schnorr/c_bind.cpp
@@ -14,6 +14,14 @@ WASM_EXPORT void compute_public_key(uint8_t const* private_key, uint8_t* public_
     write(public_key_buf, pub_key);
 }
+WASM_EXPORT void negate_public_key(uint8_t const* public_key_buffer, uint8_t* output)
+{
+    // Negate the public key (effectively negating the y-coordinate of the public key) and return the resulting public
+    // key.
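+    // For an elliptic curve point P = (x, y), the negation -P is (x, -y): the x-coordinate is unchanged and only
+    // the sign of the y-coordinate flips. The two lines below deserialize the affine point from the input buffer,
+    // negate it, and serialize the negated point into the output buffer.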
+ auto account_public_key = from_buffer(public_key_buffer); + barretenberg::group_elements::write(output, -account_public_key); +} + WASM_EXPORT void construct_signature( uint8_t const* message, size_t msg_len, uint8_t const* private_key, uint8_t* s, uint8_t* e) { @@ -129,4 +137,4 @@ WASM_EXPORT bool multisig_combine_signatures(uint8_t const* message, return false; } } -} \ No newline at end of file +} diff --git a/src/aztec/ecc/curves/secp256k1/secp256k1.test.cpp b/src/aztec/ecc/curves/secp256k1/secp256k1.test.cpp index 6397bc3557c..98b8c391967 100644 --- a/src/aztec/ecc/curves/secp256k1/secp256k1.test.cpp +++ b/src/aztec/ecc/curves/secp256k1/secp256k1.test.cpp @@ -508,9 +508,9 @@ TEST(secp256k1, neg_and_self_neg_0_cmp_regression) TEST(secp256k1, montgomery_mul_big_bug) { - secp256k1::fq a(uint256_t{0xfffffffe630dc02f, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff}); + secp256k1::fq a(uint256_t{ 0xfffffffe630dc02f, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff }); secp256k1::fq a_sqr = a.sqr(); - secp256k1::fq expected(uint256_t{0x60381e557e100000, 0x0, 0x0, 0x0}); + secp256k1::fq expected(uint256_t{ 0x60381e557e100000, 0x0, 0x0, 0x0 }); EXPECT_EQ((a_sqr == expected), true); } diff --git a/src/aztec/ecc/curves/secp256r1/secp256r1.test.cpp b/src/aztec/ecc/curves/secp256r1/secp256r1.test.cpp index af0ff4ba629..17f5f3f89d7 100644 --- a/src/aztec/ecc/curves/secp256r1/secp256r1.test.cpp +++ b/src/aztec/ecc/curves/secp256r1/secp256r1.test.cpp @@ -476,7 +476,7 @@ TEST(secp256r1, montgomery_mul_big_bug) a.data[2] = 0xAAAAAAAAAAAAAAAA; a.data[3] = 0xFFFFFFFFE38E38E3; secp256r1::fr a_sqr = a.sqr(); - secp256r1::fr expected(uint256_t{0x57abc6aa0349c084, 0x65b21b232a4cb7a5, 0x5ba781948b0fcd6e, 0xd6e9e0644bda12f7}); + secp256r1::fr expected(uint256_t{ 0x57abc6aa0349c084, 0x65b21b232a4cb7a5, 0x5ba781948b0fcd6e, 0xd6e9e0644bda12f7 }); EXPECT_EQ((a_sqr == expected), true); } diff --git a/src/aztec/rollup/proofs/account/account_tx.cpp b/src/aztec/rollup/proofs/account/account_tx.cpp index e40ce058cc5..1d419305cdd 100644 --- a/src/aztec/rollup/proofs/account/account_tx.cpp +++ b/src/aztec/rollup/proofs/account/account_tx.cpp @@ -42,56 +42,6 @@ void account_tx::sign(key_pair const& keys) std::string(message.begin(), message.end()), keys); } -void write(std::vector& buf, account_tx const& tx) -{ - using serialize::write; - write(buf, tx.merkle_root); - write(buf, tx.account_public_key); - write(buf, tx.new_account_public_key); - write(buf, tx.new_signing_pub_key_1); - write(buf, tx.new_signing_pub_key_2); - write(buf, tx.alias_hash); - write(buf, tx.create); - write(buf, tx.migrate); - write(buf, tx.account_note_index); - write(buf, tx.account_note_path); - write(buf, tx.signing_pub_key); - write(buf, tx.signature); -} - -void read(uint8_t const*& buf, account_tx& tx) -{ - using serialize::read; - read(buf, tx.merkle_root); - read(buf, tx.account_public_key); - read(buf, tx.new_account_public_key); - read(buf, tx.new_signing_pub_key_1); - read(buf, tx.new_signing_pub_key_2); - read(buf, tx.alias_hash); - read(buf, tx.create); - read(buf, tx.migrate); - read(buf, tx.account_note_index); - read(buf, tx.account_note_path); - read(buf, tx.signing_pub_key); - read(buf, tx.signature); -} - -std::ostream& operator<<(std::ostream& os, account_tx const& tx) -{ - return os << "merkle_root: " << tx.merkle_root << "\n" - << "account_public_key: " << tx.account_public_key << "\n" - << "new_account_public_key: " << tx.new_account_public_key << "\n" - << "new_signing_pub_key_1: " << 
tx.new_signing_pub_key_1 << "\n" - << "new_signing_pub_key_2: " << tx.new_signing_pub_key_2 << "\n" - << "alias_hash: " << tx.alias_hash << "\n" - << "create: " << tx.create << "\n" - << "migrate: " << tx.migrate << "\n" - << "account_note_index: " << tx.account_note_index << "\n" - << "account_note_path: " << tx.account_note_path << "\n" - << "signing_pub_key: " << tx.signing_pub_key << "\n" - << "signature: " << tx.signature << "\n"; -} - } // namespace account } // namespace proofs } // namespace rollup diff --git a/src/aztec/rollup/proofs/account/account_tx.hpp b/src/aztec/rollup/proofs/account/account_tx.hpp index bd9d89ed104..4c9dbbc0970 100644 --- a/src/aztec/rollup/proofs/account/account_tx.hpp +++ b/src/aztec/rollup/proofs/account/account_tx.hpp @@ -32,10 +32,57 @@ struct account_tx { bool operator==(account_tx const&) const = default; }; -void read(uint8_t const*& it, account_tx& tx); -void write(std::vector& buf, account_tx const& tx); +template inline void read(B& buf, account_tx& tx) +{ + using serialize::read; + read(buf, tx.merkle_root); + read(buf, tx.account_public_key); + read(buf, tx.new_account_public_key); + read(buf, tx.new_signing_pub_key_1); + read(buf, tx.new_signing_pub_key_2); + read(buf, tx.alias_hash); + read(buf, tx.create); + read(buf, tx.migrate); + read(buf, tx.account_note_index); + read(buf, tx.account_note_path); + read(buf, tx.signing_pub_key); + read(buf, tx.signature.s); + read(buf, tx.signature.e); +} -std::ostream& operator<<(std::ostream& os, account_tx const& tx); +template inline void write(B& buf, account_tx const& tx) +{ + using serialize::write; + write(buf, tx.merkle_root); + write(buf, tx.account_public_key); + write(buf, tx.new_account_public_key); + write(buf, tx.new_signing_pub_key_1); + write(buf, tx.new_signing_pub_key_2); + write(buf, tx.alias_hash); + write(buf, tx.create); + write(buf, tx.migrate); + write(buf, tx.account_note_index); + write(buf, tx.account_note_path); + write(buf, tx.signing_pub_key); + write(buf, tx.signature.s); + write(buf, tx.signature.e); +} + +inline std::ostream& operator<<(std::ostream& os, account_tx const& tx) +{ + return os << "merkle_root: " << tx.merkle_root << "\n" + << "account_public_key: " << tx.account_public_key << "\n" + << "new_account_public_key: " << tx.new_account_public_key << "\n" + << "new_signing_pub_key_1: " << tx.new_signing_pub_key_1 << "\n" + << "new_signing_pub_key_2: " << tx.new_signing_pub_key_2 << "\n" + << "alias_hash: " << tx.alias_hash << "\n" + << "create: " << tx.create << "\n" + << "migrate: " << tx.migrate << "\n" + << "account_note_index: " << tx.account_note_index << "\n" + << "account_note_path: " << tx.account_note_path << "\n" + << "signing_pub_key: " << tx.signing_pub_key << "\n" + << "signature: " << tx.signature << "\n"; +} } // namespace account } // namespace proofs diff --git a/src/aztec/rollup/proofs/account/index.hpp b/src/aztec/rollup/proofs/account/index.hpp index 5e90cebf9ad..de4fd6623fe 100644 --- a/src/aztec/rollup/proofs/account/index.hpp +++ b/src/aztec/rollup/proofs/account/index.hpp @@ -4,4 +4,5 @@ #include "account.hpp" #include "c_bind.h" #include "compute_circuit_data.hpp" -#include "create_proof.hpp" \ No newline at end of file +#include "create_proof.hpp" +#include "verify.hpp" diff --git a/src/aztec/rollup/proofs/account/verify.cpp b/src/aztec/rollup/proofs/account/verify.cpp new file mode 100644 index 00000000000..8dfe759211a --- /dev/null +++ b/src/aztec/rollup/proofs/account/verify.cpp @@ -0,0 +1,32 @@ +#include "./verify.hpp" +#include 
"./account.hpp" +#include "./account_tx.hpp" + +namespace rollup { +namespace proofs { +namespace account { + +namespace { +verify_result build_circuit(Composer& composer, account_tx& tx, circuit_data const&) +{ + verify_result result; + account_circuit(composer, tx); + return result; +} +} // namespace + +verify_result verify_logic(account_tx& tx, circuit_data const& cd) +{ + Composer composer = Composer(cd.proving_key, cd.verification_key, cd.num_gates); + return verify_logic_internal(composer, tx, cd, "account", build_circuit); +} + +verify_result verify(account_tx& tx, circuit_data const& cd) +{ + Composer composer = Composer(cd.proving_key, cd.verification_key, cd.num_gates); + return verify_internal(composer, tx, cd, "account", true, build_circuit); +} + +} // namespace account +} // namespace proofs +} // namespace rollup diff --git a/src/aztec/rollup/proofs/account/verify.hpp b/src/aztec/rollup/proofs/account/verify.hpp new file mode 100644 index 00000000000..ba469c480ef --- /dev/null +++ b/src/aztec/rollup/proofs/account/verify.hpp @@ -0,0 +1,19 @@ +#pragma once +#include "../verify.hpp" +#include "./compute_circuit_data.hpp" +#include "./account.hpp" +#include + +namespace rollup { +namespace proofs { +namespace account { + +using namespace plonk::stdlib::types::turbo; + +verify_result verify_logic(account_tx& tx, circuit_data const& cd); + +verify_result verify(account_tx& tx, circuit_data const& cd); + +} // namespace account +} // namespace proofs +} // namespace rollup diff --git a/src/aztec/rollup/rollup_cli/main.cpp b/src/aztec/rollup/rollup_cli/main.cpp index d148243af61..b5d79ba69a6 100644 --- a/src/aztec/rollup/rollup_cli/main.cpp +++ b/src/aztec/rollup/rollup_cli/main.cpp @@ -6,6 +6,7 @@ #include #include "../proofs/account/compute_circuit_data.hpp" +#include "../proofs/account/verify.hpp" #include "../proofs/join_split/compute_circuit_data.hpp" #include "../proofs/claim/get_circuit_data.hpp" #include "../proofs/claim/verify.hpp" @@ -183,6 +184,21 @@ bool create_root_verifier() return result.verified; } +bool create_account_proof() +{ + account::account_tx account_tx; + std::cerr << "Reading account tx..." << std::endl; + read(std::cin, account_tx); + + auto result = verify(account_tx, account_cd); + + write(std::cout, result.proof_data); + write(std::cout, result.verified); + std::cout << std::flush; + + return result.verified; +} + int main(int argc, char** argv) { std::vector args(argv, argv + argc); @@ -257,6 +273,11 @@ int main(int argc, char** argv) create_root_verifier(); break; } + case 4: { + std::cerr << "Serving request to create account proof..." << std::endl; + create_account_proof(); + break; + } case 100: { // Convert to buffer first, so when we call write we prefix the buffer length. std::cerr << "Serving join split vk..." 
<< std::endl; @@ -282,4 +303,4 @@ int main(int argc, char** argv) } return 0; -} \ No newline at end of file +} From 627d7f2ee11363d425256e91188a5acb24407911 Mon Sep 17 00:00:00 2001 From: Innokentii Sennovskii Date: Mon, 21 Nov 2022 14:46:30 +0000 Subject: [PATCH 2/3] Fuzzer and Guido found bugs in bigfield that don't change the core formula (#1608) * Fixed bigfield bugs: 1) The basic CRT formula bug (quick and dirty fix) didn't allow for small values on division (a*b < r) 2) Limb product maximum value computation used uint256_t resulting in overflows 3) Joined range checks can't handle the maximum range values, so in those cases we need to use separate range checks 4) Lots of issues with constant bigfield elements, where we try to access non-existing context 5) assert_is_in_field borrowing logic was wrong 6) 1 miscellaneous funny bug, where evaluate_square_add always expected an even number of elements to add, and because of that did a buffer overread * Minor fixes * Fixed Guido's bigfield conditional negate bug and added limb << operator * Fixed some ASSERTs that triggered Guido's fuzzer. * Removed misguided ASSERT. * Fixing picking wrong modulus for counting carry range * Removed formula dirty fix and test * Deleted old comments * Minor typos are fixed Co-authored-by: arijitdutta67 --- .../stdlib/primitives/bigfield/bigfield.hpp | 6 + .../primitives/bigfield/bigfield.test.cpp | 16 +- .../primitives/bigfield/bigfield_impl.hpp | 447 ++++++++++++------ 3 files changed, 311 insertions(+), 158 deletions(-) diff --git a/src/aztec/stdlib/primitives/bigfield/bigfield.hpp b/src/aztec/stdlib/primitives/bigfield/bigfield.hpp index 0fba46b0cc9..4d16a684d3c 100644 --- a/src/aztec/stdlib/primitives/bigfield/bigfield.hpp +++ b/src/aztec/stdlib/primitives/bigfield/bigfield.hpp @@ -47,6 +47,11 @@ template class bigfield { maximum_value = DEFAULT_MAXIMUM_LIMB; } } + friend std::ostream& operator<<(std::ostream& os, const Limb& a) + { + os << "{ " << a.element << " < " << a.maximum_value << " }"; + return os; + } Limb(const Limb& other) = default; Limb(Limb&& other) = default; Limb& operator=(const Limb& other) = default; @@ -212,6 +217,7 @@ template class bigfield { const std::vector& to_sub, bool enable_divisor_nz_check = false); + static bigfield sum(const std::vector& terms); static bigfield internal_div(const std::vector& numerators, const bigfield& denominator, bool check_for_zero); diff --git a/src/aztec/stdlib/primitives/bigfield/bigfield.test.cpp b/src/aztec/stdlib/primitives/bigfield/bigfield.test.cpp index 4fca9cae57d..95484f9ed4d 100644 --- a/src/aztec/stdlib/primitives/bigfield/bigfield.test.cpp +++ b/src/aztec/stdlib/primitives/bigfield/bigfield.test.cpp @@ -764,6 +764,17 @@ template class stdlib_bigfield : public testing::Test { bool proof_result = verifier.verify_proof(proof); EXPECT_EQ(proof_result, true); } + + static void test_conditional_select_regression() + { + auto composer = Composer(); + barretenberg::fq a(0); + barretenberg::fq b(1); + fq_ct a_ct(&composer, a); + fq_ct b_ct(&composer, b); + fq_ct selected = a_ct.conditional_select(b_ct, typename bn254::bool_ct(&composer, true)); + EXPECT_EQ(barretenberg::fq((selected.get_value() % uint512_t(barretenberg::fq::modulus)).lo), b); + } }; // Define types for which the above tests will be constructed. 
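A note on the regression covered by test_conditional_select_regression above: with a true predicate, conditional_select is expected to return the second operand (`b` in the test), not `*this`; the all-constant branch in bigfield_impl.hpp had these swapped and is corrected later in this patch. Below is a minimal stand-alone sketch of the intended behaviour in plain C++ (the helper name is hypothetical and the circuit/composer types are deliberately left out).

#include <cassert>

// Hedged sketch, not part of the patch: mirrors the corrected all-constant branch of
// bigfield::conditional_select -- a true predicate selects `other`, a false predicate keeps `self`.
template <typename T> T conditional_select_constants(const T& self, const T& other, bool predicate)
{
    return predicate ? other : self;
}

int main()
{
    // Same shape as the regression test: a = 0, b = 1, predicate = true must yield b.
    assert(conditional_select_constants(0, 1, true) == 1);
    assert(conditional_select_constants(0, 1, false) == 0);
    return 0;
}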
@@ -774,7 +785,6 @@ typedef testing::Types bigfield bigfield::operator-(const if (other.is_constant()) { uint512_t right = other.get_value() % modulus_u512; - uint512_t neg_right = modulus_u512 - right; + uint512_t neg_right = (modulus_u512 - right) % modulus_u512; return operator+(bigfield(ctx, uint256_t(neg_right.lo))); } @@ -472,6 +472,30 @@ template bigfield bigfield::operator/(const return internal_div({ *this }, other, false); } +/** + * @brief Create constraints for summing these terms + * + * @tparam C + * @tparam T + * @param terms + * @return The sum of terms + */ +template bigfield bigfield::sum(const std::vector& terms) +{ + ASSERT(terms.size() > 0); + + if (terms.size() == 1) { + return terms[0]; + } + std::vector halved; + for (size_t i = 0; i < terms.size() / 2; i++) { + halved.push_back(terms[2 * i] + terms[2 * i + 1]); + } + if (terms.size() & 1) { + halved.push_back(terms[terms.size() - 1]); + } + return sum(halved); +} /** * Division of a sum with an optional check if divisor is zero. Should not be used outside of class. @@ -648,16 +672,25 @@ template bigfield bigfield::sqradd(const st const uint1024_t add_right(add_values); const uint1024_t modulus(target_basis.modulus); - const auto [quotient_1024, remainder_1024] = (left * right + add_right).divmod(modulus); - - const uint512_t quotient_value = quotient_1024.lo; - const uint512_t remainder_value = remainder_1024.lo; - bigfield remainder; bigfield quotient; - if (is_constant() && add_constant) { - remainder = bigfield(ctx, uint256_t(remainder_value.lo)); - return remainder; + if (is_constant()) { + if (add_constant) { + + const auto [quotient_1024, remainder_1024] = (left * right + add_right).divmod(modulus); + remainder = bigfield(ctx, uint256_t(remainder_1024.lo.lo)); + return remainder; + } else { + + const auto [quotient_1024, remainder_1024] = (left * right).divmod(modulus); + std::vector new_to_add; + for (auto& add_element : to_add) { + new_to_add.push_back(add_element); + } + + new_to_add.push_back(bigfield(ctx, remainder_1024.lo.lo)); + return sum(new_to_add); + } } else { // Check the quotient fits the range proof @@ -668,14 +701,17 @@ template bigfield bigfield::sqradd(const st self_reduce(); return sqradd(to_add); } + const auto [quotient_1024, remainder_1024] = (left * right + add_right).divmod(modulus); + uint512_t quotient_value = quotient_1024.lo; + uint256_t remainder_value = remainder_1024.lo.lo; quotient = bigfield(witness_t(ctx, fr(quotient_value.slice(0, NUM_LIMB_BITS * 2).lo)), witness_t(ctx, fr(quotient_value.slice(NUM_LIMB_BITS * 2, NUM_LIMB_BITS * 4).lo)), false, num_quotient_bits); remainder = bigfield( - witness_t(ctx, fr(remainder_value.slice(0, NUM_LIMB_BITS * 2).lo)), - witness_t(ctx, fr(remainder_value.slice(NUM_LIMB_BITS * 2, NUM_LIMB_BITS * 3 + NUM_LAST_LIMB_BITS).lo))); + witness_t(ctx, fr(remainder_value.slice(0, NUM_LIMB_BITS * 2))), + witness_t(ctx, fr(remainder_value.slice(NUM_LIMB_BITS * 2, NUM_LIMB_BITS * 3 + NUM_LAST_LIMB_BITS)))); }; unsafe_evaluate_square_add(*this, to_add, quotient, remainder); return remainder; @@ -724,7 +760,6 @@ bigfield bigfield::madd(const bigfield& to_mul, const std::vector to_mul.get_maximum_value()) { self_reduce(); @@ -901,48 +936,129 @@ bigfield bigfield::mult_madd(const std::vector& mul_left, const size_t number_of_products = mul_left.size(); - // First we need to check if it is possible to reduce the products enough - const uint1024_t modulus(target_basis.modulus); uint1024_t worst_case_product_sum(0); - uint1024_t add_right(0); - uint1024_t 
add_right_maximum(0); + uint1024_t add_right_constant_sum(0); - // Compute the sum of added values (we don't force-reduce these) - // We use add_right later for computing the quotient and remainder + // First we do all constant optimizations bool add_constant = true; + std::vector new_to_add; + for (const auto& add_element : to_add) { add_element.reduction_check(); - add_right += uint1024_t(add_element.get_value()); + if (add_element.is_constant()) { + add_right_constant_sum += uint1024_t(add_element.get_value()); + } else { + add_constant = false; + new_to_add.push_back(add_element); + } + } + + // Compute the product sum + // Optimize constant use + uint1024_t sum_of_constant_products(0); + std::vector new_input_left; + std::vector new_input_right; + bool product_sum_constant = true; + for (size_t i = 0; i < number_of_products; i++) { + if (mutable_mul_left[i].is_constant() && mutable_mul_right[i].is_constant()) { + // If constant, just add to the sum + sum_of_constant_products += + uint1024_t(mutable_mul_left[i].get_value()) * uint1024_t(mutable_mul_right[i].get_value()); + } else { + // If not, add to nonconstant sum and remember the elements + new_input_left.push_back(mutable_mul_left[i]); + new_input_right.push_back(mutable_mul_right[i]); + product_sum_constant = false; + } + } + + C* ctx = nullptr; + // Search through all multiplicands on the left + for (auto& el : mutable_mul_left) { + if (el.context) { + ctx = el.context; + break; + } + } + // And on the right + if (!ctx) { + for (auto& el : mutable_mul_right) { + if (el.context) { + ctx = el.context; + break; + } + } + } + if (product_sum_constant) { + if (add_constant) { + // Simply return the constant, no need unsafe_multiply_add + const auto [quotient_1024, remainder_1024] = + (sum_of_constant_products + add_right_constant_sum).divmod(modulus); + ASSERT(!fix_remainder_to_zero || remainder_1024 == 0); + return bigfield(ctx, uint256_t(remainder_1024.lo.lo)); + } else { + const auto [quotient_1024, remainder_1024] = + (sum_of_constant_products + add_right_constant_sum).divmod(modulus); + uint256_t remainder_value = remainder_1024.lo.lo; + bigfield result; + if (remainder_value == uint256_t(0)) { + // No need to add extra term to new_to_add + result = sum(new_to_add); + } else { + // Add the constant term + new_to_add.push_back(bigfield(ctx, uint256_t(remainder_value))); + result = sum(new_to_add); + } + if (fix_remainder_to_zero) { + result.self_reduce(); + result.assert_equal(zero()); + } + return result; + } + } + + // Now that we know that there is at least 1 non-constant multiplication, we can start estimating reductions, etc + + // Compute the constant term we're adding + const auto [_, constant_part_remainder_1024] = (sum_of_constant_products + add_right_constant_sum).divmod(modulus); + const uint256_t constant_part_remainder_256 = constant_part_remainder_1024.lo.lo; + + if (constant_part_remainder_256 != uint256_t(0)) { + new_to_add.push_back(bigfield(ctx, constant_part_remainder_256)); + } + // Compute added sum + uint1024_t add_right_final_sum(0); + uint1024_t add_right_maximum(0); + for (const auto& add_element : new_to_add) { + // Technically not needed, but better to leave just in case + add_element.reduction_check(); + add_right_final_sum += uint1024_t(add_element.get_value()); + add_right_maximum += uint1024_t(add_element.get_maximum_value()); - add_constant = add_constant && (add_element.is_constant()); } + const size_t final_number_of_products = new_input_left.size(); + + // We need to check if it is possible to 
reduce the products enough + worst_case_product_sum = uint1024_t(final_number_of_products) * uint1024_t(DEFAULT_MAXIMUM_REMAINDER) * + uint1024_t(DEFAULT_MAXIMUM_REMAINDER); - worst_case_product_sum = - uint1024_t(number_of_products) * uint1024_t(DEFAULT_MAXIMUM_REMAINDER) * uint1024_t(DEFAULT_MAXIMUM_REMAINDER); // Check that we can actually reduce the products enough, this assert will probably never get triggered ASSERT((worst_case_product_sum + add_right_maximum) < get_maximum_crt_product()); - perform_reductions_for_mult_madd(mutable_mul_left, mutable_mul_right, to_add); + // We've collapsed all constants, checked if we can compute the sum of products in the worst case, time to check if + // we need to reduce something + perform_reductions_for_mult_madd(new_input_left, new_input_right, new_to_add); + uint1024_t sum_of_products_final(0); + for (size_t i = 0; i < final_number_of_products; i++) { + sum_of_products_final += uint1024_t(new_input_left[i].get_value()) * uint1024_t(new_input_right[i].get_value()); + } // Get the number of range proof bits for the quotient const size_t num_quotient_bits = get_quotient_max_bits({ DEFAULT_MAXIMUM_REMAINDER }); - // TODO: Could probably search through all - C* ctx = mutable_mul_left[0].context ? mutable_mul_left[0].context : mutable_mul_right[0].context; - - // Compute the product sum - // And check if all the multiplied values are constant - uint1024_t product_sum(0); - bool product_sum_constant = true; - for (size_t i = 0; i < number_of_products; i++) { - product_sum += uint1024_t(mutable_mul_left[i].get_value()) * uint1024_t(mutable_mul_right[i].get_value()); - product_sum_constant = - product_sum_constant && mutable_mul_left[i].is_constant() && mutable_mul_right[i].is_constant(); - } - // Compute the quotient and remainder - const auto [quotient_1024, remainder_1024] = (product_sum + add_right).divmod(modulus); + const auto [quotient_1024, remainder_1024] = (sum_of_products_final + add_right_final_sum).divmod(modulus); // If we are establishing an identity and the remainder has to be zero, we need to check, that it actually is @@ -955,31 +1071,23 @@ bigfield bigfield::mult_madd(const std::vector& mul_left, bigfield remainder; bigfield quotient; - // If all was constant, just create a new constant - if (product_sum_constant && add_constant) { - // We don't check fix_remainder_to_zero here because it makes absolutely no sense - remainder = bigfield(ctx, uint256_t(remainder_value.lo)); - return remainder; + // Constrain quotient to mitigate CRT overflow attacks + quotient = bigfield(witness_t(ctx, fr(quotient_value.slice(0, NUM_LIMB_BITS * 2).lo)), + witness_t(ctx, fr(quotient_value.slice(NUM_LIMB_BITS * 2, NUM_LIMB_BITS * 4).lo)), + false, + num_quotient_bits); + if (fix_remainder_to_zero) { + remainder = zero(); + // remainder needs to be defined as wire value and not selector values to satisfy + // UltraPlonk's bigfield custom gates + remainder.convert_constant_to_witness(ctx); } else { - // Constrain quotient to mitigate CRT overflow attacks - quotient = bigfield(witness_t(ctx, fr(quotient_value.slice(0, NUM_LIMB_BITS * 2).lo)), - witness_t(ctx, fr(quotient_value.slice(NUM_LIMB_BITS * 2, NUM_LIMB_BITS * 4).lo)), - false, - num_quotient_bits); - if (fix_remainder_to_zero) { - remainder = zero(); - // remainder needs to be defined as wire value and not selector values to satisfy - // UltraPlonk's bigfield custom gates - remainder.convert_constant_to_witness(ctx); - } else { - remainder = bigfield( - witness_t(ctx, fr(remainder_value.slice(0, 
NUM_LIMB_BITS * 2).lo)), - witness_t(ctx, - fr(remainder_value.slice(NUM_LIMB_BITS * 2, NUM_LIMB_BITS * 3 + NUM_LAST_LIMB_BITS).lo))); - } - }; + remainder = bigfield( + witness_t(ctx, fr(remainder_value.slice(0, NUM_LIMB_BITS * 2).lo)), + witness_t(ctx, fr(remainder_value.slice(NUM_LIMB_BITS * 2, NUM_LIMB_BITS * 3 + NUM_LAST_LIMB_BITS).lo))); + } - unsafe_evaluate_multiple_multiply_add(mutable_mul_left, mutable_mul_right, to_add, quotient, { remainder }); + unsafe_evaluate_multiple_multiply_add(new_input_left, new_input_right, new_to_add, quotient, { remainder }); return remainder; } @@ -1177,9 +1285,9 @@ bigfield bigfield::conditional_select(const bigfield& other, const b { if (is_constant() && other.is_constant() && predicate.is_constant()) { if (predicate.get_value()) { - return *this; + return other; } - return other; + return *this; } C* ctx = context ? context : (other.context ? other.context : predicate.context); @@ -1251,7 +1359,7 @@ template void bigfield::reduction_check(const siz // create a version with mod 2^t element part in [0,p-1] // After reducing to size 2^s, we check (p-1)-a is non-negative as integer. -// We perform subtraction using carries on blocks of size 2^b. The operations insde the blocks are done mod r +// We perform subtraction using carries on blocks of size 2^b. The operations inside the blocks are done mod r // Including the effect of carries the operation inside each limb is in the range [-2^b-1,2^{b+1}] // Assuming this values are all distinct mod r, which happens e.g. if r/2>2^{b+1}, then if all limb values are // non-negative at the end of subtraction, we know the subtraction result is positive as integers and a

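Before the assert_is_in_field changes below, a worked illustration of the borrow logic may help (a hedged, stand-alone sketch with made-up limb values, not the library code): when checking a <= p-1 limb by limb, a borrow out of limb i means limb i+1 of a must effectively grow by one before it is compared against limb i+1 of p-1, which is why the borrow is added rather than subtracted in the corrected comparisons.

#include <cassert>
#include <cstdint>

// Hedged two-limb illustration: returns true iff the two-limb integer a = a1*B + a0 is <= p_minus_1 = p1*B + p0
// (all limbs smaller than the base B). A borrow out of the low limb is *added* to the high limb of a before the
// comparison, matching the corrected borrow computation in assert_is_in_field.
bool leq_two_limbs(uint32_t a0, uint32_t a1, uint32_t p0, uint32_t p1)
{
    bool borrow_0 = a0 > p0;                                             // low limb of (p-1) - a needs a borrow
    bool borrow_1 = (uint64_t(a1) + uint64_t(borrow_0)) > uint64_t(p1);  // high limb comparison includes the borrow
    return !borrow_1;                                                    // no final borrow  =>  a <= p-1
}

int main()
{
    assert(leq_two_limbs(7, 3, 5, 4));   // a = (3,7), p-1 = (4,5): low limb borrows, 3 + 1 <= 4, so in field
    assert(!leq_two_limbs(7, 4, 5, 4));  // a = (4,7), p-1 = (4,5): 4 + 1 > 4, final borrow, so out of field
    return 0;
}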
void bigfield::assert_is_in_field() cons bool borrow_0_value = value.slice(0, NUM_LIMB_BITS) > modulus_minus_one_0; bool borrow_1_value = - (value.slice(NUM_LIMB_BITS, NUM_LIMB_BITS * 2) - uint256_t(borrow_0_value)) > modulus_minus_one_1; + (value.slice(NUM_LIMB_BITS, NUM_LIMB_BITS * 2) + uint256_t(borrow_0_value)) > (modulus_minus_one_1); bool borrow_2_value = - (value.slice(NUM_LIMB_BITS * 2, NUM_LIMB_BITS * 3) - uint256_t(borrow_1_value)) > modulus_minus_one_2; + (value.slice(NUM_LIMB_BITS * 2, NUM_LIMB_BITS * 3) + uint256_t(borrow_1_value)) > (modulus_minus_one_2); field_t modulus_0(context, modulus_minus_one_0); field_t modulus_1(context, modulus_minus_one_1); @@ -1370,6 +1478,7 @@ template void bigfield::assert_equal(const bigfie // it's non-zero mod r template void bigfield::assert_is_not_equal(const bigfield& other) const { + // Why would we use this for 2 constants? Turns out, in biggroup const auto get_overload_count = [target_modulus = modulus_u512](const uint512_t& maximum_value) { uint512_t target = target_modulus; size_t overload_count = 0; @@ -1457,7 +1566,7 @@ template void bigfield::self_reduce() const } // namespace stdlib /** - * Evaluate a multiply add identity with severall added elements and several remainders + * Evaluate a multiply add identity with several added elements and several remainders * * i.e: * @@ -1483,42 +1592,42 @@ void bigfield::unsafe_evaluate_multiply_add(const bigfield& input_left, C* ctx = left.context ? left.context : to_mul.context; - uint256_t max_b0 = (left.binary_basis_limbs[1].maximum_value * to_mul.binary_basis_limbs[0].maximum_value); + uint512_t max_b0 = (left.binary_basis_limbs[1].maximum_value * to_mul.binary_basis_limbs[0].maximum_value); max_b0 += (neg_modulus_limbs_u256[1] * quotient.binary_basis_limbs[0].maximum_value); - uint256_t max_b1 = (left.binary_basis_limbs[0].maximum_value * to_mul.binary_basis_limbs[1].maximum_value); + uint512_t max_b1 = (left.binary_basis_limbs[0].maximum_value * to_mul.binary_basis_limbs[1].maximum_value); max_b1 += (neg_modulus_limbs_u256[0] * quotient.binary_basis_limbs[1].maximum_value); - uint256_t max_c0 = (left.binary_basis_limbs[1].maximum_value * to_mul.binary_basis_limbs[1].maximum_value); + uint512_t max_c0 = (left.binary_basis_limbs[1].maximum_value * to_mul.binary_basis_limbs[1].maximum_value); max_c0 += (neg_modulus_limbs_u256[1] * quotient.binary_basis_limbs[1].maximum_value); - uint256_t max_c1 = (left.binary_basis_limbs[2].maximum_value * to_mul.binary_basis_limbs[0].maximum_value); + uint512_t max_c1 = (left.binary_basis_limbs[2].maximum_value * to_mul.binary_basis_limbs[0].maximum_value); max_c1 += (neg_modulus_limbs_u256[2] * quotient.binary_basis_limbs[0].maximum_value); - uint256_t max_c2 = (left.binary_basis_limbs[0].maximum_value * to_mul.binary_basis_limbs[2].maximum_value); + uint512_t max_c2 = (left.binary_basis_limbs[0].maximum_value * to_mul.binary_basis_limbs[2].maximum_value); max_c2 += (neg_modulus_limbs_u256[0] * quotient.binary_basis_limbs[2].maximum_value); - uint256_t max_d0 = (left.binary_basis_limbs[3].maximum_value * to_mul.binary_basis_limbs[0].maximum_value); + uint512_t max_d0 = (left.binary_basis_limbs[3].maximum_value * to_mul.binary_basis_limbs[0].maximum_value); max_d0 += (neg_modulus_limbs_u256[3] * quotient.binary_basis_limbs[0].maximum_value); - uint256_t max_d1 = (left.binary_basis_limbs[2].maximum_value * to_mul.binary_basis_limbs[1].maximum_value); + uint512_t max_d1 = (left.binary_basis_limbs[2].maximum_value * 
to_mul.binary_basis_limbs[1].maximum_value); max_d1 += (neg_modulus_limbs_u256[2] * quotient.binary_basis_limbs[1].maximum_value); - uint256_t max_d2 = (left.binary_basis_limbs[1].maximum_value * to_mul.binary_basis_limbs[2].maximum_value); + uint512_t max_d2 = (left.binary_basis_limbs[1].maximum_value * to_mul.binary_basis_limbs[2].maximum_value); max_d2 += (neg_modulus_limbs_u256[1] * quotient.binary_basis_limbs[2].maximum_value); - uint256_t max_d3 = (left.binary_basis_limbs[0].maximum_value * to_mul.binary_basis_limbs[3].maximum_value); + uint512_t max_d3 = (left.binary_basis_limbs[0].maximum_value * to_mul.binary_basis_limbs[3].maximum_value); max_d3 += (neg_modulus_limbs_u256[0] * quotient.binary_basis_limbs[3].maximum_value); - uint256_t max_r0 = left.binary_basis_limbs[0].maximum_value * to_mul.binary_basis_limbs[0].maximum_value; + uint512_t max_r0 = left.binary_basis_limbs[0].maximum_value * to_mul.binary_basis_limbs[0].maximum_value; max_r0 += (neg_modulus_limbs_u256[0] * quotient.binary_basis_limbs[0].maximum_value); - const uint256_t max_r1 = max_b0 + max_b1; - const uint256_t max_r2 = max_c0 + max_c1 + max_c2; - const uint256_t max_r3 = max_d0 + max_d1 + max_d2 + max_d3; + const uint512_t max_r1 = max_b0 + max_b1; + const uint512_t max_r2 = max_c0 + max_c1 + max_c2; + const uint512_t max_r3 = max_d0 + max_d1 + max_d2 + max_d3; - uint256_t max_a0(0); - uint256_t max_a1(0); + uint512_t max_a0(0); + uint512_t max_a1(0); for (size_t i = 0; i < to_add.size(); ++i) { max_a0 += to_add[i].binary_basis_limbs[0].maximum_value + (to_add[i].binary_basis_limbs[1].maximum_value << NUM_LIMB_BITS); max_a1 += to_add[i].binary_basis_limbs[2].maximum_value + (to_add[i].binary_basis_limbs[3].maximum_value << NUM_LIMB_BITS); } - const uint256_t max_lo = max_r0 + (max_r1 << NUM_LIMB_BITS) + max_a0; - const uint256_t max_hi = max_r2 + (max_r3 << NUM_LIMB_BITS) + max_a1; + const uint512_t max_lo = max_r0 + (max_r1 << NUM_LIMB_BITS) + max_a0; + const uint512_t max_hi = max_r2 + (max_r3 << NUM_LIMB_BITS) + max_a1; uint64_t max_lo_bits = (max_lo.get_msb() + 1); uint64_t max_hi_bits = max_hi.get_msb() + 1; @@ -1625,13 +1734,20 @@ void bigfield::unsafe_evaluate_multiply_add(const bigfield& input_left, ctx->decompose_into_default_range(carry_hi.witness_index, static_cast(carry_hi_msb)); } else { - field_t carry_combined = carry_lo + (carry_hi * carry_lo_shift); - carry_combined = carry_combined.normalize(); - const auto accumulators = ctx->decompose_into_base4_accumulators( - carry_combined.witness_index, static_cast(carry_lo_msb + carry_hi_msb)); - field_t accumulator_midpoint = - field_t::from_witness_index(ctx, accumulators[static_cast((carry_hi_msb / 2) - 1)]); - carry_hi.assert_equal(accumulator_midpoint, "bigfield multiply range check failed"); + if ((carry_hi_msb + carry_lo_msb) < field_t::modulus.get_msb()) { + field_t carry_combined = carry_lo + (carry_hi * carry_lo_shift); + carry_combined = carry_combined.normalize(); + const auto accumulators = ctx->decompose_into_base4_accumulators( + carry_combined.witness_index, static_cast(carry_lo_msb + carry_hi_msb)); + field_t accumulator_midpoint = + field_t::from_witness_index(ctx, accumulators[static_cast((carry_hi_msb / 2) - 1)]); + carry_hi.assert_equal(accumulator_midpoint, "bigfield multiply range check failed"); + } else { + carry_lo = carry_lo.normalize(); + carry_hi = carry_hi.normalize(); + ctx->decompose_into_base4_accumulators(carry_lo.witness_index, static_cast(carry_lo_msb)); + 
ctx->decompose_into_base4_accumulators(carry_hi.witness_index, static_cast(carry_hi_msb)); + } } } @@ -1643,7 +1759,7 @@ void bigfield::unsafe_evaluate_multiply_add(const bigfield& input_left, * (left_0 * right_0) + ... + (left_n-1 * right_n-1) + ...to_add - (input_quotient * q + ...input_remainders) = 0 * * This method supports multiple "remainders" because, when evaluating divisions, some of these remainders are terms - *we're subtracting from our product (see msub_div for more details) + * We're subtracting from our product (see msub_div for more details) * * The above quadratic relation can be evaluated using only a single quotient/remainder term. * @@ -1674,23 +1790,23 @@ void bigfield::unsafe_evaluate_multiple_multiply_add(const std::vector(max_lo_temp, max_hi_temp); + uint512_t max_b0_inner = (left.binary_basis_limbs[1].maximum_value * right.binary_basis_limbs[0].maximum_value); + uint512_t max_b1_inner = (left.binary_basis_limbs[0].maximum_value * right.binary_basis_limbs[1].maximum_value); + uint512_t max_c0_inner = (left.binary_basis_limbs[1].maximum_value * right.binary_basis_limbs[1].maximum_value); + uint512_t max_c1_inner = (left.binary_basis_limbs[2].maximum_value * right.binary_basis_limbs[0].maximum_value); + uint512_t max_c2_inner = (left.binary_basis_limbs[0].maximum_value * right.binary_basis_limbs[2].maximum_value); + uint512_t max_d0_inner = (left.binary_basis_limbs[3].maximum_value * right.binary_basis_limbs[0].maximum_value); + uint512_t max_d1_inner = (left.binary_basis_limbs[2].maximum_value * right.binary_basis_limbs[1].maximum_value); + uint512_t max_d2_inner = (left.binary_basis_limbs[1].maximum_value * right.binary_basis_limbs[2].maximum_value); + uint512_t max_d3_inner = (left.binary_basis_limbs[0].maximum_value * right.binary_basis_limbs[3].maximum_value); + uint512_t max_r0_inner = left.binary_basis_limbs[0].maximum_value * right.binary_basis_limbs[0].maximum_value; + + const uint512_t max_r1_inner = max_b0_inner + max_b1_inner; + const uint512_t max_r2_inner = max_c0_inner + max_c1_inner + max_c2_inner; + const uint512_t max_r3_inner = max_d0_inner + max_d1_inner + max_d2_inner + max_d3_inner; + const uint512_t max_lo_temp = max_r0_inner + (max_r1_inner << NUM_LIMB_BITS); + const uint512_t max_hi_temp = max_r2_inner + (max_r3_inner << NUM_LIMB_BITS); + return std::pair(max_lo_temp, max_hi_temp); }; /** @@ -1700,37 +1816,37 @@ void bigfield::unsafe_evaluate_multiple_multiply_add(const std::vector::unsafe_evaluate_multiple_multiply_add(const std::vectordecompose_into_default_range(carry_hi.witness_index, static_cast(carry_hi_msb)); } else { - field_t carry_combined = carry_lo + (carry_hi * carry_lo_shift); - carry_combined = carry_combined.normalize(); - const auto accumulators = ctx->decompose_into_base4_accumulators( - carry_combined.witness_index, static_cast(carry_lo_msb + carry_hi_msb)); - field_t accumulator_midpoint = - field_t::from_witness_index(ctx, accumulators[static_cast((carry_hi_msb / 2) - 1)]); - carry_hi.assert_equal(accumulator_midpoint, "bigfield multiply range check failed"); + if ((carry_hi_msb + carry_lo_msb) < field_t::modulus.get_msb()) { + field_t carry_combined = carry_lo + (carry_hi * carry_lo_shift); + carry_combined = carry_combined.normalize(); + const auto accumulators = ctx->decompose_into_base4_accumulators( + carry_combined.witness_index, static_cast(carry_lo_msb + carry_hi_msb)); + field_t accumulator_midpoint = + field_t::from_witness_index(ctx, accumulators[static_cast((carry_hi_msb / 2) - 1)]); + 
carry_hi.assert_equal(accumulator_midpoint, "bigfield multiply range check failed"); + } else { + carry_lo = carry_lo.normalize(); + carry_hi = carry_hi.normalize(); + ctx->decompose_into_base4_accumulators(carry_lo.witness_index, static_cast(carry_lo_msb)); + ctx->decompose_into_base4_accumulators(carry_hi.witness_index, static_cast(carry_hi_msb)); + } } } @@ -1957,38 +2080,38 @@ void bigfield::unsafe_evaluate_square_add(const bigfield& left, } C* ctx = left.context == nullptr ? quotient.context : left.context; - uint256_t max_b0 = (left.binary_basis_limbs[1].maximum_value * left.binary_basis_limbs[0].maximum_value); + uint512_t max_b0 = (left.binary_basis_limbs[1].maximum_value * left.binary_basis_limbs[0].maximum_value); max_b0 += (neg_modulus_limbs_u256[1] << NUM_LIMB_BITS); max_b0 += max_b0; - uint256_t max_c0 = (left.binary_basis_limbs[1].maximum_value * left.binary_basis_limbs[1].maximum_value); + uint512_t max_c0 = (left.binary_basis_limbs[1].maximum_value * left.binary_basis_limbs[1].maximum_value); max_c0 += (neg_modulus_limbs_u256[1] << NUM_LIMB_BITS); - uint256_t max_c1 = (left.binary_basis_limbs[2].maximum_value * left.binary_basis_limbs[0].maximum_value); + uint512_t max_c1 = (left.binary_basis_limbs[2].maximum_value * left.binary_basis_limbs[0].maximum_value); max_c1 += (neg_modulus_limbs_u256[2] << NUM_LIMB_BITS); max_c1 += max_c1; - uint256_t max_d0 = (left.binary_basis_limbs[3].maximum_value * left.binary_basis_limbs[0].maximum_value); + uint512_t max_d0 = (left.binary_basis_limbs[3].maximum_value * left.binary_basis_limbs[0].maximum_value); max_d0 += (neg_modulus_limbs_u256[3] << NUM_LIMB_BITS); max_d0 += max_d0; - uint256_t max_d1 = (left.binary_basis_limbs[2].maximum_value * left.binary_basis_limbs[1].maximum_value); + uint512_t max_d1 = (left.binary_basis_limbs[2].maximum_value * left.binary_basis_limbs[1].maximum_value); max_d1 += (neg_modulus_limbs_u256[2] << NUM_LIMB_BITS); max_d1 += max_d1; - uint256_t max_r0 = left.binary_basis_limbs[0].maximum_value * left.binary_basis_limbs[0].maximum_value; + uint512_t max_r0 = left.binary_basis_limbs[0].maximum_value * left.binary_basis_limbs[0].maximum_value; max_r0 += (neg_modulus_limbs_u256[0] << NUM_LIMB_BITS); - const uint256_t max_r1 = max_b0; - const uint256_t max_r2 = max_c0 + max_c1; - const uint256_t max_r3 = max_d0 + max_d1; + const uint512_t max_r1 = max_b0; + const uint512_t max_r2 = max_c0 + max_c1; + const uint512_t max_r3 = max_d0 + max_d1; - uint256_t max_a0(0); - uint256_t max_a1(1); + uint512_t max_a0(0); + uint512_t max_a1(1); for (size_t i = 0; i < to_add.size(); ++i) { max_a0 += to_add[i].binary_basis_limbs[0].maximum_value + (to_add[i].binary_basis_limbs[1].maximum_value << NUM_LIMB_BITS); max_a1 += to_add[i].binary_basis_limbs[2].maximum_value + (to_add[i].binary_basis_limbs[3].maximum_value << NUM_LIMB_BITS); } - const uint256_t max_lo = max_r0 + (max_r1 << NUM_LIMB_BITS) + max_a0; - const uint256_t max_hi = max_r2 + (max_r3 << NUM_LIMB_BITS) + max_a1; + const uint512_t max_lo = max_r0 + (max_r1 << NUM_LIMB_BITS) + max_a0; + const uint512_t max_hi = max_r2 + (max_r3 << NUM_LIMB_BITS) + max_a1; uint64_t max_lo_bits = max_lo.get_msb() + 1; uint64_t max_hi_bits = max_hi.get_msb() + 1; @@ -2058,14 +2181,13 @@ void bigfield::unsafe_evaluate_square_add(const bigfield& left, barretenberg::fr neg_prime = -barretenberg::fr(uint256_t(target_basis.modulus)); field_t linear_terms = -remainder.prime_basis_limb; if (to_add.size() >= 2) { - for (size_t i = 0; i < to_add.size(); i += 2) { - linear_terms = 
linear_terms.add_two(to_add[i].prime_basis_limb, to_add[i + 1].prime_basis_limb); + for (size_t i = 0; i < to_add.size() / 2; i += 1) { + linear_terms = linear_terms.add_two(to_add[2 * i].prime_basis_limb, to_add[2 * i + 1].prime_basis_limb); } } if ((to_add.size() & 1UL) == 1UL) { linear_terms += to_add[to_add.size() - 1].prime_basis_limb; } - field_t::evaluate_polynomial_identity( left.prime_basis_limb, left.prime_basis_limb, quotient.prime_basis_limb * neg_prime, linear_terms); @@ -2080,13 +2202,20 @@ void bigfield::unsafe_evaluate_square_add(const bigfield& left, ctx->decompose_into_default_range(carry_hi.witness_index, static_cast(carry_hi_msb)); } else { - field_t carry_combined = carry_lo + (carry_hi * carry_lo_shift); - carry_combined = carry_combined.normalize(); - const auto accumulators = ctx->decompose_into_base4_accumulators( - carry_combined.witness_index, static_cast(carry_lo_msb + carry_hi_msb)); - field_t accumulator_midpoint = - field_t::from_witness_index(ctx, accumulators[static_cast((carry_hi_msb / 2) - 1)]); - carry_hi.assert_equal(accumulator_midpoint, "bigfield multiply range check failed"); + if ((carry_hi_msb + carry_lo_msb) < field_t::modulus.get_msb()) { + field_t carry_combined = carry_lo + (carry_hi * carry_lo_shift); + carry_combined = carry_combined.normalize(); + const auto accumulators = ctx->decompose_into_base4_accumulators( + carry_combined.witness_index, static_cast(carry_lo_msb + carry_hi_msb)); + field_t accumulator_midpoint = + field_t::from_witness_index(ctx, accumulators[static_cast((carry_hi_msb / 2) - 1)]); + carry_hi.assert_equal(accumulator_midpoint, "bigfield multiply range check failed"); + } else { + carry_lo = carry_lo.normalize(); + carry_hi = carry_hi.normalize(); + ctx->decompose_into_base4_accumulators(carry_lo.witness_index, static_cast(carry_lo_msb)); + ctx->decompose_into_base4_accumulators(carry_hi.witness_index, static_cast(carry_hi_msb)); + } } } @@ -2143,10 +2272,14 @@ std::pair bigfield::get_quotient_reduction_info(const std::v if (mul_product_overflows_crt_modulus(as_max, bs_max, to_add)) { return std::pair(true, 0); } - const size_t num_quotient_bits = get_quotient_max_bits(remainders_max); + std::vector to_add_max; + for (auto& added_element : to_add) { + to_add_max.push_back(added_element.get_maximum_value()); + } // Get maximum value of quotient - const uint512_t maximum_quotient = compute_maximum_quotient_value(as_max, bs_max, {}); + const uint512_t maximum_quotient = compute_maximum_quotient_value(as_max, bs_max, to_add_max); + // Check if the quotient can fit into the range proof if (maximum_quotient >= (uint512_t(1) << num_quotient_bits)) { return std::pair(true, 0); From 7729e71947e87a36d5fe5738cce9e33920c032da Mon Sep 17 00:00:00 2001 From: Innokentii Sennovskii Date: Tue, 22 Nov 2022 10:46:57 +0000 Subject: [PATCH 3/3] Tracking barrentenberg benchmarks in an archive (#1762) * Storing benchmarking * Temporarily disable branch filtering to test * Restore filters * Removed file that was not needed * Added ifdef CI to the benchmark info functions to stop log pollution * Fix * Addressed charlie's comments and removed branch filtering to test * Added a human-readable field * Add filtering for defi-bridge-project back --- src/aztec/common/log.hpp | 92 ++++++++++++++++++- .../rollup/proofs/compute_circuit_data.hpp | 37 ++++++++ .../proofs/rollup/compute_circuit_data.hpp | 15 ++- .../root_rollup/compute_circuit_data.cpp | 15 ++- .../root_verifier/compute_circuit_data.hpp | 14 ++- .../primitives/bigfield/bigfield.test.cpp | 
16 ++++ .../primitives/biggroup/biggroup.test.cpp | 12 ++- 7 files changed, 194 insertions(+), 7 deletions(-) diff --git a/src/aztec/common/log.hpp b/src/aztec/common/log.hpp index 2e3263d4e4f..96a58c3c4a2 100644 --- a/src/aztec/common/log.hpp +++ b/src/aztec/common/log.hpp @@ -1,8 +1,15 @@ #pragma once #include #include +#include +#include +#include +#define BENCHMARK_INFO_PREFIX "##BENCHMARK_INFO_PREFIX##" +#define BENCHMARK_INFO_SEPARATOR "#" +#define BENCHMARK_INFO_SUFFIX "##BENCHMARK_INFO_SUFFIX##" namespace { + inline void format_chain(std::ostream&) {} template void format_chain(std::ostream& os, T const& first) @@ -22,6 +29,36 @@ template std::string format(Args... args) format_chain(os, args...); return os.str(); } + +template void benchmark_format_chain(std::ostream& os, T const& first) +{ + // We will be saving these values to a CSV file, so we can't tolerate commas + std::stringstream current_argument; + current_argument << first; + std::string current_argument_string = current_argument.str(); + std::replace(current_argument_string.begin(), current_argument_string.end(), ',', ';'); + os << current_argument_string << BENCHMARK_INFO_SUFFIX; +} + +template +void benchmark_format_chain(std::ostream& os, T const& first, Args const&... args) +{ + // We will be saving these values to a CSV file, so we can't tolerate commas + std::stringstream current_argument; + current_argument << first; + std::string current_argument_string = current_argument.str(); + std::replace(current_argument_string.begin(), current_argument_string.end(), ',', ';'); + os << current_argument_string << BENCHMARK_INFO_SEPARATOR; + benchmark_format_chain(os, args...); +} + +template std::string benchmark_format(Args... args) +{ + std::ostringstream os; + os << BENCHMARK_INFO_PREFIX; + benchmark_format_chain(os, args...); + return os.str(); +} } // namespace #if NDEBUG @@ -36,4 +73,57 @@ template inline void debug(Args...) {} template inline void info(Args... args) { logstr(format(args...).c_str()); -} \ No newline at end of file +} + +/** + * @brief Info used to store circuit statistics during CI/CD with concrete structure. Writes straight to log + * + * @details Automatically appends the necessary prefix and suffix, as well as separators. + * + * @tparam Args + * @param args + */ +#ifdef CI +template +inline void benchmark_info(Arg1 composer, Arg2 class_name, Arg3 operation, Arg4 metric, Arg5 value) +{ + logstr(benchmark_format(composer, class_name, operation, metric, value).c_str()); +} +#else +template inline void benchmark_info(Args...) {} +#endif + +/** + * @brief A class for saving benchmarks and printing them all at once in the end of the function. + * + */ +class BenchmarkInfoCollator { + + std::vector saved_benchmarks; + + public: +/** + * @brief Info used to store circuit statistics during CI/CD with concrete structure. Stores string in vector for now + * (used to flush all benchmarks at the end of test). + * + * @details Automatically appends the necessary prefix and suffix, as well as separators. + * + * @tparam Args + * @param args + */ +#ifdef CI + template + inline void benchmark_info_deferred(Arg1 composer, Arg2 class_name, Arg3 operation, Arg4 metric, Arg5 value) + { + saved_benchmarks.push_back(benchmark_format(composer, class_name, operation, metric, value).c_str()); + } +#else + template inline void benchmark_info_deferred(Args...) 
{} +#endif + ~BenchmarkInfoCollator() + { + for (auto& x : saved_benchmarks) { + logstr(x.c_str()); + } + } +}; \ No newline at end of file diff --git a/src/aztec/rollup/proofs/compute_circuit_data.hpp b/src/aztec/rollup/proofs/compute_circuit_data.hpp index 0b963a5d6a8..882b6b35ead 100644 --- a/src/aztec/rollup/proofs/compute_circuit_data.hpp +++ b/src/aztec/rollup/proofs/compute_circuit_data.hpp @@ -8,6 +8,11 @@ #include #include +#define GET_COMPOSER_NAME_STRING(composer) \ + (typeid(composer) == typeid(waffle::StandardComposer) \ + ? "StandardPlonk" \ + : typeid(composer) == typeid(waffle::TurboComposer) ? "TurboPlonk" : "NULLPlonk") + namespace rollup { namespace proofs { @@ -51,6 +56,7 @@ circuit_data get_circuit_data(std::string const& name, data.mock = mock; ComposerType composer(srs); ComposerType mock_proof_composer(srs); + BenchmarkInfoCollator benchmark_collator; auto circuit_key_path = key_path + "/" + path_name; auto pk_path = circuit_key_path + "/proving_key/proving_key"; @@ -64,12 +70,19 @@ circuit_data get_circuit_data(std::string const& name, info(name, ": Building circuit..."); Timer timer; build_circuit(composer); + + benchmark_collator.benchmark_info_deferred( + GET_COMPOSER_NAME_STRING(ComposerType), "Core", name, "Build time", timer.toString()); + benchmark_collator.benchmark_info_deferred( + GET_COMPOSER_NAME_STRING(ComposerType), "Core", name, "Gates", composer.get_num_gates()); info(name, ": Circuit built in: ", timer.toString(), "s"); info(name, ": Circuit size: ", composer.get_num_gates()); if (mock) { auto public_inputs = composer.get_public_inputs(); mock::mock_circuit(mock_proof_composer, public_inputs); info(name, ": Mock circuit size: ", mock_proof_composer.get_num_gates()); + benchmark_collator.benchmark_info_deferred( + GET_COMPOSER_NAME_STRING(ComposerType), "Core", name, "Mock Gates", composer.get_num_gates()); } } @@ -90,6 +103,8 @@ circuit_data get_circuit_data(std::string const& name, std::make_shared(std::move(pk_data), srs->get_prover_crs(pk_data.n + 1)); data.num_gates = pk_data.n; info(name, ": Circuit size 2^n: ", data.num_gates); + benchmark_collator.benchmark_info_deferred( + GET_COMPOSER_NAME_STRING(ComposerType), "Core", name, "Gates 2^n", data.num_gates); } else if (compute) { Timer timer; info(name, ": Computing proving key..."); @@ -98,14 +113,21 @@ circuit_data get_circuit_data(std::string const& name, data.num_gates = composer.get_num_gates(); data.proving_key = composer.compute_proving_key(); info(name, ": Circuit size 2^n: ", data.proving_key->n); + + benchmark_collator.benchmark_info_deferred( + GET_COMPOSER_NAME_STRING(ComposerType), "Core", name, "Gates 2^n", data.proving_key->n); } else { data.num_gates = mock_proof_composer.get_num_gates(); data.proving_key = mock_proof_composer.compute_proving_key(); info(name, ": Mock circuit size 2^n: ", data.proving_key->n); + benchmark_collator.benchmark_info_deferred( + GET_COMPOSER_NAME_STRING(ComposerType), "Core", name, "Mock Gates 2^n", data.proving_key->n); } info(name, ": Proving key computed in ", timer.toString(), "s"); + benchmark_collator.benchmark_info_deferred( + GET_COMPOSER_NAME_STRING(ComposerType), "Core", name, "Proving key computed in", timer.toString()); if (save) { info(name, ": Saving proving key..."); std::filesystem::create_directories(pk_dir.c_str()); @@ -129,6 +151,11 @@ circuit_data get_circuit_data(std::string const& name, data.verification_key = std::make_shared(std::move(vk_data), data.srs->get_verifier_crs()); info(name, ": Verification key hash: ", 
diff --git a/src/aztec/rollup/proofs/rollup/compute_circuit_data.hpp b/src/aztec/rollup/proofs/rollup/compute_circuit_data.hpp
index 985dcd32568..ea72e9f3d2a 100644
--- a/src/aztec/rollup/proofs/rollup/compute_circuit_data.hpp
+++ b/src/aztec/rollup/proofs/rollup/compute_circuit_data.hpp
@@ -50,8 +50,19 @@ inline circuit_data get_circuit_data(size_t rollup_size,
         rollup_circuit(composer, rollup, verification_keys, rollup_size);
     };
 
-    auto cd = proofs::get_circuit_data(
-        "tx rollup", name, srs, key_path, compute, save, load, pk, vk, true, mock, build_circuit);
+    auto cd = proofs::get_circuit_data("tx rollup " + std::to_string(rollup_size) + "x" +
+                                           std::to_string(rollup_size_pow2),
+                                       name,
+                                       srs,
+                                       key_path,
+                                       compute,
+                                       save,
+                                       load,
+                                       pk,
+                                       vk,
+                                       true,
+                                       mock,
+                                       build_circuit);
 
     circuit_data data;
     data.num_gates = cd.num_gates;
diff --git a/src/aztec/rollup/proofs/root_rollup/compute_circuit_data.cpp b/src/aztec/rollup/proofs/root_rollup/compute_circuit_data.cpp
index 7764ce57678..becfe588272 100644
--- a/src/aztec/rollup/proofs/root_rollup/compute_circuit_data.cpp
+++ b/src/aztec/rollup/proofs/root_rollup/compute_circuit_data.cpp
@@ -55,8 +55,19 @@ circuit_data get_circuit_data(size_t num_inner_rollups,
                       rollup_circuit_data.verification_key);
     };
 
-    auto cd = proofs::get_circuit_data(
-        "root rollup", name, srs, key_path, compute, save, load, pk, vk, true, mock, build_circuit);
+    auto cd =
+        proofs::get_circuit_data(format("root rollup ", rollup_circuit_data.num_txs, "x", num_inner_rollups),
+                                 name,
+                                 srs,
+                                 key_path,
+                                 compute,
+                                 save,
+                                 load,
+                                 pk,
+                                 vk,
+                                 true,
+                                 mock,
+                                 build_circuit);
 
     circuit_data data;
     data.num_gates = cd.num_gates;
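
With this change the benchmark label carries the circuit shape, so CSV rows produced for different rollup configurations remain distinguishable. Purely illustrative examples of the resulting labels (the sizes below are made up, not taken from this patch):

    // Illustrative labels only; actual sizes depend on the deployment configuration.
    //   rollup_size = 3,  rollup_size_pow2 = 4   ->  "tx rollup 3x4"
    //   num_txs = 28,     num_inner_rollups = 2  ->  "root rollup 28x2"
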
diff --git a/src/aztec/rollup/proofs/root_verifier/compute_circuit_data.hpp b/src/aztec/rollup/proofs/root_verifier/compute_circuit_data.hpp
index aed4f1ae564..ac330f6f9ca 100644
--- a/src/aztec/rollup/proofs/root_verifier/compute_circuit_data.hpp
+++ b/src/aztec/rollup/proofs/root_verifier/compute_circuit_data.hpp
@@ -33,7 +33,19 @@ inline circuit_data get_circuit_data(root_rollup::circuit_data const& root_rollu
     };
 
     auto cd = proofs::get_circuit_data(
-        "root verifier", name, srs, key_path, compute, save, load, pk, vk, false, mock, build_verifier_circuit);
+
+        format("root verifier ", root_rollup_circuit_data.inner_rollup_circuit_data.rollup_size, "x", valid_vks.size()),
+        name,
+        srs,
+        key_path,
+        compute,
+        save,
+        load,
+        pk,
+        vk,
+        false,
+        mock,
+        build_verifier_circuit);
 
     circuit_data data;
     data.num_gates = cd.num_gates;
diff --git a/src/aztec/stdlib/primitives/bigfield/bigfield.test.cpp b/src/aztec/stdlib/primitives/bigfield/bigfield.test.cpp
index 95484f9ed4d..34771757914 100644
--- a/src/aztec/stdlib/primitives/bigfield/bigfield.test.cpp
+++ b/src/aztec/stdlib/primitives/bigfield/bigfield.test.cpp
@@ -17,6 +17,10 @@
 #include
 #include
 
+#define GET_COMPOSER_NAME_STRING(composer)                                                                  \
+    (typeid(composer) == typeid(waffle::StandardComposer)                                                   \
+         ? "StandardPlonk"                                                                                  \
+         : typeid(composer) == typeid(waffle::TurboComposer) ? "TurboPlonk" : "NULLPlonk")
 namespace test_stdlib_bigfield {
 using namespace barretenberg;
 using namespace plonk;
@@ -34,6 +38,7 @@
 auto& engine = numeric::random::get_debug_engine();
 }
 
 template <typename Composer> class stdlib_bigfield : public testing::Test {
+
     typedef stdlib::bn254<Composer> bn254;
     typedef typename bn254::fr_ct fr_ct;
@@ -84,6 +89,7 @@ template <typename Composer> class stdlib_bigfield : public testing::Test {
         uint64_t after = composer.get_num_gates();
         if (i == num_repetitions - 1) {
             std::cout << "num gates per mul = " << after - before << std::endl;
+            benchmark_info(GET_COMPOSER_NAME_STRING(Composer), "Bigfield", "MUL", "Gate Count", after - before);
         }
         // uint256_t modulus{ Bn254FqParams::modulus_0,
         //                    Bn254FqParams::modulus_1,
@@ -124,6 +130,7 @@ template <typename Composer> class stdlib_bigfield : public testing::Test {
         uint64_t after = composer.get_num_gates();
         if (i == num_repetitions - 1) {
             std::cout << "num gates per mul = " << after - before << std::endl;
+            benchmark_info(GET_COMPOSER_NAME_STRING(Composer), "Bigfield", "SQR", "Gate Count", after - before);
         }
         // uint256_t modulus{ Bn254FqParams::modulus_0,
         //                    Bn254FqParams::modulus_1,
@@ -172,6 +179,7 @@ template <typename Composer> class stdlib_bigfield : public testing::Test {
         uint64_t after = composer.get_num_gates();
         if (i == num_repetitions - 1) {
             std::cout << "num gates per mul = " << after - before << std::endl;
+            benchmark_info(GET_COMPOSER_NAME_STRING(Composer), "Bigfield", "MADD", "Gate Count", after - before);
         }
         // uint256_t modulus{ Bn254FqParams::modulus_0,
         //                    Bn254FqParams::modulus_1,
@@ -234,6 +242,8 @@ template <typename Composer> class stdlib_bigfield : public testing::Test {
         uint64_t after = composer.get_num_gates();
         if (i == num_repetitions - 1) {
             std::cout << "num gates with mult_madd = " << after - before << std::endl;
+            benchmark_info(
+                GET_COMPOSER_NAME_STRING(Composer), "Bigfield", "MULT_MADD", "Gate Count", after - before);
         }
         /**
         before = composer.get_num_gates();
@@ -337,7 +347,13 @@ template <typename Composer> class stdlib_bigfield : public testing::Test {
         fq_ct b(witness_ct(&composer, fr(uint256_t(inputs[1]).slice(0, fq_ct::NUM_LIMB_BITS * 2))),
                 witness_ct(&composer, fr(uint256_t(inputs[1]).slice(fq_ct::NUM_LIMB_BITS * 2, fq_ct::NUM_LIMB_BITS * 4))));
 
+        uint64_t before = composer.get_num_gates();
         fq_ct c = a / b;
+        uint64_t after = composer.get_num_gates();
+        if (i == num_repetitions - 1) {
+            std::cout << "num gates per div = " << after - before << std::endl;
+            benchmark_info(GET_COMPOSER_NAME_STRING(Composer), "Bigfield", "DIV", "Gate Count", after - before);
+        }
         // uint256_t modulus{ Bn254FqParams::modulus_0,
         //                    Bn254FqParams::modulus_1,
         //                    Bn254FqParams::modulus_2,
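
Each of the test hunks above repeats the same measure-around-the-operation pattern; a hypothetical helper expressing it (not part of this patch, and it assumes the test file's existing includes plus the GET_COMPOSER_NAME_STRING macro defined above) might look like:

    // Hypothetical helper equivalent to the inlined pattern above; not part of this patch.
    // Note: the actual tests only print/report on the final repetition of their loops.
    template <typename Composer, typename Op>
    uint64_t measure_gate_count(Composer& composer, char const* class_name, char const* op_name, Op&& op)
    {
        uint64_t before = composer.get_num_gates();
        op(); // perform the circuit operation being measured, e.g. a bigfield multiplication
        uint64_t after = composer.get_num_gates();
        std::cout << "num gates per " << op_name << " = " << (after - before) << std::endl;
        benchmark_info(GET_COMPOSER_NAME_STRING(Composer), class_name, op_name, "Gate Count", after - before);
        return after - before;
    }
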
diff --git a/src/aztec/stdlib/primitives/biggroup/biggroup.test.cpp b/src/aztec/stdlib/primitives/biggroup/biggroup.test.cpp
index d44e51cd3de..f1d65966164 100644
--- a/src/aztec/stdlib/primitives/biggroup/biggroup.test.cpp
+++ b/src/aztec/stdlib/primitives/biggroup/biggroup.test.cpp
@@ -20,6 +20,11 @@
 using namespace barretenberg;
 using namespace plonk;
 
+#define GET_COMPOSER_NAME_STRING(composer)                                                                  \
+    (typeid(composer) == typeid(waffle::StandardComposer)                                                   \
+         ? "StandardPlonk"                                                                                  \
+         : typeid(composer) == typeid(waffle::TurboComposer) ? "TurboPlonk" : "NULLPlonk")
+
 // SEE BOTTOM FOR REMNANTS OF TESTS FOR PLOOKUP AND NOTE ON UPDATING THOSE
 
 template <typename Composer> class stdlib_biggroup : public testing::Test {
@@ -93,8 +98,13 @@ template <typename Composer> class stdlib_biggroup : public testing::Test {
         g1_bigfr_ct a = convert_inputs_bigfr(&composer, input_a);
         g1_bigfr_ct b = convert_inputs_bigfr(&composer, input_b);
-
+        uint64_t before = composer.get_num_gates();
         g1_bigfr_ct c = a + b;
+        uint64_t after = composer.get_num_gates();
+        if (i == num_repetitions - 1) {
+            std::cout << "num gates per add = " << after - before << std::endl;
+            benchmark_info(GET_COMPOSER_NAME_STRING(Composer), "Biggroup", "ADD", "Gate Count", after - before);
+        }
 
         g1::affine_element c_expected(g1::element(input_a) + g1::element(input_b));