diff --git a/.gitmodules b/.gitmodules index 64ffb023e533..24ef9571bd48 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,18 +1,6 @@ -[submodule "legacy-barretenberg-build-system"] - path = circuits/cpp/barretenberg/build-system - url = https://github.com/AztecProtocol/build-system [submodule "l1-contracts/lib/openzeppelin-contracts"] path = l1-contracts/lib/openzeppelin-contracts url = https://github.com/openzeppelin/openzeppelin-contracts [submodule "l1-contracts/lib/forge-std"] path = l1-contracts/lib/forge-std url = https://github.com/foundry-rs/forge-std -[submodule "circuits/cpp/barretenberg/sol/lib/forge-std"] - path = circuits/cpp/barretenberg/sol/lib/forge-std - url = https://github.com/foundry-rs/forge-std -[submodule "circuits/cpp/barretenberg/sol/lib/solidity-stringutils"] - path = circuits/cpp/barretenberg/sol/lib/solidity-stringutils - url = https://github.com/Arachnid/solidity-stringutils -[submodule "circuits/cpp/barretenberg/sol/lib/openzeppelin-contracts"] - path = circuits/cpp/barretenberg/sol/lib/openzeppelin-contracts - url = https://github.com/OpenZeppelin/openzeppelin-contracts diff --git a/circuits/cpp/barretenberg/.github/workflows/release-please.yml b/circuits/cpp/barretenberg/.github/workflows/release-please.yml index 79741dff70e6..f31e44c56ca4 100644 --- a/circuits/cpp/barretenberg/.github/workflows/release-please.yml +++ b/circuits/cpp/barretenberg/.github/workflows/release-please.yml @@ -28,86 +28,67 @@ jobs: needs: [release-please] if: ${{ needs.release-please.outputs.tag-name }} steps: - - name: Checkout Code - uses: actions/checkout@v3 - - - name: Install bleeding edge cmake - run: | - sudo apt -y remove --purge cmake - sudo snap install cmake --classic - - - name: Create Build Environment - run: | - sudo apt-get update - sudo apt-get -y install clang ninja-build yarn - - name: Install yarn - run: | - curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | sudo apt-key add - - echo "deb https://dl.yarnpkg.com/debian/ stable main" | sudo 
tee /etc/apt/sources.list.d/yarn.list - sudo apt -y update && sudo apt -y install yarn - - name: Compile Barretenberg - run: | + - name: Checkout Code + uses: actions/checkout@v2 + + - name: Install bleeding edge cmake + run: | + sudo apt -y remove --purge cmake + sudo snap install cmake --classic + + - name: Create Build Environment + run: | + sudo apt-get update + sudo apt-get -y install clang ninja-build yarn + - name: Install yarn + run: | + curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | sudo apt-key add - + echo "deb https://dl.yarnpkg.com/debian/ stable main" | sudo tee /etc/apt/sources.list.d/yarn.list + sudo apt -y update && sudo apt -y install yarn + - name: Compile Barretenberg + run: | + cd cpp + cmake --preset default -DCMAKE_BUILD_TYPE=RelWithAssert + cmake --build --preset default --target bb + + - name: Install WASI-SDK + run: | cd cpp - - cmake --preset default -DCMAKE_BUILD_TYPE=RelWithAssert - cmake --build --preset default --target bb - - - name: Install WASI-SDK - run: | - cd cpp - + ./scripts/install-wasi-sdk.sh - - - name: Compile Typescript - run: | - cd ts - yarn install && yarn && yarn build - - - name: Checkout destination repository - uses: actions/checkout@v3 - with: - repository: AztecProtocol/dev-bb.js - path: ./dev-bb.js - token: ${{ secrets.AZTEC_BOT_GITHUB_TOKEN }} - - - name: Push to destination repository - run: | - cd ./dev-bb.js - cp -R ../ts/dest/* . - git config --global user.name AztecBot - git config --global user.email tech@aztecprotocol.com - git checkout -b dev || git checkout dev - git add . 
- git commit -m "Tracking changes" - git push origin dev - - - name: Tar and GZip barretenberg.wasm - run: tar -cvzf barretenberg.wasm.tar.gz cpp/build-wasm/bin/barretenberg.wasm - - - name: Setup Node.js - uses: actions/setup-node@v2 - with: - node-version: "18" - registry-url: "https://registry.npmjs.org" - - - name: Deploy Typescript to NPM - run: | - cd ts - yarn deploy - env: - NODE_AUTH_TOKEN: ${{secrets.NPM_TOKEN}} - - - name: Tar and GZip bb Binary (Ubuntu) - run: tar -cvzf bb-ubuntu.tar.gz cpp/build/bin/bb - - - name: Upload artifacts - uses: actions/upload-artifact@v2 - with: + + - name: Compile Typescript + run: | + cd ts + yarn install && yarn && yarn build + + - name: Tar and GZip barretenberg.wasm + run: tar -cvzf barretenberg.wasm.tar.gz cpp/build-wasm/bin/barretenberg.wasm + + - name: Setup Node.js + uses: actions/setup-node@v2 + with: + node-version: '18' + registry-url: 'https://registry.npmjs.org' + + - name: Deploy Typescript to NPM + run: | + cd ts + yarn deploy + env: + NODE_AUTH_TOKEN: ${{secrets.NPM_TOKEN}} + + - name: Tar and GZip bb Binary (Ubuntu) + run: tar -cvzf bb-ubuntu.tar.gz cpp/build/bin/bb + + - name: Upload artifacts + uses: actions/upload-artifact@v2 + with: name: release-linux-wasm path: | barretenberg.wasm.tar.gz bb-ubuntu.tar.gz - + build-mac: name: Build on Mac runs-on: macos-13 @@ -116,31 +97,36 @@ jobs: steps: - name: Checkout uses: actions/checkout@v3 - # We need clang 14.0.3 or higher, as lower versions do not seem - # to be spec conformant. In particular std::span does not seem - # to follow the specifications. +# We need clang 14.0.3 or higher, as lower versions do not seem +# to be spec conformant. In particular std::span does not seem +# to follow the specifications. 
- name: Select Xcode version run: | - sudo xcode-select -switch /Applications/Xcode_14.3.1.app + sudo xcode-select -switch /Applications/Xcode_14.3.1.app - name: Create Mac Build Environment run: | brew install cmake ninja - + - name: Compile Barretenberg + working-directory: cpp run: | - cd cpp cmake --preset default -DCMAKE_BUILD_TYPE=RelWithAssert cmake --build --preset default --target bb + + - name: Package barretenberg artifact + working-directory: cpp/build/bin + run: | + mkdir dist + cp ./bb ./dist/bb + 7z a -ttar -so -an ./dist/* | 7z a -si ./barretenberg-x86_64-apple-darwin.tar.gz - - name: Tar bb binary (Mac) - run: tar -cvzf bb-mac.tar.gz cpp/build/bin/bb - - - name: Upload artifacts - uses: actions/upload-artifact@v2 + - name: Upload artifact + uses: actions/upload-artifact@v3 with: - name: release-mac - path: bb-mac.tar.gz + name: barretenberg-x86_64-apple-darwin + path: ./cpp/build/bin/barretenberg-x86_64-apple-darwin.tar.gz + retention-days: 3 release: name: Publish @@ -151,12 +137,12 @@ jobs: uses: actions/download-artifact@v2 with: name: release-linux-wasm - + - name: Download files from Mac Runner uses: actions/download-artifact@v2 with: - name: release-mac - + name: barretenberg-x86_64-apple-darwin + - name: Publish to GitHub uses: softprops/action-gh-release@v1 with: @@ -165,4 +151,4 @@ jobs: files: | barretenberg.wasm.tar.gz bb-ubuntu.tar.gz - bb-mac.tar.gz + barretenberg-x86_64-apple-darwin.tar.gz diff --git a/circuits/cpp/barretenberg/.gitrepo b/circuits/cpp/barretenberg/.gitrepo index 6740555cbc66..c30d4d5728ea 100644 --- a/circuits/cpp/barretenberg/.gitrepo +++ b/circuits/cpp/barretenberg/.gitrepo @@ -6,7 +6,7 @@ [subrepo] remote = https://github.com/AztecProtocol/barretenberg branch = master - commit = d4ade2a5f06a3abf3c9c2635946d7121cc2f64b4 - parent = 5cf9c203e126b7613bf80960063d86cb9ee97954 + commit = c5c2934ac39a8be401df4452428d367f6d224ec9 + parent = 3afd853074be02ebf0a8d1f6187e49505513017e method = merge cmdver = 0.4.6 diff --git 
a/circuits/cpp/barretenberg/.release-please-manifest.json b/circuits/cpp/barretenberg/.release-please-manifest.json index 9464665e4cee..2d487c350306 100644 --- a/circuits/cpp/barretenberg/.release-please-manifest.json +++ b/circuits/cpp/barretenberg/.release-please-manifest.json @@ -1 +1 @@ -{".":"0.3.4","ts":"0.3.4"} \ No newline at end of file +{".":"0.3.6","ts":"0.3.6"} \ No newline at end of file diff --git a/circuits/cpp/barretenberg/CHANGELOG.md b/circuits/cpp/barretenberg/CHANGELOG.md index f6549df82d02..5d79640e5857 100644 --- a/circuits/cpp/barretenberg/CHANGELOG.md +++ b/circuits/cpp/barretenberg/CHANGELOG.md @@ -1,5 +1,33 @@ # Changelog +## [0.3.6](https://github.com/AztecProtocol/barretenberg/compare/barretenberg-v0.3.5...barretenberg-v0.3.6) (2023-08-08) + + +### Features + +* Update release-please.yml ([#651](https://github.com/AztecProtocol/barretenberg/issues/651)) ([2795df6](https://github.com/AztecProtocol/barretenberg/commit/2795df6b705175a32fe2a6f18b3c572e297e277e)) + +## [0.3.5](https://github.com/AztecProtocol/barretenberg/compare/barretenberg-v0.3.4...barretenberg-v0.3.5) (2023-08-07) + + +### Features + +* Celer benchmark ([#1369](https://github.com/AztecProtocol/barretenberg/issues/1369)) ([d4ade2a](https://github.com/AztecProtocol/barretenberg/commit/d4ade2a5f06a3abf3c9c2635946d7121cc2f64b4)) +* Goblin Honk Composer/Prover/Verifier ([#1220](https://github.com/AztecProtocol/barretenberg/issues/1220)) ([970bb07](https://github.com/AztecProtocol/barretenberg/commit/970bb073763cc59552cd05dccf7f8fc63f58cef9)) +* Goblin translator prototype ([#1249](https://github.com/AztecProtocol/barretenberg/issues/1249)) ([7738d74](https://github.com/AztecProtocol/barretenberg/commit/7738d74791acc0fa8b1b1d8bb2a77783ca900123)) +* Internal keyword + lending contract and tests ([#978](https://github.com/AztecProtocol/barretenberg/issues/978)) ([e58ca4b](https://github.com/AztecProtocol/barretenberg/commit/e58ca4b332272fc57b2a5358bb5003bac79a8f5a)) +* Minimal 
barretenberg .circleci ([#1352](https://github.com/AztecProtocol/barretenberg/issues/1352)) ([708e2e2](https://github.com/AztecProtocol/barretenberg/commit/708e2e2786de5dce5bfc770c54734e5862a436e5)) + + +### Bug Fixes + +* Bootstrap.sh git hook for monorepo ([#1256](https://github.com/AztecProtocol/barretenberg/issues/1256)) ([b22b8d5](https://github.com/AztecProtocol/barretenberg/commit/b22b8d5f42ddfae140068c3ce8b3053d4c8d1874)) +* Build-system spot request cancellation ([#1339](https://github.com/AztecProtocol/barretenberg/issues/1339)) ([fc1d96a](https://github.com/AztecProtocol/barretenberg/commit/fc1d96a744a8d5a6cae06c408546c3638408551d)) +* Fixing external benchmarks ([#1250](https://github.com/AztecProtocol/barretenberg/issues/1250)) ([0ea6a39](https://github.com/AztecProtocol/barretenberg/commit/0ea6a39950e8cd5ff7765031457c162d03ebae06)) +* Fixing fuzzing build after composer splitting ([#1317](https://github.com/AztecProtocol/barretenberg/issues/1317)) ([946c23c](https://github.com/AztecProtocol/barretenberg/commit/946c23c52d45ddce973e453c40c048734e7f6937)) +* Reinstate barretenberg-benchmark-aggregator ([#1330](https://github.com/AztecProtocol/barretenberg/issues/1330)) ([407a915](https://github.com/AztecProtocol/barretenberg/commit/407a915a94c7d83dec9e14a11ad0e3461fd2906d)) +* Retry git submodule fetch ([#1371](https://github.com/AztecProtocol/barretenberg/issues/1371)) ([037dda3](https://github.com/AztecProtocol/barretenberg/commit/037dda3d254d56a20292d2bed5a9582d36c08427)) + ## [0.3.4](https://github.com/AztecProtocol/barretenberg/compare/barretenberg-v0.3.3...barretenberg-v0.3.4) (2023-07-25) diff --git a/circuits/cpp/barretenberg/VERSION b/circuits/cpp/barretenberg/VERSION index 8d7d53e0fe71..7ea12e0d6909 100644 --- a/circuits/cpp/barretenberg/VERSION +++ b/circuits/cpp/barretenberg/VERSION @@ -1 +1 @@ -v0.3.4 x-release-please-version +v0.3.6 x-release-please-version diff --git a/circuits/cpp/barretenberg/acir_tests/run_acir_tests.sh 
b/circuits/cpp/barretenberg/acir_tests/run_acir_tests.sh index f80da051b99c..2767f2ec909c 100755 --- a/circuits/cpp/barretenberg/acir_tests/run_acir_tests.sh +++ b/circuits/cpp/barretenberg/acir_tests/run_acir_tests.sh @@ -18,29 +18,45 @@ if [ ! -d acir_tests ]; then git clone -b $BRANCH --filter=blob:none --no-checkout https://github.com/noir-lang/noir.git cd noir git sparse-checkout init --cone - git sparse-checkout set crates/nargo_cli/tests/execution_success + git sparse-checkout set crates/nargo_cli/tests/test_data git checkout cd .. - mv noir/crates/nargo_cli/tests/execution_success acir_tests + mv noir/crates/nargo_cli/tests/test_data acir_tests rm -rf noir fi fi cd acir_tests +# Parse exclude and fail directories from cargo.toml +exclude_dirs=$(grep "^exclude" config.toml | sed 's/exclude = \[//;s/\]//;s/\"//g;s/ //g' | tr ',' '\n') +fail_dirs=$(grep "^fail" config.toml | sed 's/fail = \[//;s/\]//;s/\"//g;s/ //g' | tr ',' '\n') + # Convert them to array +exclude_array=($exclude_dirs) +fail_array=($fail_dirs) skip_array=(diamond_deps_0 workspace workspace_default_member) function test() { echo -n "Testing $1... " dir_name=$(basename "$1") + if [[ " ${exclude_array[@]} " =~ " $dir_name " ]]; then + echo -e "\033[33mSKIPPED\033[0m (excluded)" + return + fi + + if [[ " ${fail_array[@]} " =~ " $dir_name " ]]; then + echo -e "\033[33mSKIPPED\033[0m (would fail)" + return + fi + if [[ " ${skip_array[@]} " =~ " $dir_name " ]]; then echo -e "\033[33mSKIPPED\033[0m (hardcoded to skip)" return fi - if [[ ! -f ./$1/target/$dir_name.json || ! -f ./$1/target/witness.tr ]]; then + if [[ ! -f ./$1/target/main.json || ! 
-f ./$1/target/witness.tr ]]; then echo -e "\033[33mSKIPPED\033[0m (uncompiled)" return fi @@ -49,9 +65,9 @@ function test() { set +e if [ -n "$VERBOSE" ]; then - $BB prove_and_verify -v -c $CRS_PATH -j ./target/$dir_name.json + $BB prove_and_verify -v -c $CRS_PATH else - $BB prove_and_verify -c $CRS_PATH -j ./target/$dir_name.json > /dev/null 2>&1 + $BB prove_and_verify -c $CRS_PATH > /dev/null 2>&1 fi result=$? set -e diff --git a/circuits/cpp/barretenberg/barretenberg-wasm.nix b/circuits/cpp/barretenberg/barretenberg-wasm.nix index bdcb4b84be33..25b504dcbeb7 100644 --- a/circuits/cpp/barretenberg/barretenberg-wasm.nix +++ b/circuits/cpp/barretenberg/barretenberg-wasm.nix @@ -6,7 +6,7 @@ in stdenv.mkDerivation { pname = "barretenberg.wasm"; - version = "0.3.4"; # x-release-please-version + version = "0.3.6"; # x-release-please-version src = ./cpp; diff --git a/circuits/cpp/barretenberg/barretenberg.nix b/circuits/cpp/barretenberg/barretenberg.nix index dd744c37c74f..3ccaee5ddb7a 100644 --- a/circuits/cpp/barretenberg/barretenberg.nix +++ b/circuits/cpp/barretenberg/barretenberg.nix @@ -14,7 +14,7 @@ in buildEnv.mkDerivation { pname = "libbarretenberg"; - version = "0.3.4"; # x-release-please-version + version = "0.3.6"; # x-release-please-version src = ./cpp; diff --git a/circuits/cpp/barretenberg/cpp/CMakeLists.txt b/circuits/cpp/barretenberg/cpp/CMakeLists.txt index 9877fc65cac5..81f0e9e87889 100644 --- a/circuits/cpp/barretenberg/cpp/CMakeLists.txt +++ b/circuits/cpp/barretenberg/cpp/CMakeLists.txt @@ -6,7 +6,7 @@ cmake_minimum_required(VERSION 3.24) project( Barretenberg DESCRIPTION "BN254 elliptic curve library, and PLONK SNARK prover" - VERSION 0.3.4 # x-release-please-version + VERSION 0.3.6 # x-release-please-version LANGUAGES CXX C ) diff --git a/circuits/cpp/barretenberg/cpp/bin-test/Nargo.toml b/circuits/cpp/barretenberg/cpp/bin-test/Nargo.toml index 4f58c0383aa8..670888e37cd6 100644 --- a/circuits/cpp/barretenberg/cpp/bin-test/Nargo.toml +++ 
b/circuits/cpp/barretenberg/cpp/bin-test/Nargo.toml @@ -1,5 +1,4 @@ [package] -name = "" authors = [""] compiler_version = "0.6.0" diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/common/fuzzer.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/common/fuzzer.hpp index 3462dc53d846..9910a83d6095 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/common/fuzzer.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/common/fuzzer.hpp @@ -1,5 +1,4 @@ #pragma once -#include "barretenberg/numeric/uint256/uint256.hpp" #include "barretenberg/proof_system/circuit_builder/standard_circuit_builder.hpp" #include "barretenberg/proof_system/circuit_builder/turbo_circuit_builder.hpp" #include @@ -185,102 +184,6 @@ concept InstructionWeightsEnabled = requires { typename T::InstructionWeights; T::InstructionWeights::_LIMIT; }; - -/** - * @brief Mutate the value of a field element - * - * @tparam T PRNG class - * @param e Initial element value - * @param rng PRNG - * @param havoc_config Mutation configuration - * @return Mutated element - */ -template -inline static FF mutateFieldElement(FF e, T& rng) - requires SimpleRng -{ - // With a certain probability, we apply changes to the Montgomery form, rather than the plain form. This - // has merit, since the computation is performed in montgomery form and comparisons are often performed - // in it, too. 
Libfuzzer comparison tracing logic can then be enabled in Montgomery form - bool convert_to_montgomery = (rng.next() & 1); - uint256_t value_data; - // Conversion at the start -#define MONT_CONVERSION_LOCAL \ - if (convert_to_montgomery) { \ - value_data = uint256_t(e.to_montgomery_form()); \ - } else { \ - value_data = uint256_t(e); \ - } - // Inverse conversion at the end -#define INV_MONT_CONVERSION_LOCAL \ - if (convert_to_montgomery) { \ - e = FF(value_data).from_montgomery_form(); \ - } else { \ - e = FF(value_data); \ - } - - // Pick the last value from the mutation distrivution vector - // Choose mutation - const size_t choice = rng.next() % 4; - // 50% probability to use standard mutation - if (choice < 2) { - // Delegate mutation to libfuzzer (bit/byte mutations, autodictionary, etc) - MONT_CONVERSION_LOCAL - LLVMFuzzerMutate((uint8_t*)&value_data, sizeof(uint256_t), sizeof(uint256_t)); - INV_MONT_CONVERSION_LOCAL - } else if (choice < 3) { // 25% to use small additions - - // Small addition/subtraction - if (convert_to_montgomery) { - e = e.to_montgomery_form(); - } - if (rng.next() & 1) { - value_data = e + FF(rng.next() & 0xff); - } else { - value_data = e - FF(rng.next() & 0xff); - } - if (convert_to_montgomery) { - e = e.from_montgomery_form(); - } - } else { // 25% to use special values - - // Substitute field element with a special value - MONT_CONVERSION_LOCAL - switch (rng.next() % 8) { - case 0: - e = FF::zero(); - break; - case 1: - e = FF::one(); - break; - case 2: - e = -FF::one(); - break; - case 3: - e = FF::one().sqrt().second; - break; - case 4: - e = FF::one().sqrt().second.invert(); - break; - case 5: - e = FF::get_root_of_unity(8); - break; - case 6: - e = FF(2); - break; - case 7: - e = FF((FF::modulus - 1) / 2); - break; - default: - abort(); - break; - } - INV_MONT_CONVERSION_LOCAL - } - // Return instruction - return e; -} - /** * @brief A templated class containing most of the fuzzing logic for a generic Arithmetic class * diff 
--git a/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/c_bind.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/c_bind.cpp index 4ee21ff4acf9..09f8b0c7d883 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/c_bind.cpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/c_bind.cpp @@ -54,16 +54,6 @@ WASM_EXPORT void pedersen__compress_with_hash_index(uint8_t const* inputs_buffer barretenberg::fr::serialize_to_buffer(r, output); } -WASM_EXPORT void pedersen_plookup_compress_with_hash_index(uint8_t const* inputs_buffer, - uint8_t* output, - uint32_t hash_index) -{ - std::vector to_compress; - read(inputs_buffer, to_compress); - auto r = crypto::pedersen_commitment::lookup::compress_native(to_compress, hash_index); - barretenberg::fr::serialize_to_buffer(r, output); -} - WASM_EXPORT void pedersen__commit(uint8_t const* inputs_buffer, uint8_t* output) { std::vector to_compress; diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/c_bind.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/c_bind.hpp index 571cd7936cf6..971bd717d439 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/c_bind.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/crypto/pedersen_commitment/c_bind.hpp @@ -16,9 +16,6 @@ WASM_EXPORT void pedersen__compress(uint8_t const* inputs_buffer, uint8_t* outpu WASM_EXPORT void pedersen_plookup_compress(uint8_t const* inputs_buffer, uint8_t* output); WASM_EXPORT void pedersen__compress_with_hash_index(uint8_t const* inputs_buffer, uint8_t* output, uint32_t hash_index); -WASM_EXPORT void pedersen_plookup_compress_with_hash_index(uint8_t const* inputs_buffer, - uint8_t* output, - uint32_t hash_index); WASM_EXPORT void pedersen__commit(uint8_t const* inputs_buffer, uint8_t* output); WASM_EXPORT void pedersen_plookup_commit(uint8_t const* 
inputs_buffer, uint8_t* output); diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/honk/sumcheck/sumcheck.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/honk/sumcheck/sumcheck.hpp index c1d27fd54629..395e65e767df 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/honk/sumcheck/sumcheck.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/honk/sumcheck/sumcheck.hpp @@ -28,7 +28,7 @@ template class SumcheckProver { ProverTranscript& transcript; const size_t multivariate_n; const size_t multivariate_d; - SumcheckProverRound round; + SumcheckRound round; /** * @@ -164,7 +164,7 @@ template class SumcheckVerifier { VerifierTranscript& transcript; const size_t multivariate_d; - SumcheckVerifierRound round; + SumcheckRound round; // verifier instantiates sumcheck with circuit size and a verifier transcript explicit SumcheckVerifier(size_t multivariate_n, VerifierTranscript& transcript) diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/honk/sumcheck/sumcheck_round.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/honk/sumcheck/sumcheck_round.hpp index e3444d61835c..cee41dc00547 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/honk/sumcheck/sumcheck_round.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/honk/sumcheck/sumcheck_round.hpp @@ -55,16 +55,19 @@ namespace proof_system::honk::sumcheck { @todo TODO(#390): Template only on Flavor? Is it useful to have these decoupled? 
*/ -template class SumcheckProverRound { +template class SumcheckRound { using Relations = typename Flavor::Relations; using RelationUnivariates = typename Flavor::RelationUnivariates; + using RelationEvaluations = typename Flavor::RelationValues; public: using FF = typename Flavor::FF; template using ExtendedEdges = typename Flavor::template ExtendedEdges; + using ClaimedEvaluations = typename Flavor::ClaimedEvaluations; + bool round_failed = false; size_t round_size; // a power of 2 Relations relations; @@ -72,19 +75,25 @@ template class SumcheckProverRound { static constexpr size_t MAX_RELATION_LENGTH = Flavor::MAX_RELATION_LENGTH; static constexpr size_t MAX_RANDOM_RELATION_LENGTH = Flavor::MAX_RANDOM_RELATION_LENGTH; + FF target_total_sum = 0; + RelationUnivariates univariate_accumulators; + RelationEvaluations relation_evaluations; // TODO(#224)(Cody): this should go away BarycentricData barycentric_2_to_max = BarycentricData(); // Prover constructor - SumcheckProverRound(size_t initial_round_size) + SumcheckRound(size_t initial_round_size) : round_size(initial_round_size) { // Initialize univariate accumulators to 0 zero_univariates(univariate_accumulators); } + // Verifier constructor + explicit SumcheckRound() { zero_elements(relation_evaluations); }; + /** * @brief Given a tuple t = (t_0, t_1, ..., t_{NUM_RELATIONS-1}) and a challenge α, * return t_0 + αt_1 + ... + α^{NUM_RELATIONS-1}t_{NUM_RELATIONS-1}). @@ -192,6 +201,62 @@ template class SumcheckProverRound { return batch_over_relations(alpha, pow_univariate); } + /** + * @brief Calculate the contribution of each relation to the expected value of the full Honk relation. + * + * @details For each relation, use the purported values (supplied by the prover) of the multivariates to calculate + * a contribution to the purported value of the full Honk relation. These are stored in `evaluations`. 
Adding these + * together, with appropriate scaling factors, produces the expected value of the full Honk relation. This value is + * checked against the final value of the target total sum, defined as sigma_d. + */ + FF compute_full_honk_relation_purported_value(ClaimedEvaluations purported_evaluations, + const RelationParameters& relation_parameters, + const PowUnivariate& pow_univariate, + const FF alpha) + { + accumulate_relation_evaluations<>( + purported_evaluations, relation_parameters, pow_univariate.partial_evaluation_constant); + + auto running_challenge = FF(1); + auto output = FF(0); + scale_and_batch_elements(relation_evaluations, alpha, running_challenge, output); + return output; + } + + /** + * @brief check if S^{l}(0) + S^{l}(1) = S^{l-1}(u_{l-1}) = sigma_{l} (or 0 if l=0) + * + * @param univariate T^{l}(X), the round univariate that is equal to S^{l}(X)/( (1−X) + X⋅ζ^{ 2^l } ) + */ + bool check_sum(Univariate& univariate) + { + // S^{l}(0) = ( (1−0) + 0⋅ζ^{ 2^l } ) ⋅ T^{l}(0) = T^{l}(0) + // S^{l}(1) = ( (1−1) + 1⋅ζ^{ 2^l } ) ⋅ T^{l}(1) = ζ^{ 2^l } ⋅ T^{l}(1) + FF total_sum = univariate.value_at(0) + univariate.value_at(1); + // target_total_sum = sigma_{l} = + bool sumcheck_round_failed = (target_total_sum != total_sum); + round_failed = round_failed || sumcheck_round_failed; + return !sumcheck_round_failed; + }; + + /** + * @brief After checking that the univariate is good for this round, compute the next target sum. + * + * @param univariate T^l(X), given by its evaluations over {0,1,2,...}, + * equal to S^{l}(X)/( (1−X) + X⋅ζ^{ 2^l } ) + * @param round_challenge u_l + * @return FF sigma_{l+1} = S^l(u_l) + */ + FF compute_next_target_sum(Univariate& univariate, FF& round_challenge) + { + // IMPROVEMENT(Cody): Use barycentric static method, maybe implement evaluation as member + // function on Univariate. 
+ auto barycentric = BarycentricData(); + // Evaluate T^{l}(u_{l}) + target_total_sum = barycentric.evaluate(univariate, round_challenge); + return target_total_sum; + } + private: /** * @brief For a given edge, calculate the contribution of each relation to the prover round univariate (S_l in the @@ -224,6 +289,34 @@ template class SumcheckProverRound { } } + // TODO(#224)(Cody): make uniform with accumulate_relation_univariates + /** + * @brief Calculate the contribution of each relation to the expected value of the full Honk relation. + * + * @details For each relation, use the purported values (supplied by the prover) of the multivariates to calculate + * a contribution to the purported value of the full Honk relation. These are stored in `evaluations`. Adding these + * together, with appropriate scaling factors, produces the expected value of the full Honk relation. This value is + * checked against the final value of the target total sum (called sigma_0 in the thesis). + */ + template + // TODO(#224)(Cody): Input should be an array? + void accumulate_relation_evaluations(ClaimedEvaluations purported_evaluations, + const RelationParameters& relation_parameters, + const FF& partial_evaluation_constant) + { + std::get(relations).add_full_relation_value_contribution( + std::get(relation_evaluations), + purported_evaluations, + relation_parameters, + partial_evaluation_constant); + + // Repeat for the next relation. + if constexpr (relation_idx + 1 < NUM_RELATIONS) { + accumulate_relation_evaluations( + purported_evaluations, relation_parameters, partial_evaluation_constant); + } + } + public: // TODO(luke): Potentially make RelationUnivarites (tuple of tuples of Univariates) a class and make these utility // functions class methods. 
Alternatively, move all of these tuple utilities (and the ones living elsewhere) to @@ -328,152 +421,6 @@ template class SumcheckProverRound { } } - /** - * @brief Componentwise addition of two tuples - * @details Used for adding tuples of Univariates but in general works for any object for which += is - * defined. The result is stored in the first tuple. - * - * @tparam T Type of the elements contained in the tuples - * @param tuple_1 First summand. Result stored in this tuple - * @param tuple_2 Second summand - */ - template - static constexpr void add_tuples(std::tuple& tuple_1, const std::tuple& tuple_2) - { - auto add_tuples_helper = [&](std::index_sequence) { ((std::get(tuple_1) += std::get(tuple_2)), ...); }; - - add_tuples_helper(std::make_index_sequence{}); - } - - /** - * @brief Componentwise addition of nested tuples (tuples of tuples) - * @details Used for summing tuples of tuples of Univariates. Needed for Sumcheck multithreading. Each thread - * accumulates realtion contributions across a portion of the hypecube and then the results are accumulated into a - * single nested tuple. - * - * @tparam Tuple - * @tparam Index Index into outer tuple - * @param tuple_1 First nested tuple summand. 
Result stored here - * @param tuple_2 Second summand - */ - template - static constexpr void add_nested_tuples(Tuple& tuple_1, const Tuple& tuple_2) - { - if constexpr (Index < std::tuple_size::value) { - add_tuples(std::get(tuple_1), std::get(tuple_2)); - add_nested_tuples(tuple_1, tuple_2); - } - } -}; - -template class SumcheckVerifierRound { - - using Relations = typename Flavor::Relations; - using RelationEvaluations = typename Flavor::RelationValues; - - public: - using FF = typename Flavor::FF; - using ClaimedEvaluations = typename Flavor::ClaimedEvaluations; - - bool round_failed = false; - - Relations relations; - static constexpr size_t NUM_RELATIONS = Flavor::NUM_RELATIONS; - static constexpr size_t MAX_RANDOM_RELATION_LENGTH = Flavor::MAX_RANDOM_RELATION_LENGTH; - - FF target_total_sum = 0; - - RelationEvaluations relation_evaluations; - - // Verifier constructor - explicit SumcheckVerifierRound() { zero_elements(relation_evaluations); }; - - /** - * @brief Calculate the contribution of each relation to the expected value of the full Honk relation. - * - * @details For each relation, use the purported values (supplied by the prover) of the multivariates to calculate - * a contribution to the purported value of the full Honk relation. These are stored in `evaluations`. Adding these - * together, with appropriate scaling factors, produces the expected value of the full Honk relation. This value is - * checked against the final value of the target total sum, defined as sigma_d. 
- */ - FF compute_full_honk_relation_purported_value(ClaimedEvaluations purported_evaluations, - const RelationParameters& relation_parameters, - const PowUnivariate& pow_univariate, - const FF alpha) - { - accumulate_relation_evaluations<>( - purported_evaluations, relation_parameters, pow_univariate.partial_evaluation_constant); - - auto running_challenge = FF(1); - auto output = FF(0); - scale_and_batch_elements(relation_evaluations, alpha, running_challenge, output); - return output; - } - - /** - * @brief check if S^{l}(0) + S^{l}(1) = S^{l-1}(u_{l-1}) = sigma_{l} (or 0 if l=0) - * - * @param univariate T^{l}(X), the round univariate that is equal to S^{l}(X)/( (1−X) + X⋅ζ^{ 2^l } ) - */ - bool check_sum(Univariate& univariate) - { - // S^{l}(0) = ( (1−0) + 0⋅ζ^{ 2^l } ) ⋅ T^{l}(0) = T^{l}(0) - // S^{l}(1) = ( (1−1) + 1⋅ζ^{ 2^l } ) ⋅ T^{l}(1) = ζ^{ 2^l } ⋅ T^{l}(1) - FF total_sum = univariate.value_at(0) + univariate.value_at(1); - // target_total_sum = sigma_{l} = - bool sumcheck_round_failed = (target_total_sum != total_sum); - round_failed = round_failed || sumcheck_round_failed; - return !sumcheck_round_failed; - }; - - /** - * @brief After checking that the univariate is good for this round, compute the next target sum. - * - * @param univariate T^l(X), given by its evaluations over {0,1,2,...}, - * equal to S^{l}(X)/( (1−X) + X⋅ζ^{ 2^l } ) - * @param round_challenge u_l - * @return FF sigma_{l+1} = S^l(u_l) - */ - FF compute_next_target_sum(Univariate& univariate, FF& round_challenge) - { - // IMPROVEMENT(Cody): Use barycentric static method, maybe implement evaluation as member - // function on Univariate. 
- auto barycentric = BarycentricData(); - // Evaluate T^{l}(u_{l}) - target_total_sum = barycentric.evaluate(univariate, round_challenge); - return target_total_sum; - } - - private: - // TODO(#224)(Cody): make uniform with accumulate_relation_univariates - /** - * @brief Calculate the contribution of each relation to the expected value of the full Honk relation. - * - * @details For each relation, use the purported values (supplied by the prover) of the multivariates to calculate - * a contribution to the purported value of the full Honk relation. These are stored in `evaluations`. Adding these - * together, with appropriate scaling factors, produces the expected value of the full Honk relation. This value is - * checked against the final value of the target total sum (called sigma_0 in the thesis). - */ - template - // TODO(#224)(Cody): Input should be an array? - void accumulate_relation_evaluations(ClaimedEvaluations purported_evaluations, - const RelationParameters& relation_parameters, - const FF& partial_evaluation_constant) - { - std::get(relations).add_full_relation_value_contribution( - std::get(relation_evaluations), - purported_evaluations, - relation_parameters, - partial_evaluation_constant); - - // Repeat for the next relation. - if constexpr (relation_idx + 1 < NUM_RELATIONS) { - accumulate_relation_evaluations( - purported_evaluations, relation_parameters, partial_evaluation_constant); - } - } - - public: /** * Utility methods for tuple of arrays */ @@ -521,5 +468,41 @@ template class SumcheckVerifierRound { apply_to_tuple_of_arrays(operation, tuple); } } + + /** + * @brief Componentwise addition of two tuples + * @details Used for adding tuples of Univariates but in general works for any object for which += is + * defined. The result is stored in the first tuple. + * + * @tparam T Type of the elements contained in the tuples + * @param tuple_1 First summand. 
Result stored in this tuple + * @param tuple_2 Second summand + */ + template + static constexpr void add_tuples(std::tuple& tuple_1, const std::tuple& tuple_2) + { + [&](std::index_sequence) { ((std::get(tuple_1) += std::get(tuple_2)), ...); } + (std::make_index_sequence{}); + } + + /** + * @brief Componentwise addition of nested tuples (tuples of tuples) + * @details Used for summing tuples of tuples of Univariates. Needed for Sumcheck multithreading. Each thread + * accumulates relation contributions across a portion of the hypercube and then the results are accumulated into a + * single nested tuple. + * + * @tparam Tuple + * @tparam Index Index into outer tuple + * @param tuple_1 First nested tuple summand. Result stored here + * @param tuple_2 Second summand + */ + template + static constexpr void add_nested_tuples(Tuple& tuple_1, const Tuple& tuple_2) + { + if constexpr (Index < std::tuple_size::value) { + add_tuples(std::get(tuple_1), std::get(tuple_2)); + add_nested_tuples(tuple_1, tuple_2); + } + } }; } // namespace proof_system::honk::sumcheck diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/honk/sumcheck/sumcheck_round.test.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/honk/sumcheck/sumcheck_round.test.cpp index fa6b176e0be8..987f87999d91 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/honk/sumcheck/sumcheck_round.test.cpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/honk/sumcheck/sumcheck_round.test.cpp @@ -38,7 +38,7 @@ static Univariate compute_round_univariate( { size_t round_size = 2; // Improvement(Cody): This is ugly? Maye supply some/all of this data through "flavor" class?
- auto round = SumcheckProverRound(round_size); + auto round = SumcheckRound(round_size); ProverPolynomials full_polynomials; full_polynomials.w_l = input_polynomials[0]; @@ -142,7 +142,7 @@ static FF compute_full_purported_value(std::array& input_va purported_evaluations.lagrange_first = input_values[16]; purported_evaluations.lagrange_last = input_values[17]; - auto round = SumcheckVerifierRound(); + auto round = SumcheckRound(); PowUnivariate pow_univariate(1); FF full_purported_value = round.compute_full_honk_relation_purported_value( purported_evaluations, relation_parameters, pow_univariate, alpha); @@ -298,12 +298,12 @@ TEST(SumcheckRound, TupleOfTuplesOfUnivariates) // Use scale_univariate_accumulators to scale by challenge powers FF challenge = 5; FF running_challenge = 1; - SumcheckProverRound::scale_univariates(tuple_of_tuples, challenge, running_challenge); + SumcheckRound::scale_univariates(tuple_of_tuples, challenge, running_challenge); // Use extend_and_batch_univariates to extend to MAX_LENGTH then accumulate PowUnivariate pow_univariate(1); auto result = Univariate(); - SumcheckProverRound::extend_and_batch_univariates(tuple_of_tuples, pow_univariate, result); + SumcheckRound::extend_and_batch_univariates(tuple_of_tuples, pow_univariate, result); // Repeat the batching process manually auto result_expected = barycentric_util_1.extend(univariate_1) * 1 + @@ -314,7 +314,7 @@ TEST(SumcheckRound, TupleOfTuplesOfUnivariates) EXPECT_EQ(result, result_expected); // Reinitialize univariate accumulators to zero - SumcheckProverRound::zero_univariates(tuple_of_tuples); + SumcheckRound::zero_univariates(tuple_of_tuples); // Check that reinitialization was successful Univariate expected_1({ 0, 0, 0 }); @@ -345,7 +345,7 @@ TEST(SumcheckRound, TuplesOfEvaluationArrays) FF challenge = 5; FF running_challenge = 1; FF result = 0; - SumcheckVerifierRound::scale_and_batch_elements(tuple_of_arrays, challenge, running_challenge, result); + 
SumcheckRound::scale_and_batch_elements(tuple_of_arrays, challenge, running_challenge, result); // Repeat the batching process manually auto result_expected = @@ -355,7 +355,7 @@ TEST(SumcheckRound, TuplesOfEvaluationArrays) EXPECT_EQ(result, result_expected); // Reinitialize univariate accumulators to zero - SumcheckVerifierRound::zero_elements(tuple_of_arrays); + SumcheckRound::zero_elements(tuple_of_arrays); EXPECT_EQ(std::get<0>(tuple_of_arrays)[0], 0); EXPECT_EQ(std::get<1>(tuple_of_arrays)[0], 0); @@ -390,7 +390,7 @@ TEST(SumcheckRound, AddTuplesOfTuplesOfUnivariates) auto tuple_of_tuples_2 = std::make_tuple(std::make_tuple(univariate_4), std::make_tuple(univariate_5, univariate_6)); - SumcheckProverRound::add_nested_tuples(tuple_of_tuples_1, tuple_of_tuples_2); + SumcheckRound::add_nested_tuples(tuple_of_tuples_1, tuple_of_tuples_2); EXPECT_EQ(std::get<0>(std::get<0>(tuple_of_tuples_1)), expected_sum_1); EXPECT_EQ(std::get<0>(std::get<1>(tuple_of_tuples_1)), expected_sum_2); diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/honk/transcript/transcript.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/honk/transcript/transcript.hpp index b503bd49e8dd..1ee7a101af80 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/honk/transcript/transcript.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/honk/transcript/transcript.hpp @@ -65,9 +65,7 @@ class TranscriptManifest { */ template class BaseTranscript { // TODO(Adrian): Make these tweakable - public: static constexpr size_t HASH_OUTPUT_SIZE = 32; - private: static constexpr size_t MIN_BYTES_PER_CHALLENGE = 128 / 8; // 128 bit challenges size_t round_number = 0; diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/goblin_translator_circuit_builder.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/goblin_translator_circuit_builder.cpp deleted file mode 100644 index 445e10cfd495..000000000000 --- 
a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/goblin_translator_circuit_builder.cpp +++ /dev/null @@ -1,275 +0,0 @@ -/** - * @file goblin_translator_circuit_builder.cpp - * @author @Rumata888 - * @brief Circuit Logic generation for Goblin Plonk translator (checks equivalence of Queues/Transcripts for ECCVM and - * Recursive Circuits) - * - * @copyright Copyright (c) 2023 - * - */ -#include "goblin_translator_circuit_builder.hpp" -#include "barretenberg/ecc/curves/bn254/fr.hpp" -namespace proof_system { -template -GoblinTranslatorCircuitBuilder::AccumulationInput generate_witness_values( - Fr op_code, Fr p_x_lo, Fr p_x_hi, Fr p_y_lo, Fr p_y_hi, Fr z_1, Fr z_2, Fq previous_accumulator, Fq v, Fq x) -{ - constexpr size_t NUM_LIMB_BITS = GoblinTranslatorCircuitBuilder::NUM_LIMB_BITS; - constexpr size_t MICRO_LIMB_BITS = GoblinTranslatorCircuitBuilder::MICRO_LIMB_BITS; - constexpr auto shift_1 = GoblinTranslatorCircuitBuilder::SHIFT_1; - constexpr auto shift_2 = GoblinTranslatorCircuitBuilder::SHIFT_2; - // constexpr auto modulus_u512 = GoblinTranslatorCircuitBuilder::MODULUS_U512; - constexpr auto neg_modulus_limbs = GoblinTranslatorCircuitBuilder::NEGATIVE_MODULUS_LIMBS; - constexpr auto shift_2_inverse = GoblinTranslatorCircuitBuilder::SHIFT_2_INVERSE; - - /** - * @brief A small function to transform a native element Fq into its bigfield representation in Fr scalars - * - */ - auto base_element_to_bigfield = [](Fq& original) { - uint256_t original_uint = original; - return std::array({ Fr(original_uint.slice(0, NUM_LIMB_BITS)), - Fr(original_uint.slice(NUM_LIMB_BITS, 2 * NUM_LIMB_BITS)), - Fr(original_uint.slice(2 * NUM_LIMB_BITS, 3 * NUM_LIMB_BITS)), - Fr(original_uint.slice(3 * NUM_LIMB_BITS, 4 * NUM_LIMB_BITS)), - Fr(original_uint) }); - }; - /** - * @brief A small function to transform a uint512_t element into its bigfield representation in Fr scalars - * - */ - auto uint512_t_to_bigfield = [&shift_2](uint512_t& original) { - return 
std::make_tuple(Fr(original.slice(0, NUM_LIMB_BITS).lo), - Fr(original.slice(NUM_LIMB_BITS, 2 * NUM_LIMB_BITS).lo), - Fr(original.slice(2 * NUM_LIMB_BITS, 3 * NUM_LIMB_BITS).lo), - Fr(original.slice(3 * NUM_LIMB_BITS, 4 * NUM_LIMB_BITS).lo), - Fr(original.slice(0, NUM_LIMB_BITS * 2).lo) + - Fr(original.slice(NUM_LIMB_BITS * 2, NUM_LIMB_BITS * 4).lo) * shift_2); - }; - - /** - * @brief A method for splitting wide limbs (P_x_lo, P_y_hi, etc) into two limbs - * - */ - auto split_wide_limb_into_2_limbs = [](Fr& wide_limb) { - return std::make_tuple(Fr(uint256_t(wide_limb).slice(0, NUM_LIMB_BITS)), - Fr(uint256_t(wide_limb).slice(NUM_LIMB_BITS, 2 * NUM_LIMB_BITS))); - }; - auto split_standard_limb_into_micro_limbs = [](Fr& limb) { - return std::array{ - uint256_t(limb).slice(0, MICRO_LIMB_BITS), - uint256_t(limb).slice(MICRO_LIMB_BITS, 2 * MICRO_LIMB_BITS), - uint256_t(limb).slice(2 * MICRO_LIMB_BITS, 3 * MICRO_LIMB_BITS), - uint256_t(limb).slice(3 * MICRO_LIMB_BITS, 4 * MICRO_LIMB_BITS), - uint256_t(limb).slice(4 * MICRO_LIMB_BITS, 5 * MICRO_LIMB_BITS), - uint256_t(limb).slice(5 * MICRO_LIMB_BITS, 6 * MICRO_LIMB_BITS), - }; - }; - // x and powers of v are given to use in challenge form, so the verifier has to deal with this :) - Fq v_squared; - Fq v_cubed; - Fq v_quarted; - v_squared = v * v; - v_cubed = v_squared * v; - v_quarted = v_cubed * v; - - // Convert the accumulator, powers of v and x into "bigfield" form - auto previous_accumulator_witnesses = base_element_to_bigfield(previous_accumulator); - auto v_witnesses = base_element_to_bigfield(v); - auto v_squared_witnesses = base_element_to_bigfield(v_squared); - auto v_cubed_witnesses = base_element_to_bigfield(v_cubed); - auto v_quarted_witnesses = base_element_to_bigfield(v_quarted); - auto x_witnesses = base_element_to_bigfield(x); - - // To calculate the quotient, we need to evaluate the expression in integers. 
So we need uint512_t versions of all - // elements involved - auto uint_previous_accumulator = uint512_t(previous_accumulator); - auto uint_x = uint512_t(x); - auto uint_op = uint512_t(op_code); - auto uint_p_x = uint512_t(uint256_t(p_x_lo) + (uint256_t(p_x_hi) << (NUM_LIMB_BITS << 1))); - auto uint_p_y = uint512_t(uint256_t(p_y_lo) + (uint256_t(p_y_hi) << (NUM_LIMB_BITS << 1))); - auto uint_z_1 = uint512_t(z_1); - auto uint_z_2 = uint512_t(z_2); - auto uint_v = uint512_t(v); - auto uint_v_squared = uint512_t(v_squared); - auto uint_v_cubed = uint512_t(v_cubed); - auto uint_v_quarted = uint512_t(v_quarted); - - // Construct Fq for op, P.x, P.y, z_1, z_2 for use in witness computation - Fq base_op = Fq(uint256_t(op_code)); - Fq base_p_x = Fq(uint256_t(p_x_lo) + (uint256_t(p_x_hi) << (NUM_LIMB_BITS << 1))); - Fq base_p_y = Fq(uint256_t(p_y_lo) + (uint256_t(p_y_hi) << (NUM_LIMB_BITS << 1))); - Fq base_z_1 = Fq(uint256_t(z_1)); - Fq base_z_2 = Fq(uint256_t(z_2)); - - // Construct bigfield representations of P.x and P.y - auto [p_x_0, p_x_1] = split_wide_limb_into_2_limbs(p_x_lo); - auto [p_x_2, p_x_3] = split_wide_limb_into_2_limbs(p_x_hi); - Fr p_x_prime = p_x_lo + p_x_hi * Fr(shift_2); - std::array p_x_witnesses = { p_x_0, p_x_1, p_x_2, p_x_3, p_x_prime }; - auto [p_y_0, p_y_1] = split_wide_limb_into_2_limbs(p_y_lo); - auto [p_y_2, p_y_3] = split_wide_limb_into_2_limbs(p_y_hi); - Fr p_y_prime = p_y_lo + p_y_hi * Fr(shift_2); - std::array p_y_witnesses = { p_y_0, p_y_1, p_y_2, p_y_3, p_y_prime }; - - // Construct bigfield representations of z1 and z2 only using 2 limbs each - // z_1 and z_2 are low enough to act as their own prime limbs - auto [z_1_lo, z_1_hi] = split_wide_limb_into_2_limbs(z_1); - auto [z_2_lo, z_2_hi] = split_wide_limb_into_2_limbs(z_2); - - // The formula is `accumulator = accumulator⋅x + (op + v⋅p.x + v²⋅p.y + v³⋅z₁ + v⁴z₂)`. 
We need to compute the - // remainder (new accumulator value) - - Fq remainder = previous_accumulator * x + base_z_2 * v_quarted + base_z_1 * v_cubed + base_p_y * v_squared + - base_p_x * v + base_op; - uint512_t quotient_by_modulus = uint_previous_accumulator * uint_x + uint_z_2 * uint_v_quarted + - uint_z_1 * uint_v_cubed + uint_p_y * uint_v_squared + uint_p_x * uint_v + uint_op - - uint512_t(remainder); - - uint512_t quotient = quotient_by_modulus / uint512_t(Fq::modulus); - // constexpr uint512_t MAX_CONSTRAINED_SIZE = uint512_t(1) << 254; - // constexpr uint512_t MAX_Z_SIZE = uint512_t(1) << (NUM_LIMB_BITS * 2); - // numeric::uint1024_t max_quotient = - // (uint1024_t(MAX_CONSTRAINED_SIZE) * MAX_CONSTRAINED_SIZE * 3 + MAX_Z_SIZE * MAX_CONSTRAINED_SIZE * 2 + 4) / - // modulus_u512; - // info("Max quotient: ", max_quotient); - // info("Max quotient range constraint: ", max_quotient.get_msb() + 1); - - auto [remainder_0, remainder_1, remainder_2, remainder_3, remainder_prime] = base_element_to_bigfield(remainder); - std::array remainder_witnesses = { remainder_0, remainder_1, remainder_2, remainder_3, remainder_prime }; - auto [quotient_0, quotient_1, quotient_2, quotient_3, quotient_prime] = uint512_t_to_bigfield(quotient); - std::array quotient_witnesses = { quotient_0, quotient_1, quotient_2, quotient_3, quotient_prime }; - - // We will divide by shift_2 instantly in the relation itself, but first we need to compute the low part (0*0) and - // the high part (0*1, 1*0) multiplied by a signle limb shift - Fr low_wide_relation_limb_part_1 = - previous_accumulator_witnesses[0] * x_witnesses[0] + op_code + v_witnesses[0] * p_x_witnesses[0] + - v_squared_witnesses[0] * p_y_witnesses[0] + v_cubed_witnesses[0] * z_1_lo + v_quarted_witnesses[0] * z_2_lo + - quotient_witnesses[0] * neg_modulus_limbs[0] - remainder_witnesses[0]; // This covers the lowest limb - // info("LW1:", low_wide_relation_limb_part_1); - Fr low_wide_relation_limb = - low_wide_relation_limb_part_1 + 
- (previous_accumulator_witnesses[1] * x_witnesses[0] + previous_accumulator_witnesses[0] * x_witnesses[1] + - v_witnesses[1] * p_x_witnesses[0] + p_x_witnesses[1] * v_witnesses[0] + - v_squared_witnesses[1] * p_y_witnesses[0] + v_squared_witnesses[0] * p_y_witnesses[1] + - v_cubed_witnesses[1] * z_1_lo + z_1_hi * v_cubed_witnesses[0] + v_quarted_witnesses[1] * z_2_lo + - v_quarted_witnesses[0] * z_2_hi + quotient_witnesses[0] * neg_modulus_limbs[1] + - quotient_witnesses[1] * neg_modulus_limbs[0] - remainder_witnesses[1]) * - shift_1; // And this covers the limb shifted by 68 - // for (auto& limb : quotient_witnesses) { - // info("Q: ", limb); - // } - // Treating accumulator as 254-bit constrained value - // constexpr auto max_limb_size = (uint512_t(1) << NUM_LIMB_BITS) - 1; - // constexpr auto shift_1_u512 = uint512_t(shift_1); - // constexpr auto op_max_size = uint512_t(4); - // constexpr uint512_t low_wide_limb_maximum_value = - // op_max_size + (max_limb_size * max_limb_size) * ((shift_1_u512 * 12) + 6); - // constexpr uint512_t low_wide_limb_maximum_value_constraint = - // (low_wide_limb_maximum_value >> (2 * NUM_LIMB_BITS)).lo + - // uint256_t(uint64_t((low_wide_limb_maximum_value % uint512_t(1) << (2 * NUM_LIMB_BITS)) != 0)); - // constexpr auto low_wide_limb_range_consraint_size = low_wide_limb_maximum_value_constraint.get_msb() + 1; - // info("Low limb range constraint: ", low_wide_limb_range_consraint_size); - // Low bits have to be zero - ASSERT(uint256_t(low_wide_relation_limb).slice(0, 2 * NUM_LIMB_BITS) == 0); - - Fr low_wide_relation_limb_divided = low_wide_relation_limb * shift_2_inverse; - // We need to range constrain the low_wide_relation_limb_divided - // constexpr size_t NUM_LAST_BN254_LIMB_BITS = modulus_u512.get_msb() + 1 - NUM_LIMB_BITS * 3; - - // constexpr auto max_high_limb_size = (uint512_t(1) << NUM_LAST_BN254_LIMB_BITS) - 1; - // constexpr uint512_t high_wide_limb_maximum_value = - // low_wide_limb_maximum_value_constraint + 
(max_limb_size * max_limb_size) * 16 + - // (max_limb_size * max_limb_size * 10 + max_limb_size * max_high_limb_size * 10) * shift_1_u512; - // constexpr uint512_t high_wide_limb_maximum_value_constraint = - // (high_wide_limb_maximum_value >> (2 * NUM_LIMB_BITS)).lo + - // uint256_t(uint64_t((high_wide_limb_maximum_value % uint512_t(1) << (2 * NUM_LIMB_BITS)) != 0)); - // constexpr auto high_wide_limb_range_constraint_size = high_wide_limb_maximum_value_constraint.get_msb() + 1; - // info(high_wide_limb_range_constraint_size); - // 4 high combinations = 8 ml*ml + 8 ml*last_ml. 2 low combinations = 2*ml*ml + 2*ml*last_ml - Fr high_wide_relation_limb = - low_wide_relation_limb_divided + previous_accumulator_witnesses[2] * x_witnesses[0] + - previous_accumulator_witnesses[1] * x_witnesses[1] + previous_accumulator_witnesses[0] * x_witnesses[2] + - v_witnesses[2] * p_x_witnesses[0] + v_witnesses[1] * p_x_witnesses[1] + v_witnesses[0] * p_x_witnesses[2] + - v_squared_witnesses[2] * p_y_witnesses[0] + v_squared_witnesses[1] * p_y_witnesses[1] + - v_squared_witnesses[0] * p_y_witnesses[2] + v_cubed_witnesses[2] * z_1_lo + v_cubed_witnesses[1] * z_1_hi + - v_quarted_witnesses[2] * z_2_lo + v_quarted_witnesses[1] * z_2_hi + - quotient_witnesses[2] * neg_modulus_limbs[0] + quotient_witnesses[1] * neg_modulus_limbs[1] + - quotient_witnesses[0] * neg_modulus_limbs[2] - remainder_witnesses[2] + - (previous_accumulator_witnesses[3] * x_witnesses[0] + previous_accumulator_witnesses[2] * x_witnesses[1] + - previous_accumulator_witnesses[1] * x_witnesses[2] + previous_accumulator_witnesses[0] * x_witnesses[3] + - v_witnesses[3] * p_x_witnesses[0] + v_witnesses[2] * p_x_witnesses[1] + v_witnesses[1] * p_x_witnesses[2] + - v_witnesses[0] * p_x_witnesses[3] + v_squared_witnesses[3] * p_y_witnesses[0] + - v_squared_witnesses[2] * p_y_witnesses[1] + v_squared_witnesses[1] * p_y_witnesses[2] + - v_squared_witnesses[0] * p_y_witnesses[3] + v_cubed_witnesses[3] * z_1_lo + 
v_cubed_witnesses[2] * z_1_hi + - v_quarted_witnesses[3] * z_2_lo + v_quarted_witnesses[2] * z_2_hi + - quotient_witnesses[3] * neg_modulus_limbs[0] + quotient_witnesses[2] * neg_modulus_limbs[1] + - quotient_witnesses[1] * neg_modulus_limbs[2] + quotient_witnesses[0] * neg_modulus_limbs[3] - - remainder_witnesses[3]) * - shift_1; - // info("Value: ", high_wide_relation_limb); - // info("Value: ", high_wide_relation_limb * shift_2_inverse); - ASSERT(uint256_t(high_wide_relation_limb).slice(0, 2 * NUM_LIMB_BITS) == 0); - - GoblinTranslatorCircuitBuilder::AccumulationInput input{ - .op_code = op_code, - .P_x_lo = p_x_lo, - .P_x_hi = p_x_hi, - .P_x_limbs = p_x_witnesses, - .P_x_microlimbs = {}, - .P_y_lo = p_y_lo, - .P_y_hi = p_y_hi, - .P_y_limbs = p_y_witnesses, - .P_y_microlimbs = {}, - .z_1 = z_1, - .z_1_limbs = { z_1_lo, z_1_hi }, - .z_1_microlimbs = {}, - .z_2 = z_2, - .z_2_limbs = { z_2_lo, z_2_hi }, - .z_2_microlimbs = {}, - .previous_accumulator = previous_accumulator_witnesses, - .current_accumulator = remainder_witnesses, - .current_accumulator_microlimbs = {}, - .quotient_binary_limbs = quotient_witnesses, - .quotient_microlimbs = {}, - .relation_wide_limbs = { low_wide_relation_limb_divided, high_wide_relation_limb * shift_2_inverse }, - .x_limbs = x_witnesses, - .v_limbs = v_witnesses, - .v_squared_limbs = v_squared_witnesses, - .v_cubed_limbs = v_cubed_witnesses, - .v_quarted_limbs = v_quarted_witnesses, - - }; - for (size_t i = 0; i < GoblinTranslatorCircuitBuilder::NUM_BINARY_LIMBS; i++) { - input.P_x_microlimbs[i] = split_standard_limb_into_micro_limbs(input.P_x_limbs[i]); - } - for (size_t i = 0; i < GoblinTranslatorCircuitBuilder::NUM_BINARY_LIMBS; i++) { - input.P_y_microlimbs[i] = split_standard_limb_into_micro_limbs(input.P_y_limbs[i]); - } - - for (size_t i = 0; i < GoblinTranslatorCircuitBuilder::NUM_Z_LIMBS; i++) { - input.z_1_microlimbs[i] = split_standard_limb_into_micro_limbs(input.z_1_limbs[i]); - input.z_2_microlimbs[i] = 
split_standard_limb_into_micro_limbs(input.z_2_limbs[i]); - } - for (size_t i = 0; i < GoblinTranslatorCircuitBuilder::NUM_BINARY_LIMBS; i++) { - input.current_accumulator_microlimbs[i] = split_standard_limb_into_micro_limbs(input.current_accumulator[i]); - // info("Stored: ", single_accumulation_step.current_accumulator_microlimbs[i][5], " at ", i); - } - for (size_t i = 0; i < GoblinTranslatorCircuitBuilder::NUM_BINARY_LIMBS; i++) { - input.quotient_microlimbs[i] = split_standard_limb_into_micro_limbs(input.quotient_binary_limbs[i]); - // info("Stored: ", single_accumulation_step.current_accumulator_microlimbs[i][5], " at ", i); - } - return input; -} -template GoblinTranslatorCircuitBuilder::AccumulationInput generate_witness_values( - barretenberg::fr op_code, - barretenberg::fr p_x_lo, - barretenberg::fr p_x_hi, - barretenberg::fr p_y_lo, - barretenberg::fr p_y_hi, - barretenberg::fr z_1, - barretenberg::fr z_2, - barretenberg::fq previous_accumulator, - barretenberg::fq v, - barretenberg::fq x); -} // namespace proof_system \ No newline at end of file diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/goblin_translator_circuit_builder.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/goblin_translator_circuit_builder.hpp index 052a986238a1..49edf02b275e 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/goblin_translator_circuit_builder.hpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/goblin_translator_circuit_builder.hpp @@ -299,18 +299,12 @@ class GoblinTranslatorCircuitBuilder : CircuitBuilderBase(const std::array& limbs, - bool relaxed_last_limb = false) { + auto check_binary_limbs_maximum_values = [](const std::array& limbs) { if constexpr (total_limbs == (NUM_BINARY_LIMBS + 1)) { for (size_t i = 0; i < NUM_BINARY_LIMBS - 1; i++) { ASSERT(uint256_t(limbs[i]) < SHIFT_1); } - if (!relaxed_last_limb) { - 
ASSERT(uint256_t(limbs[NUM_BINARY_LIMBS - 1]) < (uint256_t(1) << NUM_LAST_LIMB_BITS)); - } else { - - ASSERT(uint256_t(limbs[NUM_BINARY_LIMBS - 1]) < (SHIFT_1)); - } + ASSERT(uint256_t(limbs[NUM_BINARY_LIMBS - 1]) < (uint256_t(1) << NUM_LAST_LIMB_BITS)); } else { for (size_t i = 0; i < total_limbs; i++) { ASSERT(uint256_t(limbs[i]) < SHIFT_1); @@ -338,7 +332,7 @@ class GoblinTranslatorCircuitBuilder : CircuitBuilderBase -GoblinTranslatorCircuitBuilder::AccumulationInput generate_witness_values( - Fr op_code, Fr p_x_lo, Fr p_x_hi, Fr p_y_lo, Fr p_y_hi, Fr z_1, Fr z_2, Fq previous_accumulator, Fq v, Fq x); -extern template GoblinTranslatorCircuitBuilder::AccumulationInput generate_witness_values( - barretenberg::fr op_code, - barretenberg::fr p_x_lo, - barretenberg::fr p_x_hi, - barretenberg::fr p_y_lo, - barretenberg::fr p_y_hi, - barretenberg::fr z_1, - barretenberg::fr z_2, - barretenberg::fq previous_accumulator, - barretenberg::fq v, - barretenberg::fq x); } // namespace proof_system \ No newline at end of file diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/goblin_translator_circuit_builder.test.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/goblin_translator_circuit_builder.test.cpp index ca4ac215e785..b7f90a260369 100644 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/goblin_translator_circuit_builder.test.cpp +++ b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/goblin_translator_circuit_builder.test.cpp @@ -1,5 +1,5 @@ -#include "goblin_translator_circuit_builder.hpp" #include "barretenberg/ecc/curves/bn254/bn254.hpp" +#include "goblin_translator_circuit_builder.hpp" #include #include #include @@ -9,7 +9,257 @@ namespace { auto& engine = numeric::random::get_debug_engine(); } namespace proof_system { +template +GoblinTranslatorCircuitBuilder::AccumulationInput generate_witness_values( + Fr op_code, Fr p_x_lo, Fr p_x_hi, 
Fr p_y_lo, Fr p_y_hi, Fr z_1, Fr z_2, Fq previous_accumulator, Fq v, Fq x) +{ + constexpr size_t NUM_LIMB_BITS = GoblinTranslatorCircuitBuilder::NUM_LIMB_BITS; + constexpr size_t MICRO_LIMB_BITS = GoblinTranslatorCircuitBuilder::MICRO_LIMB_BITS; + constexpr auto shift_1 = GoblinTranslatorCircuitBuilder::SHIFT_1; + constexpr auto shift_2 = GoblinTranslatorCircuitBuilder::SHIFT_2; + constexpr auto modulus_u512 = GoblinTranslatorCircuitBuilder::MODULUS_U512; + constexpr auto neg_modulus_limbs = GoblinTranslatorCircuitBuilder::NEGATIVE_MODULUS_LIMBS; + constexpr auto shift_2_inverse = GoblinTranslatorCircuitBuilder::SHIFT_2_INVERSE; + + /** + * @brief A small function to transform a native element Fq into its bigfield representation in Fr scalars + * + */ + auto base_element_to_bigfield = [](Fq& original) { + uint256_t original_uint = original; + return std::array({ Fr(original_uint.slice(0, NUM_LIMB_BITS)), + Fr(original_uint.slice(NUM_LIMB_BITS, 2 * NUM_LIMB_BITS)), + Fr(original_uint.slice(2 * NUM_LIMB_BITS, 3 * NUM_LIMB_BITS)), + Fr(original_uint.slice(3 * NUM_LIMB_BITS, 4 * NUM_LIMB_BITS)), + Fr(original_uint) }); + }; + /** + * @brief A small function to transform a uint512_t element into its bigfield representation in Fr scalars + * + */ + auto uint512_t_to_bigfield = [&shift_2](uint512_t& original) { + return std::make_tuple(Fr(original.slice(0, NUM_LIMB_BITS).lo), + Fr(original.slice(NUM_LIMB_BITS, 2 * NUM_LIMB_BITS).lo), + Fr(original.slice(2 * NUM_LIMB_BITS, 3 * NUM_LIMB_BITS).lo), + Fr(original.slice(3 * NUM_LIMB_BITS, 4 * NUM_LIMB_BITS).lo), + Fr(original.slice(0, NUM_LIMB_BITS * 2).lo) + + Fr(original.slice(NUM_LIMB_BITS * 2, NUM_LIMB_BITS * 4).lo) * shift_2); + }; + + /** + * @brief A method for splitting wide limbs (P_x_lo, P_y_hi, etc) into two limbs + * + */ + auto split_wide_limb_into_2_limbs = [](Fr& wide_limb) { + return std::make_tuple(Fr(uint256_t(wide_limb).slice(0, NUM_LIMB_BITS)), + Fr(uint256_t(wide_limb).slice(NUM_LIMB_BITS, 2 * 
NUM_LIMB_BITS))); + }; + auto split_standard_limb_into_micro_limbs = [](Fr& limb) { + return std::array{ + uint256_t(limb).slice(0, MICRO_LIMB_BITS), + uint256_t(limb).slice(MICRO_LIMB_BITS, 2 * MICRO_LIMB_BITS), + uint256_t(limb).slice(2 * MICRO_LIMB_BITS, 3 * MICRO_LIMB_BITS), + uint256_t(limb).slice(3 * MICRO_LIMB_BITS, 4 * MICRO_LIMB_BITS), + uint256_t(limb).slice(4 * MICRO_LIMB_BITS, 5 * MICRO_LIMB_BITS), + uint256_t(limb).slice(5 * MICRO_LIMB_BITS, 6 * MICRO_LIMB_BITS), + }; + }; + // x and powers of v are given to use in challenge form, so the verifier has to deal with this :) + Fq v_squared; + Fq v_cubed; + Fq v_quarted; + v_squared = v * v; + v_cubed = v_squared * v; + v_quarted = v_cubed * v; + + // Convert the accumulator, powers of v and x into "bigfield" form + auto previous_accumulator_witnesses = base_element_to_bigfield(previous_accumulator); + auto v_witnesses = base_element_to_bigfield(v); + auto v_squared_witnesses = base_element_to_bigfield(v_squared); + auto v_cubed_witnesses = base_element_to_bigfield(v_cubed); + auto v_quarted_witnesses = base_element_to_bigfield(v_quarted); + auto x_witnesses = base_element_to_bigfield(x); + + // To calculate the quotient, we need to evaluate the expression in integers. 
So we need uint512_t versions of all + // elements involved + auto uint_previous_accumulator = uint512_t(previous_accumulator); + auto uint_x = uint512_t(x); + auto uint_op = uint512_t(op_code); + auto uint_p_x = uint512_t(uint256_t(p_x_lo) + (uint256_t(p_x_hi) << (NUM_LIMB_BITS << 1))); + auto uint_p_y = uint512_t(uint256_t(p_y_lo) + (uint256_t(p_y_hi) << (NUM_LIMB_BITS << 1))); + auto uint_z_1 = uint512_t(z_1); + auto uint_z_2 = uint512_t(z_2); + auto uint_v = uint512_t(v); + auto uint_v_squared = uint512_t(v_squared); + auto uint_v_cubed = uint512_t(v_cubed); + auto uint_v_quarted = uint512_t(v_quarted); + + // Construct Fq for op, P.x, P.y, z_1, z_2 for use in witness computation + Fq base_op = Fq(uint256_t(op_code)); + Fq base_p_x = Fq(uint256_t(p_x_lo) + (uint256_t(p_x_hi) << (NUM_LIMB_BITS << 1))); + Fq base_p_y = Fq(uint256_t(p_y_lo) + (uint256_t(p_y_hi) << (NUM_LIMB_BITS << 1))); + Fq base_z_1 = Fq(uint256_t(z_1)); + Fq base_z_2 = Fq(uint256_t(z_2)); + + // Construct bigfield representations of P.x and P.y + auto [p_x_0, p_x_1] = split_wide_limb_into_2_limbs(p_x_lo); + auto [p_x_2, p_x_3] = split_wide_limb_into_2_limbs(p_x_hi); + Fr p_x_prime = p_x_lo + p_x_hi * Fr(shift_2); + std::array p_x_witnesses = { p_x_0, p_x_1, p_x_2, p_x_3, p_x_prime }; + auto [p_y_0, p_y_1] = split_wide_limb_into_2_limbs(p_y_lo); + auto [p_y_2, p_y_3] = split_wide_limb_into_2_limbs(p_y_hi); + Fr p_y_prime = p_y_lo + p_y_hi * Fr(shift_2); + std::array p_y_witnesses = { p_y_0, p_y_1, p_y_2, p_y_3, p_y_prime }; + + // Construct bigfield representations of z1 and z2 only using 2 limbs each + // z_1 and z_2 are low enough to act as their own prime limbs + auto [z_1_lo, z_1_hi] = split_wide_limb_into_2_limbs(z_1); + auto [z_2_lo, z_2_hi] = split_wide_limb_into_2_limbs(z_2); + // The formula is `accumulator = accumulator⋅x + (op + v⋅p.x + v²⋅p.y + v³⋅z₁ + v⁴z₂)`. 
We need to compute the + // remainder (new accumulator value) + + Fq remainder = previous_accumulator * x + base_z_2 * v_quarted + base_z_1 * v_cubed + base_p_y * v_squared + + base_p_x * v + base_op; + uint512_t quotient_by_modulus = uint_previous_accumulator * uint_x + uint_z_2 * uint_v_quarted + + uint_z_1 * uint_v_cubed + uint_p_y * uint_v_squared + uint_p_x * uint_v + uint_op - + uint512_t(remainder); + + uint512_t quotient = quotient_by_modulus / uint512_t(Fq::modulus); + constexpr uint512_t MAX_CONSTRAINED_SIZE = uint512_t(1) << 254; + constexpr uint512_t MAX_Z_SIZE = uint512_t(1) << (NUM_LIMB_BITS * 2); + numeric::uint1024_t max_quotient = + (uint1024_t(MAX_CONSTRAINED_SIZE) * MAX_CONSTRAINED_SIZE * 3 + MAX_Z_SIZE * MAX_CONSTRAINED_SIZE * 2 + 4) / + modulus_u512; + info("Max quotient: ", max_quotient); + info("Max quotient range constraint: ", max_quotient.get_msb() + 1); + + auto [remainder_0, remainder_1, remainder_2, remainder_3, remainder_prime] = base_element_to_bigfield(remainder); + std::array remainder_witnesses = { remainder_0, remainder_1, remainder_2, remainder_3, remainder_prime }; + auto [quotient_0, quotient_1, quotient_2, quotient_3, quotient_prime] = uint512_t_to_bigfield(quotient); + std::array quotient_witnesses = { quotient_0, quotient_1, quotient_2, quotient_3, quotient_prime }; + + // We will divide by shift_2 instantly in the relation itself, but first we need to compute the low part (0*0) and + // the high part (0*1, 1*0) multiplied by a single limb shift + Fr low_wide_relation_limb_part_1 = + previous_accumulator_witnesses[0] * x_witnesses[0] + op_code + v_witnesses[0] * p_x_witnesses[0] + + v_squared_witnesses[0] * p_y_witnesses[0] + v_cubed_witnesses[0] * z_1_lo + v_quarted_witnesses[0] * z_2_lo + + quotient_witnesses[0] * neg_modulus_limbs[0] - remainder_witnesses[0]; // This covers the lowest limb + info("LW1:", low_wide_relation_limb_part_1); + Fr low_wide_relation_limb = + low_wide_relation_limb_part_1 + +
(previous_accumulator_witnesses[1] * x_witnesses[0] + previous_accumulator_witnesses[0] * x_witnesses[1] + + v_witnesses[1] * p_x_witnesses[0] + p_x_witnesses[1] * v_witnesses[0] + + v_squared_witnesses[1] * p_y_witnesses[0] + v_squared_witnesses[0] * p_y_witnesses[1] + + v_cubed_witnesses[1] * z_1_lo + z_1_hi * v_cubed_witnesses[0] + v_quarted_witnesses[1] * z_2_lo + + v_quarted_witnesses[0] * z_2_hi + quotient_witnesses[0] * neg_modulus_limbs[1] + + quotient_witnesses[1] * neg_modulus_limbs[0] - remainder_witnesses[1]) * + shift_1; // And this covers the limb shifted by 68 + for (auto& limb : quotient_witnesses) { + info("Q: ", limb); + } + // Treating accumulator as 254-bit constrained value + constexpr auto max_limb_size = (uint512_t(1) << NUM_LIMB_BITS) - 1; + constexpr auto shift_1_u512 = uint512_t(shift_1); + constexpr auto op_max_size = uint512_t(4); + constexpr uint512_t low_wide_limb_maximum_value = + op_max_size + (max_limb_size * max_limb_size) * ((shift_1_u512 * 12) + 6); + constexpr uint512_t low_wide_limb_maximum_value_constraint = + (low_wide_limb_maximum_value >> (2 * NUM_LIMB_BITS)).lo + + uint256_t(uint64_t((low_wide_limb_maximum_value % uint512_t(1) << (2 * NUM_LIMB_BITS)) != 0)); + constexpr auto low_wide_limb_range_consraint_size = low_wide_limb_maximum_value_constraint.get_msb() + 1; + info("Low limb range constraint: ", low_wide_limb_range_consraint_size); + // Low bits have to be zero + ASSERT(uint256_t(low_wide_relation_limb).slice(0, 2 * NUM_LIMB_BITS) == 0); + + Fr low_wide_relation_limb_divided = low_wide_relation_limb * shift_2_inverse; + // We need to range constrain the low_wide_relation_limb_divided + constexpr size_t NUM_LAST_BN254_LIMB_BITS = modulus_u512.get_msb() + 1 - NUM_LIMB_BITS * 3; + + constexpr auto max_high_limb_size = (uint512_t(1) << NUM_LAST_BN254_LIMB_BITS) - 1; + constexpr uint512_t high_wide_limb_maximum_value = + low_wide_limb_maximum_value_constraint + (max_limb_size * max_limb_size) * 16 + + (max_limb_size * 
max_limb_size * 10 + max_limb_size * max_high_limb_size * 10) * shift_1_u512; + constexpr uint512_t high_wide_limb_maximum_value_constraint = + (high_wide_limb_maximum_value >> (2 * NUM_LIMB_BITS)).lo + + uint256_t(uint64_t((high_wide_limb_maximum_value % uint512_t(1) << (2 * NUM_LIMB_BITS)) != 0)); + constexpr auto high_wide_limb_range_constraint_size = high_wide_limb_maximum_value_constraint.get_msb() + 1; + info(high_wide_limb_range_constraint_size); + // 4 high combinations = 8 ml*ml + 8 ml*last_ml. 2 low combinations = 2*ml*ml + 2*ml*last_ml + Fr high_wide_relation_limb = + low_wide_relation_limb_divided + previous_accumulator_witnesses[2] * x_witnesses[0] + + previous_accumulator_witnesses[1] * x_witnesses[1] + previous_accumulator_witnesses[0] * x_witnesses[2] + + v_witnesses[2] * p_x_witnesses[0] + v_witnesses[1] * p_x_witnesses[1] + v_witnesses[0] * p_x_witnesses[2] + + v_squared_witnesses[2] * p_y_witnesses[0] + v_squared_witnesses[1] * p_y_witnesses[1] + + v_squared_witnesses[0] * p_y_witnesses[2] + v_cubed_witnesses[2] * z_1_lo + v_cubed_witnesses[1] * z_1_hi + + v_quarted_witnesses[2] * z_2_lo + v_quarted_witnesses[1] * z_2_hi + + quotient_witnesses[2] * neg_modulus_limbs[0] + quotient_witnesses[1] * neg_modulus_limbs[1] + + quotient_witnesses[0] * neg_modulus_limbs[2] - remainder_witnesses[2] + + (previous_accumulator_witnesses[3] * x_witnesses[0] + previous_accumulator_witnesses[2] * x_witnesses[1] + + previous_accumulator_witnesses[1] * x_witnesses[2] + previous_accumulator_witnesses[0] * x_witnesses[3] + + v_witnesses[3] * p_x_witnesses[0] + v_witnesses[2] * p_x_witnesses[1] + v_witnesses[1] * p_x_witnesses[2] + + v_witnesses[0] * p_x_witnesses[3] + v_squared_witnesses[3] * p_y_witnesses[0] + + v_squared_witnesses[2] * p_y_witnesses[1] + v_squared_witnesses[1] * p_y_witnesses[2] + + v_squared_witnesses[0] * p_y_witnesses[3] + v_cubed_witnesses[3] * z_1_lo + v_cubed_witnesses[2] * z_1_hi + + v_quarted_witnesses[3] * z_2_lo + v_quarted_witnesses[2] * 
z_2_hi + + quotient_witnesses[3] * neg_modulus_limbs[0] + quotient_witnesses[2] * neg_modulus_limbs[1] + + quotient_witnesses[1] * neg_modulus_limbs[2] + quotient_witnesses[0] * neg_modulus_limbs[3] - + remainder_witnesses[3]) * + shift_1; + info("Value: ", high_wide_relation_limb); + info("Value: ", high_wide_relation_limb * shift_2_inverse); + ASSERT(uint256_t(high_wide_relation_limb).slice(0, 2 * NUM_LIMB_BITS) == 0); + + GoblinTranslatorCircuitBuilder::AccumulationInput input{ + .op_code = op_code, + .P_x_lo = p_x_lo, + .P_x_hi = p_x_hi, + .P_x_limbs = p_x_witnesses, + .P_x_microlimbs = {}, + .P_y_lo = p_y_lo, + .P_y_hi = p_y_hi, + .P_y_limbs = p_y_witnesses, + .P_y_microlimbs = {}, + .z_1 = z_1, + .z_1_limbs = { z_1_lo, z_1_hi }, + .z_1_microlimbs = {}, + .z_2 = z_2, + .z_2_limbs = { z_2_lo, z_2_hi }, + .z_2_microlimbs = {}, + .previous_accumulator = previous_accumulator_witnesses, + .current_accumulator = remainder_witnesses, + .current_accumulator_microlimbs = {}, + .quotient_binary_limbs = quotient_witnesses, + .quotient_microlimbs = {}, + .relation_wide_limbs = { low_wide_relation_limb_divided, high_wide_relation_limb * shift_2_inverse }, + .x_limbs = x_witnesses, + .v_limbs = v_witnesses, + .v_squared_limbs = v_squared_witnesses, + .v_cubed_limbs = v_cubed_witnesses, + .v_quarted_limbs = v_quarted_witnesses, + + }; + for (size_t i = 0; i < GoblinTranslatorCircuitBuilder::NUM_BINARY_LIMBS; i++) { + input.P_x_microlimbs[i] = split_standard_limb_into_micro_limbs(input.P_x_limbs[i]); + } + for (size_t i = 0; i < GoblinTranslatorCircuitBuilder::NUM_BINARY_LIMBS; i++) { + input.P_y_microlimbs[i] = split_standard_limb_into_micro_limbs(input.P_y_limbs[i]); + } + + for (size_t i = 0; i < GoblinTranslatorCircuitBuilder::NUM_Z_LIMBS; i++) { + input.z_1_microlimbs[i] = split_standard_limb_into_micro_limbs(input.z_1_limbs[i]); + input.z_2_microlimbs[i] = split_standard_limb_into_micro_limbs(input.z_2_limbs[i]); + } + for (size_t i = 0; i < 
GoblinTranslatorCircuitBuilder::NUM_BINARY_LIMBS; i++) { + input.current_accumulator_microlimbs[i] = split_standard_limb_into_micro_limbs(input.current_accumulator[i]); + // info("Stored: ", single_accumulation_step.current_accumulator_microlimbs[i][5], " at ", i); + } + for (size_t i = 0; i < GoblinTranslatorCircuitBuilder::NUM_BINARY_LIMBS; i++) { + input.quotient_microlimbs[i] = split_standard_limb_into_micro_limbs(input.quotient_binary_limbs[i]); + // info("Stored: ", single_accumulation_step.current_accumulator_microlimbs[i][5], " at ", i); + } + return input; +} TEST(translator_circuit_builder, scoping_out_the_circuit) { // Questions: diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/goblin_translator_mini.fuzzer.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/goblin_translator_mini.fuzzer.cpp deleted file mode 100644 index 7aeec7e4f011..000000000000 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/goblin_translator_mini.fuzzer.cpp +++ /dev/null @@ -1,44 +0,0 @@ -#include "barretenberg/numeric/uint256/uint256.hpp" -#include "goblin_translator_circuit_builder.hpp" -using Fr = ::curve::BN254::ScalarField; -using Fq = ::curve::BN254::BaseField; - -extern "C" int LLVMFuzzerTestOneInput(const unsigned char* data, size_t size) -{ - constexpr size_t NUM_LIMB_BITS = proof_system::GoblinTranslatorCircuitBuilder::NUM_LIMB_BITS; - constexpr size_t WIDE_LIMB_BYTES = 2 * NUM_LIMB_BITS / 8; - constexpr size_t TOTAL_SIZE = 1 + 5 * sizeof(numeric::uint256_t) + 2 * WIDE_LIMB_BYTES; - char buffer[32] = { 0 }; - if (size < (TOTAL_SIZE)) { - return 0; - } - Fr op; - op = Fr(data[0] & 3); - - Fq p_x = Fq(*(uint256_t*)(data + 1)); - Fr p_x_lo = uint256_t(p_x).slice(0, 2 * NUM_LIMB_BITS); - Fr p_x_hi = uint256_t(p_x).slice(2 * NUM_LIMB_BITS, 4 * NUM_LIMB_BITS); - - Fq p_y = Fq(*(uint256_t*)(data + sizeof(uint256_t) + 1)); - Fr p_y_lo = uint256_t(p_y).slice(0, 2 * 
NUM_LIMB_BITS); - Fr p_y_hi = uint256_t(p_y).slice(2 * NUM_LIMB_BITS, 4 * NUM_LIMB_BITS); - - Fq v = Fq(*(uint256_t*)(data + 2 * sizeof(uint256_t) + 1)); - Fq x = Fq(*(uint256_t*)(data + 3 * sizeof(uint256_t) + 1)); - Fq previous_accumulator = Fq(*(uint256_t*)(data + 4 * sizeof(uint256_t) + 1)); - - memcpy(buffer, data + 1 + 5 * sizeof(uint256_t), WIDE_LIMB_BYTES); - Fr z_1 = Fr(*(uint256_t*)(buffer)); - memcpy(buffer, data + 1 + 5 * sizeof(uint256_t) + WIDE_LIMB_BYTES, WIDE_LIMB_BYTES); - Fr z_2 = Fr(*(uint256_t*)(buffer)); - - proof_system::GoblinTranslatorCircuitBuilder::AccumulationInput single_accumulation_step = - proof_system::generate_witness_values(op, p_x_lo, p_x_hi, p_y_lo, p_y_hi, z_1, z_2, previous_accumulator, v, x); - - auto circuit_builder = proof_system::GoblinTranslatorCircuitBuilder(); - circuit_builder.create_accumulation_gate(single_accumulation_step); - if (!circuit_builder.check_circuit(x, v)) { - return 1; - } - return 0; -} \ No newline at end of file diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/recursion/honk/transcript/trancript.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/recursion/honk/transcript/trancript.hpp deleted file mode 100644 index be4707af4a56..000000000000 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/recursion/honk/transcript/trancript.hpp +++ /dev/null @@ -1,98 +0,0 @@ -#pragma once - -#include "barretenberg/ecc/curves/bn254/fq.hpp" -#include "barretenberg/ecc/curves/bn254/fr.hpp" -#include "barretenberg/ecc/curves/bn254/g1.hpp" -#include "barretenberg/honk/sumcheck/polynomials/univariate.hpp" -#include "barretenberg/honk/transcript/transcript.hpp" - -#include "barretenberg/stdlib/primitives/bigfield/bigfield.hpp" -#include "barretenberg/stdlib/primitives/biggroup/biggroup.hpp" -#include "barretenberg/stdlib/primitives/field/field.hpp" -#include "barretenberg/stdlib/utility/utility.hpp" - -//TODO(luke): this namespace will be sensible once stdlib is moved out of the plonk 
namespace -namespace proof_system::plonk::stdlib::recursion::honk { -template class Transcript { - public: - using field_pt = field_t; - using FF = barretenberg::fr; - using VerifierTranscript = proof_system::honk::VerifierTranscript; - using StdlibTypes = utility::StdlibTypesUtility; - - static constexpr size_t HASH_OUTPUT_SIZE = VerifierTranscript::HASH_OUTPUT_SIZE; - - VerifierTranscript native_transcript; - Builder* builder; - - Transcript(Builder* builder, auto proof_data) - : native_transcript(proof_data) - , builder(builder){}; - - /** - * @brief Get the underlying native transcript manifest (primarily for debugging) - * - */ - auto get_manifest() const { return native_transcript.get_manifest(); }; - - /** - * @brief Compute the challenges (more than 1) indicated by labels - * - * @tparam Strings - * @param labels Names of the challenges to be computed - * @return std::array Array of challenges - */ - template std::array get_challenges(const Strings&... labels) - { - // Compute the indicated challenges from the native transcript - constexpr size_t num_challenges = sizeof...(Strings); - std::array native_challenges{}; - native_challenges = native_transcript.get_challenges(labels...); - - /* - * TODO(#1351): Do stdlib hashing here. E.g., for the current pedersen/blake setup, we could write data into a - * byte_array as it is received from prover, then compress via pedersen and apply blake3s. Not doing this now - * since it's a pain and we'll be revamping our hashing anyway. For now, simply convert the native hashes to - * stdlib types without adding any hashing constraints. 
- */ - std::array challenges; - for (size_t i = 0; i < num_challenges; ++i) { - challenges[i] = native_challenges[i]; - } - - return challenges; - } - - /** - * @brief Compute the single challenge indicated by the input label - * - * @param label Name of challenge - * @return field_pt Challenge - */ - field_pt get_challenge(const std::string& label) - { - // Compute the indicated challenge from the native transcript - auto native_challenge = native_transcript.get_challenge(label); - - // TODO(1351): Stdlib hashing here... - - return field_pt(native_challenge); - } - - /** - * @brief Extract a native element from the transcript and return a corresponding stdlib type - * - * @tparam T Type of the native element to be extracted - * @param label Name of the element - * @return The corresponding element of appropriate stdlib type - */ - template auto receive_from_prover(const std::string& label) - { - // Extract the native element from the native transcript - T element = native_transcript.template receive_from_prover(label); - - // Return the corresponding stdlib type - return StdlibTypes::from_witness(builder, element); - } -}; -} // namespace proof_system::plonk::stdlib::recursion::honk diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/recursion/honk/transcript/transcript.test.cpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/recursion/honk/transcript/transcript.test.cpp deleted file mode 100644 index 33f70306c3a9..000000000000 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/recursion/honk/transcript/transcript.test.cpp +++ /dev/null @@ -1,159 +0,0 @@ -#include - -#include "barretenberg/ecc/curves/bn254/fr.hpp" -#include "barretenberg/ecc/curves/bn254/g1.hpp" -#include "barretenberg/honk/sumcheck/polynomials/univariate.hpp" -#include "barretenberg/honk/transcript/transcript.hpp" -#include "barretenberg/stdlib/recursion/honk/transcript/trancript.hpp" - -namespace proof_system::plonk::stdlib::recursion::honk { - -using Builder = 
UltraCircuitBuilder; - -using FF = barretenberg::fr; -using Commitment = barretenberg::g1::affine_element; -using Point = barretenberg::g1::element; -constexpr size_t LENGTH = 8; // arbitrary -using Univariate = proof_system::honk::sumcheck::Univariate; -using ProverTranscript = ::proof_system::honk::ProverTranscript; -using VerifierTranscript = ::proof_system::honk::VerifierTranscript; - -/** - * @brief Create some mock data and then add it to the transcript in various mock rounds - * - * @param prover_transcript - * @return auto proof_data - */ -auto generate_mock_proof_data(auto prover_transcript) -{ - uint32_t data = 25; - auto scalar = FF::random_element(); - auto commitment = Commitment::one(); - - std::array evaluations; - for (auto& eval : evaluations) { - eval = FF::random_element(); - } - auto univariate = Univariate(evaluations); - - // round 0 - prover_transcript.send_to_verifier("data", data); - prover_transcript.get_challenge("alpha"); - - // round 1 - prover_transcript.send_to_verifier("scalar", scalar); - prover_transcript.send_to_verifier("commitment", commitment); - prover_transcript.get_challenges("beta, gamma"); - - // round 2 - prover_transcript.send_to_verifier("univariate", univariate); - prover_transcript.get_challenges("gamma", "delta"); - - return prover_transcript.proof_data; -} - -/** - * @brief Perform series of verifier transcript operations - * @details Operations are designed to correspond to those performed by a prover transcript from which the verifier - * transcript was initialized. 
- * - * @param transcript Either a native or stdlib verifier transcript - */ -void perform_mock_verifier_transcript_operations(auto transcript) -{ - // round 0 - transcript.template receive_from_prover("data"); - transcript.get_challenge("alpha"); - - // round 1 - transcript.template receive_from_prover("scalar"); - transcript.template receive_from_prover("commitment"); - transcript.get_challenges("beta, gamma"); - - // round 2 - transcript.template receive_from_prover("univariate"); - transcript.get_challenges("gamma", "delta"); -} - -/** - * @brief Test basic transcript functionality and check circuit - * @details Implicitly ensures stdlib interface is identical to native - * @todo(luke): Underlying circuit is nearly trivial until transcript implements hashing constraints - */ -TEST(stdlib_honk_transcript, basic_transcript_operations) -{ - Builder builder; - - // Instantiate a Prover Transcript and use it to generate some mock proof data - ProverTranscript prover_transcript; - auto proof_data = generate_mock_proof_data(prover_transcript); - - // Instantiate a (native) Verifier Transcript with the proof data and perform some mock transcript operations - VerifierTranscript native_transcript(proof_data); - perform_mock_verifier_transcript_operations(native_transcript); - - // Confirm that Prover and Verifier transcripts have generated the same manifest via the operations performed - EXPECT_EQ(prover_transcript.get_manifest(), native_transcript.get_manifest()); - - // Instantiate a stdlib Transcript and perform the same operations - Transcript transcript{ &builder, proof_data }; - perform_mock_verifier_transcript_operations(transcript); - - // Confirm that the native and stdlib transcripts have generated the same manifest - EXPECT_EQ(transcript.get_manifest(), native_transcript.get_manifest()); - - // TODO(luke): This doesn't check much of anything until hashing is constrained in the stdlib transcript - EXPECT_TRUE(builder.check_circuit()); -} - -/** - * @brief Check 
that native and stdlib verifier transcript functions produce equivalent outputs - * - */ -TEST(stdlib_honk_transcript, return_values) -{ - Builder builder; - - // Define some mock data for a mock proof - auto scalar = FF::random_element(); - auto commitment = Commitment::one() * FF::random_element(); - - const size_t LENGTH = 10; // arbitrary - std::array evaluations; - for (auto& eval : evaluations) { - eval = FF::random_element(); - } - - // Construct a mock proof via the prover transcript - ProverTranscript prover_transcript; - prover_transcript.send_to_verifier("scalar", scalar); - prover_transcript.send_to_verifier("commitment", commitment); - prover_transcript.send_to_verifier("evaluations", evaluations); - prover_transcript.get_challenges("alpha, beta"); - auto proof_data = prover_transcript.proof_data; - - // Perform the corresponding operations with the native verifier transcript - VerifierTranscript native_transcript(proof_data); - auto native_scalar = native_transcript.template receive_from_prover("scalar"); - auto native_commitment = native_transcript.template receive_from_prover("commitment"); - auto native_evaluations = native_transcript.template receive_from_prover>("evaluations"); - auto [native_alpha, native_beta] = native_transcript.get_challenges("alpha", "beta"); - - // Perform the corresponding operations with the stdlib verifier transcript - Transcript stdlib_transcript{ &builder, proof_data }; - auto stdlib_scalar = stdlib_transcript.template receive_from_prover("scalar"); - auto stdlib_commitment = stdlib_transcript.template receive_from_prover("commitment"); - auto stdlib_evaluations = stdlib_transcript.template receive_from_prover>("evaluations"); - auto [stdlib_alpha, stdlib_beta] = stdlib_transcript.get_challenges("alpha", "beta"); - - // Confirm that return values are equivalent - EXPECT_EQ(native_scalar, stdlib_scalar.get_value()); - EXPECT_EQ(native_commitment, stdlib_commitment.get_value()); - for (size_t i = 0; i < LENGTH; ++i) { - 
EXPECT_EQ(native_evaluations[i], stdlib_evaluations[i].get_value()); - } - EXPECT_EQ(native_alpha, stdlib_alpha.get_value()); - EXPECT_EQ(native_beta, stdlib_beta.get_value()); -} - -} // namespace proof_system::plonk::stdlib::recursion::honk \ No newline at end of file diff --git a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/utility/utility.hpp b/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/utility/utility.hpp deleted file mode 100644 index c7bca963fc47..000000000000 --- a/circuits/cpp/barretenberg/cpp/src/barretenberg/stdlib/utility/utility.hpp +++ /dev/null @@ -1,99 +0,0 @@ -#pragma once - -#include "barretenberg/ecc/curves/bn254/fq.hpp" -#include "barretenberg/ecc/curves/bn254/fr.hpp" -#include "barretenberg/ecc/curves/bn254/g1.hpp" -#include "barretenberg/honk/sumcheck/polynomials/univariate.hpp" -#include "barretenberg/honk/transcript/transcript.hpp" - -#include "barretenberg/stdlib/primitives/bigfield/bigfield.hpp" -#include "barretenberg/stdlib/primitives/biggroup/biggroup.hpp" -#include "barretenberg/stdlib/primitives/field/field.hpp" - -namespace proof_system::plonk::stdlib::recursion::utility { - -/** - * @brief Utility class for converting native types to corresponding stdlib types - * - * @details Used to facilitate conversion of various native types (uint32_t, field, group, Univarite, etc.) to - * corresponding stdlib types. Useful for example for obtaining stdlib types in the recursive trancript from native - * types upon deserialization from the native transcript. - * - * @todo Eliminate the need for these somehow? 
- * @tparam Builder - */ -template class StdlibTypesUtility { - using field_ct = field_t; - using witness_ct = witness_t; - using fq_ct = bigfield; - using element_ct = element; - using FF = barretenberg::fr; - using Commitment = barretenberg::g1::affine_element; - template using Univariate = proof_system::honk::sumcheck::Univariate; - - public: - /** - * @brief Construct stdlib field from uint32_t - * - * @param element - * @return field_ct - */ - static field_ct from_witness(Builder* builder, uint32_t native_element) - { - return field_ct::from_witness(builder, native_element); - } - - /** - * @brief Construct stdlib field from native field type - * - * @param native_element - * @return field_ct - */ - static field_ct from_witness(Builder* builder, FF native_element) - { - return field_ct::from_witness(builder, native_element); - } - - /** - * @brief Construct stdlib group from native affine group element type - * - * @param native_element - * @return field_ct - */ - static element_ct from_witness(Builder* builder, Commitment native_element) - { - return element_ct::from_witness(builder, native_element); - } - - /** - * @brief Construct field_t array from native field array - * @param native_element Array of FF - * @return std::array - */ - template - static std::array from_witness(Builder* builder, std::array native_element) - { - std::array element; - for (size_t i = 0; i < LENGTH; ++i) { - element[i] = field_ct::from_witness(builder, native_element[i]); - } - return element; - } - - /** - * @brief Construct field_t array from native Univariate type - * TODO(luke): do we need a stdlib Univariate or is std::array good enough? 
- * @param native_element - * @return std::array - */ - template - static std::array from_witness(Builder* builder, Univariate native_element) - { - std::array element; - for (size_t i = 0; i < LENGTH; ++i) { - element[i] = field_ct::from_witness(builder, native_element.value_at(i)); - } - return element; - } -}; -} // namespace proof_system::plonk::stdlib::recursion::utility \ No newline at end of file diff --git a/circuits/cpp/barretenberg/ts/CHANGELOG.md b/circuits/cpp/barretenberg/ts/CHANGELOG.md index 05b4a8aa7b01..7e0295a46c6e 100644 --- a/circuits/cpp/barretenberg/ts/CHANGELOG.md +++ b/circuits/cpp/barretenberg/ts/CHANGELOG.md @@ -1,5 +1,19 @@ # Changelog +## [0.3.6](https://github.com/AztecProtocol/barretenberg/compare/barretenberg.js-v0.3.5...barretenberg.js-v0.3.6) (2023-08-08) + + +### Miscellaneous Chores + +* **barretenberg.js:** Synchronize barretenberg versions + +## [0.3.5](https://github.com/AztecProtocol/barretenberg/compare/barretenberg.js-v0.3.4...barretenberg.js-v0.3.5) (2023-08-07) + + +### Miscellaneous Chores + +* **barretenberg.js:** Synchronize barretenberg versions + ## [0.3.4](https://github.com/AztecProtocol/barretenberg/compare/barretenberg.js-v0.3.3...barretenberg.js-v0.3.4) (2023-07-25) diff --git a/circuits/cpp/barretenberg/ts/package.json b/circuits/cpp/barretenberg/ts/package.json index 4444070e8c1a..b5a7f3cfe119 100644 --- a/circuits/cpp/barretenberg/ts/package.json +++ b/circuits/cpp/barretenberg/ts/package.json @@ -1,6 +1,6 @@ { "name": "@aztec/bb.js", - "version": "0.3.4", + "version": "0.3.6", "homepage": "https://github.com/AztecProtocol/aztec-packages/tree/master/circuits/cpp/barretenberg/ts", "license": "MIT", "type": "module",