diff --git a/beacon-light-client/solidity/package.json b/beacon-light-client/solidity/package.json index 0cbf842f0..c35e9686b 100644 --- a/beacon-light-client/solidity/package.json +++ b/beacon-light-client/solidity/package.json @@ -4,6 +4,7 @@ "description": "", "main": "index.js", "scripts": { + "ts": "yarn node --import tsx", "build:tsc": "tsc -p ./tsconfig.json", "build:sol": "hardhat compile", "test": "yarn hardhat test", @@ -44,6 +45,7 @@ "bullmq": "^5.4.0", "dotenv": "^16.4.4", "env-paths": "^3.0.0", + "tsx": "^4.7.1", "typescript": "5.2.2" }, "exports": { diff --git a/beacon-light-client/solidity/test/utils/bls.ts b/beacon-light-client/solidity/test/utils/bls.ts index 28081c8f7..3025c97f3 100644 --- a/beacon-light-client/solidity/test/utils/bls.ts +++ b/beacon-light-client/solidity/test/utils/bls.ts @@ -24,46 +24,46 @@ import * as nodeCrypto from 'crypto'; // Fp₂(v) / (v³ - ξ) where ξ = u + 1 // Fp₆(w) / (w² - γ) where γ = v export const CURVE = { - // G1 is the order-q subgroup of E1(Fp) : y² = x³ + 4, #E1(Fp) = h1q, where - // characteristic; z + (z⁴ - z² + 1)(z - 1)²/3 - P: 0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaabn, - // order; z⁴ − z² + 1 - r: 0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001n, - // cofactor; (z - 1)²/3 - h: 0x396c8c005555e1568c00aaab0000aaabn, - // generator's coordinates - // x = 3685416753713387016781088315183077757961620795782546409894578378688607592378376318836054947676345821548104185464507 - // y = 1339506544944476473020471379941921221584933875938349620426543736416511423956333506472724655353366534992391756441569 - Gx: 0x17f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bbn, - Gy: 0x08b3f481e3aaa0f1a09e30ed741d8ae4fcf5e095d5d00af600db18cb2c04b3edd03cc744a2888ae40caa232946c5e7e1n, - b: 4n, + // G1 is the order-q subgroup of E1(Fp) : y² = x³ + 4, #E1(Fp) = h1q, where + // characteristic; z + (z⁴ - z² + 1)(z - 1)²/3 + 
P: 0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaabn, + // order; z⁴ − z² + 1 + r: 0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001n, + // cofactor; (z - 1)²/3 + h: 0x396c8c005555e1568c00aaab0000aaabn, + // generator's coordinates + // x = 3685416753713387016781088315183077757961620795782546409894578378688607592378376318836054947676345821548104185464507 + // y = 1339506544944476473020471379941921221584933875938349620426543736416511423956333506472724655353366534992391756441569 + Gx: 0x17f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bbn, + Gy: 0x08b3f481e3aaa0f1a09e30ed741d8ae4fcf5e095d5d00af600db18cb2c04b3edd03cc744a2888ae40caa232946c5e7e1n, + b: 4n, - // G2 is the order-q subgroup of E2(Fp²) : y² = x³+4(1+√−1), - // where Fp2 is Fp[√−1]/(x2+1). #E2(Fp2 ) = h2q, where - // G² - 1 - // h2q - P2: - 0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaabn * - 0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaabn - - 1n, - // cofactor - h2: 0x5d543a95414e7f1091d50792876a202cd91de4547085abaa68a205b2e5a7ddfa628f1cb4d9e82ef21537e293a6691ae1616ec6e786f0c70cf1c38e31c7238e5n, - G2x: [ - 0x024aa2b2f08f0a91260805272dc51051c6e47ad4fa403b02b4510b647ae3d1770bac0326a805bbefd48056c8c121bdb8n, - 0x13e02b6052719f607dacd3a088274f65596bd0d09920b61ab5da61bbdc7f5049334cf11213945d57e5ac7d055d042b7en, - ], - // y = - // 927553665492332455747201965776037880757740193453592970025027978793976877002675564980949289727957565575433344219582, - // 1985150602287291935568054521177171638300868978215655730859378665066344726373823718423869104263333984641494340347905 - G2y: [ - 0x0ce5d527727d6e118cc9cdc6da2e351aadfd9baa8cbdd3a76d429a695160d12c923ac9cc3baca289e193548608b82801n, - 0x0606c4a02ea734cc32acd2b02bc28b99cb3e287e85a763af267492ab572e99ab3f370d275cec1da1aaa9075ff05f79ben, - ], - b2: [4n, 4n], - // 
The BLS parameter x for BLS12-381 - x: 0xd201000000010000n, - h2Eff: - 0xbc69f08f2ee75b3584c6a0ea91b352888e2a8e9145ad7689986ff031508ffe1329c2f178731db956d82bf015d1212b02ec0ec69d7477c1ae954cbc06689f6a359894c0adebbf6b4e8020005aaa95551n, + // G2 is the order-q subgroup of E2(Fp²) : y² = x³+4(1+√−1), + // where Fp2 is Fp[√−1]/(x2+1). #E2(Fp2 ) = h2q, where + // G² - 1 + // h2q + P2: + 0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaabn * + 0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaabn - + 1n, + // cofactor + h2: 0x5d543a95414e7f1091d50792876a202cd91de4547085abaa68a205b2e5a7ddfa628f1cb4d9e82ef21537e293a6691ae1616ec6e786f0c70cf1c38e31c7238e5n, + G2x: [ + 0x024aa2b2f08f0a91260805272dc51051c6e47ad4fa403b02b4510b647ae3d1770bac0326a805bbefd48056c8c121bdb8n, + 0x13e02b6052719f607dacd3a088274f65596bd0d09920b61ab5da61bbdc7f5049334cf11213945d57e5ac7d055d042b7en, + ], + // y = + // 927553665492332455747201965776037880757740193453592970025027978793976877002675564980949289727957565575433344219582, + // 1985150602287291935568054521177171638300868978215655730859378665066344726373823718423869104263333984641494340347905 + G2y: [ + 0x0ce5d527727d6e118cc9cdc6da2e351aadfd9baa8cbdd3a76d429a695160d12c923ac9cc3baca289e193548608b82801n, + 0x0606c4a02ea734cc32acd2b02bc28b99cb3e287e85a763af267492ab572e99ab3f370d275cec1da1aaa9075ff05f79ben, + ], + b2: [4n, 4n], + // The BLS parameter x for BLS12-381 + x: 0xd201000000010000n, + h2Eff: + 0xbc69f08f2ee75b3584c6a0ea91b352888e2a8e9145ad7689986ff031508ffe1329c2f178731db956d82bf015d1212b02ec0ec69d7477c1ae954cbc06689f6a359894c0adebbf6b4e8020005aaa95551n, }; export function mod(a: bigint, b: bigint) { @@ -83,7 +83,7 @@ const SHA256_DIGEST_SIZE = 32; // p = 0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaab // m = 2 (or 1 for G1 see section 8.8.1) // k = 128 -const htfDefaults = { +export const htfDefaults = 
{ // DST: a domain separation tag // defined in section 2.2.5 DST: 'BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_POP_', // to comply with https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#bls-signatures @@ -184,7 +184,7 @@ function concatBytes(...arrays: Uint8Array[]): Uint8Array { } // UTF8 to ui8a -function stringToBytes(str: string) { +export function stringToBytes(str: string) { const bytes = new Uint8Array(str.length); for (let i = 0; i < str.length; i++) { bytes[i] = str.charCodeAt(i); @@ -225,7 +225,7 @@ function strxor(a: Uint8Array, b: Uint8Array): Uint8Array { // Produces a uniformly random byte string using a cryptographic hash function H that outputs b bits // https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-hash-to-curve-11#section-5.4.1 -async function expand_message_xmd( +export async function expand_message_xmd( msg: Uint8Array, DST: Uint8Array, lenInBytes: number, @@ -259,7 +259,7 @@ async function expand_message_xmd( // count - the number of elements of F to output. // Outputs: // [u_0, ..., u_(count - 1)], a list of field elements. 
-async function hash_to_field( +export async function hash_to_field( msg: Uint8Array, count: number, options = {}, diff --git a/beacon-light-client/solidity/test/utils/test.ts b/beacon-light-client/solidity/test/utils/test.ts new file mode 100644 index 000000000..b45a06aa2 --- /dev/null +++ b/beacon-light-client/solidity/test/utils/test.ts @@ -0,0 +1,203 @@ +import { expand_message_xmd, stringToBytes, htfDefaults, hash_to_field } from "./bls"; +import { Fp2, isogenyMapG2, map_to_curve_simple_swu_9mod16 } from "../../../../vendor/circom-pairing/test/math" +import { PointG2 } from "../../../../vendor/circom-pairing/test/index" +import { Field } from '@noble/bls12-381'; +import { formatHex } from '@dendreth/utils/ts-utils/bls'; + +function bigintToBytes(value: bigint): Uint8Array { + // Determine the required number of bytes to represent the bigint + const byteLength = Math.ceil(value.toString(16).length / 2); + + // Initialize a Uint8Array to hold the bytes + const byteArray = new Uint8Array(byteLength); + + // Convert the bigint to bytes + for (let i = 0; i < byteLength; i++) { + // Get the least significant byte and store it in the array + byteArray[byteLength - i - 1] = Number(value & BigInt(0xFF)); + // Shift the value to the right by 8 bits (1 byte) + value >>= BigInt(8); + } + + return byteArray; +} + +function bigintTo12Limbs(value: bigint): bigint[] { + const numLimbs = 12; // Number of limbs + const limbSize = 64; // Each limb size in bits + + // Create an array to hold the limbs + const limbs = new Array(numLimbs); + + // Loop through each limb and extract 64 bits at a time + for (let i = 0; i < numLimbs; i++) { + // Use a mask to extract the least significant 64 bits + const mask = (BigInt(1) << BigInt(limbSize)) - BigInt(1); + limbs[i] = value & mask; + // Shift the value to the right by 64 bits for the next limb + value >>= BigInt(limbSize); + } + + return limbs; +} + +function uint8ArrayToHexString(arr: Uint8Array): string { + return Array.from(arr) + 
.map(byte => byte.toString(16).padStart(2, '0')) + .join(''); +} + +type Fp2_4 = [Fp2, Fp2, Fp2, Fp2]; + +const xnum = [ + [ + 0x171d6541fa38ccfaed6dea691f5fb614cb14b4e7f4e810aa22d6108f142b85757098e38d0f671c7188e2aaaaaaaa5ed1n, + 0x0n, + ], + [ + 0x11560bf17baa99bc32126fced787c88f984f87adf7ae0c7f9a208c6b4f20a4181472aaa9cb8d555526a9ffffffffc71en, + 0x8ab05f8bdd54cde190937e76bc3e447cc27c3d6fbd7063fcd104635a790520c0a395554e5c6aaaa9354ffffffffe38dn, + ], + [ + 0x0n, + 0x11560bf17baa99bc32126fced787c88f984f87adf7ae0c7f9a208c6b4f20a4181472aaa9cb8d555526a9ffffffffc71an, + ], + [ + 0x5c759507e8e333ebb5b7a9a47d7ed8532c52d39fd3a042a88b58423c50ae15d5c2638e343d9c71c6238aaaaaaaa97d6n, + 0x5c759507e8e333ebb5b7a9a47d7ed8532c52d39fd3a042a88b58423c50ae15d5c2638e343d9c71c6238aaaaaaaa97d6n, + ], +].map((pair) => Fp2.fromBigTuple(pair)) as Fp2_4; +const xden = [ + [0x0n, 0x0n], + [0x1n, 0x0n], + [ + 0xcn, + 0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaa9fn, + ], + [ + 0x0n, + 0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaa63n, + ], +].map((pair) => Fp2.fromBigTuple(pair)) as Fp2_4; +const ynum = [ + [ + 0x124c9ad43b6cf79bfbf7043de3811ad0761b0f37a1e26286b0e977c69aa274524e79097a56dc4bd9e1b371c71c718b10n, + 0x0n, + ], + [ + 0x11560bf17baa99bc32126fced787c88f984f87adf7ae0c7f9a208c6b4f20a4181472aaa9cb8d555526a9ffffffffc71cn, + 0x8ab05f8bdd54cde190937e76bc3e447cc27c3d6fbd7063fcd104635a790520c0a395554e5c6aaaa9354ffffffffe38fn, + ], + [ + 0x0n, + 0x5c759507e8e333ebb5b7a9a47d7ed8532c52d39fd3a042a88b58423c50ae15d5c2638e343d9c71c6238aaaaaaaa97ben, + ], + [ + 0x1530477c7ab4113b59a4c18b076d11930f7da5d4a07f649bf54439d87d27e500fc8c25ebf8c92f6812cfc71c71c6d706n, + 0x1530477c7ab4113b59a4c18b076d11930f7da5d4a07f649bf54439d87d27e500fc8c25ebf8c92f6812cfc71c71c6d706n, + ], +].map((pair) => Fp2.fromBigTuple(pair)) as Fp2_4; +const yden = [ + [0x1n, 0x0n], + [ + 0x12n, + 
0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaa99n, + ], + [ + 0x0n, + 0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffa9d3n, + ], + [ + 0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffa8fbn, + 0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffa8fbn, + ], +].map((pair) => Fp2.fromBigTuple(pair)) as Fp2_4; +const ISOGENY_COEFFICIENTS_G2: [Fp2_4, Fp2_4, Fp2_4, Fp2_4] = [xnum, xden, ynum, yden]; + + +function hexToBytes(hex: string): Uint8Array { + if (typeof hex !== "string") { + throw new TypeError("hexToBytes: expected string, got " + typeof hex); + } + if (hex.length % 2) + throw new Error("hexToBytes: received invalid unpadded hex"); + const array = new Uint8Array(hex.length / 2); + for (let i = 0; i < array.length; i++) { + const j = i * 2; + const hexByte = hex.slice(j, j + 2); + if (hexByte.length !== 2) throw new Error("Invalid byte sequence"); + const byte = Number.parseInt(hexByte, 16); + if (Number.isNaN(byte) || byte < 0) + throw new Error("Invalid byte sequence"); + array[i] = byte; + } + return array; +} + + +// 3-isogeny map from E' to E +// https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-hash-to-curve-11#appendix-E.3 +function nobleIsogenyMap>(COEFF: [T[], T[], T[], T[]], x: T, y: T): [T, T] { + const [xNum, xDen, yNum, yDen] = COEFF.map((val) => + val.reduce((acc, i) => acc.multiply(x).add(i)) + ); + x = xNum.div(xDen); // xNum / xDen + y = y.multiply(yNum.div(yDen)); // y * (yNum / yDev) + return [x, y]; +} + +function ensureBytes(hex: string | Uint8Array): Uint8Array { + // Uint8Array.from() instead of hash.slice() because node.js Buffer + // is instance of Uint8Array, and its slice() creates **mutable** copy + return hex instanceof Uint8Array ? 
Uint8Array.from(hex) : hexToBytes(hex); +} +type Hex = Uint8Array | string; + +// Encodes byte string to elliptic curve +// https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-hash-to-curve-11#section-3 +// async function testHashToCurve(msg: Hex) { +// msg = ensureBytes(msg); +// const u = await hash_to_field(msg, 2); +// // console.log(`hash_to_curve(msg}) u0=${new Fp2(u[0])} u1=${new Fp2(u[1])}`); +// console.log("map_to_curve_simple_swu_9mod16", map_to_curve_simple_swu_9mod16(u[0])); +// const Q0 = new PointG2( +// ...isogenyMapG2(map_to_curve_simple_swu_9mod16(u[0])) +// ); +// const Q1 = new PointG2( +// ...isogenyMapG2(map_to_curve_simple_swu_9mod16(u[1])) +// ); +// // const R = Q0.add(Q1); + +// return u; +// } + +(async () => { + let msg = new Uint8Array([103, 140, 163, 210, 238, 252, 75, 8, 227, 27, 60, 229, 125, 150, 241, 222, 217, 156, 178, 17, 14, 199, 15, 172, 94, 179, 249, 0, 197, 206, 104, 200, 165, 253, 55, 147, 171, 191, 118, 189, 133, 138, 2, 22, 237, 6, 62, 10, 68, 105, 208, 102, 66, 70, 170, 114, 194, 80, 215, 5, 63, 95, 202, 1, 99, 153, 67, 115, 7, 122, 235, 255, 142, 44, 3, 65, 190, 166, 218, 72, 230, 196, 24, 88, 146, 193, 211, 90, 37, 173, 71, 152, 21, 226, 89, 79, 239, 81, 149, 135, 188, 51, 52, 116, 26, 30, 126, 31, 35, 240, 201, 101, 33, 61, 220, 192, 86, 47, 214, 243, 224, 136, 50, 56, 42, 233, 148, 244, 203, 198, 195, 120, 36, 221, 181, 53, 160, 58, 167, 131, 216, 183, 83, 232, 151, 87, 46, 54, 128, 123, 231, 212, 130, 19, 28, 96, 108, 111, 137, 154, 40, 184, 74, 69, 100, 64, 177, 98, 248, 32, 12, 97, 49, 187, 39, 159, 168, 247, 29, 246, 209, 110, 77, 73, 20, 23, 174, 143, 93, 92, 162, 48, 134, 119, 213, 139, 234, 205, 91, 113, 204, 121, 57, 4, 41, 180, 144, 76, 107, 59, 176, 43, 11, 127, 34, 38, 164, 9, 141, 78, 245, 175, 145, 112, 129, 109, 18, 250, 85, 16, 124, 182, 242, 158, 84, 219, 13, 207, 186, 82, 157, 132, 225, 236, 45, 185, 228, 161, 169, 106, 25, 155, 251, 254, 223]); + + const DST = stringToBytes(htfDefaults.DST); + + 
let hash_to_field_result = await hash_to_field(msg, 2); + // let map_to_curve: PointG2 = map_to_curve_simple_swu_9mod16(hash_to_field_result[0]); + // let iso_map_r = nobleIsogenyMap(ISOGENY_COEFFICIENTS_G2, map_to_curve[0], map_to_curve[1]); + // let clear_cof_g2_r = clearCofactor(iso_map_r); + + let hash_to_curve_test_res: PointG2 = await PointG2.hashToCurve( + formatHex(uint8ArrayToHexString(msg)), + ); + + // let without_cofactor_hash2curve = await testHashToCurve(msg); + + // console.log('hash_to_field_result is: ', hash_to_field_result); + console.log('####################################################'); + console.log('hash_to_curve_test_res', hash_to_curve_test_res.toAffine()); + console.log('####################################################'); + // console.log('without_cofactor_hash2curve is: ', without_cofactor_hash2curve); + + + // let a = 164432780807461518526223636504241229349588394649409730072519387299403412015098917482545551400313990282635303577913n; + + // for (let i = 1n; i <= 12n; i++) { + // console.log((a % (2n ** 32n))); + // a = a / (2n ** 32n); + // } +})(); \ No newline at end of file diff --git a/casper-finality-proofs/Cargo.lock b/casper-finality-proofs/Cargo.lock index 4347570ad..49ca8027b 100644 --- a/casper-finality-proofs/Cargo.lock +++ b/casper-finality-proofs/Cargo.lock @@ -114,9 +114,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.7.7" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a824f2aa7e75a0c98c5a504fceb80649e9c35265d44525b5f94de4771a395cd" +checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" dependencies = [ "getrandom", "once_cell", @@ -125,9 +125,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.6" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91429305e9f0a25f6205c5b8e0d2db09e0708a7a6df0f42212bb56c32c8ac97a" +checksum = 
"e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", "const-random", @@ -166,6 +166,12 @@ dependencies = [ "libc", ] +[[package]] +name = "anes" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" + [[package]] name = "ansi_term" version = "0.12.1" @@ -253,6 +259,124 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" +[[package]] +name = "ark-bls12-381" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c775f0d12169cba7aae4caeb547bb6a50781c7449a8aa53793827c9ec4abf488" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-serialize", + "ark-std", +] + +[[package]] +name = "ark-ec" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "defd9a439d56ac24968cca0571f598a61bc8c55f71d50a89cda591cb750670ba" +dependencies = [ + "ark-ff", + "ark-poly", + "ark-serialize", + "ark-std", + "derivative", + "hashbrown 0.13.2", + "itertools 0.10.5", + "num-traits", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec847af850f44ad29048935519032c33da8aa03340876d351dfab5660d2966ba" +dependencies = [ + "ark-ff-asm", + "ark-ff-macros", + "ark-serialize", + "ark-std", + "derivative", + "digest 0.10.7", + "itertools 0.10.5", + "num-bigint 0.4.4", + "num-traits", + "paste", + "rustc_version", + "zeroize", +] + +[[package]] +name = "ark-ff-asm" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.4.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" +dependencies = [ + "num-bigint 0.4.4", + "num-traits", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-poly" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d320bfc44ee185d899ccbadfa8bc31aab923ce1558716e1997a1e74057fe86bf" +dependencies = [ + "ark-ff", + "ark-serialize", + "ark-std", + "derivative", + "hashbrown 0.13.2", +] + +[[package]] +name = "ark-serialize" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5" +dependencies = [ + "ark-serialize-derive", + "ark-std", + "digest 0.10.7", + "num-bigint 0.4.4", +] + +[[package]] +name = "ark-serialize-derive" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae3281bc6d0fd7e549af32b52511e1302185bd688fd3359fa36423346ff682ea" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-std" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" +dependencies = [ + "num-traits", + "rand", +] + [[package]] name = "array-macro" version = "2.1.5" @@ -373,7 +497,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.58", ] [[package]] @@ -384,7 +508,7 @@ checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.58", ] [[package]] @@ -883,16 +1007,22 @@ name = "casper-finality-proofs" version = "0.1.0" dependencies = [ "anyhow", + "ark-bls12-381", + "ark-ec", + "ark-std", "cached_tree_hash", "clap 4.4.7", "colored", + "criterion", 
"crossbeam", - "curta 0.1.0 (git+https://github.com/succinctlabs/curta.git?branch=main)", "ef_tests", "ethers", "hex", "itertools 0.10.5", + "jemallocator", "merkle_proof", + "num-bigint 0.4.4", + "num-iter", "once_cell", "plonky2", "plonky2x", @@ -902,12 +1032,19 @@ dependencies = [ "serde_json", "serde_yaml 0.9.27", "snap", + "starky", "state_processing", "strum 0.25.0", "types", "walkdir", ] +[[package]] +name = "cast" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" + [[package]] name = "cc" version = "1.0.83" @@ -962,6 +1099,33 @@ dependencies = [ "windows-targets 0.48.5", ] +[[package]] +name = "ciborium" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" +dependencies = [ + "ciborium-io", + "ciborium-ll", + "serde", +] + +[[package]] +name = "ciborium-io" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" + +[[package]] +name = "ciborium-ll" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" +dependencies = [ + "ciborium-io", + "half", +] + [[package]] name = "cipher" version = "0.3.0" @@ -1027,7 +1191,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.58", ] [[package]] @@ -1242,6 +1406,42 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "criterion" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f" +dependencies = [ + "anes", + "cast", + "ciborium", + "clap 4.4.7", + "criterion-plot", + "is-terminal", + "itertools 0.10.5", + "num-traits", + "once_cell", + "oorandom", 
+ "plotters", + "rayon", + "regex", + "serde", + "serde_derive", + "serde_json", + "tinytemplate", + "walkdir", +] + +[[package]] +name = "criterion-plot" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" +dependencies = [ + "cast", + "itertools 0.10.5", +] + [[package]] name = "crossbeam" version = "0.8.2" @@ -1397,88 +1597,23 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "curta" -version = "0.1.0" -source = "git+https://github.com/succinctlabs/curta.git?branch=main#7b10c6d041bb33ec1a3f7a85fc6f74e27cf655d4" -dependencies = [ - "anyhow", - "bincode", - "curve25519-dalek 4.1.1", - "env_logger 0.9.3", - "hex", - "itertools 0.10.5", - "log", - "num", - "plonky2", - "plonky2_maybe_rayon 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rand", - "serde", - "subtle-encoding", -] - -[[package]] -name = "curta" -version = "0.1.0" -source = "git+https://github.com/succinctlabs/curta.git#7b10c6d041bb33ec1a3f7a85fc6f74e27cf655d4" -dependencies = [ - "anyhow", - "bincode", - "curve25519-dalek 4.1.1", - "env_logger 0.9.3", - "hex", - "itertools 0.10.5", - "log", - "num", - "plonky2", - "plonky2_maybe_rayon 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rand", - "serde", - "subtle-encoding", -] - [[package]] name = "curve25519-dalek" -version = "4.0.0" -source = "git+https://github.com/succinctlabs/curve25519-dalek.git?branch=feature/edwards-point-getters#e2d1bd10d6d772af07cac5c8161cd7655016af6d" -dependencies = [ - "cfg-if", - "cpufeatures", - "curve25519-dalek-derive 0.1.0", - "fiat-crypto 0.1.20", - "platforms 3.2.0", - "rustc_version", - "subtle", - "zeroize", -] - -[[package]] -name = "curve25519-dalek" -version = "4.1.1" +version = "4.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e89b8c6a2e4b1f45971ad09761aafb85514a84744b67a95e32c3cc1352d1f65c" +checksum = 
"0a677b8922c94e01bdbb12126b0bc852f00447528dee1782229af9c720c3f348" dependencies = [ "cfg-if", "cpufeatures", - "curve25519-dalek-derive 0.1.1", + "curve25519-dalek-derive", "digest 0.10.7", - "fiat-crypto 0.2.2", + "fiat-crypto", "platforms 3.2.0", "rustc_version", "subtle", "zeroize", ] -[[package]] -name = "curve25519-dalek-derive" -version = "0.1.0" -source = "git+https://github.com/succinctlabs/curve25519-dalek.git?branch=feature/edwards-point-getters#e2d1bd10d6d772af07cac5c8161cd7655016af6d" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.39", -] - [[package]] name = "curve25519-dalek-derive" version = "0.1.1" @@ -1487,20 +1622,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", -] - -[[package]] -name = "curve25519-dalek-ng" -version = "4.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c359b7249347e46fb28804470d071c921156ad62b3eef5d34e2ba867533dec8" -dependencies = [ - "byteorder", - "digest 0.9.0", - "rand_core", - "subtle-ng", - "zeroize", + "syn 2.0.58", ] [[package]] @@ -1548,7 +1670,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.39", + "syn 2.0.58", ] [[package]] @@ -1570,7 +1692,7 @@ checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5" dependencies = [ "darling_core 0.20.3", "quote", - "syn 2.0.39", + "syn 2.0.58", ] [[package]] @@ -1714,7 +1836,7 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.58", ] [[package]] @@ -1868,7 +1990,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.58", ] [[package]] @@ -1925,28 +2047,13 @@ dependencies = [ "signature 2.1.0", ] -[[package]] -name = "ed25519-consensus" -version = "2.1.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c8465edc8ee7436ffea81d21a019b16676ee3db267aa8d5a8d729581ecf998b" -dependencies = [ - "curve25519-dalek-ng", - "hex", - "rand_core", - "serde", - "sha2 0.9.9", - "thiserror", - "zeroize", -] - [[package]] name = "ed25519-dalek" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7277392b266383ef8396db7fdeb1e77b6c52fed775f5df15bb24f35b72156980" dependencies = [ - "curve25519-dalek 4.1.1", + "curve25519-dalek", "ed25519", "rand_core", "serde", @@ -2109,7 +2216,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.58", ] [[package]] @@ -2590,7 +2697,7 @@ dependencies = [ "reqwest", "serde", "serde_json", - "syn 2.0.39", + "syn 2.0.58", "toml 0.7.8", "walkdir", ] @@ -2608,7 +2715,7 @@ dependencies = [ "proc-macro2", "quote", "serde_json", - "syn 2.0.39", + "syn 2.0.58", ] [[package]] @@ -2660,7 +2767,7 @@ dependencies = [ "serde", "serde_json", "strum 0.25.0", - "syn 2.0.39", + "syn 2.0.58", "tempfile", "thiserror", "tiny-keccak", @@ -2949,12 +3056,6 @@ version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec54ac60a7f2ee9a97cad9946f9bf629a3bc6a7ae59e68983dc9318f5a54b81a" -[[package]] -name = "fiat-crypto" -version = "0.1.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e825f6987101665dea6ec934c09ec6d721de7bc1bf92248e1d5810c8cd636b77" - [[package]] name = "fiat-crypto" version = "0.2.2" @@ -3179,7 +3280,7 @@ checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.58", ] [[package]] @@ -3393,6 +3494,16 @@ dependencies = [ "tracing", ] +[[package]] +name = "half" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" +dependencies = [ + "cfg-if", + "crunchy", +] + 
[[package]] name = "hash-db" version = "0.15.2" @@ -3414,7 +3525,7 @@ version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" dependencies = [ - "ahash 0.7.7", + "ahash 0.7.8", ] [[package]] @@ -3423,16 +3534,25 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" dependencies = [ - "ahash 0.7.7", + "ahash 0.7.8", ] [[package]] name = "hashbrown" -version = "0.14.2" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93e7192158dbcda357bdec5fb5788eebf8bbac027f3f33e719d29135ae84156" +checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" dependencies = [ - "ahash 0.8.6", + "ahash 0.8.11", +] + +[[package]] +name = "hashbrown" +version = "0.14.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" +dependencies = [ + "ahash 0.8.11", "allocator-api2", "rayon", "serde", @@ -3462,7 +3582,7 @@ version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" dependencies = [ - "hashbrown 0.14.2", + "hashbrown 0.14.3", ] [[package]] @@ -3860,7 +3980,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" dependencies = [ "equivalent", - "hashbrown 0.14.2", + "hashbrown 0.14.3", "serde", ] @@ -3963,6 +4083,26 @@ version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" +[[package]] +name = "jemalloc-sys" +version = "0.5.4+5.3.0-patched" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ac6c1946e1cea1788cbfde01c993b52a10e2da07f4bac608228d1bed20bfebf2" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "jemallocator" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0de374a9f8e63150e6f5e8a60cc14c668226d7a347d8aee1a45766e3c4dd3bc" +dependencies = [ + "jemalloc-sys", + "libc", +] + [[package]] name = "jobserver" version = "0.1.27" @@ -4386,7 +4526,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2eeec39ad3ad0677551907dd304b2f13f17208ccebe333bef194076cd2e8921" dependencies = [ "bytes", - "curve25519-dalek 4.1.1", + "curve25519-dalek", "futures", "libp2p-core", "libp2p-identity", @@ -4477,7 +4617,7 @@ dependencies = [ "proc-macro-warning", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.58", ] [[package]] @@ -4796,7 +4936,7 @@ version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1efa59af2ddfad1854ae27d75009d538d0998b4b2fd47083e743ac1a10e46c60" dependencies = [ - "hashbrown 0.14.2", + "hashbrown 0.14.3", ] [[package]] @@ -5318,19 +5458,18 @@ dependencies = [ [[package]] name = "num-integer" -version = "0.1.45" +version = "0.1.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" dependencies = [ - "autocfg", "num-traits", ] [[package]] name = "num-iter" -version = "0.1.43" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" +checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" dependencies = [ "autocfg", "num-integer", @@ -5387,7 +5526,7 @@ dependencies = [ "proc-macro-crate 2.0.0", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.58", ] [[package]] @@ -5431,6 +5570,12 @@ dependencies = [ "parking_lot 
0.12.1", ] +[[package]] +name = "oorandom" +version = "11.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" + [[package]] name = "opaque-debug" version = "0.3.0" @@ -5485,7 +5630,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.58", ] [[package]] @@ -5796,7 +5941,7 @@ dependencies = [ "phf_shared 0.11.2", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.58", ] [[package]] @@ -5834,7 +5979,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.58", ] [[package]] @@ -5889,31 +6034,32 @@ checksum = "14e6ab3f592e6fb464fc9712d8d6e6912de6473954635fd76a589d832cffcbb0" [[package]] name = "plonky2" -version = "0.1.4" -source = "git+https://github.com/mir-protocol/plonky2.git#fa93454c5ca88a8b5075b4026f6042cd5b766eb4" +version = "0.2.1" +source = "git+https://github.com/mir-protocol/plonky2.git#53c5bc3e956379875bad7e4f371325210a6ec06b" dependencies = [ - "ahash 0.8.6", + "ahash 0.8.11", "anyhow", "getrandom", - "hashbrown 0.14.2", + "hashbrown 0.14.3", "itertools 0.11.0", "keccak-hash 0.8.0", "log", "num", "plonky2_field", - "plonky2_maybe_rayon 0.1.1 (git+https://github.com/mir-protocol/plonky2.git)", + "plonky2_maybe_rayon 0.2.0 (git+https://github.com/mir-protocol/plonky2.git)", "plonky2_util", "rand", + "rand_chacha", "serde", - "serde_json", "static_assertions", "unroll", + "web-time", ] [[package]] name = "plonky2_field" -version = "0.1.1" -source = "git+https://github.com/mir-protocol/plonky2.git#fa93454c5ca88a8b5075b4026f6042cd5b766eb4" +version = "0.2.1" +source = "git+https://github.com/mir-protocol/plonky2.git#53c5bc3e956379875bad7e4f371325210a6ec06b" dependencies = [ "anyhow", "itertools 0.11.0", @@ -5927,30 +6073,30 @@ dependencies = [ [[package]] name = 
"plonky2_maybe_rayon" -version = "0.1.1" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "194db0cbdd974e92d897cd92b74adb3968dc1b967315eb280357c49a7637994e" +checksum = "92ff44a90aaca13e10e7ddf8fab815ba1b404c3f7c3ca82aaf11c46beabaa923" dependencies = [ "rayon", ] [[package]] name = "plonky2_maybe_rayon" -version = "0.1.1" -source = "git+https://github.com/mir-protocol/plonky2.git#fa93454c5ca88a8b5075b4026f6042cd5b766eb4" +version = "0.2.0" +source = "git+https://github.com/mir-protocol/plonky2.git#53c5bc3e956379875bad7e4f371325210a6ec06b" dependencies = [ "rayon", ] [[package]] name = "plonky2_util" -version = "0.1.1" -source = "git+https://github.com/mir-protocol/plonky2.git#fa93454c5ca88a8b5075b4026f6042cd5b766eb4" +version = "0.2.0" +source = "git+https://github.com/mir-protocol/plonky2.git#53c5bc3e956379875bad7e4f371325210a6ec06b" [[package]] name = "plonky2x" version = "0.1.0" -source = "git+https://github.com/succinctlabs/succinctx.git?branch=main#598e609dcfc7985afcb706c8775346ec9de081cb" +source = "git+https://github.com/metacraft-labs/succinctx.git?branch=bls_verification_helper#873071056f240af033310c0f0fa7f148502dfd0f" dependencies = [ "anyhow", "array-macro", @@ -5959,11 +6105,10 @@ dependencies = [ "base64 0.13.1", "bincode", "clap 4.4.7", - "curta 0.1.0 (git+https://github.com/succinctlabs/curta.git)", - "curve25519-dalek 4.0.0", + "curve25519-dalek", "digest 0.10.7", "dotenv", - "ed25519-consensus", + "ed25519-dalek", "env_logger 0.10.0", "ethers", "ff 0.13.0", @@ -5984,6 +6129,7 @@ dependencies = [ "serde_with 3.4.0", "sha2 0.10.8", "sha256", + "starkyx", "tokio", "tracing", "uuid 1.5.0", @@ -5992,11 +6138,39 @@ dependencies = [ [[package]] name = "plonky2x-derive" version = "0.1.0" -source = "git+https://github.com/succinctlabs/succinctx.git?branch=main#598e609dcfc7985afcb706c8775346ec9de081cb" +source = 
"git+https://github.com/metacraft-labs/succinctx.git?branch=bls_verification_helper#873071056f240af033310c0f0fa7f148502dfd0f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.58", +] + +[[package]] +name = "plotters" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2c224ba00d7cadd4d5c660deaf2098e5e80e07846537c51f9cfa4be50c1fd45" +dependencies = [ + "num-traits", + "plotters-backend", + "plotters-svg", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "plotters-backend" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e76628b4d3a7581389a35d5b6e2139607ad7c75b17aed325f210aa91f4a9609" + +[[package]] +name = "plotters-svg" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38f6d39893cca0701371e3c27294f09797214b86f1fb951b89ade8ec04e2abab" +dependencies = [ + "plotters-backend", ] [[package]] @@ -6072,7 +6246,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae005bd773ab59b4725093fd7df83fd7892f7d8eafb48dbd7de6e024e4215f9d" dependencies = [ "proc-macro2", - "syn 2.0.39", + "syn 2.0.58", ] [[package]] @@ -6168,14 +6342,14 @@ checksum = "3d1eaa7fa0aa1929ffdf7eeb6eac234dde6268914a14ad44d23521ab6a9b258e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.58", ] [[package]] name = "proc-macro2" -version = "1.0.69" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da" +checksum = "e835ff2298f5721608eb1a980ecaee1aef2c132bf95ecc026a11b7bf3c01c02e" dependencies = [ "unicode-ident", ] @@ -6230,7 +6404,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.58", ] [[package]] @@ -6367,9 +6541,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.33" 
+version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" +checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" dependencies = [ "proc-macro2", ] @@ -7072,29 +7246,29 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.192" +version = "1.0.197" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bca2a08484b285dcb282d0f67b26cadc0df8b19f8c12502c13d966bf9482f001" +checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.192" +version = "1.0.197" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6c7207fbec9faa48073f3e3074cbe553af6ea512d7c21ba46e434e70ea9fbc1" +checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.58", ] [[package]] name = "serde_json" -version = "1.0.108" +version = "1.0.115" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d1c7e3eac408d115102c4c24ad393e0821bb3a5df4d506a80f85f7a742a526b" +checksum = "12dc5c46daa8e9fdf4f5e71b6cf9a53f2487da0e86e55808e2d35539666497dd" dependencies = [ "itoa", "ryu", @@ -7128,7 +7302,7 @@ checksum = "3081f5ffbb02284dda55132aa26daecedd7372a42417bbbab6f14ab7d6bb9145" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.58", ] [[package]] @@ -7200,7 +7374,7 @@ dependencies = [ "darling 0.20.3", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.58", ] [[package]] @@ -7541,7 +7715,7 @@ dependencies = [ "aes-gcm", "blake2", "chacha20poly1305", - "curve25519-dalek 4.1.1", + "curve25519-dalek", "rand_core", "ring 0.16.20", "rustc_version", @@ -7658,6 +7832,42 @@ dependencies = [ "typenum", ] +[[package]] +name = "starky" +version = "0.3.0" +source = 
"git+https://github.com/mir-protocol/plonky2.git#53c5bc3e956379875bad7e4f371325210a6ec06b" +dependencies = [ + "ahash 0.8.11", + "anyhow", + "hashbrown 0.14.3", + "itertools 0.11.0", + "log", + "num-bigint 0.4.4", + "plonky2", + "plonky2_maybe_rayon 0.2.0 (git+https://github.com/mir-protocol/plonky2.git)", + "plonky2_util", +] + +[[package]] +name = "starkyx" +version = "0.1.0" +source = "git+https://github.com/succinctlabs/starkyx.git#ad8eb4bae41268dcb2964dab52e66d4e551abf39" +dependencies = [ + "anyhow", + "bincode", + "curve25519-dalek", + "env_logger 0.9.3", + "hex", + "itertools 0.10.5", + "log", + "num", + "plonky2", + "plonky2_maybe_rayon 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rand", + "serde", + "subtle-encoding", +] + [[package]] name = "state_processing" version = "0.2.0" @@ -7779,7 +7989,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.39", + "syn 2.0.58", ] [[package]] @@ -7797,12 +8007,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "subtle-ng" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "734676eb262c623cec13c3155096e08d1f8f29adce39ba17948b18dad1e54142" - [[package]] name = "superstruct" version = "0.6.0" @@ -7859,9 +8063,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.39" +version = "2.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23e78b90f2fcf45d3e842032ce32e3f2d1545ba6636271dcbf24fa306d87be7a" +checksum = "44cfb93f38070beee36b3fef7d4f5a16f27751d94b187b666a5cc5e9b0d30687" dependencies = [ "proc-macro2", "quote", @@ -8007,7 +8211,7 @@ checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.58", ] [[package]] @@ -8088,6 +8292,16 @@ dependencies = [ "crunchy", ] +[[package]] +name = "tinytemplate" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" +dependencies = [ + "serde", + "serde_json", +] + [[package]] name = "tinyvec" version = "1.6.0" @@ -8140,7 +8354,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.58", ] [[package]] @@ -8323,7 +8537,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.58", ] [[package]] @@ -8885,7 +9099,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.58", "wasm-bindgen-shared", ] @@ -8919,7 +9133,7 @@ checksum = "c5353b8dab669f5e10f5bd76df26a9360c748f054f862ff5f3f8aae0c7fb3907" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.58", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -8953,6 +9167,16 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "web-time" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + [[package]] name = "webpki-roots" version = "0.25.2" @@ -9224,7 +9448,7 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb66477291e7e8d2b0ff1bcb900bf29489a9692816d79874bea351e7a8b6de96" dependencies = [ - "curve25519-dalek 4.1.1", + "curve25519-dalek", "rand_core", "serde", "zeroize", @@ -9303,29 +9527,29 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.7.25" +version = "0.7.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cd369a67c0edfef15010f980c3cbe45d7f651deac2cd67ce097cd801de16557" +checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.25" +version = "0.7.32" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2f140bda219a26ccc0cdb03dba58af72590c53b22642577d88a927bc5c87d6b" +checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.58", ] [[package]] name = "zeroize" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" +checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" dependencies = [ "zeroize_derive", ] @@ -9338,7 +9562,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.58", ] [[package]] diff --git a/casper-finality-proofs/Cargo.toml b/casper-finality-proofs/Cargo.toml index accf7eea4..58f1bdd5c 100644 --- a/casper-finality-proofs/Cargo.toml +++ b/casper-finality-proofs/Cargo.toml @@ -21,12 +21,19 @@ path = "bin/compute_shuffled_index.rs" name = "test_engine" path = "src/test_engine/bin/main.rs" +[[bin]] +name = "test_biguint" +path = "bin/test_biguint.rs" + [dependencies] -plonky2 = { git = "https://github.com/mir-protocol/plonky2.git", default-features = false } -plonky2x = { git = "https://github.com/succinctlabs/succinctx.git", branch = "main" } -curta = { git = "https://github.com/succinctlabs/curta.git", branch = "main" } +plonky2 = { git = "https://github.com/mir-protocol/plonky2.git" } +plonky2x = { git = "https://github.com/metacraft-labs/succinctx.git", branch = "bls_verification_helper" } +starky = { git = "https://github.com/mir-protocol/plonky2.git" } +ark-bls12-381 = "0.4.0" +ark-ec = "0.4.2" +ark-std = "0.4.0" serde = { version = "1.0.187", features = ["derive"] } -serde_json = "1.0.103" +serde_json = "1.0.115" serde_derive = "1.0.188" ethers = { version = "2.0" } primitive-types = "0.12.2" @@ -46,3 +53,17 @@ lighthouse_state_processing = { git = 
"https://github.com/sigp/lighthouse", tag lighthouse_state_merkle_proof = { git = "https://github.com/sigp/lighthouse", tag = "v4.5.0", package = "merkle_proof" } lighthouse_cached_tree_hash = { git = "https://github.com/sigp/lighthouse", tag = "v4.5.0", package = "cached_tree_hash" } snap = "1.1.0" +num-bigint = "0.4.4" +num-iter = "0.1.45" +jemallocator = "0.5.4" + +[dev-dependencies] +criterion = "0.5.1" + +[[bench]] +name = "g1_ecp_aggregation_benchmark" +harness = false + +[[bench]] +name = "g1_ecp_aggregation_starky_benchmark" +harness = false diff --git a/casper-finality-proofs/benches/g1_ecp_aggregation_benchmark.rs b/casper-finality-proofs/benches/g1_ecp_aggregation_benchmark.rs new file mode 100644 index 000000000..b6f6970cf --- /dev/null +++ b/casper-finality-proofs/benches/g1_ecp_aggregation_benchmark.rs @@ -0,0 +1,57 @@ +use casper_finality_proofs::verification::curves::g1::g1_ecc_aggregate; +use casper_finality_proofs::verification::pubkey_to_g1::pubkey_to_g1_check; +use num_bigint::BigUint; +use plonky2::field::goldilocks_field::GoldilocksField; +use plonky2::field::types::Field; +use plonky2::iop::target::Target; +use plonky2::plonk::config::GenericConfig; +use plonky2::plonk::config::PoseidonGoldilocksConfig; +use std::str::FromStr; + +use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use plonky2x::frontend::uint::num::biguint::CircuitBuilderBiguint; + +fn g1_ecp_aggregation_benchmark(c: &mut Criterion) { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + let circuit_config = plonky2::plonk::circuit_data::CircuitConfig::standard_recursion_config(); + let mut builder = plonky2::plonk::circuit_builder::CircuitBuilder::::new(circuit_config); + + let a_bigu = BigUint::from_str( + "1216495682195235861952885506871698490232894470117269383940381148575524314493849307811227440691167647909822763414941" + ).unwrap(); + let b_bigu = BigUint::from_str( + 
"2153848155426317245700560287567131132765685008362732985860101000686875894603366983854567186180519945327668975076337" + ).unwrap(); + + let g1_a = black_box([ + builder.constant_biguint(&a_bigu), + builder.constant_biguint(&b_bigu), + ]); + + let g1_b = black_box([ + builder.constant_biguint(&a_bigu), + builder.constant_biguint(&b_bigu), + ]); + + let point = g1_ecc_aggregate(&mut builder, g1_a.clone(), g1_b.clone()); + + let pk: Vec = [ + 137, 43, 218, 171, 28, 7, 187, 176, 109, 242, 254, 250, 130, 131, 36, 52, 5, 250, 52, 180, + 134, 10, 178, 231, 178, 58, 55, 126, 255, 212, 103, 96, 128, 72, 218, 203, 176, 158, 145, + 7, 181, 216, 163, 154, 82, 112, 159, 221, + ] + .iter() + .map(|f| builder.constant(GoldilocksField::from_canonical_u8(*f))) + .collect(); + + let pk: [Target; 48] = pk.into_iter().collect::>().try_into().unwrap(); + + c.bench_function("aggregation of g1 points on EC", |b| { + b.iter(|| pubkey_to_g1_check(&mut builder, &[point[0].clone(), point[1].clone()], &pk)) + }); +} + +criterion_group!(benches, g1_ecp_aggregation_benchmark); +criterion_main!(benches); diff --git a/casper-finality-proofs/benches/g1_ecp_aggregation_starky_benchmark.rs b/casper-finality-proofs/benches/g1_ecp_aggregation_starky_benchmark.rs new file mode 100644 index 000000000..792619641 --- /dev/null +++ b/casper-finality-proofs/benches/g1_ecp_aggregation_starky_benchmark.rs @@ -0,0 +1,69 @@ +use ark_std::iterable::Iterable; +use casper_finality_proofs::verification::pubkey_to_g1::pubkey_to_g1_check; +use casper_finality_proofs::verification::utils::native_bls::Fp; +use casper_finality_proofs::verification::verify::verify_pubkeys_aggregation; +use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use num_bigint::BigUint; +use plonky2::field::goldilocks_field::GoldilocksField; +use plonky2::field::types::Field; +use plonky2::iop::target::Target; +use plonky2::plonk::config::GenericConfig; +use plonky2::plonk::config::PoseidonGoldilocksConfig; +use 
plonky2x::frontend::uint::num::biguint::CircuitBuilderBiguint; +use std::str::FromStr; + +fn g1_ecp_aggregation_starky_benchmark(c: &mut Criterion) { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + let circuit_config = plonky2::plonk::circuit_data::CircuitConfig::standard_recursion_config(); + let mut builder = plonky2::plonk::circuit_builder::CircuitBuilder::::new(circuit_config); + + let a_bigu = BigUint::from_str( + "1216495682195235861952885506871698490232894470117269383940381148575524314493849307811227440691167647909822763414941" + ).unwrap(); + let b_bigu = BigUint::from_str( + "2153848155426317245700560287567131132765685008362732985860101000686875894603366983854567186180519945327668975076337" + ).unwrap(); + + let a_fp = black_box(Fp::get_fp_from_biguint(a_bigu.clone())); + let b_fp = black_box(Fp::get_fp_from_biguint(b_bigu.clone())); + + let ec_proof = verify_pubkeys_aggregation(vec![[a_fp, b_fp]], [a_fp, b_fp], vec![true]); + + let ec_proof_pub_inputs = ec_proof.0.public_inputs; + for i in 0..12 { + println!("g1_x_input is: {:?}", ec_proof_pub_inputs[i].0) + } + + let g1_x_input = builder.constant_biguint(&BigUint::new( + ec_proof_pub_inputs[0..12] + .iter() + .map(|x| x.0 as u32) + .collect(), + )); + let g1_y_input = builder.constant_biguint(&BigUint::new( + ec_proof_pub_inputs[12..24] + .iter() + .map(|x| x.0 as u32) + .collect(), + )); + + let pk: Vec = [ + 137, 43, 218, 171, 28, 7, 187, 176, 109, 242, 254, 250, 130, 131, 36, 52, 5, 250, 52, 180, + 134, 10, 178, 231, 178, 58, 55, 126, 255, 212, 103, 96, 128, 72, 218, 203, 176, 158, 145, + 7, 181, 216, 163, 154, 82, 112, 159, 221, + ] + .iter() + .map(|f| builder.constant(GoldilocksField::from_canonical_u8(f))) + .collect(); + + let pk: [Target; 48] = pk.into_iter().collect::>().try_into().unwrap(); + + c.bench_function("aggregation of g1 points on EC with stark", |b| { + b.iter(|| pubkey_to_g1_check(&mut builder, &[g1_x_input.clone(), g1_y_input.clone()], &pk)) + }); +} + 
+criterion_group!(benches, g1_ecp_aggregation_starky_benchmark); +criterion_main!(benches); diff --git a/casper-finality-proofs/bin/test_biguint.rs b/casper-finality-proofs/bin/test_biguint.rs new file mode 100644 index 000000000..154c70948 --- /dev/null +++ b/casper-finality-proofs/bin/test_biguint.rs @@ -0,0 +1,11 @@ +use std::str::FromStr; + +use num_bigint::BigUint; + +fn main() { + let a = BigUint::from_str("1015072001812290770271495995578254894147382487313523610684315265448920391983183057185266070149383515536696015791412").unwrap(); + + println!("A: {}", a); + + println!("A limbs: {:?}", a.to_u32_digits()); +} diff --git a/casper-finality-proofs/src/lib.rs b/casper-finality-proofs/src/lib.rs index 94ff90e88..30f4ad672 100644 --- a/casper-finality-proofs/src/lib.rs +++ b/casper-finality-proofs/src/lib.rs @@ -4,4 +4,5 @@ pub mod prove_finality; pub mod test_engine; pub mod types; mod utils; +pub mod verification; pub mod weigh_justification_and_finalization; diff --git a/casper-finality-proofs/src/utils/plonky2x_extensions.rs b/casper-finality-proofs/src/utils/plonky2x_extensions.rs index 9701e537f..c8e389b83 100644 --- a/casper-finality-proofs/src/utils/plonky2x_extensions.rs +++ b/casper-finality-proofs/src/utils/plonky2x_extensions.rs @@ -78,3 +78,25 @@ pub fn shift_right, const D: usize>( new_bits } + +/// Split the given integer into a list of wires, where each one represents a +/// bit of the integer, with little-endian ordering. 
+pub fn variable_to_le_bits, const D: usize>( + builder: &mut CircuitBuilder, + variable: Variable, + num_bits: usize, +) -> Vec { + builder + .api + .split_le(variable.0, num_bits) + .into_iter() + .map(|v| BoolVariable::from(v)) + .collect() +} + +pub fn assert_zero, const D: usize>( + builder: &mut CircuitBuilder, + variable: Variable, +) { + builder.api.assert_zero(variable.0) +} diff --git a/casper-finality-proofs/src/verification/aggregation/hash_to_curve.rs b/casper-finality-proofs/src/verification/aggregation/hash_to_curve.rs new file mode 100644 index 000000000..1201c7f0e --- /dev/null +++ b/casper-finality-proofs/src/verification/aggregation/hash_to_curve.rs @@ -0,0 +1,866 @@ +use std::str::FromStr; + +use num_bigint::{BigUint, ToBigUint}; +use plonky2::{ + field::extension::Extendable, + hash::hash_types::RichField, + iop::{ + generator::{GeneratedValues, SimpleGenerator}, + target::{BoolTarget, Target}, + witness::{PartitionWitness, WitnessWrite}, + }, + plonk::circuit_data::CommonCircuitData, + util::serialization::{Buffer, IoResult, Read, Write}, +}; +use plonky2x::{ + backend::circuit::PlonkParameters, + frontend::{ + builder::CircuitBuilder, + uint::num::{ + biguint::{ + BigUintTarget, CircuitBuilderBiguint, GeneratedValuesBigUint, WitnessBigUint, + }, + u32::gadgets::arithmetic_u32::U32Target, + }, + vars::ByteVariable, + }, +}; + +use crate::verification::{ + curves::g2::{g2_add, g2_double, g2_negate, g2_scalar_mul, PointG2Target}, + fields::{ + fp::{mul_fp, LIMBS}, + fp2::{ + add_fp2, div_fp2, frobenius_map, is_zero, mul_fp2, negate_fp2, range_check_fp2, + sgn0_fp2, Fp2Target, + }, + }, + utils::native_bls::{modulus, Fp, Fp2, Pow}, +}; + +use super::hash_to_field::hash_to_field; + +pub const ISOGENY_COEFFICIENTS_G2: [[[&str; 2]; 4]; 4] = [ + [ + [ + "3557697382419259905260257622876359250272784728834673675850718343221361467102966990615722337003569479144794908942033", + "0", + ], + [ + 
"2668273036814444928945193217157269437704588546626005256888038757416021100327225242961791752752677109358596181706526", + "1334136518407222464472596608578634718852294273313002628444019378708010550163612621480895876376338554679298090853261", + ], + [ + "0", + "2668273036814444928945193217157269437704588546626005256888038757416021100327225242961791752752677109358596181706522", + ], + [ + "889424345604814976315064405719089812568196182208668418962679585805340366775741747653930584250892369786198727235542", + "889424345604814976315064405719089812568196182208668418962679585805340366775741747653930584250892369786198727235542", + ], + ], + [ + [ + "0", + "0", + ], + [ + "1", + "0", + ], + [ + "12", + "4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559775", + ], + [ + "0", + "4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559715", + ], + ], + [ + [ + "2816510427748580758331037284777117739799287910327449993381818688383577828123182200904113516794492504322962636245776", + "0", + ], + [ + "2668273036814444928945193217157269437704588546626005256888038757416021100327225242961791752752677109358596181706524", + "1334136518407222464472596608578634718852294273313002628444019378708010550163612621480895876376338554679298090853263", + ], + [ + "0", + "889424345604814976315064405719089812568196182208668418962679585805340366775741747653930584250892369786198727235518", + ], + [ + "3261222600550988246488569487636662646083386001431784202863158481286248011511053074731078808919938689216061999863558", + "3261222600550988246488569487636662646083386001431784202863158481286248011511053074731078808919938689216061999863558", + ], + ], + [ + [ + "1", + "0", + ], + [ + "18", + "4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559769", + ], + [ + "0", + 
"4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559571", + ], + [ + "4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559355", + "4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559355", + ], + ], +]; + +pub fn map_to_curve_simple_swu_9mod16, const D: usize>( + builder: &mut CircuitBuilder, + t: &Fp2Target, +) -> PointG2Target { + let zero = builder.api.zero(); + + let iso_3_a = [ + builder.api.constant_biguint(&0.to_biguint().unwrap()), + builder.api.constant_biguint(&240.to_biguint().unwrap()), + ]; + let iso_3_b = [ + builder.api.constant_biguint(&1012.to_biguint().unwrap()), + builder.api.constant_biguint(&1012.to_biguint().unwrap()), + ]; + let iso_3_z = [ + builder.api.constant_biguint(&(modulus() - 2u32)), + builder.api.constant_biguint(&(modulus() - 1u32)), + ]; + let one = [ + builder.api.constant_biguint(&1.to_biguint().unwrap()), + builder.api.constant_biguint(&0.to_biguint().unwrap()), + ]; + + let t2 = mul_fp2(builder, &t, &t); + let iso_3_z_t2 = mul_fp2(builder, &iso_3_z, &t2); + let iso_3_z_t2_2 = mul_fp2(builder, &iso_3_z_t2, &iso_3_z_t2); + let ztzt = add_fp2(builder, &iso_3_z_t2, &iso_3_z_t2_2); + let iso_3_a_ztzt = mul_fp2(builder, &iso_3_a, &ztzt); + let denominator_tmp = negate_fp2(builder, &iso_3_a_ztzt); + let ztzt_1 = add_fp2(builder, &ztzt, &one); + let numerator = mul_fp2(builder, &iso_3_b, &ztzt_1); + + let cmp = is_zero(builder, &denominator_tmp); + let iso_3_z_iso_3_a = [ + builder.api.constant_biguint(&240.to_biguint().unwrap()), + builder.api.constant_biguint(&(modulus() - 480u32)), + ]; + let denominator = [ + BigUintTarget { + limbs: (0..LIMBS) + .into_iter() + .map(|i| { + U32Target::from_target_unsafe(if i < iso_3_z_iso_3_a[0].num_limbs() { + builder.api.select( + cmp.into(), + iso_3_z_iso_3_a[0].limbs[i].target, + denominator_tmp[0].limbs[i].target, 
+ ) + } else { + builder + .api + .select(cmp.into(), zero, denominator_tmp[0].limbs[i].target) + }) + }) + .collect::>(), + }, + BigUintTarget { + limbs: (0..LIMBS) + .into_iter() + .map(|i| { + U32Target::from_target_unsafe(builder.api.select( + cmp.into(), + iso_3_z_iso_3_a[1].limbs[i].target, + denominator_tmp[1].limbs[i].target, + )) + }) + .collect::>(), + }, + ]; + let x0 = div_fp2(builder, &numerator, &denominator); + let x0_2 = mul_fp2(builder, &x0, &x0); + let x0_3 = mul_fp2(builder, &x0_2, &x0); + let a_x0 = mul_fp2(builder, &iso_3_a, &x0); + let x0_3_a_x0 = add_fp2(builder, &x0_3, &a_x0); + let gx0 = add_fp2(builder, &x0_3_a_x0, &iso_3_b); + + let x1 = mul_fp2(builder, &iso_3_z_t2, &x0); + let xi3t6 = mul_fp2(builder, &iso_3_z_t2_2, &iso_3_z_t2); + let gx1 = mul_fp2(builder, &xi3t6, &gx0); + + let is_square = builder.api.add_virtual_bool_target_unsafe(); + let sqrt = [ + builder.api.add_virtual_biguint_target_unsafe(LIMBS), + builder.api.add_virtual_biguint_target_unsafe(LIMBS), + ]; + + builder.api.add_simple_generator(SqrtGenerator { + t: t.clone(), + x0: gx0.clone(), + x1: gx1.clone(), + is_square, + sqrt: sqrt.clone(), + }); + + builder.api.assert_bool(is_square); + range_check_fp2(builder, &sqrt); + let sqrt2 = mul_fp2(builder, &sqrt, &sqrt); + let gx0_gx1_select = [ + BigUintTarget { + limbs: (0..LIMBS) + .into_iter() + .map(|i| { + U32Target::from_target_unsafe(builder.api.select( + is_square.into(), + gx0[0].limbs[i].target, + gx1[0].limbs[i].target, + )) + }) + .collect::>(), + }, + BigUintTarget { + limbs: (0..LIMBS) + .into_iter() + .map(|i| { + U32Target::from_target_unsafe(builder.api.select( + is_square.into(), + gx0[1].limbs[i].target, + gx1[1].limbs[i].target, + )) + }) + .collect::>(), + }, + ]; + builder.api.connect_biguint(&gx0_gx1_select[0], &sqrt2[0]); + builder.api.connect_biguint(&gx0_gx1_select[1], &sqrt2[1]); + + let sgn_t = sgn0_fp2(builder, t); + let sgn_sqrt = sgn0_fp2(builder, &sqrt); + let sgn_eq = 
builder.api.is_equal(sgn_t.variable.0, sgn_sqrt.variable.0); + let sqrt_negate = negate_fp2(builder, &sqrt); + let y = [ + BigUintTarget { + limbs: (0..LIMBS) + .into_iter() + .map(|i| { + U32Target::from_target_unsafe(builder.api.select( + sgn_eq.into(), + sqrt[0].limbs[i].target, + sqrt_negate[0].limbs[i].target, + )) + }) + .collect::>(), + }, + BigUintTarget { + limbs: (0..LIMBS) + .into_iter() + .map(|i| { + U32Target::from_target_unsafe(builder.api.select( + sgn_eq, + sqrt[1].limbs[i].target, + sqrt_negate[1].limbs[i].target, + )) + }) + .collect::>(), + }, + ]; + let x0_x1_select = [ + BigUintTarget { + limbs: (0..LIMBS) + .into_iter() + .map(|i| { + U32Target::from_target_unsafe(builder.api.select( + is_square, + x0[0].limbs[i].target, + x1[0].limbs[i].target, + )) + }) + .collect::>(), + }, + BigUintTarget { + limbs: (0..LIMBS) + .into_iter() + .map(|i| { + U32Target::from_target_unsafe(builder.api.select( + is_square, + x0[1].limbs[i].target, + x1[1].limbs[i].target, + )) + }) + .collect::>(), + }, + ]; + + [x0_x1_select, y] +} + +pub fn isogeny_map, const D: usize>( + builder: &mut CircuitBuilder, + input: &PointG2Target, +) -> PointG2Target { + let x = &input[0]; + let x_sq = mul_fp2(builder, x, x); + let x_cu = mul_fp2(builder, &x_sq, x); + + let coeffs = ISOGENY_COEFFICIENTS_G2 + .iter() + .map(|c_arr| { + c_arr + .iter() + .map(|c| { + let c0 = BigUint::from_str(c[0]).unwrap(); + let c1 = BigUint::from_str(c[1]).unwrap(); + [ + builder.api.constant_biguint(&c0), + builder.api.constant_biguint(&c1), + ] + }) + .collect::>() + }) + .collect::>>(); + + let x_coeffs = mul_fp2(builder, x, &coeffs[0][2]); + let x_sq_coeffs = mul_fp2(builder, &x_sq, &coeffs[0][1]); + let x_cu_coeffs = mul_fp2(builder, &x_cu, &coeffs[0][0]); + let x_num = add_fp2(builder, &coeffs[0][3], &x_coeffs); + let x_num = add_fp2(builder, &x_num, &x_sq_coeffs); + let x_num = add_fp2(builder, &x_num, &x_cu_coeffs); + + let x_coeffs = mul_fp2(builder, x, &coeffs[1][2]); + let x_den = 
add_fp2(builder, &coeffs[1][3], &x_coeffs); + let x_den = add_fp2(builder, &x_den, &x_sq); + + let x_coeffs = mul_fp2(builder, x, &coeffs[2][2]); + let x_sq_coeffs = mul_fp2(builder, &x_sq, &coeffs[2][1]); + let x_cu_coeffs = mul_fp2(builder, &x_cu, &coeffs[2][0]); + let y_num = add_fp2(builder, &coeffs[2][3], &x_coeffs); + let y_num = add_fp2(builder, &y_num, &x_sq_coeffs); + let y_num = add_fp2(builder, &y_num, &x_cu_coeffs); + + let x_coeffs = mul_fp2(builder, x, &coeffs[3][2]); + let x_sq_coeffs = mul_fp2(builder, &x_sq, &coeffs[3][1]); + let y_den = add_fp2(builder, &coeffs[3][3], &x_coeffs); + let y_den = add_fp2(builder, &y_den, &x_sq_coeffs); + let y_den = add_fp2(builder, &y_den, &x_cu); + + let x_new = div_fp2(builder, &x_num, &x_den); + let y_coeff = div_fp2(builder, &y_num, &y_den); + let y_new = mul_fp2(builder, &input[1], &y_coeff); + + [x_new, y_new] +} + +pub fn endomorphism_psi, const D: usize>( + builder: &mut CircuitBuilder, + inp: &PointG2Target, +) -> PointG2Target { + let c0 = [ + builder.api.constant_biguint(&BigUint::from_str("0").unwrap()), + builder.api.constant_biguint(&BigUint::from_str("4002409555221667392624310435006688643935503118305586438271171395842971157480381377015405980053539358417135540939437").unwrap()), + ]; + let c1 = [ + builder.api.constant_biguint(&BigUint::from_str("2973677408986561043442465346520108879172042883009249989176415018091420807192182638567116318576472649347015917690530").unwrap()), + builder.api.constant_biguint(&BigUint::from_str("1028732146235106349975324479215795277384839936929757896155643118032610843298655225875571310552543014690878354869257").unwrap()), + ]; + let frob = [ + frobenius_map(builder, &inp[0], 1), + frobenius_map(builder, &inp[1], 1), + ]; + [ + mul_fp2(builder, &c0, &frob[0]), + mul_fp2(builder, &c1, &frob[1]), + ] +} + +pub fn endomorphism_psi2, const D: usize>( + builder: &mut CircuitBuilder, + inp: &PointG2Target, +) -> PointG2Target { + let c = 
builder.api.constant_biguint(&BigUint::from_str("4002409555221667392624310435006688643935503118305586438271171395842971157480381377015405980053539358417135540939436").unwrap()); + [ + [ + mul_fp(builder, &inp[0][0], &c), + mul_fp(builder, &inp[0][1], &c), + ], + negate_fp2(builder, &inp[1]), + ] +} + +pub fn clear_cofactor_g2, const D: usize>( + builder: &mut CircuitBuilder, + inp: &PointG2Target, +) -> PointG2Target { + let a = [ + builder + .api + .constant_biguint(&BigUint::from_str("0").unwrap()), + builder + .api + .constant_biguint(&BigUint::from_str("0").unwrap()), + ]; + let b = [ + builder + .api + .constant_biguint(&BigUint::from_str("4").unwrap()), + builder + .api + .constant_biguint(&BigUint::from_str("4").unwrap()), + ]; + let fals = builder._false(); + let x_p = g2_scalar_mul(builder, inp, &b); + let psi_p = endomorphism_psi(builder, inp); + let neg_p = g2_negate(builder, &inp); + let neg_psi_p = g2_negate(builder, &psi_p); + let double_p = g2_double(builder, &inp, &a, &b); + let psi2_2p = endomorphism_psi2(builder, &double_p); + + let add0 = g2_add(builder, &x_p, fals, &inp, fals, &a, &b); + let add1 = g2_add(builder, &add0, fals, &neg_psi_p, fals, &a, &b); + let x_add1 = g2_scalar_mul(builder, &add1, &b); + let add2 = g2_add(builder, &x_add1, fals, &neg_p, fals, &a, &b); + let add3 = g2_add(builder, &add2, fals, &neg_psi_p, fals, &a, &b); + let add4 = g2_add(builder, &add3, fals, &psi2_2p, fals, &a, &b); + add4 +} + +pub fn hash_to_curve, const D: usize>( + builder: &mut CircuitBuilder, + msg: &[ByteVariable], +) -> PointG2Target { + let iso_3_a = [ + builder.api.constant_biguint(&0.to_biguint().unwrap()), + builder.api.constant_biguint(&240.to_biguint().unwrap()), + ]; + let iso_3_b = [ + builder.api.constant_biguint(&1012.to_biguint().unwrap()), + builder.api.constant_biguint(&1012.to_biguint().unwrap()), + ]; + + let u = hash_to_field(builder, msg, 2); + let pt1 = map_to_curve_simple_swu_9mod16(builder, &u[0]); + let pt2 = 
map_to_curve_simple_swu_9mod16(builder, &u[1]); + let no = builder._false(); + let pt1pt2 = g2_add( + builder, + &pt1, + no.into(), + &pt2, + no.into(), + &iso_3_a, + &iso_3_b, + ); + let isogeny_mapping = isogeny_map(builder, &pt1pt2); + let clear_cofactor = clear_cofactor_g2(builder, &isogeny_mapping); + + clear_cofactor +} + +#[derive(Debug, Default)] +pub struct SqrtGenerator { + t: Fp2Target, + x0: Fp2Target, + x1: Fp2Target, + is_square: BoolTarget, + sqrt: Fp2Target, +} + +impl, const D: usize> SimpleGenerator for SqrtGenerator { + fn id(&self) -> String { + "Fp2SqrtGenerator".to_string() + } + + fn dependencies(&self) -> Vec { + self.t + .iter() + .chain(self.x0.iter().chain(self.x1.iter())) + .flat_map(|f| f.limbs.iter().map(|l| l.target)) + .collect::>() + } + + fn run_once(&self, witness: &PartitionWitness, out_buffer: &mut GeneratedValues) { + let x0_c0 = witness.get_biguint_target(self.x0[0].clone()); + let x0_c1 = witness.get_biguint_target(self.x0[1].clone()); + + let x0_fp2 = Fp2([ + Fp::get_fp_from_biguint(x0_c0), + Fp::get_fp_from_biguint(x0_c1), + ]); + let p2_7_16 = (modulus().pow(2) + 7u32) / 16u32; + let sqrt_candidate = x0_fp2.pow(Fp2::one(), p2_7_16); + let roots = Fp2::roots_of_unity_8th(); + let mut is_square = false; + let mut sqrt_witness = Fp2::zero(); + for root in roots { + let sqrt_tmp = sqrt_candidate * root; + if sqrt_tmp * sqrt_tmp == x0_fp2 { + is_square = true; + sqrt_witness = sqrt_tmp; + break; + } + } + out_buffer.set_bool_target(self.is_square, is_square); + if is_square { + out_buffer.set_biguint_target(&self.sqrt[0], &sqrt_witness.0[0].to_biguint()); + out_buffer.set_biguint_target(&self.sqrt[1], &sqrt_witness.0[1].to_biguint()); + return; + } + + let t_c0 = witness.get_biguint_target(self.t[0].clone()); + let t_c1 = witness.get_biguint_target(self.t[1].clone()); + let t_fp2 = Fp2([Fp::get_fp_from_biguint(t_c0), Fp::get_fp_from_biguint(t_c1)]); + + let x1_c0 = witness.get_biguint_target(self.x1[0].clone()); + let x1_c1 = 
witness.get_biguint_target(self.x1[1].clone()); + let x1_fp2 = Fp2([ + Fp::get_fp_from_biguint(x1_c0), + Fp::get_fp_from_biguint(x1_c1), + ]); + + let t3 = t_fp2 * t_fp2 * t_fp2; + let sqrt_candidate = sqrt_candidate * t3; + let etas = Fp2::etas(); + let mut is_square1 = false; + for eta in etas { + let sqrt_tmp = sqrt_candidate * eta; + if sqrt_tmp * sqrt_tmp == x1_fp2 { + is_square1 = true; + sqrt_witness = sqrt_tmp; + break; + } + } + assert!(is_square1, "Failed in square root generator"); + out_buffer.set_biguint_target(&self.sqrt[0], &sqrt_witness.0[0].to_biguint()); + out_buffer.set_biguint_target(&self.sqrt[1], &sqrt_witness.0[1].to_biguint()); + } + + fn serialize(&self, dst: &mut Vec, _common_data: &CommonCircuitData) -> IoResult<()> { + self.t[0].serialize(dst)?; + self.t[1].serialize(dst)?; + self.x0[0].serialize(dst)?; + self.x0[1].serialize(dst)?; + self.x1[0].serialize(dst)?; + self.x1[1].serialize(dst)?; + dst.write_target_bool(self.is_square)?; + self.sqrt[0].serialize(dst)?; + self.sqrt[1].serialize(dst) + } + + fn deserialize(src: &mut Buffer, _common_data: &CommonCircuitData) -> IoResult + where + Self: Sized, + { + let t_c0 = BigUintTarget::deserialize(src)?; + let t_c1 = BigUintTarget::deserialize(src)?; + let x0_c0 = BigUintTarget::deserialize(src)?; + let x0_c1 = BigUintTarget::deserialize(src)?; + let x1_c0 = BigUintTarget::deserialize(src)?; + let x1_c1 = BigUintTarget::deserialize(src)?; + let is_square = src.read_target_bool()?; + let sqrt_c0 = BigUintTarget::deserialize(src)?; + let sqrt_c1 = BigUintTarget::deserialize(src)?; + Ok(Self { + t: [t_c0, t_c1], + x0: [x0_c0, x0_c1], + x1: [x1_c0, x1_c1], + is_square, + sqrt: [sqrt_c0, sqrt_c1], + }) + } +} + +#[cfg(test)] +mod tests { + use std::{str::FromStr, time::Instant}; + + use itertools::Itertools; + use num_bigint::BigUint; + use plonky2::field::{ + goldilocks_field::GoldilocksField, + types::{Field, Field64}, + }; + use plonky2x::frontend::{ + builder::DefaultBuilder, + 
uint::num::biguint::CircuitBuilderBiguint, + vars::{ByteVariable, Variable}, + }; + + use crate::verification::{ + aggregation::hash_to_curve::map_to_curve_simple_swu_9mod16, fields::fp::LIMBS, + }; + + use super::{clear_cofactor_g2, hash_to_curve, isogeny_map}; + + #[test] + fn test_hash_to_curve() { + let mut builder = DefaultBuilder::new(); + let msg = vec![ + 103, 140, 163, 210, 238, 252, 75, 8, 227, 27, 60, 229, 125, 150, 241, 222, 217, 156, + 178, 17, 14, 199, 15, 172, 94, 179, 249, 0, 197, 206, 104, 200, 165, 253, 55, 147, 171, + 191, 118, 189, 133, 138, 2, 22, 237, 6, 62, 10, 68, 105, 208, 102, 66, 70, 170, 114, + 194, 80, 215, 5, 63, 95, 202, 1, 99, 153, 67, 115, 7, 122, 235, 255, 142, 44, 3, 65, + 190, 166, 218, 72, 230, 196, 24, 88, 146, 193, 211, 90, 37, 173, 71, 152, 21, 226, 89, + 79, 239, 81, 149, 135, 188, 51, 52, 116, 26, 30, 126, 31, 35, 240, 201, 101, 33, 61, + 220, 192, 86, 47, 214, 243, 224, 136, 50, 56, 42, 233, 148, 244, 203, 198, 195, 120, + 36, 221, 181, 53, 160, 58, 167, 131, 216, 183, 83, 232, 151, 87, 46, 54, 128, 123, 231, + 212, 130, 19, 28, 96, 108, 111, 137, 154, 40, 184, 74, 69, 100, 64, 177, 98, 248, 32, + 12, 97, 49, 187, 39, 159, 168, 247, 29, 246, 209, 110, 77, 73, 20, 23, 174, 143, 93, + 92, 162, 48, 134, 119, 213, 139, 234, 205, 91, 113, 204, 121, 57, 4, 41, 180, 144, 76, + 107, 59, 176, 43, 11, 127, 34, 38, 164, 9, 141, 78, 245, 175, 145, 112, 129, 109, 18, + 250, 85, 16, 124, 182, 242, 158, 84, 219, 13, 207, 186, 82, 157, 132, 225, 236, 45, + 185, 228, 161, 169, 106, 25, 155, 251, 254, 223, + ] + .iter() + .map(|b| { + let b_v = builder.constant(GoldilocksField::from_canonical_u8(*b)); + ByteVariable::from_variable(&mut builder, b_v) + }) + .collect::>(); + let hash_to_curve_res = hash_to_curve(&mut builder, &msg); + + // Define your circuit. 
+ let mut res_output: Vec = Vec::new(); + for i in 0..hash_to_curve_res.len() { + for j in 0..hash_to_curve_res[i].len() { + for k in 0..LIMBS { + builder.write(Variable(hash_to_curve_res[i][j].limbs[k].target)); + } + } + } + + // Build your circuit. + let circuit = builder.build(); + + // Write to the circuit input. + let input = circuit.input(); + + // Generate a proof. + let s = Instant::now(); + let (proof, mut output) = circuit.prove(&input); + println!("Time to generate a proof {:?}", s.elapsed()); + + // Verify proof. + circuit.verify(&proof, &input, &output); + + // Read output. + for i in 0..hash_to_curve_res.len() { + for _ in 0..hash_to_curve_res[i].len() { + for _ in 0..LIMBS { + res_output.push(output.read::()) + } + } + } + + let mut biguint_res: Vec = Vec::new(); + for i in 0..4 { + biguint_res.push(BigUint::new( + res_output[(i * 12)..(i * 12) + 12] + .iter() + .map(|f| (f.0 % GoldilocksField::ORDER) as u32) + .collect_vec(), + )); + } + + let expected_biguint_targets = vec![ + BigUint::from_str("263045359310876400672266134102422923142170786488971463144260837991310793708919904974750654695723771449817953534932").unwrap(), + BigUint::from_str("705085714867347375204839501082774976133427291820427587421388912165231801117635419620551803041968063138400265133663").unwrap(), + BigUint::from_str("3303090097836311338780356548102458653001297014651905027382930947462021925827856111160646227318455068671696298599273").unwrap(), + BigUint::from_str("2746000687320669913100540339419677393886381993350402195421358168305846473266968075760380449244083602094512053359154").unwrap() + ]; + + for i in 0..4 { + assert_eq!(biguint_res[i], expected_biguint_targets[i]); + } + } + + #[test] + fn test_map_to_curve_simple_swu_9mod16() { + let mut builder = DefaultBuilder::new(); + let x = [builder.api.constant_biguint(&BigUint::from_str("474682481268733588266168000983897038833463740369371343293271315606510847229825856506681723856424762498931536081381").unwrap()), 
builder.api.constant_biguint(&BigUint::from_str("1366297191634768530389324840135632614622170346303255080801396974208665528754948924260000453159829725659141010218083").unwrap())]; + let new_point = map_to_curve_simple_swu_9mod16(&mut builder, &x); + + // Define your circuit. + let mut res_output: Vec = Vec::new(); + for i in 0..new_point.len() { + for j in 0..new_point[i].len() { + for k in 0..new_point[i][j].limbs.len() { + builder.write(Variable(new_point[i][j].limbs[k].target)); + } + } + } + + // Build your circuit. + let circuit = builder.build(); + + // Write to the circuit input. + let input = circuit.input(); + + // Generate a proof. + let (proof, mut output) = circuit.prove(&input); + // Verify proof. + circuit.verify(&proof, &input, &output); + + // Read output. + for i in 0..new_point.len() { + for j in 0..new_point[i].len() { + for _ in 0..new_point[i][j].limbs.len() { + res_output.push(output.read::()) + } + } + } + + let mut biguint_res: Vec = Vec::new(); + + for i in 0..4 { + biguint_res.push(BigUint::new( + res_output[(i * 12)..((i * 12) + 12)] + .iter() + .map(|f| ((f.0 % GoldilocksField::ORDER) as u32)) + .collect_vec(), + )); + } + + let expected_biguint_targets = vec![ + BigUint::from_str("3060844272194546509744375366937392691364803424242981321948532731206236794105714573248676325992693995641546323869947").unwrap(), + BigUint::from_str("2178088723896136927227615444202612183719092972593095669593917181168791652031398769747908182877951150253834691003695").unwrap(), + BigUint::from_str("2414062066557001374784906001337739211138362843766395178252280511119838997923178981557780591344278921569184403008099").unwrap(), + BigUint::from_str("902142789549649010950853691727709369432566981811071618377331254273490164668206477123333794980363358097421619541372").unwrap() + ]; + + for i in 0..4 { + assert_eq!(biguint_res[i], expected_biguint_targets[i]); + } + } + + #[test] + fn test_isogeny_map() { + let mut builder = DefaultBuilder::new(); + let x = 
[builder.api.constant_biguint(&BigUint::from_str("474682481268733588266168000983897038833463740369371343293271315606510847229825856506681723856424762498931536081381").unwrap()), builder.api.constant_biguint(&BigUint::from_str("1366297191634768530389324840135632614622170346303255080801396974208665528754948924260000453159829725659141010218083").unwrap())]; + let new_point = map_to_curve_simple_swu_9mod16(&mut builder, &x); + let iso_map_r = isogeny_map(&mut builder, &new_point); + + // Define your circuit. + let mut res_output: Vec = Vec::new(); + for i in 0..iso_map_r.len() { + for j in 0..iso_map_r[i].len() { + for k in 0..LIMBS { + builder.write(Variable(iso_map_r[i][j].limbs[k].target)); + } + } + } + + // Build your circuit. + let circuit = builder.build(); + + // Write to the circuit input. + let input = circuit.input(); + + // Generate a proof. + let s = Instant::now(); + let (proof, mut output) = circuit.prove(&input); + println!("Time to generate a proof {:?}", s.elapsed()); + // Verify proof. + let s = Instant::now(); + circuit.verify(&proof, &input, &output); + println!("Time to verify a proof {:?}", s.elapsed()); + + // Read output. 
+ for i in 0..iso_map_r.len() { + for _ in 0..iso_map_r[i].len() { + for _ in 0..LIMBS { + res_output.push(output.read::()) + } + } + } + + let mut biguint_res: Vec = Vec::new(); + + for i in 0..4 { + biguint_res.push(BigUint::new( + res_output[(i * 12)..((i * 12) + 12)] + .iter() + .map(|f| ((f.0 % GoldilocksField::ORDER) as u32)) + .collect_vec(), + )); + } + + let expected_biguint_targets = vec![ + BigUint::from_str("3020098988166152265957458699713409264776064412968511868273334310978607420463777702053743668373252848938048859569472").unwrap(), + BigUint::from_str("1458981974613365650201781947361855472098362440235925030682710979747620221343697516696212172566912716109989777361662").unwrap(), + BigUint::from_str("1834291692231285600047846672091248684005847013394827595644756391313325861691761060706376473203409023894171500990751").unwrap(), + BigUint::from_str("2613278682710607327768853275311538731542148746765923401506548661907721927393566272464025106984186092820519334410455").unwrap() + ]; + + for i in 0..4 { + assert_eq!(biguint_res[i], expected_biguint_targets[i]); + } + } + + #[test] + fn test_clear_cofactor() { + let mut builder = DefaultBuilder::new(); + let x = [builder.api.constant_biguint(&BigUint::from_str("474682481268733588266168000983897038833463740369371343293271315606510847229825856506681723856424762498931536081381").unwrap()), builder.api.constant_biguint(&BigUint::from_str("1366297191634768530389324840135632614622170346303255080801396974208665528754948924260000453159829725659141010218083").unwrap())]; + let new_point = map_to_curve_simple_swu_9mod16(&mut builder, &x); + let iso_map_r = isogeny_map(&mut builder, &new_point); + let clear_cofactor = clear_cofactor_g2(&mut builder, &iso_map_r); + + // Define your circuit. 
+ let mut res_output: Vec = Vec::new(); + for i in 0..clear_cofactor.len() { + for j in 0..clear_cofactor[i].len() { + for k in 0..LIMBS { + builder.write(Variable(clear_cofactor[i][j].limbs[k].target)); + } + } + } + + // Build your circuit. + let circuit = builder.build(); + + // Write to the circuit input. + let input = circuit.input(); + + // Generate a proof. + let (proof, mut output) = circuit.prove(&input); + + // Verify proof. + circuit.verify(&proof, &input, &output); + + // Read output. + for i in 0..clear_cofactor.len() { + for _ in 0..clear_cofactor[i].len() { + for _ in 0..LIMBS { + res_output.push(output.read::()) + } + } + } + + let mut biguint_res: Vec = Vec::new(); + + for i in 0..4 { + biguint_res.push(BigUint::new( + res_output[(i * 12)..((i * 12) + 12)] + .iter() + .map(|f| ((f.0 % GoldilocksField::ORDER) as u32)) + .collect_vec(), + )); + } + + let expected_biguint_targets = vec![ + BigUint::from_str("1333544920615259474714661371327518954416732544068349411293275363187401395459274109080234631197310432595159920946891").unwrap(), + BigUint::from_str("3534898797471258007317464582418403172692698020727006028871480408936368621561281829419543100267410234420500056142147").unwrap(), + BigUint::from_str("3937050676002649672972543005965063406357492217339476444626945930452046333693534966501454095684077919472794301839550").unwrap(), + BigUint::from_str("2505850057307810573716759564908795048162371887702901637040931176762748544745723014444120460791457110594458168503549").unwrap() + ]; + + for i in 0..4 { + assert_eq!(biguint_res[i], expected_biguint_targets[i]); + } + } +} diff --git a/casper-finality-proofs/src/verification/aggregation/hash_to_field.rs b/casper-finality-proofs/src/verification/aggregation/hash_to_field.rs new file mode 100644 index 000000000..de3f02c9c --- /dev/null +++ b/casper-finality-proofs/src/verification/aggregation/hash_to_field.rs @@ -0,0 +1,389 @@ +use crate::verification::{ + fields::{fp::FpTarget, fp2::Fp2Target}, + 
utils::native_bls::modulus, +}; +use num_bigint::BigUint; +use plonky2::field::types::Field; +use plonky2x::{ + backend::circuit::PlonkParameters, + frontend::{ + builder::CircuitBuilder, + uint::num::{ + biguint::{BigUintTarget, CircuitBuilderBiguint}, + u32::gadgets::arithmetic_u32::U32Target, + }, + vars::{ByteVariable, Bytes32Variable, BytesVariable, CircuitVariable}, + }, +}; +use std::iter::Iterator; + +const SHA256_DIGEST_SIZE: u8 = 32; +const DST: &str = "BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_POP_"; +const DST_LEN: usize = DST.len(); +const M: usize = 2; +const L: usize = (381 + 128 + 7) / 8; + +pub fn i2osp, const D: usize, const LENGHT: usize>( + builder: &mut CircuitBuilder, + value: usize, +) -> BytesVariable { + if value >= 1 << (8 * LENGHT) { + assert!(false); + } + + let mut value = value; + + let mut res_u8 = [0; LENGHT]; + + for i in (0..LENGHT).rev() { + res_u8[i] = (value as u8) & 0xff; + value = value >> 8; + } + + builder.constant(res_u8) +} + +pub fn strxor, const D: usize>( + builder: &mut CircuitBuilder, + a: &[ByteVariable], + b: &[ByteVariable], +) -> Vec { + let mut res: Vec = Vec::with_capacity(a.len()); + res.resize(a.len(), ByteVariable::init_unsafe(builder)); + + for i in 0..a.len() { + res[i] = builder.xor(a[i], b[i]); + } + res +} + +pub fn concatenate_bytes(bytes: &[&[ByteVariable]]) -> Vec { + let total_length: usize = bytes.iter().map(|byte| byte.len()).sum(); + let mut result = Vec::with_capacity(total_length); + for byte in bytes { + result.extend_from_slice(byte); + } + result +} + +pub fn expand_message_xmd, const D: usize>( + builder: &mut CircuitBuilder, + msg: &[ByteVariable], + dst: &[ByteVariable], + len_in_bytes: usize, +) -> Vec { + const B_IN_BYTES: u8 = SHA256_DIGEST_SIZE; + const R_IN_BYTES: u8 = B_IN_BYTES * 2; + let ell = (len_in_bytes + B_IN_BYTES as usize - 1) / B_IN_BYTES as usize; + assert!(ell <= 255, "Invalid xmd length"); + + let dst_len_octet_stream = i2osp::(builder, DST_LEN); + let dst_prime = 
concatenate_bytes(&[dst, &dst_len_octet_stream.0]); + let z_pad = i2osp::(builder, 0); + let l_i_b_str = i2osp::(builder, len_in_bytes); + let mut b: Vec = Vec::with_capacity(ell); + b.resize(ell + 1, Bytes32Variable::init_unsafe(builder)); + let temp = i2osp::(builder, 0); + let b_0 = builder.curta_sha256(&concatenate_bytes(&[ + &z_pad.0, + msg, + &l_i_b_str.0, + &temp.0, + &dst_prime.as_slice(), + ])); + let temp = i2osp::(builder, 1); + b[0] = builder.curta_sha256(&concatenate_bytes(&[ + &b_0.as_bytes(), + &temp.0, + &dst_prime.as_slice(), + ])); + + for i in 1..=ell { + let b_0_xor_bi_m1 = strxor(builder, &b_0.as_bytes(), &b[i - 1].as_bytes()); + let i_1_2osp = i2osp::(builder, (i + 1).into()); + let args = [&b_0_xor_bi_m1, i_1_2osp.0.as_slice(), &dst_prime]; + b[i] = builder.curta_sha256(&concatenate_bytes(&args[..])); + } + + let mut r_b: Vec = Vec::with_capacity(b.len() * 32); + for i in 0..b.len() { + for j in 0..32 { + r_b.push(b[i].as_bytes()[j]); + } + } + let pseudo_random_bytes = concatenate_bytes(&[&r_b]); + pseudo_random_bytes[0..len_in_bytes].to_vec() +} + +pub fn hash_to_field, const D: usize>( + builder: &mut CircuitBuilder, + msg: &[ByteVariable], + count: usize, +) -> Vec { + let dst_bytes = DST.as_bytes(); + let len_in_bytes = count * M * L; + + let modulus = builder.api.constant_biguint(&modulus()); + + let dst = dst_bytes + .iter() + .map(|b| { + let b_v = builder.constant(L::Field::from_canonical_u8(*b)); + ByteVariable::from_variable(builder, b_v) + }) + .collect::>(); + + let pseudo_random_bytes = expand_message_xmd(builder, &msg, &dst, len_in_bytes); + let mut u: Vec = Vec::with_capacity(count); + for i in 0..count { + let mut e: Vec = Vec::with_capacity(M); + for j in 0..M { + let elm_offset = L * (j + i * M); + let tv = + octet_stream_to_integer(builder, &pseudo_random_bytes[elm_offset..elm_offset + L]); + let point = builder.api.rem_biguint(&tv, &modulus); + e.push(point); + } + u.push(e.try_into().unwrap()); + } + + u +} + +pub fn 
octet_stream_to_integer, const D: usize>( + builder: &mut CircuitBuilder, + bytes: &[ByteVariable], +) -> BigUintTarget { + let mut result = builder.api.zero_biguint(); + let _256 = builder.api.constant_biguint(&BigUint::from(256u64)); + + for i in 0..bytes.len() { + result = builder.api.mul_biguint(&result, &_256); + let current_byte = bytes[i].to_variable(builder); + let current_byte_biguint = BigUintTarget { + limbs: vec![U32Target::from_target_unsafe(current_byte.0)], + }; + result = builder.api.add_biguint(&result, ¤t_byte_biguint); + } + + result +} + +pub fn string_to_bytes_target, const D: usize, const LENGHT: usize>( + builder: &mut CircuitBuilder, + s: &str, +) -> BytesVariable { + let b = string_to_bytes_native(s); + let mut bytes = [ByteVariable::constant(builder, 0); LENGHT]; + + for i in 0..LENGHT { + let curr_u8 = builder.api.constant(L::Field::from_canonical_u8(b[i])); + bytes[i] = ByteVariable::from_target(builder, curr_u8); + } + + BytesVariable(bytes) +} + +fn string_to_bytes_native(s: &str) -> Vec { + let mut bytes = Vec::with_capacity(s.len()); + for c in s.chars() { + bytes.push(c as u8); + } + bytes +} + +#[cfg(test)] +mod tests { + use std::str::FromStr; + + use super::i2osp; + use crate::verification::aggregation::hash_to_field::{ + expand_message_xmd, hash_to_field, string_to_bytes_target, strxor, DST, + }; + use itertools::Itertools; + use num_bigint::BigUint; + use plonky2::field::{goldilocks_field::GoldilocksField, types::Field}; + use plonky2x::{ + backend::circuit::DefaultParameters, + frontend::{ + builder::DefaultBuilder, + uint::num::biguint::BigUintTarget, + vars::{ByteVariable, BytesVariable, Variable}, + }, + }; + + const D: usize = 2; + + #[test] + fn test_i2osp() { + let mut builder = DefaultBuilder::new(); + let x = i2osp::(&mut builder, 258); + + // Define your circuit. + builder.write(x[0]); + builder.write(x[1]); + + // Build your circuit. + let circuit = builder.build(); + + // Write to the circuit input. 
+ let input = circuit.input(); + + // Generate a proof. + let (proof, mut output) = circuit.prove(&input); + // Verify proof. + circuit.verify(&proof, &input, &output); + + // Read output. + let res = [output.read::(), output.read::()]; + assert_eq!(res[0], 1); + assert_eq!(res[1], 2); + } + + #[test] + fn test_strxor() { + let mut builder = DefaultBuilder::new(); + let x = i2osp::(&mut builder, 258); + let y = i2osp::(&mut builder, 12444); + let z = strxor(&mut builder, &x.0, &y.0); + + // Define your circuit. + builder.write(z[0]); + builder.write(z[1]); + + // Build your circuit. + let circuit = builder.build(); + + // Write to the circuit input. + let input = circuit.input(); + + // Generate a proof. + let (proof, mut output) = circuit.prove(&input); + // Verify proof. + circuit.verify(&proof, &input, &output); + + // Read output. + let res = [output.read::(), output.read::()]; + assert_eq!(res[0], 1); + assert_eq!(res[1], 50); + } + + #[test] + fn test_expand_message_xmd() { + let mut builder = DefaultBuilder::new(); + let msg = vec![1, 2, 3] + .iter() + .map(|b| { + let b_v = builder.constant(GoldilocksField::from_canonical_u8(*b)); + ByteVariable::from_variable(&mut builder, b_v) + }) + .collect::>(); + let dst: BytesVariable<43> = string_to_bytes_target(&mut builder, DST); + let x = expand_message_xmd(&mut builder, &msg, &dst.0, 3); + + // Define your circuit. + builder.write(x[0]); + builder.write(x[1]); + builder.write(x[2]); + + // Build your circuit. + let circuit = builder.build(); + + // Write to the circuit input. + let input = circuit.input(); + + // Generate a proof. + let (proof, mut output) = circuit.prove(&input); + // Verify proof. + circuit.verify(&proof, &input, &output); + + // Read output. 
+ let res = [ + output.read::(), + output.read::(), + output.read::(), + ]; + assert_eq!(res[0], 112); + assert_eq!(res[1], 160); + assert_eq!(res[2], 103); + } + + #[test] + fn test_hash_to_field() { + let mut builder = DefaultBuilder::new(); + let msg = vec![ + 103, 140, 163, 210, 238, 252, 75, 8, 227, 27, 60, 229, 125, 150, 241, 222, 217, 156, + 178, 17, 14, 199, 15, 172, 94, 179, 249, 0, 197, 206, 104, 200, 165, 253, 55, 147, 171, + 191, 118, 189, 133, 138, 2, 22, 237, 6, 62, 10, 68, 105, 208, 102, 66, 70, 170, 114, + 194, 80, 215, 5, 63, 95, 202, 1, 99, 153, 67, 115, 7, 122, 235, 255, 142, 44, 3, 65, + 190, 166, 218, 72, 230, 196, 24, 88, 146, 193, 211, 90, 37, 173, 71, 152, 21, 226, 89, + 79, 239, 81, 149, 135, 188, 51, 52, 116, 26, 30, 126, 31, 35, 240, 201, 101, 33, 61, + 220, 192, 86, 47, 214, 243, 224, 136, 50, 56, 42, 233, 148, 244, 203, 198, 195, 120, + 36, 221, 181, 53, 160, 58, 167, 131, 216, 183, 83, 232, 151, 87, 46, 54, 128, 123, 231, + 212, 130, 19, 28, 96, 108, 111, 137, 154, 40, 184, 74, 69, 100, 64, 177, 98, 248, 32, + 12, 97, 49, 187, 39, 159, 168, 247, 29, 246, 209, 110, 77, 73, 20, 23, 174, 143, 93, + 92, 162, 48, 134, 119, 213, 139, 234, 205, 91, 113, 204, 121, 57, 4, 41, 180, 144, 76, + 107, 59, 176, 43, 11, 127, 34, 38, 164, 9, 141, 78, 245, 175, 145, 112, 129, 109, 18, + 250, 85, 16, 124, 182, 242, 158, 84, 219, 13, 207, 186, 82, 157, 132, 225, 236, 45, + 185, 228, 161, 169, 106, 25, 155, 251, 254, 223, + ] + .iter() + .map(|b| { + let b_v = builder.constant(GoldilocksField::from_canonical_u8(*b)); + ByteVariable::from_variable(&mut builder, b_v) + }) + .collect::>(); + let hash_to_field_res: Vec<[BigUintTarget; 2]> = hash_to_field(&mut builder, &msg, 2); + + // Define your circuit. 
+ let mut res_output: Vec = Vec::new(); + for i in 0..hash_to_field_res.len() { + for j in 0..hash_to_field_res[i].len() { + for k in 0..hash_to_field_res[i][j].limbs.len() { + builder.write(Variable(hash_to_field_res[i][j].limbs[k].target)); + } + } + } + + // Build your circuit. + let circuit = builder.build(); + + // Write to the circuit input. + let input = circuit.input(); + + // Generate a proof. + let (proof, mut output) = circuit.prove(&input); + // Verify proof. + circuit.verify(&proof, &input, &output); + + // Read output. + for i in 0..hash_to_field_res.len() { + for j in 0..hash_to_field_res[i].len() { + for _ in 0..hash_to_field_res[i][j].limbs.len() { + res_output.push(output.read::()) + } + } + } + + let mut biguint_res: Vec = Vec::new(); + for i in 0..4 { + biguint_res.push(BigUint::new( + res_output[(i * 12)..((i * 12) + 12)] + .iter() + .map(|f| f.0 as u32) + .collect_vec(), + )); + } + + let expected_biguint_targets = vec![ + BigUint::from_str("2942673459794500824580128114941241077633586577641169850693178278930447424685332677826186374811489148782362443284608").unwrap(), + BigUint::from_str("961863142708046042273452691523472524074450767124819253154800002018881071828353246847707036179733382702893758998301").unwrap(), + BigUint::from_str("1730253443889188243699347267983827407041125190502469490045674785753813798266321964653512323237347200806418660750026").unwrap(), + BigUint::from_str("373669168086355933912269929736599922994165593229668523008784932595414673068627276883453384670961970510484970528923").unwrap() + ]; + + for i in 0..4 { + assert_eq!(biguint_res[i], expected_biguint_targets[i]); + } + } +} diff --git a/casper-finality-proofs/src/verification/aggregation/mod.rs b/casper-finality-proofs/src/verification/aggregation/mod.rs new file mode 100644 index 000000000..5880db3e1 --- /dev/null +++ b/casper-finality-proofs/src/verification/aggregation/mod.rs @@ -0,0 +1,2 @@ +pub mod hash_to_curve; +pub mod hash_to_field; diff --git 
a/casper-finality-proofs/src/verification/curves/g1.rs b/casper-finality-proofs/src/verification/curves/g1.rs new file mode 100644 index 000000000..35f4df2be --- /dev/null +++ b/casper-finality-proofs/src/verification/curves/g1.rs @@ -0,0 +1,40 @@ +use ark_std::One; +use num_bigint::BigUint; +use plonky2::plonk::{ + circuit_builder::CircuitBuilder, + config::{GenericConfig, PoseidonGoldilocksConfig}, +}; +use plonky2x::frontend::uint::num::biguint::CircuitBuilderBiguint; + +use crate::verification::fields::fp::FpTarget; + +pub type PointG1Target = [FpTarget; 2]; + +const D: usize = 2; +type C = PoseidonGoldilocksConfig; +type F = >::F; + +pub fn g1_ecc_aggregate( + builder: &mut CircuitBuilder, + lhs: PointG1Target, + rhs: PointG1Target, +) -> PointG1Target { + let x1 = lhs[0].clone(); + let y1 = lhs[1].clone(); + let x2 = rhs[0].clone(); + let y2 = rhs[1].clone(); + + let one = builder.constant_biguint(&BigUint::one()); + let u = builder.sub_biguint(&y2, &y1); + let v = builder.sub_biguint(&x2, &x1); + let v_inv = builder.div_biguint(&one, &v); + let s = builder.mul_biguint(&u, &v_inv); + let s_squared = builder.mul_biguint(&s, &s); + let x_sum = builder.add_biguint(&x2, &x1); + let x3 = builder.sub_biguint(&s_squared, &x_sum); + let x_diff = builder.sub_biguint(&x1, &x3); + let prod = builder.mul_biguint(&s, &x_diff); + let y3 = builder.sub_biguint(&prod, &y1); + + [x3, y3] +} diff --git a/casper-finality-proofs/src/verification/curves/g2.rs b/casper-finality-proofs/src/verification/curves/g2.rs new file mode 100644 index 000000000..b10e88b4b --- /dev/null +++ b/casper-finality-proofs/src/verification/curves/g2.rs @@ -0,0 +1,1234 @@ +use num_bigint::{BigUint, ToBigUint}; +use plonky2::field::types::Field; +use plonky2::{ + field::extension::Extendable, + hash::hash_types::RichField, + iop::{generator::SimpleGenerator, target::Target}, +}; +use plonky2x::frontend::vars::{BoolVariable, Variable}; +use plonky2x::{ + backend::circuit::PlonkParameters, + frontend::{ + 
builder::CircuitBuilder, + uint::num::{ + biguint::{ + BigUintTarget, CircuitBuilderBiguint, GeneratedValuesBigUint, WitnessBigUint, + }, + u32::gadgets::arithmetic_u32::{CircuitBuilderU32, U32Target}, + }, + }, +}; +use std::iter::Iterator; +pub const SIG_LEN: usize = 96; + +use crate::verification::fields::fp2::inv_fp2; +use crate::verification::{ + fields::{ + fp::{fp_is_zero, LIMBS}, + fp2::{ + add_fp2, is_equal, is_zero, mul_fp2, negate_fp2, range_check_fp2, sub_fp2, Fp2Target, + }, + }, + utils::native_bls::{get_bls_12_381_parameter, modulus, Fp, Fp2}, +}; +const TWO: usize = 2; +pub type PointG2Target = [Fp2Target; TWO]; + +pub fn g2_add_without_generator, const D: usize>( + builder: &mut CircuitBuilder, + a: &PointG2Target, + b: &PointG2Target, +) -> PointG2Target { + let x1 = &a[0]; + let y1 = &a[1]; + let x2 = &b[0]; + let y2 = &b[1]; + + let u = sub_fp2(builder, &y2, &y1); + let v = sub_fp2(builder, &x2, &x1); + let v_inv = inv_fp2(builder, &v); + let s = mul_fp2(builder, &u, &v_inv); + let s_squared = mul_fp2(builder, &s, &s); + let x_sum = add_fp2(builder, &x2, &x1); + let x3 = sub_fp2(builder, &s_squared, &x_sum); + let x_diff = sub_fp2(builder, &x1, &x3); + let prod = mul_fp2(builder, &s, &x_diff); + let y3 = sub_fp2(builder, &prod, &y1); + + [x3, y3] +} + +pub fn g2_add_unequal, const D: usize>( + builder: &mut CircuitBuilder, + a: &PointG2Target, + b: &PointG2Target, +) -> PointG2Target { + let dy = sub_fp2(builder, &b[1], &a[1]); + let dx = sub_fp2(builder, &b[0], &a[0]); + let outx_c0 = builder.api.add_virtual_biguint_target_unsafe(LIMBS); + let outx_c1 = builder.api.add_virtual_biguint_target_unsafe(LIMBS); + let outy_c0 = builder.api.add_virtual_biguint_target_unsafe(LIMBS); + let outy_c1 = builder.api.add_virtual_biguint_target_unsafe(LIMBS); + let out = [[outx_c0, outx_c1], [outy_c0, outy_c1]]; + builder.api.add_simple_generator(G2AdditionGenerator { + a: a.clone(), + b: b.clone(), + dx: dx.clone(), + dy: dy.clone(), + out: out.clone(), + }); 
+ range_check_fp2(builder, &out[0]); + range_check_fp2(builder, &out[1]); + let dx_sq = mul_fp2(builder, &dx, &dx); + let dy_sq = mul_fp2(builder, &dy, &dy); + + let x1x2 = add_fp2(builder, &a[0], &b[0]); + let x1x2x3 = add_fp2(builder, &x1x2, &out[0]); + let cubic = mul_fp2(builder, &x1x2x3, &dx_sq); + + let cubic_dysq = sub_fp2(builder, &cubic, &dy_sq); + let cubic_dysq_check = is_zero(builder, &cubic_dysq); + builder.api.assert_one(cubic_dysq_check.variable.0); + + let y1y3 = add_fp2(builder, &a[1], &out[1]); + let y1y3dx = mul_fp2(builder, &y1y3, &dx); + + let x1x3 = sub_fp2(builder, &a[0], &out[0]); + let x1x3dy = mul_fp2(builder, &x1x3, &dy); + + let check = is_equal(builder, &y1y3dx, &x1x3dy); + builder.api.assert_one(check.variable.0); + + out +} + +pub fn g2_double, const D: usize>( + builder: &mut CircuitBuilder, + a: &PointG2Target, + iso_3_a: &Fp2Target, + iso_3_b: &Fp2Target, +) -> PointG2Target { + let outx_c0 = builder.api.add_virtual_biguint_target_unsafe(LIMBS); + let outx_c1 = builder.api.add_virtual_biguint_target_unsafe(LIMBS); + let outy_c0 = builder.api.add_virtual_biguint_target_unsafe(LIMBS); + let outy_c1 = builder.api.add_virtual_biguint_target_unsafe(LIMBS); + let out = [[outx_c0, outx_c1], [outy_c0, outy_c1]]; + builder.api.add_simple_generator(G2DoublingGenerator { + a: a.clone(), + iso_3_a: iso_3_a.clone(), + out: out.clone(), + }); + range_check_fp2(builder, &out[0]); + range_check_fp2(builder, &out[1]); + + // point on tangent + let x_sq = mul_fp2(builder, &a[0], &a[0]); + let x_sq2 = add_fp2(builder, &x_sq, &x_sq); + let x_sq3 = add_fp2(builder, &x_sq2, &x_sq); + let x_sq3_a = add_fp2(builder, &x_sq3, iso_3_a); + let x1_x3 = sub_fp2(builder, &a[0], &out[0]); + let right = mul_fp2(builder, &x_sq3_a, &x1_x3); + + let y1_2 = add_fp2(builder, &a[1], &a[1]); + let y1_y3 = add_fp2(builder, &a[1], &out[1]); + let left = mul_fp2(builder, &y1_2, &y1_y3); + + let check = is_equal(builder, &right, &left); + 
builder.api.assert_one(check.variable.0); + + // point on curve + let outx_sq = mul_fp2(builder, &out[0], &out[0]); + let outx_cu = mul_fp2(builder, &outx_sq, &out[0]); + let a_outx = mul_fp2(builder, &out[0], iso_3_a); + let outx_cu_a_outx = add_fp2(builder, &outx_cu, &a_outx); + let right = add_fp2(builder, &outx_cu_a_outx, iso_3_b); + + let left = mul_fp2(builder, &out[1], &out[1]); + + let check = is_equal(builder, &right, &left); + builder.api.assert_one(check.variable.0); + + let check = is_equal(builder, &a[0], &out[0]); + builder.api.assert_zero(check.variable.0); + + out +} + +pub fn g2_add, const D: usize>( + builder: &mut CircuitBuilder, + a: &PointG2Target, + is_infinity_a: BoolVariable, + b: &PointG2Target, + is_infinity_b: BoolVariable, + iso_3_a: &Fp2Target, + iso_3_b: &Fp2Target, +) -> PointG2Target { + let x_equal = is_equal(builder, &a[0], &b[0]); + let y_equal = is_equal(builder, &a[1], &b[1]); + let do_double = builder.and(x_equal, y_equal); + let add_input_b = [ + [ + builder.api.add_virtual_biguint_target_unsafe(LIMBS), + builder.api.add_virtual_biguint_target_unsafe(LIMBS), + ], + [ + builder.api.add_virtual_biguint_target_unsafe(LIMBS), + builder.api.add_virtual_biguint_target_unsafe(LIMBS), + ], + ]; + for i in 0..12 { + if i == 0 { + let zero = builder.api.zero(); + let is_zero = builder.api.is_equal(b[0][0].limbs[i].target, zero); + let select = + builder + .api + .select(do_double.into(), is_zero.target, b[0][0].limbs[i].target); + builder + .api + .connect(add_input_b[0][0].limbs[i].target, select); + } else { + builder + .api + .connect_u32(add_input_b[0][0].limbs[i], b[0][0].limbs[i]); + } + } + builder.api.connect_biguint(&add_input_b[0][1], &b[0][1]); + builder.api.connect_biguint(&add_input_b[1][0], &b[1][0]); + builder.api.connect_biguint(&add_input_b[1][1], &b[1][1]); + let addition = g2_add_unequal(builder, a, &add_input_b); + let doubling = g2_double(builder, a, iso_3_a, iso_3_b); + let both_inf = 
builder.api.and(is_infinity_a.into(), is_infinity_b.into()); + let a_not_inf = builder.api.not(is_infinity_a.into()); + let b_not_inf = builder.api.not(is_infinity_b.into()); + let both_not_inf = builder.api.and(a_not_inf, b_not_inf); + let not_y_equal = builder.not(y_equal); + let a_neg_b = builder.and(x_equal, not_y_equal); + let inverse = builder.api.and(both_not_inf, a_neg_b.into()); + let out_inf = builder.api.or(both_inf, inverse); + builder.api.assert_zero(out_inf.target); + let add_or_double_select = [ + [ + builder.api.add_virtual_biguint_target_unsafe(LIMBS), + builder.api.add_virtual_biguint_target_unsafe(LIMBS), + ], + [ + builder.api.add_virtual_biguint_target_unsafe(LIMBS), + builder.api.add_virtual_biguint_target_unsafe(LIMBS), + ], + ]; + for i in 0..2 { + for j in 0..2 { + for k in 0..LIMBS { + let s = builder.api.select( + do_double.into(), + doubling[i][j].limbs[k].target, + addition[i][j].limbs[k].target, + ); + builder + .api + .connect(add_or_double_select[i][j].limbs[k].target, s); + } + } + } + let a_inf_select = [ + [ + builder.api.add_virtual_biguint_target_unsafe(LIMBS), + builder.api.add_virtual_biguint_target_unsafe(LIMBS), + ], + [ + builder.api.add_virtual_biguint_target_unsafe(LIMBS), + builder.api.add_virtual_biguint_target_unsafe(LIMBS), + ], + ]; + for i in 0..2 { + for j in 0..2 { + for k in 0..LIMBS { + let s = builder.api.select( + is_infinity_a.into(), + b[i][j].limbs[k].target, + add_or_double_select[i][j].limbs[k].target, + ); + builder.api.connect(a_inf_select[i][j].limbs[k].target, s); + } + } + } + let b_inf_select = [ + [ + builder.api.add_virtual_biguint_target_unsafe(LIMBS), + builder.api.add_virtual_biguint_target_unsafe(LIMBS), + ], + [ + builder.api.add_virtual_biguint_target_unsafe(LIMBS), + builder.api.add_virtual_biguint_target_unsafe(LIMBS), + ], + ]; + for i in 0..2 { + for j in 0..2 { + for k in 0..LIMBS { + let s = builder.api.select( + is_infinity_b.into(), + a[i][j].limbs[k].target, + 
a_inf_select[i][j].limbs[k].target, + ); + builder.api.connect(b_inf_select[i][j].limbs[k].target, s); + } + } + } + + b_inf_select +} + +pub fn g2_negate, const D: usize>( + builder: &mut CircuitBuilder, + p: &PointG2Target, +) -> PointG2Target { + [p[0].clone(), negate_fp2(builder, &p[1])] +} + +pub fn g2_scalar_mul, const D: usize>( + builder: &mut CircuitBuilder, + p: &PointG2Target, + iso_3_b: &Fp2Target, +) -> PointG2Target { + let iso_3_a = [ + builder.api.constant_biguint(&0.to_biguint().unwrap()), + builder.api.constant_biguint(&0.to_biguint().unwrap()), + ]; + let mut r = [ + [ + builder.api.add_virtual_biguint_target_unsafe(LIMBS), + builder.api.add_virtual_biguint_target_unsafe(LIMBS), + ], + [ + builder.api.add_virtual_biguint_target_unsafe(LIMBS), + builder.api.add_virtual_biguint_target_unsafe(LIMBS), + ], + ]; + let fals = builder._false(); + for i in (0..get_bls_12_381_parameter().bits()).rev() { + if i == get_bls_12_381_parameter().bits() - 1 { + for idx in 0..2 { + for jdx in 0..2 { + builder.api.connect_biguint(&r[idx][jdx], &p[idx][jdx]); + } + } + } else { + let pdouble = g2_double(builder, &r, &iso_3_a, iso_3_b); + if !get_bls_12_381_parameter().bit(i) { + r = pdouble; + } else { + r = g2_add( + builder, + &pdouble, + fals.into(), + p, + fals.into(), + &iso_3_a, + iso_3_b, + ); + } + } + } + r +} + +pub fn signature_point_check, const D: usize>( + builder: &mut CircuitBuilder, + point: &PointG2Target, + sig: &[Variable; SIG_LEN], +) { + let msbs = builder.api.split_le(sig[0].0, 8); + let bflag = msbs[6]; + builder.api.assert_zero(bflag.target); + + let aflag = msbs[5]; + + let (x0, x1, y0, y1) = (&point[0][0], &point[0][1], &point[1][0], &point[1][1]); + let y1_zero = fp_is_zero(builder, &y1); + let zero = builder.api.zero_u32(); + let y_select_limbs: Vec = (0..LIMBS) + .into_iter() + .map(|i| { + U32Target::from_target_unsafe(builder.api.select( + y1_zero.into(), + y0.limbs.get(i).unwrap_or(&zero).target, + 
y1.limbs.get(i).unwrap_or(&zero).target, + )) + }) + .collect(); + let y_select = BigUintTarget { + limbs: y_select_limbs, + }; + let two = builder.api.constant_biguint(&2u32.into()); + let y_select_2 = builder.api.mul_biguint(&y_select, &two); + let p = builder.api.constant_biguint(&modulus()); + let y_select_2_p = builder.api.div_biguint(&y_select_2, &p); + for i in 0..y_select_2_p.limbs.len() { + if i == 0 { + builder + .api + .connect(aflag.target, y_select_2_p.limbs[i].target); + } else { + builder.api.connect_u32(y_select_2_p.limbs[i], zero); + } + } + + let z1_limbs: Vec = sig[0..SIG_LEN / 2] + .chunks(4) + .into_iter() + .map(|chunk| { + let zero = builder.api.zero(); + let factor = builder.api.constant(L::Field::from_canonical_u32(256)); + U32Target::from_target_unsafe( + chunk + .iter() + .fold(zero, |acc, c| builder.api.mul_add(acc, factor, c.0)), + ) + }) + .rev() + .collect(); + let z1 = BigUintTarget { limbs: z1_limbs }; + + let z2_limbs: Vec = sig[SIG_LEN / 2..SIG_LEN] + .chunks(4) + .into_iter() + .map(|chunk| { + let zero = builder.api.zero(); + let factor = builder.api.constant(L::Field::from_canonical_u32(256)); + U32Target::from_target_unsafe( + chunk + .iter() + .fold(zero, |acc, c| builder.api.mul_add(acc, factor, c.0)), + ) + }) + .rev() + .collect(); + let z2 = BigUintTarget { limbs: z2_limbs }; + + builder.api.connect_biguint(&x0, &z2); + + let pow_2_383 = builder + .api + .constant_biguint(&(BigUint::from(1u32) << 383u32)); + let pow_2_381 = builder + .api + .constant_biguint(&(BigUint::from(1u32) << 381u32)); + let pow_2_381_or_zero = BigUintTarget { + limbs: (0..LIMBS) + .into_iter() + .map(|i| { + U32Target::from_target_unsafe(builder.api.select( + aflag.into(), + pow_2_381.limbs[i].target, + zero.target, + )) + }) + .collect(), + }; + let flags = builder.api.add_biguint(&pow_2_383, &pow_2_381_or_zero); + let z1_reconstructed = builder.api.add_biguint(x1, &flags); + + builder.api.connect_biguint(&z1, &z1_reconstructed); +} + 
+#[derive(Debug, Default)] +pub struct G2AdditionGenerator { + a: PointG2Target, + b: PointG2Target, + dx: Fp2Target, + dy: Fp2Target, + out: PointG2Target, +} + +impl, const D: usize> SimpleGenerator for G2AdditionGenerator { + fn id(&self) -> String { + "G2AdditionGenerator".to_string() + } + + fn dependencies(&self) -> Vec { + let a_targets = self + .a + .iter() + .flat_map(|f2| { + f2.iter() + .flat_map(|f| f.limbs.iter().map(|l| l.target).collect::>()) + .collect::>() + }) + .collect::>(); + let b_targets = self + .b + .iter() + .flat_map(|f2| { + f2.iter() + .flat_map(|f| f.limbs.iter().map(|l| l.target).collect::>()) + .collect::>() + }) + .collect::>(); + let dx_targets = self + .dx + .iter() + .flat_map(|f| f.limbs.iter().map(|l| l.target).collect::>()) + .collect::>(); + let dy_targets = self + .dy + .iter() + .flat_map(|f| f.limbs.iter().map(|l| l.target).collect::>()) + .collect::>(); + [a_targets, b_targets, dx_targets, dy_targets].concat() + } + + fn run_once( + &self, + witness: &plonky2::iop::witness::PartitionWitness, + out_buffer: &mut plonky2::iop::generator::GeneratedValues, + ) { + let ax = Fp2([ + Fp::get_fp_from_biguint(witness.get_biguint_target(self.a[0][0].clone())), + Fp::get_fp_from_biguint(witness.get_biguint_target(self.a[0][1].clone())), + ]); + let ay = Fp2([ + Fp::get_fp_from_biguint(witness.get_biguint_target(self.a[1][0].clone())), + Fp::get_fp_from_biguint(witness.get_biguint_target(self.a[1][1].clone())), + ]); + let bx = Fp2([ + Fp::get_fp_from_biguint(witness.get_biguint_target(self.b[0][0].clone())), + Fp::get_fp_from_biguint(witness.get_biguint_target(self.b[0][1].clone())), + ]); + let dx = Fp2([ + Fp::get_fp_from_biguint(witness.get_biguint_target(self.dx[0].clone())), + Fp::get_fp_from_biguint(witness.get_biguint_target(self.dx[1].clone())), + ]); + let dy = Fp2([ + Fp::get_fp_from_biguint(witness.get_biguint_target(self.dy[0].clone())), + Fp::get_fp_from_biguint(witness.get_biguint_target(self.dy[1].clone())), + ]); + 
let dx_inv = dx.invert(); + let lambda = dy * dx_inv; + let lambda_sq = lambda * lambda; + let outx = lambda_sq - ax - bx; + let outy = lambda * (ax - outx) - ay; + out_buffer.set_biguint_target(&self.out[0][0], &outx.0[0].to_biguint()); + out_buffer.set_biguint_target(&self.out[0][1], &outx.0[1].to_biguint()); + out_buffer.set_biguint_target(&self.out[1][0], &outy.0[0].to_biguint()); + out_buffer.set_biguint_target(&self.out[1][1], &outy.0[1].to_biguint()); + } + + fn serialize( + &self, + dst: &mut Vec, + _common_data: &plonky2::plonk::circuit_data::CommonCircuitData, + ) -> plonky2::util::serialization::IoResult<()> { + self.a[0][0].serialize(dst)?; + self.a[0][1].serialize(dst)?; + self.a[1][0].serialize(dst)?; + self.a[1][1].serialize(dst)?; + self.b[0][0].serialize(dst)?; + self.b[0][1].serialize(dst)?; + self.b[1][0].serialize(dst)?; + self.b[1][1].serialize(dst)?; + self.dx[0].serialize(dst)?; + self.dx[1].serialize(dst)?; + self.dy[0].serialize(dst)?; + self.dy[1].serialize(dst)?; + self.out[0][0].serialize(dst)?; + self.out[0][1].serialize(dst)?; + self.out[1][0].serialize(dst)?; + self.out[1][1].serialize(dst) + } + + fn deserialize( + src: &mut plonky2::util::serialization::Buffer, + _common_data: &plonky2::plonk::circuit_data::CommonCircuitData, + ) -> plonky2::util::serialization::IoResult + where + Self: Sized, + { + let ax_c0 = BigUintTarget::deserialize(src)?; + let ax_c1 = BigUintTarget::deserialize(src)?; + let ay_c0 = BigUintTarget::deserialize(src)?; + let ay_c1 = BigUintTarget::deserialize(src)?; + let bx_c0 = BigUintTarget::deserialize(src)?; + let bx_c1 = BigUintTarget::deserialize(src)?; + let by_c0 = BigUintTarget::deserialize(src)?; + let by_c1 = BigUintTarget::deserialize(src)?; + let dx_c0 = BigUintTarget::deserialize(src)?; + let dx_c1 = BigUintTarget::deserialize(src)?; + let dy_c0 = BigUintTarget::deserialize(src)?; + let dy_c1 = BigUintTarget::deserialize(src)?; + let outx_c0 = BigUintTarget::deserialize(src)?; + let outx_c1 = 
BigUintTarget::deserialize(src)?; + let outy_c0 = BigUintTarget::deserialize(src)?; + let outy_c1 = BigUintTarget::deserialize(src)?; + Ok(Self { + a: [[ax_c0, ax_c1], [ay_c0, ay_c1]], + b: [[bx_c0, bx_c1], [by_c0, by_c1]], + dx: [dx_c0, dx_c1], + dy: [dy_c0, dy_c1], + out: [[outx_c0, outx_c1], [outy_c0, outy_c1]], + }) + } +} + +#[derive(Debug, Default)] +pub struct G2DoublingGenerator { + a: PointG2Target, + iso_3_a: Fp2Target, + out: PointG2Target, +} + +impl, const D: usize> SimpleGenerator for G2DoublingGenerator { + fn id(&self) -> String { + "G2DoublingGenerator".to_string() + } + + fn dependencies(&self) -> Vec { + let a_targets = self + .a + .iter() + .flat_map(|f2| { + f2.iter() + .flat_map(|f| f.limbs.iter().map(|l| l.target).collect::>()) + .collect::>() + }) + .collect::>(); + let iso_3_a_targets = self + .iso_3_a + .iter() + .flat_map(|f| f.limbs.iter().map(|l| l.target).collect::>()) + .collect::>(); + [a_targets, iso_3_a_targets].concat() + } + + fn run_once( + &self, + witness: &plonky2::iop::witness::PartitionWitness, + out_buffer: &mut plonky2::iop::generator::GeneratedValues, + ) { + let iso_3_a = Fp2([ + Fp::get_fp_from_biguint(witness.get_biguint_target(self.iso_3_a[0].clone())), + Fp::get_fp_from_biguint(witness.get_biguint_target(self.iso_3_a[1].clone())), + ]); + let ax = Fp2([ + Fp::get_fp_from_biguint(witness.get_biguint_target(self.a[0][0].clone())), + Fp::get_fp_from_biguint(witness.get_biguint_target(self.a[0][1].clone())), + ]); + let ay = Fp2([ + Fp::get_fp_from_biguint(witness.get_biguint_target(self.a[1][0].clone())), + Fp::get_fp_from_biguint(witness.get_biguint_target(self.a[1][1].clone())), + ]); + let lambda_num = iso_3_a + ax * ax * Fp::get_fp_from_biguint(3u32.into()); + let lambda_denom = ay + ay; + let lambda = lambda_num / lambda_denom; + let lambda_sq = lambda * lambda; + let outx = lambda_sq - ax - ax; + let outy = lambda * (ax - outx) - ay; + out_buffer.set_biguint_target(&self.out[0][0], &outx.0[0].to_biguint()); + 
out_buffer.set_biguint_target(&self.out[0][1], &outx.0[1].to_biguint()); + out_buffer.set_biguint_target(&self.out[1][0], &outy.0[0].to_biguint()); + out_buffer.set_biguint_target(&self.out[1][1], &outy.0[1].to_biguint()); + } + + fn serialize( + &self, + dst: &mut Vec, + _common_data: &plonky2::plonk::circuit_data::CommonCircuitData, + ) -> plonky2::util::serialization::IoResult<()> { + self.a[0][0].serialize(dst)?; + self.a[0][1].serialize(dst)?; + self.a[1][0].serialize(dst)?; + self.a[1][1].serialize(dst)?; + self.iso_3_a[0].serialize(dst)?; + self.iso_3_a[1].serialize(dst)?; + self.out[0][0].serialize(dst)?; + self.out[0][1].serialize(dst)?; + self.out[1][0].serialize(dst)?; + self.out[1][1].serialize(dst) + } + + fn deserialize( + src: &mut plonky2::util::serialization::Buffer, + _common_data: &plonky2::plonk::circuit_data::CommonCircuitData, + ) -> plonky2::util::serialization::IoResult + where + Self: Sized, + { + let ax_c0 = BigUintTarget::deserialize(src)?; + let ax_c1 = BigUintTarget::deserialize(src)?; + let ay_c0 = BigUintTarget::deserialize(src)?; + let ay_c1 = BigUintTarget::deserialize(src)?; + let iso_3_a_c0 = BigUintTarget::deserialize(src)?; + let iso_3_a_c1 = BigUintTarget::deserialize(src)?; + let outx_c0 = BigUintTarget::deserialize(src)?; + let outx_c1 = BigUintTarget::deserialize(src)?; + let outy_c0 = BigUintTarget::deserialize(src)?; + let outy_c1 = BigUintTarget::deserialize(src)?; + Ok(Self { + a: [[ax_c0, ax_c1], [ay_c0, ay_c1]], + iso_3_a: [iso_3_a_c0, iso_3_a_c1], + out: [[outx_c0, outx_c1], [outy_c0, outy_c1]], + }) + } +} + +#[cfg(test)] +mod tests { + use std::str::FromStr; + + use itertools::Itertools; + use num_bigint::{BigUint, ToBigUint}; + use plonky2::field::{ + goldilocks_field::GoldilocksField, + types::{Field, Field64}, + }; + use plonky2x::frontend::{ + builder::DefaultBuilder, uint::num::biguint::CircuitBuilderBiguint, vars::Variable, + }; + + use crate::verification::{ + fields::fp::LIMBS, + utils::native_bls::{Fp, 
Fp2}, + }; + + use super::{g2_add, g2_add_unequal, g2_double, g2_scalar_mul, signature_point_check, TWO}; + + #[test] + fn test_g2_add_unequal() { + let mut builder = DefaultBuilder::new(); + let ax = Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str( + "337725438187709982817188701931175748950561864071211469604211805451542415352866003578718608366094520056481699232210" + ).unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str( + "325784474482020989596135374893471919876505088991873421195308352667079503424389512976821068246925718319548045276021" + ).unwrap()), + ]); + let ay = Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str( + "2965841325781469856973174148258785715970498867849450741444982165189412687797594966692602501064144340797710797471604" + ).unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str( + "1396501224612541682947972324170488919567615665343008985787728980681572855276817422483173426760119128141672533354119" + ).unwrap()), + ]); + let bx = Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str( + "3310291183651938419676930134503606039576251708119934019650494815974674760881379622302324811830103490883079904029190" + ).unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str( + "845507222118475144290150023685070019360459684233155402409229752404383900284940551672371362493058110240418657298132" + ).unwrap()), + ]); + let by = Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str( + "569469686320544423596306308487126199229297307366529623191489815159190893993668979352767262071942316086625514601662" + ).unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str( + "2551756239942517806379811015764241238167383065214268002625836091916337464087928632477808357405422759164808763594986" + ).unwrap()), + ]); + let outx = Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str( + "3768960129599410557225162537737286003238400530051754572454824471200864202913026112975152396185116175737023068710834" + ).unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str( + 
"2843653242501816279232983717246998149289638605923450990196321568072224346134709601553669097144892265594669670100681" + ).unwrap()), + ]); + let outy = Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str( + "2136473314670056131183153764113091685196675640973971063848296586048702180604877062503412214120535118046733529576506" + ).unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str( + "3717743359948639609414970569174500186381762539811697438986507840606082550875593852503699874848297189142874182531754" + ).unwrap()), + ]); + + let a = [ + [ + builder.api.constant_biguint(&ax.to_biguint()[0]), + builder.api.constant_biguint(&ax.to_biguint()[1]), + ], + [ + builder.api.constant_biguint(&ay.to_biguint()[0]), + builder.api.constant_biguint(&ay.to_biguint()[1]), + ], + ]; + + let b = [ + [ + builder.api.constant_biguint(&bx.to_biguint()[0]), + builder.api.constant_biguint(&bx.to_biguint()[1]), + ], + [ + builder.api.constant_biguint(&by.to_biguint()[0]), + builder.api.constant_biguint(&by.to_biguint()[1]), + ], + ]; + + let out = g2_add_unequal(&mut builder, &a, &b); + + // Define your circuit. + let mut res_output: Vec = Vec::new(); + for i in 0..TWO { + for j in 0..out[i].len() { + for k in 0..LIMBS { + builder.write(Variable(out[i][j].limbs[k].target)); + } + } + } + + // Build your circuit. + let circuit = builder.build(); + + // Write to the circuit input. + let input = circuit.input(); + + // Generate a proof. + let (proof, mut output) = circuit.prove(&input); + + // Verify proof. + circuit.verify(&proof, &input, &output); + + // Read output. 
+ for i in 0..TWO { + for _ in 0..out[i].len() { + for _ in 0..LIMBS { + res_output.push(output.read::()) + } + } + } + + let mut biguint_res: Vec = Vec::new(); + for i in 0..2 * TWO { + biguint_res.push(BigUint::new( + res_output[(i * 12)..(i * 12) + 12] + .iter() + .map(|f| (f.0 % GoldilocksField::ORDER) as u32) + .collect_vec(), + )); + } + + let outx = outx.to_biguint(); + let outy = outy.to_biguint(); + for i in 0..TWO { + assert_eq!(biguint_res[i], outx[i]); + } + for i in 0..TWO { + assert_eq!(biguint_res[i + TWO], outy[i]); + } + } + + #[test] + fn test_g2_double() { + let mut builder = DefaultBuilder::new(); + let ax = Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str( + "337725438187709982817188701931175748950561864071211469604211805451542415352866003578718608366094520056481699232210" + ).unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str( + "325784474482020989596135374893471919876505088991873421195308352667079503424389512976821068246925718319548045276021" + ).unwrap()), + ]); + let ay = Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str( + "2965841325781469856973174148258785715970498867849450741444982165189412687797594966692602501064144340797710797471604" + ).unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str( + "1396501224612541682947972324170488919567615665343008985787728980681572855276817422483173426760119128141672533354119" + ).unwrap()), + ]); + let outx = Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str( + "1706600946883407123219281831938721281378271382276249190372502550662898700659312875480967274178992951148217552181426" + ).unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str( + "3667242666443602243234297601464303917352028754060836539777371958000208843208072408275476423902876206704592938302165" + ).unwrap()), + ]); + let outy = Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str( + "1455123735227984325271077817690334450857761312547658768990224051882209081684526238004573782051265522918945273385158" + ).unwrap()), + 
Fp::get_fp_from_biguint(BigUint::from_str( + "3320466234608127782197732106422214686550406898681784249598895322673540642018347203281877363138090179901504571209003" + ).unwrap()), + ]); + + let a = [ + [ + builder.api.constant_biguint(&ax.to_biguint()[0]), + builder.api.constant_biguint(&ax.to_biguint()[1]), + ], + [ + builder.api.constant_biguint(&ay.to_biguint()[0]), + builder.api.constant_biguint(&ay.to_biguint()[1]), + ], + ]; + + let iso_3_a = [ + builder.api.constant_biguint(&0.to_biguint().unwrap()), + builder.api.constant_biguint(&240.to_biguint().unwrap()), + ]; + let iso_3_b = [ + builder.api.constant_biguint(&1012.to_biguint().unwrap()), + builder.api.constant_biguint(&1012.to_biguint().unwrap()), + ]; + + let out = g2_double(&mut builder, &a, &iso_3_a, &iso_3_b); + + // Define your circuit. + let mut res_output: Vec = Vec::new(); + for i in 0..TWO { + for j in 0..out[i].len() { + for k in 0..LIMBS { + builder.write(Variable(out[i][j].limbs[k].target)); + } + } + } + + // Build your circuit. + let circuit = builder.build(); + + // Write to the circuit input. + let input = circuit.input(); + + // Generate a proof. + let (proof, mut output) = circuit.prove(&input); + + // Verify proof. + circuit.verify(&proof, &input, &output); + + // Read output. 
+ for i in 0..TWO { + for _ in 0..out[i].len() { + for _ in 0..LIMBS { + res_output.push(output.read::()) + } + } + } + + let mut biguint_res: Vec = Vec::new(); + for i in 0..2 * TWO { + biguint_res.push(BigUint::new( + res_output[(i * 12)..(i * 12) + 12] + .iter() + .map(|f| (f.0 % GoldilocksField::ORDER) as u32) + .collect_vec(), + )); + } + + let outx = outx.to_biguint(); + let outy = outy.to_biguint(); + for i in 0..TWO { + assert_eq!(biguint_res[i], outx[i]); + } + for i in 0..TWO { + assert_eq!(biguint_res[i + TWO], outy[i]); + } + } + + #[test] + fn test_g2_add() { + let mut builder = DefaultBuilder::new(); + let ax = Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str( + "337725438187709982817188701931175748950561864071211469604211805451542415352866003578718608366094520056481699232210" + ).unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str( + "325784474482020989596135374893471919876505088991873421195308352667079503424389512976821068246925718319548045276021" + ).unwrap()), + ]); + let ay = Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str( + "2965841325781469856973174148258785715970498867849450741444982165189412687797594966692602501064144340797710797471604" + ).unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str( + "1396501224612541682947972324170488919567615665343008985787728980681572855276817422483173426760119128141672533354119" + ).unwrap()), + ]); + let bx = Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str( + "3310291183651938419676930134503606039576251708119934019650494815974674760881379622302324811830103490883079904029190" + ).unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str( + "845507222118475144290150023685070019360459684233155402409229752404383900284940551672371362493058110240418657298132" + ).unwrap()), + ]); + let by = Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str( + "569469686320544423596306308487126199229297307366529623191489815159190893993668979352767262071942316086625514601662" + ).unwrap()), + 
Fp::get_fp_from_biguint(BigUint::from_str( + "2551756239942517806379811015764241238167383065214268002625836091916337464087928632477808357405422759164808763594986" + ).unwrap()), + ]); + let outx = Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str( + "3768960129599410557225162537737286003238400530051754572454824471200864202913026112975152396185116175737023068710834" + ).unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str( + "2843653242501816279232983717246998149289638605923450990196321568072224346134709601553669097144892265594669670100681" + ).unwrap()), + ]); + let outy = Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str( + "2136473314670056131183153764113091685196675640973971063848296586048702180604877062503412214120535118046733529576506" + ).unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str( + "3717743359948639609414970569174500186381762539811697438986507840606082550875593852503699874848297189142874182531754" + ).unwrap()), + ]); + + let iso_3_a = [ + builder.api.constant_biguint(&0.to_biguint().unwrap()), + builder.api.constant_biguint(&240.to_biguint().unwrap()), + ]; + let iso_3_b = [ + builder.api.constant_biguint(&1012.to_biguint().unwrap()), + builder.api.constant_biguint(&1012.to_biguint().unwrap()), + ]; + + let a = [ + [ + builder.api.constant_biguint(&ax.to_biguint()[0]), + builder.api.constant_biguint(&ax.to_biguint()[1]), + ], + [ + builder.api.constant_biguint(&ay.to_biguint()[0]), + builder.api.constant_biguint(&ay.to_biguint()[1]), + ], + ]; + + let b = [ + [ + builder.api.constant_biguint(&bx.to_biguint()[0]), + builder.api.constant_biguint(&bx.to_biguint()[1]), + ], + [ + builder.api.constant_biguint(&by.to_biguint()[0]), + builder.api.constant_biguint(&by.to_biguint()[1]), + ], + ]; + + let fals = builder._false(); + + let out = g2_add(&mut builder, &a, fals, &b, fals, &iso_3_a, &iso_3_b); + + // Define your circuit. 
+ let mut res_output: Vec = Vec::new(); + for i in 0..TWO { + for j in 0..out[i].len() { + for k in 0..LIMBS { + builder.write(Variable(out[i][j].limbs[k].target)); + } + } + } + + // Build your circuit. + let circuit = builder.build(); + + // Write to the circuit input. + let input = circuit.input(); + + // Generate a proof. + let (proof, mut output) = circuit.prove(&input); + + // Verify proof. + circuit.verify(&proof, &input, &output); + + // Read output. + for i in 0..TWO { + for _ in 0..out[i].len() { + for _ in 0..LIMBS { + res_output.push(output.read::()) + } + } + } + + let mut biguint_res: Vec = Vec::new(); + for i in 0..2 * TWO { + biguint_res.push(BigUint::new( + res_output[(i * 12)..(i * 12) + 12] + .iter() + .map(|f| (f.0 % GoldilocksField::ORDER) as u32) + .collect_vec(), + )); + } + + let outx = outx.to_biguint(); + let outy = outy.to_biguint(); + for i in 0..TWO { + assert_eq!(biguint_res[i], outx[i]); + } + for i in 0..TWO { + assert_eq!(biguint_res[i + TWO], outy[i]); + } + } + + #[test] + fn test_g2_scalar_mul() { + let mut builder = DefaultBuilder::new(); + let ax = Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str( + "3219922746671482828210036408711997441423671614254909325234707044434520756052360285257107968950769890523504628275940" + ).unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str( + "1689252599334450651431125834598273362703914442067213087777626885820814565104897473205802289043260096634945919754747" + ).unwrap()), + ]); + let ay = Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str( + "3277365552217223927730141275188890184833071787772555827000840921808443941258778716588573376888715070179970391655322" + ).unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str( + "583921403203359937897773959554466412643567032578544897698779952656397892876222999644067619700087458377600564507453" + ).unwrap()), + ]); + let outx = Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str( + 
"2523579754967640238723918616351685721284996518144674649571478689837790667637298382703328020485789979179436650708908" + ).unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str( + "926383654583210622704996942518380628779065643276946198453367351460754762515870939199945068184689019420502882527581" + ).unwrap()), + ]); + let outy = Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str( + "3787164088273368384415735450659985644624425652571718026503769291441565414050570276349393167238939810080925158072505" + ).unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str( + "3689766260810296892747853615583585529622598940500344733471060314731353498148974741263844587195375375425544954703339" + ).unwrap()), + ]); + + let a = [ + [ + builder.api.constant_biguint(&ax.to_biguint()[0]), + builder.api.constant_biguint(&ax.to_biguint()[1]), + ], + [ + builder.api.constant_biguint(&ay.to_biguint()[0]), + builder.api.constant_biguint(&ay.to_biguint()[1]), + ], + ]; + + let iso_3_b = [ + builder.api.constant_biguint(&4.to_biguint().unwrap()), + builder.api.constant_biguint(&4.to_biguint().unwrap()), + ]; + + let out = g2_scalar_mul(&mut builder, &a, &iso_3_b); + + // Define your circuit. + let mut res_output: Vec = Vec::new(); + for i in 0..TWO { + for j in 0..out[i].len() { + for k in 0..LIMBS { + builder.write(Variable(out[i][j].limbs[k].target)); + } + } + } + + // Build your circuit. + let circuit = builder.build(); + + // Write to the circuit input. + let input = circuit.input(); + + // Generate a proof. + let (proof, mut output) = circuit.prove(&input); + + // Verify proof. + circuit.verify(&proof, &input, &output); + + // Read output. 
+ for i in 0..TWO { + for _ in 0..out[i].len() { + for _ in 0..LIMBS { + res_output.push(output.read::()) + } + } + } + let mut biguint_res: Vec = Vec::new(); + for i in 0..2 * TWO { + biguint_res.push(BigUint::new( + res_output[(i * 12)..(i * 12) + 12] + .iter() + .map(|f| (f.0 % GoldilocksField::ORDER) as u32) + .collect_vec(), + )); + } + + let outx = outx.to_biguint(); + let outy = outy.to_biguint(); + for i in 0..TWO { + assert_eq!(biguint_res[i], outx[i]); + } + for i in 0..TWO { + assert_eq!(biguint_res[i + TWO], outy[i]); + } + } + + #[test] + fn test_signature_point_check() { + let mut builder = DefaultBuilder::new(); + let x = Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str( + "2132190448044539512343458281906429348357553485972550361022637600291474790426714276782518732598485406127127542511958" + ).unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str( + "1768967113711705180967647921989767607043027235135825860038026636952386389242730816293578938377273126163720266364901" + ).unwrap()), + ]); + let y = Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str( + "1601269830186296343258204708609068858787525822280553591425324687245481424080606221266002538737401918289754033770338" + ).unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str( + "508402758079580872259652181430201489694066144504950057753724961054091567713555160539784585997814439522141428760875" + ).unwrap()), + ]); + let x_t = [ + builder.api.constant_biguint(&x.to_biguint()[0]), + builder.api.constant_biguint(&x.to_biguint()[1]), + ]; + let y_t = [ + builder.api.constant_biguint(&y.to_biguint()[0]), + builder.api.constant_biguint(&y.to_biguint()[1]), + ]; + + let sig: Vec = [ + 139, 126, 67, 23, 196, 226, 59, 211, 144, 232, 136, 101, 183, 50, 126, 215, 210, 110, + 97, 248, 215, 138, 135, 11, 184, 144, 5, 162, 250, 243, 244, 51, 140, 27, 110, 7, 158, + 63, 35, 135, 61, 90, 233, 5, 135, 72, 183, 229, 13, 218, 102, 33, 65, 70, 85, 67, 129, + 210, 109, 61, 39, 103, 248, 6, 238, 111, 155, 116, 213, 81, 130, 
121, 92, 156, 15, 149, + 69, 65, 43, 98, 117, 125, 244, 59, 143, 22, 72, 75, 38, 67, 175, 183, 249, 6, 57, 86, + ] + .iter() + .map(|f| builder.constant(GoldilocksField::from_canonical_u8(*f))) + .collect(); + + let sig: [Variable; 96] = sig + .into_iter() + .collect::>() + .try_into() + .unwrap(); + + let point = [x_t, y_t]; + + signature_point_check(&mut builder, &point, &sig); + + // Build your circuit. + let circuit = builder.build(); + + // Write to the circuit input. + let input = circuit.input(); + + // Generate a proof. + let (proof, output) = circuit.prove(&input); + + // Verify proof. + circuit.verify(&proof, &input, &output); + } +} diff --git a/casper-finality-proofs/src/verification/curves/mod.rs b/casper-finality-proofs/src/verification/curves/mod.rs new file mode 100644 index 000000000..c0712f392 --- /dev/null +++ b/casper-finality-proofs/src/verification/curves/mod.rs @@ -0,0 +1,3 @@ +pub mod g1; +pub mod g2; +pub mod starky; diff --git a/casper-finality-proofs/src/verification/curves/starky/g1.rs b/casper-finality-proofs/src/verification/curves/starky/g1.rs new file mode 100644 index 000000000..edf9835e9 --- /dev/null +++ b/casper-finality-proofs/src/verification/curves/starky/g1.rs @@ -0,0 +1,1522 @@ +use num_bigint::BigUint; +use plonky2::{ + field::{ + extension::{Extendable, FieldExtension}, + packed::PackedField, + types::Field, + }, + hash::hash_types::RichField, + iop::ext_target::ExtensionTarget, + plonk::circuit_builder::CircuitBuilder, +}; +use starky::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer}; + +use crate::verification::{ + fields::starky::fp::*, + utils::{ + native_bls::{get_u32_vec_from_literal_ref, get_u32_vec_from_literal_ref_24, modulus, Fp}, + starky_utils::assign_u32_in_series, + }, +}; + +pub const G1_POINT_ADDITION_X1: usize = 0; +pub const G1_POINT_ADDITION_Y1: usize = G1_POINT_ADDITION_X1 + 12; +pub const G1_POINT_ADDITION_X2: usize = G1_POINT_ADDITION_Y1 + 12; +pub const G1_POINT_ADDITION_Y2: 
usize = G1_POINT_ADDITION_X2 + 12; +pub const G1_POINT_ADDITION_X3: usize = G1_POINT_ADDITION_Y2 + 12; +pub const G1_POINT_ADDITION_Y3: usize = G1_POINT_ADDITION_X3 + 12; +pub const X2_X1_DIFF: usize = G1_POINT_ADDITION_Y3 + 12; +pub const Y2_Y1_DIFF: usize = X2_X1_DIFF + FP_ADDITION_TOTAL + FP_SUBTRACTION_TOTAL; +pub const X2_X1_SQ: usize = Y2_Y1_DIFF + FP_ADDITION_TOTAL + FP_SUBTRACTION_TOTAL; +pub const Y2_Y1_SQ: usize = + X2_X1_SQ + FP_MULTIPLICATION_TOTAL_COLUMNS + REDUCTION_TOTAL + RANGE_CHECK_TOTAL; +pub const X1_X2_X3_SUM: usize = + Y2_Y1_SQ + FP_MULTIPLICATION_TOTAL_COLUMNS + REDUCTION_TOTAL + RANGE_CHECK_TOTAL; +pub const X1_X2_X3_X2_X1_SQ: usize = X1_X2_X3_SUM + FP_ADDITION_TOTAL * 2; +pub const Y1_Y3: usize = + X1_X2_X3_X2_X1_SQ + FP_MULTIPLICATION_TOTAL_COLUMNS + REDUCTION_TOTAL + RANGE_CHECK_TOTAL; +pub const X1_X3: usize = Y1_Y3 + FP_ADDITION_TOTAL; +pub const Y1_Y3_X2_X1: usize = X1_X3 + FP_ADDITION_TOTAL + FP_SUBTRACTION_TOTAL; +pub const Y2_Y1_X1_X3: usize = + Y1_Y3_X2_X1 + FP_MULTIPLICATION_TOTAL_COLUMNS + REDUCTION_TOTAL + RANGE_CHECK_TOTAL; +pub const TOT_COL: usize = + Y2_Y1_X1_X3 + FP_MULTIPLICATION_TOTAL_COLUMNS + REDUCTION_TOTAL + RANGE_CHECK_TOTAL; + +/// Fills the stark trace of g1 ec addition +pub fn fill_trace_g1_addition, const D: usize, const C: usize>( + trace: &mut Vec<[F; C]>, + pt1: &[Fp; 2], + pt2: &[Fp; 2], + start_row: usize, + start_col: usize, +) -> [Fp; 2] { + let dy = pt2[1] - pt1[1]; + let dx = pt2[0] - pt1[0]; + let lambda = dy / dx; + let lambda_sq = lambda * lambda; + let x3_fp = lambda_sq - pt2[0] - pt1[0]; + let y3_fp = lambda * (pt1[0] - x3_fp) - pt1[1]; + + let end_row = start_row + 11; + for row in start_row..end_row + 1 { + assign_u32_in_series(trace, row, start_col + G1_POINT_ADDITION_X1, &pt1[0].0); + assign_u32_in_series(trace, row, start_col + G1_POINT_ADDITION_Y1, &pt1[1].0); + assign_u32_in_series(trace, row, start_col + G1_POINT_ADDITION_X2, &pt2[0].0); + assign_u32_in_series(trace, row, start_col + 
G1_POINT_ADDITION_Y2, &pt2[1].0); + assign_u32_in_series(trace, row, start_col + G1_POINT_ADDITION_X3, &x3_fp.0); + assign_u32_in_series(trace, row, start_col + G1_POINT_ADDITION_Y3, &y3_fp.0); + } + + let x1 = pt1[0].to_biguint(); + let y1 = pt1[1].to_biguint(); + let x2 = pt2[0].to_biguint(); + let y2 = pt2[1].to_biguint(); + let x3 = x3_fp.to_biguint(); + let y3 = y3_fp.to_biguint(); + let p = modulus(); + + let x2_mod = &x2 + &p; + for row in start_row..end_row + 1 { + fill_trace_addition_fp( + trace, + &get_u32_vec_from_literal_ref(&x2), + &get_u32_vec_from_literal_ref(&p), + row, + start_col + X2_X1_DIFF, + ); + } + let x2_x1 = &x2_mod - &x1; + for row in start_row..end_row + 1 { + fill_trace_subtraction_fp( + trace, + &get_u32_vec_from_literal_ref(&x2_mod), + &get_u32_vec_from_literal_ref(&x1), + row, + start_col + X2_X1_DIFF + FP_ADDITION_TOTAL, + ); + } + let y2_mod = &y2 + &p; + for row in start_row..end_row + 1 { + fill_trace_addition_fp( + trace, + &get_u32_vec_from_literal_ref(&y2), + &get_u32_vec_from_literal_ref(&p), + row, + start_col + Y2_Y1_DIFF, + ); + } + let y2_y1 = &y2_mod - &y1; + for row in start_row..end_row + 1 { + fill_trace_subtraction_fp( + trace, + &get_u32_vec_from_literal_ref(&y2_mod), + &get_u32_vec_from_literal_ref(&y1), + row, + start_col + Y2_Y1_DIFF + FP_ADDITION_TOTAL, + ); + } + let x2_x1_sq = &x2_x1 * &x2_x1; + fill_multiplication_trace_no_mod_reduction( + trace, + &get_u32_vec_from_literal_ref(&x2_x1), + &get_u32_vec_from_literal_ref(&x2_x1), + start_row, + end_row, + start_col + X2_X1_SQ, + ); + let res = fill_reduction_trace( + trace, + &get_u32_vec_from_literal_ref_24(&x2_x1_sq), + start_row, + end_row, + start_col + X2_X1_SQ + FP_MULTIPLICATION_TOTAL_COLUMNS, + ); + let x2_x1_sq = &x2_x1_sq % &p; + // for row in start_row..end_row+1 { + fill_range_check_trace( + trace, + &res, + end_row, + start_col + X2_X1_SQ + FP_MULTIPLICATION_TOTAL_COLUMNS + REDUCTION_TOTAL, + ); + // } + let y2_y1_sq = &y2_y1 * &y2_y1; + 
fill_multiplication_trace_no_mod_reduction( + trace, + &get_u32_vec_from_literal_ref(&y2_y1), + &get_u32_vec_from_literal_ref(&y2_y1), + start_row, + end_row, + start_col + Y2_Y1_SQ, + ); + let res = fill_reduction_trace( + trace, + &get_u32_vec_from_literal_ref_24(&y2_y1_sq), + start_row, + end_row, + start_col + Y2_Y1_SQ + FP_MULTIPLICATION_TOTAL_COLUMNS, + ); + let y2_y1_sq = &y2_y1_sq % &p; + // for row in start_row..end_row+1 { + fill_range_check_trace( + trace, + &res, + end_row, + start_col + Y2_Y1_SQ + FP_MULTIPLICATION_TOTAL_COLUMNS + REDUCTION_TOTAL, + ); + // } + let x1_x2 = &x1 + &x2; + for row in start_row..end_row + 1 { + fill_trace_addition_fp( + trace, + &get_u32_vec_from_literal_ref(&x1), + &get_u32_vec_from_literal_ref(&x2), + row, + start_col + X1_X2_X3_SUM, + ); + } + let x1_x2_x3 = &x1_x2 + &x3; + for row in start_row..end_row + 1 { + fill_trace_addition_fp( + trace, + &get_u32_vec_from_literal_ref(&x1_x2), + &get_u32_vec_from_literal_ref(&x3), + row, + start_col + X1_X2_X3_SUM + FP_ADDITION_TOTAL, + ); + } + let x1_x2_x3_x2_x1_sq = &x1_x2_x3 * &x2_x1_sq; + fill_multiplication_trace_no_mod_reduction( + trace, + &get_u32_vec_from_literal_ref(&x1_x2_x3), + &get_u32_vec_from_literal_ref(&x2_x1_sq), + start_row, + end_row, + start_col + X1_X2_X3_X2_X1_SQ, + ); + let res = fill_reduction_trace( + trace, + &get_u32_vec_from_literal_ref_24(&x1_x2_x3_x2_x1_sq), + start_row, + end_row, + start_col + X1_X2_X3_X2_X1_SQ + FP_MULTIPLICATION_TOTAL_COLUMNS, + ); + let x1_x2_x3_x2_x1_sq = &x1_x2_x3_x2_x1_sq % &p; + // for row in start_row..end_row+1 { + fill_range_check_trace( + trace, + &res, + end_row, + start_col + X1_X2_X3_X2_X1_SQ + FP_MULTIPLICATION_TOTAL_COLUMNS + REDUCTION_TOTAL, + ); + // } + assert_eq!(&x1_x2_x3_x2_x1_sq, &y2_y1_sq); + let y1_y3 = &y1 + &y3; + for row in start_row..end_row + 1 { + fill_trace_addition_fp( + trace, + &get_u32_vec_from_literal_ref(&y1), + &get_u32_vec_from_literal_ref(&y3), + row, + start_col + Y1_Y3, + ); + } + let 
x1_mod = &x1 + &p; + for row in start_row..end_row + 1 { + fill_trace_addition_fp( + trace, + &get_u32_vec_from_literal_ref(&x1), + &get_u32_vec_from_literal_ref(&p), + row, + start_col + X1_X3, + ); + } + let x1_x3 = &x1_mod - &x3; + for row in start_row..end_row + 1 { + fill_trace_subtraction_fp( + trace, + &get_u32_vec_from_literal_ref(&x1_mod), + &get_u32_vec_from_literal_ref(&x3), + row, + start_col + X1_X3 + FP_ADDITION_TOTAL, + ); + } + let y1_y3_x2_x1 = &y1_y3 * &x2_x1; + fill_multiplication_trace_no_mod_reduction( + trace, + &get_u32_vec_from_literal_ref(&y1_y3), + &get_u32_vec_from_literal_ref(&x2_x1), + start_row, + end_row, + start_col + Y1_Y3_X2_X1, + ); + let res = fill_reduction_trace( + trace, + &get_u32_vec_from_literal_ref_24(&y1_y3_x2_x1), + start_row, + end_row, + start_col + Y1_Y3_X2_X1 + FP_MULTIPLICATION_TOTAL_COLUMNS, + ); + let y1_y3_x2_x1 = BigUint::new(res.to_vec()); + // for row in start_row..end_row+1 { + fill_range_check_trace( + trace, + &res, + end_row, + start_col + Y1_Y3_X2_X1 + FP_MULTIPLICATION_TOTAL_COLUMNS + REDUCTION_TOTAL, + ); + // } + let y2_y1_x1_x3 = &y2_y1 * &x1_x3; + fill_multiplication_trace_no_mod_reduction( + trace, + &get_u32_vec_from_literal_ref(&y2_y1), + &get_u32_vec_from_literal_ref(&x1_x3), + start_row, + end_row, + start_col + Y2_Y1_X1_X3, + ); + let res = fill_reduction_trace( + trace, + &get_u32_vec_from_literal_ref_24(&y2_y1_x1_x3), + start_row, + end_row, + start_col + Y2_Y1_X1_X3 + FP_MULTIPLICATION_TOTAL_COLUMNS, + ); + let y2_y1_x1_x3 = BigUint::new(res.to_vec()); + // for row in start_row..end_row+1 { + fill_range_check_trace( + trace, + &res, + end_row, + start_col + Y2_Y1_X1_X3 + FP_MULTIPLICATION_TOTAL_COLUMNS + REDUCTION_TOTAL, + ); + // } + assert_eq!(&y1_y3_x2_x1, &y2_y1_x1_x3); + [x3_fp, y3_fp] +} + +/// Constraints the g1 ec addition. 
+pub fn add_g1_addition_constraints< + F: RichField + Extendable, + const D: usize, + FE, + P, + const D2: usize, +>( + local_values: &[P], + next_values: &[P], + yield_constr: &mut ConstraintConsumer

, + start_col: usize, // Starting column of your multiplication trace + bit_selector: Option

, +) where + FE: FieldExtension, + P: PackedField, +{ + let bit_selector_val = bit_selector.unwrap_or(P::ONES); + let p = modulus().to_u32_digits(); + + for i in 0..12 { + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + X2_X1_SQ + MULTIPLICATION_SELECTOR_OFFSET] + * (local_values[start_col + G1_POINT_ADDITION_X1 + i] + - next_values[start_col + G1_POINT_ADDITION_X1 + i]), + ); + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + X2_X1_SQ + MULTIPLICATION_SELECTOR_OFFSET] + * (local_values[start_col + G1_POINT_ADDITION_Y1 + i] + - next_values[start_col + G1_POINT_ADDITION_Y1 + i]), + ); + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + X2_X1_SQ + MULTIPLICATION_SELECTOR_OFFSET] + * (local_values[start_col + G1_POINT_ADDITION_X2 + i] + - next_values[start_col + G1_POINT_ADDITION_X2 + i]), + ); + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + X2_X1_SQ + MULTIPLICATION_SELECTOR_OFFSET] + * (local_values[start_col + G1_POINT_ADDITION_Y2 + i] + - next_values[start_col + G1_POINT_ADDITION_Y2 + i]), + ); + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + X2_X1_SQ + MULTIPLICATION_SELECTOR_OFFSET] + * (local_values[start_col + G1_POINT_ADDITION_X3 + i] + - next_values[start_col + G1_POINT_ADDITION_X3 + i]), + ); + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + X2_X1_SQ + MULTIPLICATION_SELECTOR_OFFSET] + * (local_values[start_col + G1_POINT_ADDITION_Y3 + i] + - next_values[start_col + G1_POINT_ADDITION_Y3 + i]), + ); + } + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + X2_X1_DIFF + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + X2_X1_DIFF + FP_ADDITION_X_OFFSET + i] + - local_values[start_col + G1_POINT_ADDITION_X2 + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + X2_X1_DIFF 
+ FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + X2_X1_DIFF + FP_ADDITION_Y_OFFSET + i] + - FE::from_canonical_u32(p[i])), + ); + } + add_addition_fp_constraints( + local_values, + yield_constr, + start_col + X2_X1_DIFF, + bit_selector, + ); + + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values + [start_col + X2_X1_DIFF + FP_ADDITION_TOTAL + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values + [start_col + X2_X1_DIFF + FP_ADDITION_TOTAL + FP_SUBTRACTION_X_OFFSET + i] + - local_values[start_col + X2_X1_DIFF + FP_ADDITION_SUM_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values + [start_col + X2_X1_DIFF + FP_ADDITION_TOTAL + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values + [start_col + X2_X1_DIFF + FP_ADDITION_TOTAL + FP_SUBTRACTION_Y_OFFSET + i] + - local_values[start_col + G1_POINT_ADDITION_X1 + i]), + ); + } + add_subtraction_fp_constraints( + local_values, + yield_constr, + start_col + X2_X1_DIFF + FP_ADDITION_TOTAL, + bit_selector, + ); + + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + Y2_Y1_DIFF + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + Y2_Y1_DIFF + FP_ADDITION_X_OFFSET + i] + - local_values[start_col + G1_POINT_ADDITION_Y2 + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + Y2_Y1_DIFF + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + Y2_Y1_DIFF + FP_ADDITION_Y_OFFSET + i] + - FE::from_canonical_u32(p[i])), + ); + } + add_addition_fp_constraints( + local_values, + yield_constr, + start_col + Y2_Y1_DIFF, + bit_selector, + ); + + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values + [start_col + Y2_Y1_DIFF + FP_ADDITION_TOTAL + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values + [start_col + Y2_Y1_DIFF + FP_ADDITION_TOTAL + FP_SUBTRACTION_X_OFFSET + i] + - local_values[start_col + Y2_Y1_DIFF + FP_ADDITION_SUM_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector_val 
+ * local_values + [start_col + Y2_Y1_DIFF + FP_ADDITION_TOTAL + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values + [start_col + Y2_Y1_DIFF + FP_ADDITION_TOTAL + FP_SUBTRACTION_Y_OFFSET + i] + - local_values[start_col + G1_POINT_ADDITION_Y1 + i]), + ); + } + add_subtraction_fp_constraints( + local_values, + yield_constr, + start_col + Y2_Y1_DIFF + FP_ADDITION_TOTAL, + bit_selector, + ); + + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + X2_X1_SQ + MULTIPLICATION_SELECTOR_OFFSET] + * (local_values[start_col + X2_X1_SQ + X_INPUT_OFFSET + i] + - local_values[start_col + + X2_X1_DIFF + + FP_ADDITION_TOTAL + + FP_SUBTRACTION_DIFF_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + X2_X1_SQ + MULTIPLICATION_SELECTOR_OFFSET] + * (local_values[start_col + X2_X1_SQ + Y_INPUT_OFFSET + i] + - local_values[start_col + + X2_X1_DIFF + + FP_ADDITION_TOTAL + + FP_SUBTRACTION_DIFF_OFFSET + + i]), + ); + } + add_multiplication_constraints( + local_values, + next_values, + yield_constr, + start_col + X2_X1_SQ, + bit_selector, + ); + for i in 0..24 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + X2_X1_SQ + + FP_MULTIPLICATION_TOTAL_COLUMNS + + REDUCTION_TOTAL + + RANGE_CHECK_SELECTOR_OFFSET] + * (local_values[start_col + X2_X1_SQ + SUM_OFFSET + i] + - local_values[start_col + + X2_X1_SQ + + FP_MULTIPLICATION_TOTAL_COLUMNS + + REDUCE_X_OFFSET + + i]), + ); + } + add_reduce_constraints( + local_values, + next_values, + yield_constr, + start_col + X2_X1_SQ + FP_MULTIPLICATION_TOTAL_COLUMNS, + start_col + X2_X1_SQ + MULTIPLICATION_SELECTOR_OFFSET, + bit_selector, + ); + add_range_check_constraints( + local_values, + yield_constr, + start_col + X2_X1_SQ + FP_MULTIPLICATION_TOTAL_COLUMNS + REDUCTION_TOTAL, + bit_selector, + ); + + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + Y2_Y1_SQ + MULTIPLICATION_SELECTOR_OFFSET] + * 
(local_values[start_col + Y2_Y1_SQ + X_INPUT_OFFSET + i] + - local_values[start_col + + Y2_Y1_DIFF + + FP_ADDITION_TOTAL + + FP_SUBTRACTION_DIFF_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + Y2_Y1_SQ + MULTIPLICATION_SELECTOR_OFFSET] + * (local_values[start_col + Y2_Y1_SQ + Y_INPUT_OFFSET + i] + - local_values[start_col + + Y2_Y1_DIFF + + FP_ADDITION_TOTAL + + FP_SUBTRACTION_DIFF_OFFSET + + i]), + ); + } + add_multiplication_constraints( + local_values, + next_values, + yield_constr, + start_col + Y2_Y1_SQ, + bit_selector, + ); + for i in 0..24 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + Y2_Y1_SQ + + FP_MULTIPLICATION_TOTAL_COLUMNS + + REDUCTION_TOTAL + + RANGE_CHECK_SELECTOR_OFFSET] + * (local_values[start_col + Y2_Y1_SQ + SUM_OFFSET + i] + - local_values[start_col + + Y2_Y1_SQ + + FP_MULTIPLICATION_TOTAL_COLUMNS + + REDUCE_X_OFFSET + + i]), + ); + } + add_reduce_constraints( + local_values, + next_values, + yield_constr, + start_col + Y2_Y1_SQ + FP_MULTIPLICATION_TOTAL_COLUMNS, + start_col + Y2_Y1_SQ + MULTIPLICATION_SELECTOR_OFFSET, + bit_selector, + ); + add_range_check_constraints( + local_values, + yield_constr, + start_col + Y2_Y1_SQ + FP_MULTIPLICATION_TOTAL_COLUMNS + REDUCTION_TOTAL, + bit_selector, + ); + + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + X1_X2_X3_SUM + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + X1_X2_X3_SUM + FP_ADDITION_X_OFFSET + i] + - local_values[start_col + G1_POINT_ADDITION_X1 + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + X1_X2_X3_SUM + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + X1_X2_X3_SUM + FP_ADDITION_Y_OFFSET + i] + - local_values[start_col + G1_POINT_ADDITION_X2 + i]), + ); + } + add_addition_fp_constraints( + local_values, + yield_constr, + start_col + X1_X2_X3_SUM, + bit_selector, + ); + for i in 0..12 { + yield_constr.constraint( 
+ bit_selector_val + * local_values + [start_col + X1_X2_X3_SUM + FP_ADDITION_TOTAL + FP_ADDITION_CHECK_OFFSET] + * (local_values + [start_col + X1_X2_X3_SUM + FP_ADDITION_TOTAL + FP_ADDITION_X_OFFSET + i] + - local_values[start_col + X1_X2_X3_SUM + FP_ADDITION_SUM_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values + [start_col + X1_X2_X3_SUM + FP_ADDITION_TOTAL + FP_ADDITION_CHECK_OFFSET] + * (local_values + [start_col + X1_X2_X3_SUM + FP_ADDITION_TOTAL + FP_ADDITION_Y_OFFSET + i] + - local_values[start_col + G1_POINT_ADDITION_X3 + i]), + ); + } + add_addition_fp_constraints( + local_values, + yield_constr, + start_col + X1_X2_X3_SUM + FP_ADDITION_TOTAL, + bit_selector, + ); + + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + X1_X2_X3_X2_X1_SQ + MULTIPLICATION_SELECTOR_OFFSET] + * (local_values[start_col + X1_X2_X3_X2_X1_SQ + X_INPUT_OFFSET + i] + - local_values[start_col + + X1_X2_X3_SUM + + FP_ADDITION_TOTAL + + FP_ADDITION_SUM_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + X1_X2_X3_X2_X1_SQ + MULTIPLICATION_SELECTOR_OFFSET] + * (local_values[start_col + X1_X2_X3_X2_X1_SQ + Y_INPUT_OFFSET + i] + - local_values[start_col + + X2_X1_SQ + + FP_MULTIPLICATION_TOTAL_COLUMNS + + REDUCED_OFFSET + + i]), + ); + } + add_multiplication_constraints( + local_values, + next_values, + yield_constr, + start_col + X1_X2_X3_X2_X1_SQ, + bit_selector, + ); + for i in 0..24 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + X1_X2_X3_X2_X1_SQ + + FP_MULTIPLICATION_TOTAL_COLUMNS + + REDUCTION_TOTAL + + RANGE_CHECK_SELECTOR_OFFSET] + * (local_values[start_col + X1_X2_X3_X2_X1_SQ + SUM_OFFSET + i] + - local_values[start_col + + X1_X2_X3_X2_X1_SQ + + FP_MULTIPLICATION_TOTAL_COLUMNS + + REDUCE_X_OFFSET + + i]), + ); + } + add_reduce_constraints( + local_values, + next_values, + yield_constr, + start_col + X1_X2_X3_X2_X1_SQ + 
FP_MULTIPLICATION_TOTAL_COLUMNS, + start_col + X1_X2_X3_X2_X1_SQ + MULTIPLICATION_SELECTOR_OFFSET, + bit_selector, + ); + add_range_check_constraints( + local_values, + yield_constr, + start_col + X1_X2_X3_X2_X1_SQ + FP_MULTIPLICATION_TOTAL_COLUMNS + REDUCTION_TOTAL, + bit_selector, + ); + + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + X1_X2_X3_X2_X1_SQ + MULTIPLICATION_SELECTOR_OFFSET] + * (local_values[start_col + + X1_X2_X3_X2_X1_SQ + + FP_MULTIPLICATION_TOTAL_COLUMNS + + REDUCED_OFFSET + + i] + - local_values[start_col + + Y2_Y1_SQ + + FP_MULTIPLICATION_TOTAL_COLUMNS + + REDUCED_OFFSET + + i]), + ); + } + + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + Y1_Y3 + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + Y1_Y3 + FP_ADDITION_X_OFFSET + i] + - local_values[start_col + G1_POINT_ADDITION_Y1 + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + Y1_Y3 + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + Y1_Y3 + FP_ADDITION_Y_OFFSET + i] + - local_values[start_col + G1_POINT_ADDITION_Y3 + i]), + ); + } + add_addition_fp_constraints(local_values, yield_constr, start_col + Y1_Y3, bit_selector); + + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + X1_X3 + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + X1_X3 + FP_ADDITION_X_OFFSET + i] + - local_values[start_col + G1_POINT_ADDITION_X1 + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + X1_X3 + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + X1_X3 + FP_ADDITION_Y_OFFSET + i] + - FE::from_canonical_u32(p[i])), + ); + } + add_addition_fp_constraints(local_values, yield_constr, start_col + X1_X3, bit_selector); + + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + X1_X3 + FP_ADDITION_TOTAL + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values + [start_col + 
X1_X3 + FP_ADDITION_TOTAL + FP_SUBTRACTION_X_OFFSET + i] + - local_values[start_col + X1_X3 + FP_ADDITION_SUM_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + X1_X3 + FP_ADDITION_TOTAL + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values + [start_col + X1_X3 + FP_ADDITION_TOTAL + FP_SUBTRACTION_Y_OFFSET + i] + - local_values[start_col + G1_POINT_ADDITION_X3 + i]), + ); + } + add_subtraction_fp_constraints( + local_values, + yield_constr, + start_col + X1_X3 + FP_ADDITION_TOTAL, + bit_selector, + ); + + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + Y1_Y3_X2_X1 + MULTIPLICATION_SELECTOR_OFFSET] + * (local_values[start_col + Y1_Y3_X2_X1 + X_INPUT_OFFSET + i] + - local_values[start_col + Y1_Y3 + FP_ADDITION_SUM_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + Y1_Y3_X2_X1 + MULTIPLICATION_SELECTOR_OFFSET] + * (local_values[start_col + Y1_Y3_X2_X1 + Y_INPUT_OFFSET + i] + - local_values[start_col + + X2_X1_DIFF + + FP_ADDITION_TOTAL + + FP_SUBTRACTION_DIFF_OFFSET + + i]), + ); + } + add_multiplication_constraints( + local_values, + next_values, + yield_constr, + start_col + Y1_Y3_X2_X1, + bit_selector, + ); + for i in 0..24 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + Y1_Y3_X2_X1 + + FP_MULTIPLICATION_TOTAL_COLUMNS + + REDUCTION_TOTAL + + RANGE_CHECK_SELECTOR_OFFSET] + * (local_values[start_col + Y1_Y3_X2_X1 + SUM_OFFSET + i] + - local_values[start_col + + Y1_Y3_X2_X1 + + FP_MULTIPLICATION_TOTAL_COLUMNS + + REDUCE_X_OFFSET + + i]), + ); + } + add_reduce_constraints( + local_values, + next_values, + yield_constr, + start_col + Y1_Y3_X2_X1 + FP_MULTIPLICATION_TOTAL_COLUMNS, + start_col + Y1_Y3_X2_X1 + MULTIPLICATION_SELECTOR_OFFSET, + bit_selector, + ); + add_range_check_constraints( + local_values, + yield_constr, + start_col + Y1_Y3_X2_X1 + FP_MULTIPLICATION_TOTAL_COLUMNS + REDUCTION_TOTAL, + bit_selector, + 
); + + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + Y2_Y1_X1_X3 + MULTIPLICATION_SELECTOR_OFFSET] + * (local_values[start_col + Y2_Y1_X1_X3 + X_INPUT_OFFSET + i] + - local_values[start_col + + Y2_Y1_DIFF + + FP_ADDITION_TOTAL + + FP_SUBTRACTION_DIFF_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + Y2_Y1_X1_X3 + MULTIPLICATION_SELECTOR_OFFSET] + * (local_values[start_col + Y2_Y1_X1_X3 + Y_INPUT_OFFSET + i] + - local_values + [start_col + X1_X3 + FP_ADDITION_TOTAL + FP_SUBTRACTION_DIFF_OFFSET + i]), + ); + } + add_multiplication_constraints( + local_values, + next_values, + yield_constr, + start_col + Y2_Y1_X1_X3, + bit_selector, + ); + for i in 0..24 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + Y2_Y1_X1_X3 + + FP_MULTIPLICATION_TOTAL_COLUMNS + + REDUCTION_TOTAL + + RANGE_CHECK_SELECTOR_OFFSET] + * (local_values[start_col + Y2_Y1_X1_X3 + SUM_OFFSET + i] + - local_values[start_col + + Y2_Y1_X1_X3 + + FP_MULTIPLICATION_TOTAL_COLUMNS + + REDUCE_X_OFFSET + + i]), + ); + } + add_reduce_constraints( + local_values, + next_values, + yield_constr, + start_col + Y2_Y1_X1_X3 + FP_MULTIPLICATION_TOTAL_COLUMNS, + start_col + Y2_Y1_X1_X3 + MULTIPLICATION_SELECTOR_OFFSET, + bit_selector, + ); + add_range_check_constraints( + local_values, + yield_constr, + start_col + Y2_Y1_X1_X3 + FP_MULTIPLICATION_TOTAL_COLUMNS + REDUCTION_TOTAL, + bit_selector, + ); + + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + Y2_Y1_X1_X3 + MULTIPLICATION_SELECTOR_OFFSET] + * (local_values[start_col + + Y2_Y1_X1_X3 + + FP_MULTIPLICATION_TOTAL_COLUMNS + + REDUCED_OFFSET + + i] + - local_values[start_col + + Y1_Y3_X2_X1 + + FP_MULTIPLICATION_TOTAL_COLUMNS + + REDUCED_OFFSET + + i]), + ); + } +} + +pub fn add_g1_addition_constraints_ext_circuit, const D: usize>( + builder: &mut CircuitBuilder, + yield_constr: &mut 
RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + next_values: &[ExtensionTarget], + start_col: usize, + bit_selector: Option>, +) { + let bit_selector_val = bit_selector.unwrap_or(builder.constant_extension(F::Extension::ONE)); + let p = modulus().to_u32_digits(); + + for i in 0..12 { + let mul = builder.mul_extension( + bit_selector_val, + local_values[start_col + X2_X1_SQ + MULTIPLICATION_SELECTOR_OFFSET], + ); + + let sub = builder.sub_extension( + local_values[start_col + G1_POINT_ADDITION_X1 + i], + next_values[start_col + G1_POINT_ADDITION_X1 + i], + ); + let c = builder.mul_extension(mul, sub); + yield_constr.constraint_transition(builder, c); + let sub = builder.sub_extension( + local_values[start_col + G1_POINT_ADDITION_Y1 + i], + next_values[start_col + G1_POINT_ADDITION_Y1 + i], + ); + let c = builder.mul_extension(mul, sub); + yield_constr.constraint_transition(builder, c); + let sub = builder.sub_extension( + local_values[start_col + G1_POINT_ADDITION_X2 + i], + next_values[start_col + G1_POINT_ADDITION_X2 + i], + ); + let c = builder.mul_extension(mul, sub); + yield_constr.constraint_transition(builder, c); + let sub = builder.sub_extension( + local_values[start_col + G1_POINT_ADDITION_Y2 + i], + next_values[start_col + G1_POINT_ADDITION_Y2 + i], + ); + let c = builder.mul_extension(mul, sub); + yield_constr.constraint_transition(builder, c); + let sub = builder.sub_extension( + local_values[start_col + G1_POINT_ADDITION_X3 + i], + next_values[start_col + G1_POINT_ADDITION_X3 + i], + ); + let c = builder.mul_extension(mul, sub); + yield_constr.constraint_transition(builder, c); + let sub = builder.sub_extension( + local_values[start_col + G1_POINT_ADDITION_Y3 + i], + next_values[start_col + G1_POINT_ADDITION_Y3 + i], + ); + let c = builder.mul_extension(mul, sub); + yield_constr.constraint_transition(builder, c); + } + for i in 0..12 { + let mul = builder.mul_extension( + bit_selector_val, + local_values[start_col + X2_X1_DIFF + 
FP_ADDITION_CHECK_OFFSET], + ); + let sub = builder.sub_extension( + local_values[start_col + X2_X1_DIFF + FP_ADDITION_X_OFFSET + i], + local_values[start_col + G1_POINT_ADDITION_X2 + i], + ); + let c = builder.mul_extension(mul, sub); + yield_constr.constraint(builder, c); + + let p_const = builder.constant_extension(F::Extension::from_canonical_u32(p[i])); + let sub = builder.sub_extension( + local_values[start_col + X2_X1_DIFF + FP_ADDITION_Y_OFFSET + i], + p_const, + ); + let c = builder.mul_extension(mul, sub); + yield_constr.constraint(builder, c); + } + add_addition_fp_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + X2_X1_DIFF, + bit_selector, + ); + + for i in 0..12 { + let mul = builder.mul_extension( + bit_selector_val, + local_values[start_col + X2_X1_DIFF + FP_ADDITION_TOTAL + FP_SUBTRACTION_CHECK_OFFSET], + ); + let sub = builder.sub_extension( + local_values[start_col + X2_X1_DIFF + FP_ADDITION_TOTAL + FP_SUBTRACTION_X_OFFSET + i], + local_values[start_col + X2_X1_DIFF + FP_ADDITION_SUM_OFFSET + i], + ); + let c = builder.mul_extension(mul, sub); + yield_constr.constraint(builder, c); + + let sub = builder.sub_extension( + local_values[start_col + X2_X1_DIFF + FP_ADDITION_TOTAL + FP_SUBTRACTION_Y_OFFSET + i], + local_values[start_col + G1_POINT_ADDITION_X1 + i], + ); + let c = builder.mul_extension(mul, sub); + yield_constr.constraint(builder, c); + } + add_subtraction_fp_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + X2_X1_DIFF + FP_ADDITION_TOTAL, + bit_selector, + ); + + for i in 0..12 { + let mul = builder.mul_extension( + bit_selector_val, + local_values[start_col + Y2_Y1_DIFF + FP_ADDITION_CHECK_OFFSET], + ); + let sub = builder.sub_extension( + local_values[start_col + Y2_Y1_DIFF + FP_ADDITION_X_OFFSET + i], + local_values[start_col + G1_POINT_ADDITION_Y2 + i], + ); + let c = builder.mul_extension(mul, sub); + yield_constr.constraint(builder, c); + + let p_const = 
builder.constant_extension(F::Extension::from_canonical_u32(p[i])); + let sub = builder.sub_extension( + local_values[start_col + Y2_Y1_DIFF + FP_ADDITION_Y_OFFSET + i], + p_const, + ); + let c = builder.mul_extension(mul, sub); + yield_constr.constraint(builder, c); + } + add_addition_fp_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + Y2_Y1_DIFF, + bit_selector, + ); + + for i in 0..12 { + let mul = builder.mul_extension( + bit_selector_val, + local_values[start_col + Y2_Y1_DIFF + FP_ADDITION_TOTAL + FP_SUBTRACTION_CHECK_OFFSET], + ); + let sub = builder.sub_extension( + local_values[start_col + Y2_Y1_DIFF + FP_ADDITION_TOTAL + FP_SUBTRACTION_X_OFFSET + i], + local_values[start_col + Y2_Y1_DIFF + FP_ADDITION_SUM_OFFSET + i], + ); + let c = builder.mul_extension(mul, sub); + yield_constr.constraint(builder, c); + + let sub = builder.sub_extension( + local_values[start_col + Y2_Y1_DIFF + FP_ADDITION_TOTAL + FP_SUBTRACTION_Y_OFFSET + i], + local_values[start_col + G1_POINT_ADDITION_Y1 + i], + ); + let c = builder.mul_extension(mul, sub); + yield_constr.constraint(builder, c); + } + add_subtraction_fp_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + Y2_Y1_DIFF + FP_ADDITION_TOTAL, + bit_selector, + ); + + for i in 0..12 { + let mul = builder.mul_extension( + bit_selector_val, + local_values[start_col + X2_X1_SQ + MULTIPLICATION_SELECTOR_OFFSET], + ); + let sub = builder.sub_extension( + local_values[start_col + X2_X1_SQ + X_INPUT_OFFSET + i], + local_values + [start_col + X2_X1_DIFF + FP_ADDITION_TOTAL + FP_SUBTRACTION_DIFF_OFFSET + i], + ); + let c = builder.mul_extension(mul, sub); + yield_constr.constraint(builder, c); + + let sub = builder.sub_extension( + local_values[start_col + X2_X1_SQ + Y_INPUT_OFFSET + i], + local_values + [start_col + X2_X1_DIFF + FP_ADDITION_TOTAL + FP_SUBTRACTION_DIFF_OFFSET + i], + ); + let c = builder.mul_extension(mul, sub); + yield_constr.constraint(builder, c); + } + 
add_multiplication_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + X2_X1_SQ, + bit_selector, + ); + for i in 0..24 { + let mul = builder.mul_extension( + bit_selector_val, + local_values[start_col + + X2_X1_SQ + + FP_MULTIPLICATION_TOTAL_COLUMNS + + REDUCTION_TOTAL + + RANGE_CHECK_SELECTOR_OFFSET], + ); + let sub = builder.sub_extension( + local_values[start_col + X2_X1_SQ + SUM_OFFSET + i], + local_values + [start_col + X2_X1_SQ + FP_MULTIPLICATION_TOTAL_COLUMNS + REDUCE_X_OFFSET + i], + ); + let c = builder.mul_extension(mul, sub); + yield_constr.constraint(builder, c); + } + add_reduce_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + X2_X1_SQ + FP_MULTIPLICATION_TOTAL_COLUMNS, + start_col + X2_X1_SQ + MULTIPLICATION_SELECTOR_OFFSET, + bit_selector, + ); + add_range_check_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + X2_X1_SQ + FP_MULTIPLICATION_TOTAL_COLUMNS + REDUCTION_TOTAL, + bit_selector, + ); + + for i in 0..12 { + let mul = builder.mul_extension( + bit_selector_val, + local_values[start_col + Y2_Y1_SQ + MULTIPLICATION_SELECTOR_OFFSET], + ); + let sub = builder.sub_extension( + local_values[start_col + Y2_Y1_SQ + X_INPUT_OFFSET + i], + local_values + [start_col + Y2_Y1_DIFF + FP_ADDITION_TOTAL + FP_SUBTRACTION_DIFF_OFFSET + i], + ); + let c = builder.mul_extension(mul, sub); + yield_constr.constraint(builder, c); + + let sub = builder.sub_extension( + local_values[start_col + Y2_Y1_SQ + Y_INPUT_OFFSET + i], + local_values + [start_col + Y2_Y1_DIFF + FP_ADDITION_TOTAL + FP_SUBTRACTION_DIFF_OFFSET + i], + ); + let c = builder.mul_extension(mul, sub); + yield_constr.constraint(builder, c); + } + add_multiplication_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + Y2_Y1_SQ, + bit_selector, + ); + for i in 0..24 { + let mul = builder.mul_extension( + bit_selector_val, + 
local_values[start_col + + Y2_Y1_SQ + + FP_MULTIPLICATION_TOTAL_COLUMNS + + REDUCTION_TOTAL + + RANGE_CHECK_SELECTOR_OFFSET], + ); + let sub = builder.sub_extension( + local_values[start_col + Y2_Y1_SQ + SUM_OFFSET + i], + local_values + [start_col + Y2_Y1_SQ + FP_MULTIPLICATION_TOTAL_COLUMNS + REDUCE_X_OFFSET + i], + ); + let c = builder.mul_extension(mul, sub); + yield_constr.constraint(builder, c); + } + add_reduce_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + Y2_Y1_SQ + FP_MULTIPLICATION_TOTAL_COLUMNS, + start_col + Y2_Y1_SQ + MULTIPLICATION_SELECTOR_OFFSET, + bit_selector, + ); + add_range_check_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + Y2_Y1_SQ + FP_MULTIPLICATION_TOTAL_COLUMNS + REDUCTION_TOTAL, + bit_selector, + ); + + for i in 0..12 { + let mul = builder.mul_extension( + bit_selector_val, + local_values[start_col + X1_X2_X3_SUM + FP_ADDITION_CHECK_OFFSET], + ); + let sub = builder.sub_extension( + local_values[start_col + X1_X2_X3_SUM + FP_ADDITION_X_OFFSET + i], + local_values[start_col + G1_POINT_ADDITION_X1 + i], + ); + let c = builder.mul_extension(mul, sub); + yield_constr.constraint(builder, c); + + let sub = builder.sub_extension( + local_values[start_col + X1_X2_X3_SUM + FP_ADDITION_Y_OFFSET + i], + local_values[start_col + G1_POINT_ADDITION_X2 + i], + ); + let c = builder.mul_extension(mul, sub); + yield_constr.constraint(builder, c); + } + add_addition_fp_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + X1_X2_X3_SUM, + bit_selector, + ); + for i in 0..12 { + let mul = builder.mul_extension( + bit_selector_val, + local_values[start_col + X1_X2_X3_SUM + FP_ADDITION_TOTAL + FP_ADDITION_CHECK_OFFSET], + ); + let sub = builder.sub_extension( + local_values[start_col + X1_X2_X3_SUM + FP_ADDITION_TOTAL + FP_ADDITION_X_OFFSET + i], + local_values[start_col + X1_X2_X3_SUM + FP_ADDITION_SUM_OFFSET + i], + ); + let c = 
builder.mul_extension(mul, sub); + yield_constr.constraint(builder, c); + + let sub = builder.sub_extension( + local_values[start_col + X1_X2_X3_SUM + FP_ADDITION_TOTAL + FP_ADDITION_Y_OFFSET + i], + local_values[start_col + G1_POINT_ADDITION_X3 + i], + ); + let c = builder.mul_extension(mul, sub); + yield_constr.constraint(builder, c); + } + add_addition_fp_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + X1_X2_X3_SUM + FP_ADDITION_TOTAL, + bit_selector, + ); + + for i in 0..12 { + let mul = builder.mul_extension( + bit_selector_val, + local_values[start_col + X1_X2_X3_X2_X1_SQ + MULTIPLICATION_SELECTOR_OFFSET], + ); + let sub = builder.sub_extension( + local_values[start_col + X1_X2_X3_X2_X1_SQ + X_INPUT_OFFSET + i], + local_values[start_col + X1_X2_X3_SUM + FP_ADDITION_TOTAL + FP_ADDITION_SUM_OFFSET + i], + ); + let c = builder.mul_extension(mul, sub); + yield_constr.constraint(builder, c); + + let sub = builder.sub_extension( + local_values[start_col + X1_X2_X3_X2_X1_SQ + Y_INPUT_OFFSET + i], + local_values + [start_col + X2_X1_SQ + FP_MULTIPLICATION_TOTAL_COLUMNS + REDUCED_OFFSET + i], + ); + let c = builder.mul_extension(mul, sub); + yield_constr.constraint(builder, c); + } + add_multiplication_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + X1_X2_X3_X2_X1_SQ, + bit_selector, + ); + for i in 0..24 { + let mul = builder.mul_extension( + bit_selector_val, + local_values[start_col + + X1_X2_X3_X2_X1_SQ + + FP_MULTIPLICATION_TOTAL_COLUMNS + + REDUCTION_TOTAL + + RANGE_CHECK_SELECTOR_OFFSET], + ); + let sub = builder.sub_extension( + local_values[start_col + X1_X2_X3_X2_X1_SQ + SUM_OFFSET + i], + local_values[start_col + + X1_X2_X3_X2_X1_SQ + + FP_MULTIPLICATION_TOTAL_COLUMNS + + REDUCE_X_OFFSET + + i], + ); + let c = builder.mul_extension(mul, sub); + yield_constr.constraint(builder, c); + } + add_reduce_constraints_ext_circuit( + builder, + yield_constr, + local_values, + 
next_values, + start_col + X1_X2_X3_X2_X1_SQ + FP_MULTIPLICATION_TOTAL_COLUMNS, + start_col + X1_X2_X3_X2_X1_SQ + MULTIPLICATION_SELECTOR_OFFSET, + bit_selector, + ); + add_range_check_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + X1_X2_X3_X2_X1_SQ + FP_MULTIPLICATION_TOTAL_COLUMNS + REDUCTION_TOTAL, + bit_selector, + ); + + for i in 0..12 { + let mul = builder.mul_extension( + bit_selector_val, + local_values[start_col + X1_X2_X3_X2_X1_SQ + MULTIPLICATION_SELECTOR_OFFSET], + ); + let sub = builder.sub_extension( + local_values[start_col + + X1_X2_X3_X2_X1_SQ + + FP_MULTIPLICATION_TOTAL_COLUMNS + + REDUCED_OFFSET + + i], + local_values + [start_col + Y2_Y1_SQ + FP_MULTIPLICATION_TOTAL_COLUMNS + REDUCED_OFFSET + i], + ); + let c = builder.mul_extension(mul, sub); + yield_constr.constraint(builder, c); + } + + for i in 0..12 { + let mul = builder.mul_extension( + bit_selector_val, + local_values[start_col + Y1_Y3 + FP_ADDITION_CHECK_OFFSET], + ); + let sub = builder.sub_extension( + local_values[start_col + Y1_Y3 + FP_ADDITION_X_OFFSET + i], + local_values[start_col + G1_POINT_ADDITION_Y1 + i], + ); + let c = builder.mul_extension(mul, sub); + yield_constr.constraint(builder, c); + + let sub = builder.sub_extension( + local_values[start_col + Y1_Y3 + FP_ADDITION_Y_OFFSET + i], + local_values[start_col + G1_POINT_ADDITION_Y3 + i], + ); + let c = builder.mul_extension(mul, sub); + yield_constr.constraint(builder, c); + } + add_addition_fp_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + Y1_Y3, + bit_selector, + ); + + for i in 0..12 { + let mul = builder.mul_extension( + bit_selector_val, + local_values[start_col + X1_X3 + FP_ADDITION_CHECK_OFFSET], + ); + let sub = builder.sub_extension( + local_values[start_col + X1_X3 + FP_ADDITION_X_OFFSET + i], + local_values[start_col + G1_POINT_ADDITION_X1 + i], + ); + let c = builder.mul_extension(mul, sub); + yield_constr.constraint(builder, c); + + let 
p_const = builder.constant_extension(F::Extension::from_canonical_u32(p[i])); + let sub = builder.sub_extension( + local_values[start_col + X1_X3 + FP_ADDITION_Y_OFFSET + i], + p_const, + ); + let c = builder.mul_extension(mul, sub); + yield_constr.constraint(builder, c); + } + add_addition_fp_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + X1_X3, + bit_selector, + ); + + for i in 0..12 { + let mul = builder.mul_extension( + bit_selector_val, + local_values[start_col + X1_X3 + FP_ADDITION_TOTAL + FP_SUBTRACTION_CHECK_OFFSET], + ); + let sub = builder.sub_extension( + local_values[start_col + X1_X3 + FP_ADDITION_TOTAL + FP_SUBTRACTION_X_OFFSET + i], + local_values[start_col + X1_X3 + FP_ADDITION_SUM_OFFSET + i], + ); + let c = builder.mul_extension(mul, sub); + yield_constr.constraint(builder, c); + + let sub = builder.sub_extension( + local_values[start_col + X1_X3 + FP_ADDITION_TOTAL + FP_SUBTRACTION_Y_OFFSET + i], + local_values[start_col + G1_POINT_ADDITION_X3 + i], + ); + let c = builder.mul_extension(mul, sub); + yield_constr.constraint(builder, c); + } + add_subtraction_fp_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + X1_X3 + FP_ADDITION_TOTAL, + bit_selector, + ); + + for i in 0..12 { + let mul = builder.mul_extension( + bit_selector_val, + local_values[start_col + Y1_Y3_X2_X1 + MULTIPLICATION_SELECTOR_OFFSET], + ); + let sub = builder.sub_extension( + local_values[start_col + Y1_Y3_X2_X1 + X_INPUT_OFFSET + i], + local_values[start_col + Y1_Y3 + FP_ADDITION_SUM_OFFSET + i], + ); + let c = builder.mul_extension(mul, sub); + yield_constr.constraint(builder, c); + + let sub = builder.sub_extension( + local_values[start_col + Y1_Y3_X2_X1 + Y_INPUT_OFFSET + i], + local_values + [start_col + X2_X1_DIFF + FP_ADDITION_TOTAL + FP_SUBTRACTION_DIFF_OFFSET + i], + ); + let c = builder.mul_extension(mul, sub); + yield_constr.constraint(builder, c); + } + add_multiplication_constraints_ext_circuit( + 
builder, + yield_constr, + local_values, + next_values, + start_col + Y1_Y3_X2_X1, + bit_selector, + ); + for i in 0..24 { + let mul = builder.mul_extension( + bit_selector_val, + local_values[start_col + + Y1_Y3_X2_X1 + + FP_MULTIPLICATION_TOTAL_COLUMNS + + REDUCTION_TOTAL + + RANGE_CHECK_SELECTOR_OFFSET], + ); + let sub = builder.sub_extension( + local_values[start_col + Y1_Y3_X2_X1 + SUM_OFFSET + i], + local_values + [start_col + Y1_Y3_X2_X1 + FP_MULTIPLICATION_TOTAL_COLUMNS + REDUCE_X_OFFSET + i], + ); + let c = builder.mul_extension(mul, sub); + yield_constr.constraint(builder, c); + } + add_reduce_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + Y1_Y3_X2_X1 + FP_MULTIPLICATION_TOTAL_COLUMNS, + start_col + Y1_Y3_X2_X1 + MULTIPLICATION_SELECTOR_OFFSET, + bit_selector, + ); + add_range_check_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + Y1_Y3_X2_X1 + FP_MULTIPLICATION_TOTAL_COLUMNS + REDUCTION_TOTAL, + bit_selector, + ); + + for i in 0..12 { + let mul = builder.mul_extension( + bit_selector_val, + local_values[start_col + Y2_Y1_X1_X3 + MULTIPLICATION_SELECTOR_OFFSET], + ); + let sub = builder.sub_extension( + local_values[start_col + Y2_Y1_X1_X3 + X_INPUT_OFFSET + i], + local_values + [start_col + Y2_Y1_DIFF + FP_ADDITION_TOTAL + FP_SUBTRACTION_DIFF_OFFSET + i], + ); + let c = builder.mul_extension(mul, sub); + yield_constr.constraint(builder, c); + + let sub = builder.sub_extension( + local_values[start_col + Y2_Y1_X1_X3 + Y_INPUT_OFFSET + i], + local_values[start_col + X1_X3 + FP_ADDITION_TOTAL + FP_SUBTRACTION_DIFF_OFFSET + i], + ); + let c = builder.mul_extension(mul, sub); + yield_constr.constraint(builder, c); + } + add_multiplication_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + Y2_Y1_X1_X3, + bit_selector, + ); + for i in 0..24 { + let mul = builder.mul_extension( + bit_selector_val, + local_values[start_col + + 
Y2_Y1_X1_X3 + + FP_MULTIPLICATION_TOTAL_COLUMNS + + REDUCTION_TOTAL + + RANGE_CHECK_SELECTOR_OFFSET], + ); + let sub = builder.sub_extension( + local_values[start_col + Y2_Y1_X1_X3 + SUM_OFFSET + i], + local_values + [start_col + Y2_Y1_X1_X3 + FP_MULTIPLICATION_TOTAL_COLUMNS + REDUCE_X_OFFSET + i], + ); + let c = builder.mul_extension(mul, sub); + yield_constr.constraint(builder, c); + } + add_reduce_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + Y2_Y1_X1_X3 + FP_MULTIPLICATION_TOTAL_COLUMNS, + start_col + Y2_Y1_X1_X3 + MULTIPLICATION_SELECTOR_OFFSET, + bit_selector, + ); + add_range_check_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + Y2_Y1_X1_X3 + FP_MULTIPLICATION_TOTAL_COLUMNS + REDUCTION_TOTAL, + bit_selector, + ); + + for i in 0..12 { + let mul = builder.mul_extension( + bit_selector_val, + local_values[start_col + Y2_Y1_X1_X3 + MULTIPLICATION_SELECTOR_OFFSET], + ); + let sub = builder.sub_extension( + local_values + [start_col + Y2_Y1_X1_X3 + FP_MULTIPLICATION_TOTAL_COLUMNS + REDUCED_OFFSET + i], + local_values + [start_col + Y1_Y3_X2_X1 + FP_MULTIPLICATION_TOTAL_COLUMNS + REDUCED_OFFSET + i], + ); + let c = builder.mul_extension(mul, sub); + yield_constr.constraint(builder, c); + } +} diff --git a/casper-finality-proofs/src/verification/curves/starky/mod.rs b/casper-finality-proofs/src/verification/curves/starky/mod.rs new file mode 100644 index 000000000..b629c54b8 --- /dev/null +++ b/casper-finality-proofs/src/verification/curves/starky/mod.rs @@ -0,0 +1 @@ +pub mod g1; diff --git a/casper-finality-proofs/src/verification/fields/fp.rs b/casper-finality-proofs/src/verification/fields/fp.rs new file mode 100644 index 000000000..f097dc79d --- /dev/null +++ b/casper-finality-proofs/src/verification/fields/fp.rs @@ -0,0 +1,340 @@ +use num_bigint::ToBigUint; +use plonky2::{ + field::extension::Extendable, + hash::hash_types::RichField, + iop::{ + generator::{GeneratedValues, 
SimpleGenerator}, + target::Target, + witness::PartitionWitness, + }, + plonk::circuit_data::CommonCircuitData, + util::serialization::{Buffer, IoResult, Read, Write}, +}; +use plonky2x::{ + backend::circuit::PlonkParameters, + frontend::{ + builder::CircuitBuilder, + uint::num::{ + biguint::{ + BigUintTarget, CircuitBuilderBiguint, GeneratedValuesBigUint, WitnessBigUint, + }, + u32::gadgets::arithmetic_u32::U32Target, + }, + vars::BoolVariable, + }, +}; + +use crate::verification::utils::native_bls::{mod_inverse, modulus}; + +pub const LIMBS: usize = 12; +pub type FpTarget = BigUintTarget; + +pub fn serialize(fp: FpTarget, dst: &mut Vec) -> plonky2::util::serialization::IoResult<()> { + dst.write_target_vec(&fp.limbs.iter().map(|bt| bt.target).collect::>()) +} + +pub fn deserialize(src: &mut Buffer) -> IoResult { + let target_limbs = src.read_target_vec()?; + let limbs: Vec = target_limbs + .into_iter() + .map(|f| U32Target::from_target_unsafe(f)) + .collect(); + Ok(FpTarget { limbs }) +} + +pub fn fp_is_zero, const D: usize>( + builder: &mut CircuitBuilder, + input: &FpTarget, +) -> BoolVariable { + let zero = builder.api.zero_biguint(); + BoolVariable::from(builder.api.cmp_biguint(input, &zero)) +} + +pub fn fp_is_equal, const D: usize>( + builder: &mut CircuitBuilder, + a: &FpTarget, + b: &FpTarget, +) -> BoolVariable { + BoolVariable::from(a.limbs.iter().zip(b.limbs.iter()).fold( + builder.api.constant_bool(true), + |acc, (a_l, b_l)| { + let is_equal = builder.api.is_equal(a_l.target, b_l.target); + builder.api.and(acc, is_equal) + }, + )) +} + +pub fn range_check_fp, const D: usize>( + builder: &mut CircuitBuilder, + input: &FpTarget, +) { + let p = builder.api.constant_biguint(&modulus()); + let check = builder.api.cmp_biguint(&p, &input); + builder.api.assert_zero(check.target); +} + +pub fn add_fp, const D: usize>( + builder: &mut CircuitBuilder, + a: &FpTarget, + b: &FpTarget, +) -> FpTarget { + let zero = builder.api.zero(); + let p = 
builder.api.constant_biguint(&modulus()); + let res = builder.api.add_biguint(a, b); + let cmp = builder.api.cmp_biguint(&p, &res); + let sub_limbs = (0..12) + .into_iter() + .map(|i| U32Target::from_target_unsafe(builder.api.select(cmp, p.limbs[i].target, zero))) + .collect::>(); + let sub = BigUintTarget { limbs: sub_limbs }; + builder.api.sub_biguint(&res, &sub) +} + +pub fn negate_fp, const D: usize>( + builder: &mut CircuitBuilder, + input: &FpTarget, +) -> FpTarget { + let p = builder.api.constant_biguint(&modulus()); + builder.api.sub_biguint(&p, input) +} + +pub fn sub_fp, const D: usize>( + builder: &mut CircuitBuilder, + a: &FpTarget, + b: &FpTarget, +) -> FpTarget { + let minus_b = negate_fp(builder, b); + add_fp(builder, a, &minus_b) +} + +pub fn mul_fp, const D: usize>( + builder: &mut CircuitBuilder, + a: &FpTarget, + b: &FpTarget, +) -> FpTarget { + let p = builder.api.constant_biguint(&modulus()); + let res = builder.api.mul_biguint(a, b); + builder.api.rem_biguint(&res, &p) +} + +pub fn inv_fp, const D: usize>( + builder: &mut CircuitBuilder, + input: &FpTarget, +) -> FpTarget { + let one = builder.api.constant_biguint(&1u32.to_biguint().unwrap()); + let input_inv = builder.api.add_virtual_biguint_target_unsafe(LIMBS); + builder.api.add_simple_generator(FpInverseGenerator { + input: input.clone(), + input_inv: input_inv.clone(), + }); + range_check_fp(builder, &input_inv); + let mul = mul_fp(builder, input, &input_inv); + builder.api.connect_biguint(&mul, &one); + input_inv +} + +pub fn div_fp, const D: usize>( + builder: &mut CircuitBuilder, + a: &FpTarget, + b: &FpTarget, +) -> FpTarget { + let b_inv = inv_fp(builder, b); + mul_fp(builder, a, &b_inv) +} + +#[derive(Debug, Default)] +pub struct FpInverseGenerator { + input: BigUintTarget, + input_inv: BigUintTarget, +} + +impl, const D: usize> SimpleGenerator for FpInverseGenerator { + fn id(&self) -> String { + "FpInverseGenerator".to_string() + } + + fn dependencies(&self) -> Vec { + self.input 
+ .limbs + .iter() + .map(|l| l.target) + .collect::>() + } + + fn run_once(&self, witness: &PartitionWitness, out_buffer: &mut GeneratedValues) { + let input = witness.get_biguint_target(self.input.clone()); + let inverse = mod_inverse(input, modulus()); + out_buffer.set_biguint_target(&self.input_inv, &inverse); + } + + fn serialize(&self, dst: &mut Vec, _common_data: &CommonCircuitData) -> IoResult<()> { + serialize(self.input.clone(), dst)?; + serialize(self.input_inv.clone(), dst) + } + + fn deserialize(src: &mut Buffer, _common_data: &CommonCircuitData) -> IoResult + where + Self: Sized, + { + let input = deserialize(src)?; + let input_inv = deserialize(src)?; + Ok(Self { input, input_inv }) + } +} + +#[cfg(test)] +mod tests { + use std::str::FromStr; + + use num_bigint::BigUint; + use plonky2::field::{goldilocks_field::GoldilocksField, types::Field64}; + use plonky2x::frontend::{ + builder::DefaultBuilder, + uint::num::biguint::CircuitBuilderBiguint, + vars::{BoolVariable, Variable}, + }; + + use crate::verification::utils::native_bls::{modulus, Fp}; + use itertools::Itertools; + + use super::{div_fp, range_check_fp, sub_fp, LIMBS}; + + #[test] + fn test_subtraction_circuit() { + let mut builder = DefaultBuilder::new(); + let a_bigu = BigUint::from_str( + "1216495682195235861952885506871698490232894470117269383940381148575524314493849307811227440691167647909822763414941" + ).unwrap(); + let b_bigu = BigUint::from_str( + "2153848155426317245700560287567131132765685008362732985860101000686875894603366983854567186180519945327668975076337" + ).unwrap(); + + let a_fp = Fp::get_fp_from_biguint(a_bigu.clone()); + let b_fp = Fp::get_fp_from_biguint(b_bigu.clone()); + let expected_res_fp = a_fp - b_fp; + + let a_bigu_t = builder.api.constant_biguint(&a_bigu); + let b_bigu_t = builder.api.constant_biguint(&b_bigu); + + let res = sub_fp(&mut builder, &a_bigu_t, &b_bigu_t); + + // Define your circuit. 
+ let mut res_output: Vec = Vec::new(); + for k in 0..LIMBS { + builder.write(Variable(res.limbs[k].target)); + } + + // Build your circuit. + let circuit = builder.build(); + + // Write to the circuit input. + let input = circuit.input(); + + // Generate a proof. + let (proof, mut output) = circuit.prove(&input); + + // Verify proof. + circuit.verify(&proof, &input, &output); + + // Read output. + for _ in 0..LIMBS { + res_output.push(output.read::()) + } + + let biguint_res: BigUint = BigUint::new( + res_output + .iter() + .map(|f| (f.0 % GoldilocksField::ORDER) as u32) + .collect_vec(), + ); + + assert_eq!(biguint_res, expected_res_fp.to_biguint()); + } + + #[test] + fn test_division_circuit() { + let mut builder = DefaultBuilder::new(); + let a_bigu = BigUint::from_str( + "2153848155426317245700560287567131132765685008362732985860101000686875894603366983854567186180519945327668975076337" + ).unwrap(); + let b_bigu = BigUint::from_str( + "1216495682195235861952885506871698490232894470117269383940381148575524314493849307811227440691167647909822763414941" + ).unwrap(); + + let a_fp = Fp::get_fp_from_biguint(a_bigu.clone()); + let b_fp = Fp::get_fp_from_biguint(b_bigu.clone()); + let expected_res_fp = a_fp / b_fp; + + let a_bigu_t = builder.api.constant_biguint(&a_bigu); + let b_bigu_t = builder.api.constant_biguint(&b_bigu); + + let res = div_fp(&mut builder, &a_bigu_t, &b_bigu_t); + + // Define your circuit. + let mut res_output: Vec = Vec::new(); + for k in 0..LIMBS { + builder.write(Variable(res.limbs[k].target)); + } + + // Build your circuit. + let circuit = builder.build(); + + // Write to the circuit input. + let input = circuit.input(); + + // Generate a proof. + let (proof, mut output) = circuit.prove(&input); + + // Verify proof. + circuit.verify(&proof, &input, &output); + + // Read output. 
+ for _ in 0..LIMBS { + res_output.push(output.read::()) + } + + let biguint_res: BigUint = BigUint::new( + res_output + .iter() + .map(|f| (f.0 % GoldilocksField::ORDER) as u32) + .collect_vec(), + ); + + assert_eq!(biguint_res, expected_res_fp.to_biguint()); + } + + #[test] + fn test_range_check_fp() { + let mut builder = DefaultBuilder::new(); + let input_to_check = BigUint::from_str("234").unwrap(); + + let input_to_check_t = builder.api.constant_biguint(&input_to_check); + + let p = builder.api.constant_biguint(&modulus()); + let check = builder.api.cmp_biguint(&p, &input_to_check_t); + range_check_fp(&mut builder, &input_to_check_t); + + // Define your circuit. + let mut res_output: Vec = Vec::new(); + builder.write(BoolVariable::from(check).variable); + + // Build your circuit. + let circuit = builder.build(); + + // Write to the circuit input. + let input = circuit.input(); + + // Generate a proof. + let (proof, mut output) = circuit.prove(&input); + + // Verify proof. + circuit.verify(&proof, &input, &output); + + // Read output. 
+ res_output.push(output.read::()); + + let res = res_output[0].0 % GoldilocksField::ORDER; + + assert_eq!(res, 0); + } +} diff --git a/casper-finality-proofs/src/verification/fields/fp2.rs b/casper-finality-proofs/src/verification/fields/fp2.rs new file mode 100644 index 000000000..237dcaa5f --- /dev/null +++ b/casper-finality-proofs/src/verification/fields/fp2.rs @@ -0,0 +1,398 @@ +use num_bigint::ToBigUint; +use plonky2::{ + field::extension::Extendable, + hash::hash_types::RichField, + iop::{ + generator::{GeneratedValues, SimpleGenerator}, + target::{BoolTarget, Target}, + witness::PartitionWitness, + }, + plonk::circuit_data::CommonCircuitData, + util::serialization::{Buffer, IoResult}, +}; +use plonky2x::{ + backend::circuit::PlonkParameters, + frontend::{ + builder::CircuitBuilder, + uint::num::biguint::{CircuitBuilderBiguint, GeneratedValuesBigUint, WitnessBigUint}, + vars::BoolVariable, + }, +}; + +use crate::verification::utils::native_bls::{Fp, Fp2}; + +use super::fp::{ + add_fp, deserialize, fp_is_equal, fp_is_zero, mul_fp, negate_fp, range_check_fp, serialize, + sub_fp, FpTarget, LIMBS, +}; + +const TWO: usize = 2; +pub type Fp2Target = [FpTarget; TWO]; + +pub fn is_zero, const D: usize>( + builder: &mut CircuitBuilder, + input: &Fp2Target, +) -> BoolVariable { + let zero1 = fp_is_zero(builder, &input[0]); + let zero2 = fp_is_zero(builder, &input[1]); + builder.and(zero1, zero2) +} + +pub fn is_equal, const D: usize>( + builder: &mut CircuitBuilder, + a: &Fp2Target, + b: &Fp2Target, +) -> BoolVariable { + BoolVariable::from(a.iter().zip(b.iter()).fold( + builder.api.constant_bool(true), + |acc, (a_f, b_f)| { + let is_equal = fp_is_equal(builder, a_f, b_f); + builder + .api + .and(acc, BoolTarget::new_unsafe(is_equal.variable.0)) + }, + )) +} + +pub fn range_check_fp2, const D: usize>( + builder: &mut CircuitBuilder, + input: &Fp2Target, +) { + range_check_fp(builder, &input[0]); + range_check_fp(builder, &input[1]); +} + +pub fn sgn0_fp2, const D: 
usize>( + builder: &mut CircuitBuilder, + input: &Fp2Target, +) -> BoolVariable { + let two = builder.api.constant_biguint(&2u32.into()); + let sign0 = builder.api.rem_biguint(&input[0], &two); + let sign0_bool = BoolTarget::new_unsafe(sign0.limbs[0].target); + let zero0 = fp_is_zero(builder, &input[0]); + let sign1 = builder.api.rem_biguint(&input[1], &two); + let sign1_bool = BoolTarget::new_unsafe(sign1.limbs[0].target); + let zero_and_sign1 = builder.and(zero0, BoolVariable::from(sign1_bool)); + builder.or(BoolVariable::from(sign0_bool), zero_and_sign1) +} + +pub fn add_fp2, const D: usize>( + builder: &mut CircuitBuilder, + a: &Fp2Target, + b: &Fp2Target, +) -> Fp2Target { + let mut res = vec![]; + for i in 0..TWO { + res.push(add_fp(builder, &a[i], &b[i])); + } + res.try_into().unwrap() +} + +pub fn negate_fp2, const D: usize>( + builder: &mut CircuitBuilder, + input: &Fp2Target, +) -> Fp2Target { + let mut res = vec![]; + for i in 0..TWO { + res.push(negate_fp(builder, &input[i])); + } + res.try_into().unwrap() +} + +pub fn sub_fp2, const D: usize>( + builder: &mut CircuitBuilder, + a: &Fp2Target, + b: &Fp2Target, +) -> Fp2Target { + let minus_b = negate_fp2(builder, b); + add_fp2(builder, a, &minus_b) +} + +pub fn mul_fp2, const D: usize>( + builder: &mut CircuitBuilder, + a: &Fp2Target, + b: &Fp2Target, +) -> Fp2Target { + let t1 = mul_fp(builder, &a[0], &b[0]); + let t2 = mul_fp(builder, &a[1], &b[1]); + let t1t2 = add_fp(builder, &t1, &t2); + + let c0c1 = add_fp(builder, &a[0], &a[1]); + let r0r1 = add_fp(builder, &b[0], &b[1]); + let c0c1r0r1 = mul_fp(builder, &c0c1, &r0r1); + + let mut res = vec![]; + res.push(sub_fp(builder, &t1, &t2)); + res.push(sub_fp(builder, &c0c1r0r1, &t1t2)); + res.try_into().unwrap() +} + +pub fn inv_fp2, const D: usize>( + builder: &mut CircuitBuilder, + input: &Fp2Target, +) -> Fp2Target { + let one = builder.api.constant_biguint(&1u32.to_biguint().unwrap()); + let zero = 
builder.api.constant_biguint(&0u32.to_biguint().unwrap()); + let inv_c0 = builder.api.add_virtual_biguint_target_unsafe(LIMBS); + let inv_c1 = builder.api.add_virtual_biguint_target_unsafe(LIMBS); + let input_inv = [inv_c0, inv_c1]; + builder.api.add_simple_generator(Fp2InverseGenerator { + input: input.clone(), + input_inv: input_inv.clone(), + }); + range_check_fp2(builder, &input_inv); + let mul = mul_fp2(builder, input, &input_inv); + builder.api.connect_biguint(&mul[0], &one); + builder.api.connect_biguint(&mul[1], &zero); + input_inv +} + +pub fn div_fp2, const D: usize>( + builder: &mut CircuitBuilder, + a: &Fp2Target, + b: &Fp2Target, +) -> Fp2Target { + let b_inv = inv_fp2(builder, b); + mul_fp2(builder, a, &b_inv) +} + +pub fn frobenius_map, const D: usize>( + builder: &mut CircuitBuilder, + inp: &Fp2Target, + pow: usize, +) -> Fp2Target { + if pow % 2 == 0 { + inp.clone() + } else { + [inp[0].clone(), negate_fp(builder, &inp[1])] + } +} + +#[derive(Debug, Default)] +pub struct Fp2InverseGenerator { + input: Fp2Target, + input_inv: Fp2Target, +} + +impl, const D: usize> SimpleGenerator for Fp2InverseGenerator { + fn id(&self) -> String { + "Fp2InverseGenerator".to_string() + } + + fn dependencies(&self) -> Vec { + self.input + .iter() + .flat_map(|f| f.limbs.iter().map(|l| l.target)) + .collect::>() + } + + fn run_once(&self, witness: &PartitionWitness, out_buffer: &mut GeneratedValues) { + let c0 = witness.get_biguint_target(self.input[0].clone()); + let c1 = witness.get_biguint_target(self.input[1].clone()); + let input_fp2 = Fp2([Fp::get_fp_from_biguint(c0), Fp::get_fp_from_biguint(c1)]); + let inverse = input_fp2.invert(); + out_buffer.set_biguint_target(&self.input_inv[0], &inverse.0[0].to_biguint()); + out_buffer.set_biguint_target(&self.input_inv[1], &inverse.0[1].to_biguint()); + } + + fn serialize(&self, dst: &mut Vec, _common_data: &CommonCircuitData) -> IoResult<()> { + serialize(self.input[0].clone(), dst)?; + serialize(self.input[1].clone(), 
dst)?; + serialize(self.input_inv[0].clone(), dst)?; + serialize(self.input_inv[1].clone(), dst) + } + + fn deserialize(src: &mut Buffer, _common_data: &CommonCircuitData) -> IoResult + where + Self: Sized, + { + let c0 = deserialize(src)?; + let c1 = deserialize(src)?; + let inv_c0 = deserialize(src)?; + let inv_c1 = deserialize(src)?; + Ok(Self { + input: [c0, c1], + input_inv: [inv_c0, inv_c1], + }) + } +} + +#[cfg(test)] +mod tests { + + use itertools::Itertools; + use num_bigint::BigUint; + use plonky2::field::{goldilocks_field::GoldilocksField, types::Field64}; + use plonky2x::frontend::{ + builder::DefaultBuilder, uint::num::biguint::CircuitBuilderBiguint, vars::Variable, + }; + + use crate::verification::{ + fields::fp::LIMBS, + utils::native_bls::{Fp, Fp2}, + }; + + use super::{div_fp2, sub_fp2, TWO}; + + #[test] + fn test_subtraction_circuit() { + let mut builder = DefaultBuilder::new(); + let c0_fp = Fp([ + 1115400077, 734036635, 2658976793, 3446373348, 3797461211, 2799729988, 1086715089, + 1766116042, 3720719530, 4214563288, 2211874409, 287824937, + ]); + let c1_fp = Fp([ + 4070035387, 3598430679, 2371795623, 2598602036, 314293284, 3104159902, 3828298491, + 1770882328, 1026148559, 2003704675, 804131021, 382850433, + ]); + let r0_fp = Fp([ + 3944640261, 440162500, 3767697757, 767512216, 3185360355, 1355179671, 2310853452, + 2890628660, 2539693039, 3306767406, 473197245, 198293246, + ]); + let r1_fp = Fp([ + 920955909, 775806582, 2117093864, 286632291, 2248224021, 4208799968, 2272086148, + 4009382258, 291945614, 2017047933, 1541154483, 220533456, + ]); + let a_fp2 = Fp2([c0_fp, c1_fp]); + let b_fp2 = Fp2([r0_fp, r1_fp]); + let expected_res = a_fp2 - b_fp2; + + let a_fp2_bigu = a_fp2.to_biguint(); + let b_fp2_bigu = b_fp2.to_biguint(); + + let a_fp2_bigu_t = [ + builder.api.constant_biguint(&a_fp2_bigu[0]), + builder.api.constant_biguint(&a_fp2_bigu[1]), + ]; + let b_fp2_bigu_t = [ + builder.api.constant_biguint(&b_fp2_bigu[0]), + 
builder.api.constant_biguint(&b_fp2_bigu[1]), + ]; + + let res = sub_fp2(&mut builder, &a_fp2_bigu_t, &b_fp2_bigu_t); + + // Define your circuit. + let mut res_output: Vec = Vec::new(); + for i in 0..res.len() { + for j in 0..LIMBS { + builder.write(Variable(res[i].limbs[j].target)); + } + } + + // Build your circuit. + let circuit = builder.build(); + + // Write to the circuit input. + let input = circuit.input(); + + // Generate a proof. + let (proof, mut output) = circuit.prove(&input); + + // Verify proof. + circuit.verify(&proof, &input, &output); + + // Read output. + for _ in 0..res.len() { + for _ in 0..LIMBS { + res_output.push(output.read::()) + } + } + + let mut biguint_res: Vec = Vec::new(); + for i in 0..TWO { + biguint_res.push(BigUint::new( + res_output[(i * 12)..(i * 12) + 12] + .iter() + .map(|f| (f.0 % GoldilocksField::ORDER) as u32) + .collect_vec(), + )); + } + + let expected_res = expected_res.to_biguint(); + + for i in 0..TWO { + assert_eq!(expected_res[i], biguint_res[i]); + } + } + + #[test] + fn test_division_circuit() { + let mut builder = DefaultBuilder::new(); + let c0_fp = Fp([ + 1115400077, 734036635, 2658976793, 3446373348, 3797461211, 2799729988, 1086715089, + 1766116042, 3720719530, 4214563288, 2211874409, 287824937, + ]); + let c1_fp = Fp([ + 4070035387, 3598430679, 2371795623, 2598602036, 314293284, 3104159902, 3828298491, + 1770882328, 1026148559, 2003704675, 804131021, 382850433, + ]); + let r0_fp = Fp([ + 3944640261, 440162500, 3767697757, 767512216, 3185360355, 1355179671, 2310853452, + 2890628660, 2539693039, 3306767406, 473197245, 198293246, + ]); + let r1_fp = Fp([ + 920955909, 775806582, 2117093864, 286632291, 2248224021, 4208799968, 2272086148, + 4009382258, 291945614, 2017047933, 1541154483, 220533456, + ]); + let a_fp2 = Fp2([c0_fp, c1_fp]); + let b_fp2 = Fp2([r0_fp, r1_fp]); + let expected_res = a_fp2 / b_fp2; + + let a_fp2_bigu = a_fp2.to_biguint(); + let b_fp2_bigu = b_fp2.to_biguint(); + + let a_fp2_bigu_t = [ + 
builder.api.constant_biguint(&a_fp2_bigu[0]), + builder.api.constant_biguint(&a_fp2_bigu[1]), + ]; + let b_fp2_bigu_t = [ + builder.api.constant_biguint(&b_fp2_bigu[0]), + builder.api.constant_biguint(&b_fp2_bigu[1]), + ]; + + let res = div_fp2(&mut builder, &a_fp2_bigu_t, &b_fp2_bigu_t); + + // Define your circuit. + let mut res_output: Vec = Vec::new(); + for i in 0..res.len() { + for j in 0..LIMBS { + builder.write(Variable(res[i].limbs[j].target)); + } + } + + // Build your circuit. + let circuit = builder.build(); + + // Write to the circuit input. + let input = circuit.input(); + + // Generate a proof. + let (proof, mut output) = circuit.prove(&input); + + // Verify proof. + circuit.verify(&proof, &input, &output); + + // Read output. + for _ in 0..res.len() { + for _ in 0..LIMBS { + res_output.push(output.read::()) + } + } + + let mut biguint_res: Vec = Vec::new(); + for i in 0..TWO { + biguint_res.push(BigUint::new( + res_output[(i * 12)..(i * 12) + 12] + .iter() + .map(|f| (f.0 % GoldilocksField::ORDER) as u32) + .collect_vec(), + )); + } + + let expected_res = expected_res.to_biguint(); + + for i in 0..TWO { + assert_eq!(expected_res[i], biguint_res[i]); + } + } +} diff --git a/casper-finality-proofs/src/verification/fields/mod.rs b/casper-finality-proofs/src/verification/fields/mod.rs new file mode 100644 index 000000000..b7eca5322 --- /dev/null +++ b/casper-finality-proofs/src/verification/fields/mod.rs @@ -0,0 +1,3 @@ +pub mod starky; +pub mod fp2; +pub mod fp; \ No newline at end of file diff --git a/casper-finality-proofs/src/verification/fields/starky/fp.rs b/casper-finality-proofs/src/verification/fields/starky/fp.rs new file mode 100644 index 000000000..d5f2da2bd --- /dev/null +++ b/casper-finality-proofs/src/verification/fields/starky/fp.rs @@ -0,0 +1,1964 @@ +//! This module contains functions for filling the stark trace and adding constraints for the corresponding trace for some Fp operations (multiplication, addition, subtraction, etc). 
One fp element is represented as \[u32; 12\] inside the trace. +use num_bigint::{BigUint, ToBigUint}; +use plonky2::{ + field::{ + extension::{Extendable, FieldExtension}, + packed::PackedField, + types::Field, + }, + hash::hash_types::RichField, + iop::ext_target::ExtensionTarget, + plonk::circuit_builder::CircuitBuilder, +}; +use starky::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer}; +use crate::verification::utils::{ + native_bls::{ + add_u32_slices, add_u32_slices_12, get_bits_as_array, get_div_rem_modulus_from_biguint_12, + get_selector_bits_from_u32, get_u32_vec_from_literal, get_u32_vec_from_literal_24, modulus, + mul_u32_slice_u32, multiply_by_slice, sub_u32_slices, sub_u32_slices_12, Fp, + }, + starky_utils::*, +}; + +// Fp Multiplication layout offsets +/* + These trace offsets are for long multiplication. The inputs are each of 12 limbs. The trace needs 12 rows. + to compute the result of the multiplication. The final result is stored in the slice [SUM_OFFSET..SUM_OFFSET+24]. + X_INPUT_OFFSET -> offset at which the first input is set. + Y_INPUT_OFFSET -> offset at which the second input is set. + XY_OFFSET -> offset of x * y[i] where 0 <= i < 12. + XY_CARRIES_OFFSET -> offset of carries which resulted from the operation x * y[i]. + SHIFTED_XY_OFFSET -> offset at which the shifted values of x * y\[i\] are set. In long multiplication, the multiplication of the i-th digit + is set after shifting the result by i places. This is exactly that shift. The maximum shift can be 11, hence the maximum result can be of + length 24. Therefore, 24 placesa are reserved for this field. + SELECTOR_OFFSET -> offset specifying which index of y are we using for multiplication in the current row. Total 12 selectors, one for each limb. + SUM_OFFSET -> offset at which the sum of the individual multiplications done so far are stored. + SUM_CARRIES_OFFSET -> offset of carries which resulted from the additions. 
+ MULTIPLICATION_SELECTOR_OFFSET -> Selector to ensure that the input is same across all rows. Set 1 in all rows except last one. + MULTIPLICATION_FIRST_ROW_OFFSET -> Selector to indicate the first row of multiplication operation +*/ +pub const X_INPUT_OFFSET: usize = 0; +pub const Y_INPUT_OFFSET: usize = X_INPUT_OFFSET + 12; +pub const XY_OFFSET: usize = Y_INPUT_OFFSET + 12; +pub const XY_CARRIES_OFFSET: usize = XY_OFFSET + 13; +pub const SHIFTED_XY_OFFSET: usize = XY_CARRIES_OFFSET + 12; +pub const SELECTOR_OFFSET: usize = SHIFTED_XY_OFFSET + 24; +pub const SUM_OFFSET: usize = SELECTOR_OFFSET + 12; +pub const SUM_CARRIES_OFFSET: usize = SUM_OFFSET + 24; +pub const MULTIPLICATION_SELECTOR_OFFSET: usize = SUM_CARRIES_OFFSET + 24; +pub const MULTIPLICATION_FIRST_ROW_OFFSET: usize = MULTIPLICATION_SELECTOR_OFFSET + 1; + +pub const FP_MULTIPLICATION_TOTAL_COLUMNS: usize = MULTIPLICATION_FIRST_ROW_OFFSET + 1; + +// Non reduced addition layout offsets +/* + These trace offsets are for long addition. The inputs are 24 limbs each. The trace needs 1 row to compute the result. + ADDITION_CHECK_OFFSET -> Selector to indicate this operation is on. + ADDITION_X_OFFSET -> offset at which first input set. + ADDITION_Y_OFFSET -> offset at which first second set. + ADDITION_SUM_OFFSET -> offset at which the result of the addition is set. + ADDITION_CARRY_OFFSET -> offset of carries which resulted from the addition operation. +*/ +pub const ADDITION_CHECK_OFFSET: usize = 0; +pub const ADDITION_X_OFFSET: usize = ADDITION_CHECK_OFFSET + 1; +pub const ADDITION_Y_OFFSET: usize = ADDITION_X_OFFSET + 24; +pub const ADDITION_SUM_OFFSET: usize = ADDITION_Y_OFFSET + 24; +pub const ADDITION_CARRY_OFFSET: usize = ADDITION_SUM_OFFSET + 24; +pub const ADDITION_TOTAL: usize = ADDITION_CARRY_OFFSET + 24; + +// Non reduced subtraction layout offsets +/* + These trace offsets are for long subtraction. The inputs are 24 limbs each. The trace needs 1 row to compute the result. Assume x > y. 
+ SUBTRACTION_CHECK_OFFSET -> Selector to indicate this operation is on. + SUBTRACTION_X_OFFSET -> offset at which first input set. + SUBTRACTION_Y_OFFSET -> offset at which first second set. + SUBTRACTION_SUM_OFFSET -> offset at which the result of the subtraction is set. + SUBTRACTION_CARRY_OFFSET -> offset of borrows which resulted from the subtraction operation. +*/ +pub const SUBTRACTION_CHECK_OFFSET: usize = 0; +pub const SUBTRACTION_X_OFFSET: usize = SUBTRACTION_CHECK_OFFSET + 1; +pub const SUBTRACTION_Y_OFFSET: usize = SUBTRACTION_X_OFFSET + 24; +pub const SUBTRACTION_DIFF_OFFSET: usize = SUBTRACTION_Y_OFFSET + 24; +pub const SUBTRACTION_BORROW_OFFSET: usize = SUBTRACTION_DIFF_OFFSET + 24; +pub const SUBTRACTION_TOTAL: usize = SUBTRACTION_BORROW_OFFSET + 24; + +// Reduce and rangecheck layout offsets +/* + These trace offsets are for reducing a [u32; 24] input with the bls12-381 field prime. Ensures, x = d*p + r. Where x is the input, + d is the quotient, p is the prime and r is the reduced output. The trace needs 12 rows. + REDUCE_MULTIPLICATION_OFFSET -> offset at which the multiplication operation is done. + REDUCE_X_OFFSET -> offset at which input is set. + REDUCTION_ADDITION_OFFSET -> offset at which addition operation is done. + REDUCED_OFFSET -> offset at which the reduced value is set +*/ +pub const REDUCE_MULTIPLICATION_OFFSET: usize = 0; +pub const REDUCE_X_OFFSET: usize = REDUCE_MULTIPLICATION_OFFSET + FP_MULTIPLICATION_TOTAL_COLUMNS; +pub const REDUCTION_ADDITION_OFFSET: usize = REDUCE_X_OFFSET + 24; +pub const REDUCED_OFFSET: usize = REDUCTION_ADDITION_OFFSET + ADDITION_TOTAL; +pub const REDUCTION_TOTAL: usize = REDUCED_OFFSET + 12; + +// Rangecheck offsets +// whenever range check is used, start_col - 12 will contain the element being rangechecked +/* + These trace offsets are for checking if a given input is less than the bls12-381 field prime. Needs 1 row for the computation. The check works as follows -> + 1. 
Compute y = (2**382 - p + x) + 2. If (y>>382)&1 == 0, then x in less than p. + RANGE_CHECK_SELECTOR_OFFSET -> selector to indicate this operation is on. + RANGE_CHECK_SUM_OFFSET -> offset which stores the sum. + RANGE_CHECK_SUM_CARRY_OFFSET -> offset which stores the carries resulted from the addition operation. + RANGE_CHECK_BIT_DECOMP_OFFSET -> offset at which the bit decomposition of the most significant limb of the sum is stored. +*/ +pub const RANGE_CHECK_SELECTOR_OFFSET: usize = 0; +pub const RANGE_CHECK_SUM_OFFSET: usize = RANGE_CHECK_SELECTOR_OFFSET + 1; +pub const RANGE_CHECK_SUM_CARRY_OFFSET: usize = RANGE_CHECK_SUM_OFFSET + 12; +pub const RANGE_CHECK_BIT_DECOMP_OFFSET: usize = RANGE_CHECK_SUM_CARRY_OFFSET + 12; +pub const RANGE_CHECK_TOTAL: usize = RANGE_CHECK_BIT_DECOMP_OFFSET + 32; + +// Fp addition layout offsets +/* + These trace offsets are for long addition. The inputs are 12 limbs each. The trace needs 1 row to compute the result. + FP_ADDITION_CHECK_OFFSET -> Selector to indicate this operation is on. + FP_ADDITION_X_OFFSET -> offset at which first input set. + FP_ADDITION_Y_OFFSET -> offset at which first second set. + FP_ADDITION_SUM_OFFSET -> offset at which the result of the addition is set. + FP_ADDITION_CARRY_OFFSET -> offset of carries which resulted from the addition operation. +*/ +pub const FP_ADDITION_CHECK_OFFSET: usize = 0; +pub const FP_ADDITION_X_OFFSET: usize = FP_ADDITION_CHECK_OFFSET + 1; +pub const FP_ADDITION_Y_OFFSET: usize = FP_ADDITION_X_OFFSET + 12; +pub const FP_ADDITION_SUM_OFFSET: usize = FP_ADDITION_Y_OFFSET + 12; +pub const FP_ADDITION_CARRY_OFFSET: usize = FP_ADDITION_SUM_OFFSET + 12; +pub const FP_ADDITION_TOTAL: usize = FP_ADDITION_CARRY_OFFSET + 12; + +// Fp subtraction layout offsets +/* + These trace offsets are for long subtraction. The inputs are 12 limbs each. The trace needs 1 row to compute the result. Assume x > y. + FP_SUBTRACTION_CHECK_OFFSET -> Selector to indicate this operation is on. 
+ FP_SUBTRACTION_X_OFFSET -> offset at which first input set. + FP_SUBTRACTION_Y_OFFSET -> offset at which first second set. + FP_SUBTRACTION_SUM_OFFSET -> offset at which the result of the subtraction is set. + FP_SUBTRACTION_CARRY_OFFSET -> offset of borrows which resulted from the subtraction operation. +*/ +pub const FP_SUBTRACTION_CHECK_OFFSET: usize = 0; +pub const FP_SUBTRACTION_X_OFFSET: usize = FP_SUBTRACTION_CHECK_OFFSET + 1; +pub const FP_SUBTRACTION_Y_OFFSET: usize = FP_SUBTRACTION_X_OFFSET + 12; +pub const FP_SUBTRACTION_DIFF_OFFSET: usize = FP_SUBTRACTION_Y_OFFSET + 12; +pub const FP_SUBTRACTION_BORROW_OFFSET: usize = FP_SUBTRACTION_DIFF_OFFSET + 12; +pub const FP_SUBTRACTION_TOTAL: usize = FP_SUBTRACTION_BORROW_OFFSET + 12; + +// Fp multiply single +/* + These trace offsets are for long multiplication. The first input is 12 limbs, the second input is 1 limb. The trace needs 1 row to compute the result. + FP_MULTIPLY_SINGLE_CHECK_OFFSET -> Selector to indicate this operation is on. + FP_MULTIPLY_SINGLE_X_OFFSET -> offset at which first input set. + FP_MULTIPLY_SINGLE_Y_OFFSET -> offset at which first second set. + FP_MULTIPLY_SINGLE_SUM_OFFSET -> offset at which the result of the addition is set. + FP_MULTIPLY_SINGLE_CARRY_OFFSET -> offset of carries which resulted from the addition operation. +*/ +pub const FP_MULTIPLY_SINGLE_CHECK_OFFSET: usize = 0; +pub const FP_MULTIPLY_SINGLE_X_OFFSET: usize = FP_MULTIPLY_SINGLE_CHECK_OFFSET + 1; +pub const FP_MULTIPLY_SINGLE_Y_OFFSET: usize = FP_MULTIPLY_SINGLE_X_OFFSET + 12; +pub const FP_MULTIPLY_SINGLE_SUM_OFFSET: usize = FP_MULTIPLY_SINGLE_Y_OFFSET + 1; +pub const FP_MULTIPLY_SINGLE_CARRY_OFFSET: usize = FP_MULTIPLY_SINGLE_SUM_OFFSET + 12; +pub const FP_MULTIPLY_SINGLE_TOTAL: usize = FP_MULTIPLY_SINGLE_CARRY_OFFSET + 12; + +// Fp reduce rangecheck single +/* + These trace offsets are for for reducing a [u32; 12] input with the bls12-381 field prime. Ensures, x = d*p + r. 
Where x is the input, + d is the quotient, p is the prime and r is the reduced output. + FP_SINGLE_REDUCE_MULTIPLICATION_OFFSET -> offset at which the multiplication operation is done. + FP_SINGLE_REDUCE_X_OFFSET -> offset at which input is set. + FP_SINGLE_REDUCTION_ADDITION_OFFSET -> offset at which addition operation is done. + FP_SINGLE_REDUCED_OFFSET -> offset at which the reduced value is set +*/ +pub const FP_SINGLE_REDUCE_MULTIPLICATION_OFFSET: usize = 0; +pub const FP_SINGLE_REDUCE_X_OFFSET: usize = + FP_SINGLE_REDUCE_MULTIPLICATION_OFFSET + FP_MULTIPLY_SINGLE_TOTAL; +pub const FP_SINGLE_REDUCTION_ADDITION_OFFSET: usize = FP_SINGLE_REDUCE_X_OFFSET + 12; +pub const FP_SINGLE_REDUCED_OFFSET: usize = FP_SINGLE_REDUCTION_ADDITION_OFFSET + FP_ADDITION_TOTAL; +pub const FP_SINGLE_REDUCE_TOTAL: usize = FP_SINGLE_REDUCED_OFFSET + 12; + +macro_rules! bit_decomp_32 { + ($row:expr, $col:expr, $f:ty, $p:ty) => { + ((0..32).fold(<$p>::ZEROS, |acc, i| { + acc + $row[$col + i] * <$f>::from_canonical_u64(1 << i) + })) + }; +} + +macro_rules! bit_decomp_32_circuit { + ($builder:expr, $row:expr, $col:expr, $f:ty) => {{ + let zero = $builder.constant_extension(<$f>::Extension::ZERO); + ((0..32).fold(zero, |acc, i| { + let tmp_const = + $builder.constant_extension(<$f>::Extension::from_canonical_u64(1 << i)); + let mul_tmp = $builder.mul_extension($row[$col + i], tmp_const); + $builder.add_extension(acc, mul_tmp) + })) + }}; +} + +/// Fills the stark trace of addition following long addition. Inputs are 24 limbs each. Needs 1 row. 
+pub fn fill_addition_trace, const D: usize, const C: usize>( + trace: &mut Vec<[F; C]>, + x: &[u32; 24], + y: &[u32; 24], + row: usize, + start_col: usize, +) { + trace[row][start_col + ADDITION_CHECK_OFFSET] = F::ONE; + let (x_y_sum, x_y_sum_carry) = add_u32_slices(&x, &y); + assign_u32_in_series(trace, row, start_col + ADDITION_X_OFFSET, x); + assign_u32_in_series(trace, row, start_col + ADDITION_Y_OFFSET, y); + assign_u32_in_series(trace, row, start_col + ADDITION_SUM_OFFSET, &x_y_sum); + assign_u32_in_series( + trace, + row, + start_col + ADDITION_CARRY_OFFSET, + &x_y_sum_carry, + ); +} + +/// Fills the stark trace of addition following long addition. Inputs are 12 limbs each. Needs 1 row. +pub fn fill_trace_addition_fp, const D: usize, const C: usize>( + trace: &mut Vec<[F; C]>, + x: &[u32; 12], + y: &[u32; 12], + row: usize, + start_col: usize, +) { + trace[row][start_col + FP_ADDITION_CHECK_OFFSET] = F::ONE; + let (x_y_sum, x_y_sum_carry) = add_u32_slices_12(&x, &y); + assign_u32_in_series(trace, row, start_col + FP_ADDITION_X_OFFSET, x); + assign_u32_in_series(trace, row, start_col + FP_ADDITION_Y_OFFSET, y); + assign_u32_in_series(trace, row, start_col + FP_ADDITION_SUM_OFFSET, &x_y_sum); + assign_u32_in_series( + trace, + row, + start_col + FP_ADDITION_CARRY_OFFSET, + &x_y_sum_carry, + ); +} + +/// Fills the stark trace of negation. Input is 12 limbs. Needs 1 row. In essence, it fills an addition trace with inputs as `x` and `-x`. +pub fn fill_trace_negate_fp, const D: usize, const C: usize>( + trace: &mut Vec<[F; C]>, + x: &[u32; 12], + row: usize, + start_col: usize, +) { + let minus_x = (-Fp(x.to_owned())).0; + fill_trace_addition_fp(trace, x, &minus_x, row, start_col); +} + +/// Fills the stark trace of subtraction following long subtraction. Inputs are 24 limbs each. Needs 1 row. Assume x > y. 
+pub fn fill_subtraction_trace, const D: usize, const C: usize>( + trace: &mut Vec<[F; C]>, + x: &[u32; 24], + y: &[u32; 24], + row: usize, + start_col: usize, +) { + trace[row][start_col + SUBTRACTION_CHECK_OFFSET] = F::ONE; + let (x_y_diff, x_y_diff_borrow) = sub_u32_slices(&x, &y); + assign_u32_in_series(trace, row, start_col + SUBTRACTION_X_OFFSET, x); + assign_u32_in_series(trace, row, start_col + SUBTRACTION_Y_OFFSET, y); + assign_u32_in_series(trace, row, start_col + SUBTRACTION_DIFF_OFFSET, &x_y_diff); + assign_u32_in_series( + trace, + row, + start_col + SUBTRACTION_BORROW_OFFSET, + &x_y_diff_borrow, + ); +} + +/// Fills the stark trace of subtraction following long subtraction. Inputs are 12 limbs each. Needs 1 row. Assume x > y. +pub fn fill_trace_subtraction_fp, const D: usize, const C: usize>( + trace: &mut Vec<[F; C]>, + x: &[u32; 12], + y: &[u32; 12], + row: usize, + start_col: usize, +) { + trace[row][start_col + FP_SUBTRACTION_CHECK_OFFSET] = F::ONE; + let (x_y_diff, x_y_borrow) = sub_u32_slices_12(&x, &y); + assign_u32_in_series(trace, row, start_col + FP_SUBTRACTION_X_OFFSET, x); + assign_u32_in_series(trace, row, start_col + FP_SUBTRACTION_Y_OFFSET, y); + assign_u32_in_series( + trace, + row, + start_col + FP_SUBTRACTION_DIFF_OFFSET, + &x_y_diff, + ); + assign_u32_in_series( + trace, + row, + start_col + FP_SUBTRACTION_BORROW_OFFSET, + &x_y_borrow, + ); +} + +/// Fills the stark trace of multiplication following long multiplication. Inputs are 12 limbs and 1 limb respectively. Needs 1 row. 
+pub fn fill_trace_multiply_single_fp< + F: RichField + Extendable, + const D: usize, + const C: usize, +>( + trace: &mut Vec<[F; C]>, + x: &[u32; 12], + y: u32, + row: usize, + start_col: usize, +) { + trace[row][start_col + FP_MULTIPLY_SINGLE_CHECK_OFFSET] = F::ONE; + let (x_y_sum, x_y_carry) = mul_u32_slice_u32(x, y); + assign_u32_in_series(trace, row, start_col + FP_MULTIPLY_SINGLE_X_OFFSET, x); + trace[row][start_col + FP_MULTIPLY_SINGLE_Y_OFFSET] = F::from_canonical_u32(y); + assign_u32_in_series( + trace, + row, + start_col + FP_MULTIPLY_SINGLE_SUM_OFFSET, + &x_y_sum, + ); + assign_u32_in_series( + trace, + row, + start_col + FP_MULTIPLY_SINGLE_CARRY_OFFSET, + &x_y_carry, + ); +} + +/// Fills the stark trace of reducing wrt modulo p. Input is 12 limbs. Needs 1 row. Returns the answer as \[u32; 12\]. +pub fn fill_trace_reduce_single, const D: usize, const C: usize>( + trace: &mut Vec<[F; C]>, + x: &[u32; 12], + row: usize, + start_col: usize, +) -> [u32; 12] { + let (div, rem) = get_div_rem_modulus_from_biguint_12(BigUint::new(x.to_vec())); + let div = div[0]; + let modulus = get_u32_vec_from_literal(modulus()); + fill_trace_multiply_single_fp( + trace, + &modulus, + div, + row, + start_col + FP_SINGLE_REDUCE_MULTIPLICATION_OFFSET, + ); + assign_u32_in_series(trace, row, start_col + FP_SINGLE_REDUCE_X_OFFSET, x); + let div_x_mod = + get_u32_vec_from_literal(div.to_biguint().unwrap() * BigUint::new(modulus.to_vec())); + assign_u32_in_series(trace, row, start_col + FP_SINGLE_REDUCED_OFFSET, &rem); + fill_trace_addition_fp( + trace, + &div_x_mod, + &rem, + row, + start_col + FP_SINGLE_REDUCTION_ADDITION_OFFSET, + ); + rem +} + +/// Fills the stark trace for range check operation wrt the field prime p. Input is 12 limbs. Needs 1 row. 
+pub fn fill_range_check_trace, const D: usize, const C: usize>( + trace: &mut Vec<[F; C]>, + x: &[u32; 12], + row: usize, + start_col: usize, +) { + let y = (BigUint::from(1u32) << 382) - modulus(); + let y_u32 = get_u32_vec_from_literal(y); + let (x_y_sum, x_y_carry) = add_u32_slices_12(&x, &y_u32); + trace[row][start_col + RANGE_CHECK_SELECTOR_OFFSET] = F::ONE; + assign_u32_in_series(trace, row, start_col + RANGE_CHECK_SUM_OFFSET, &x_y_sum); + assign_u32_in_series( + trace, + row, + start_col + RANGE_CHECK_SUM_CARRY_OFFSET, + &x_y_carry, + ); + assign_u32_in_series( + trace, + row, + start_col + RANGE_CHECK_BIT_DECOMP_OFFSET, + &get_bits_as_array(x_y_sum[11]), + ); +} + +/// Fills stark trace for multiplication following long multiplication. Inputs are 12 limbs each. Needs 12 rows. +pub fn fill_multiplication_trace_no_mod_reduction< + F: RichField + Extendable, + const D: usize, + const C: usize, +>( + trace: &mut Vec<[F; C]>, + x: &[u32; 12], + y: &[u32; 12], + start_row: usize, + end_row: usize, + start_col: usize, +) { + let mut selector = 1; + // Inputs are filled from start_row..end_row + 1 + trace[start_row][start_col + MULTIPLICATION_FIRST_ROW_OFFSET] = F::ONE; + for i in start_row..start_row + 11 { + trace[i][start_col + MULTIPLICATION_SELECTOR_OFFSET] = F::ONE; + } + for row in start_row..end_row + 1 { + assign_u32_in_series(trace, row, start_col + X_INPUT_OFFSET, x); + assign_u32_in_series(trace, row, start_col + Y_INPUT_OFFSET, y); + let selector_u32 = get_selector_bits_from_u32(selector); + assign_u32_in_series(trace, row, start_col + SELECTOR_OFFSET, &selector_u32); + selector *= 2; + } + + // We have calcualted multiplying two max bls12_381 Fp numbers + // dont exceed [u32; 24] so no need of [u32; 25] + let mut prev_xy_sum = [0u32; 24]; + + for i in 0..12 { + let (xy, xy_carry) = multiply_by_slice(&x, y[i]); + assign_u32_in_series(trace, start_row + i, start_col + XY_OFFSET, &xy); + assign_u32_in_series( + trace, + start_row + i, + start_col + 
XY_CARRIES_OFFSET, + &xy_carry, + ); + + // fill shifted XY's + // XY's will have 0-11 number of shifts in their respective rows + let mut xy_shifted = [0u32; 24]; + for j in 0..13 { + let shift = i; + xy_shifted[j + shift] = xy[j]; + } + assign_u32_in_series( + trace, + start_row + i, + start_col + SHIFTED_XY_OFFSET, + &xy_shifted, + ); + + // Fill XY_SUM, XY_SUM_CARRIES + let (xy_sum, xy_sum_carry) = add_u32_slices(&xy_shifted, &prev_xy_sum); + assign_u32_in_series(trace, start_row + i, start_col + SUM_OFFSET, &xy_sum); + assign_u32_in_series( + trace, + start_row + i, + start_col + SUM_CARRIES_OFFSET, + &xy_sum_carry, + ); + + prev_xy_sum = xy_sum; + } +} + +/// Fills the stark trace of reducing wrt modulo p. Input is 24 limbs. Needs 12 rows. Set addition selector to 1 only in the 11th row, because that's where multiplication result is set. Returns the answer as \[u32; 12\]. +pub fn fill_reduction_trace, const D: usize, const C: usize>( + trace: &mut Vec<[F; C]>, + x: &[u32; 24], + start_row: usize, + end_row: usize, + start_col: usize, +) -> [u32; 12] { + let (div, rem) = get_div_rem_modulus_from_biguint_12(BigUint::new(x.to_vec())); + let modulus = get_u32_vec_from_literal(modulus()); + fill_multiplication_trace_no_mod_reduction( + trace, + &div, + &modulus, + start_row, + end_row, + start_col + REDUCE_MULTIPLICATION_OFFSET, + ); + + for row in start_row..end_row + 1 { + assign_u32_in_series(trace, row, start_col + REDUCE_X_OFFSET, x); + } + + let div_x_mod = + get_u32_vec_from_literal_24(BigUint::new(div.to_vec()) * BigUint::new(modulus.to_vec())); + + for i in start_row..end_row + 1 { + assign_u32_in_series(trace, i, start_col + REDUCED_OFFSET, &rem); + } + let mut rem_24 = [0u32; 24]; + rem_24[0..12].copy_from_slice(&rem); + + fill_addition_trace( + trace, + &div_x_mod, + &rem_24, + start_row + 11, + start_col + REDUCTION_ADDITION_OFFSET, + ); + rem +} + +/// Constraints the operation for multiplication of two \[u32; 12\]. 
+/// +/// Constraint the input values across this row and next row wherever selector is on. +/// +/// Constraints the following -> `selector[i] * (product[j] + carries[j]*(2**32) - x[j] * y[i] - carries[j-1]) == 0`. for 0 <= j < 12, for 0 <= i < 12. +/// which encapsulates the condition "either selector is off or the multiplication is correct". +/// +/// Constraints the shifted value with product of the current limb as `selector[i] * (shifted[i + j] - product[j]) == 0`. for 0 <= j < 12, for 0 <= i < 12. +/// which encapsulates the condition "either selector is off or product is shifted by i places". +/// +/// Constraint the first row of multiplication that `sum == shifted` for all limbs +/// +/// Constraints `next_row_sum[i] + next_row_carries[i]*(2**32) == curr_row_sum[i] + shifted[i] + next_row_carries[i-1]` for 0 <= i < 24. +pub fn add_multiplication_constraints< + F: RichField + Extendable, + const D: usize, + FE, + P, + const D2: usize, +>( + local_values: &[P], + next_values: &[P], + yield_constr: &mut ConstraintConsumer

, + start_col: usize, // Starting column of your multiplication trace + bit_selector: Option

, +) where + FE: FieldExtension, + P: PackedField, +{ + // Constrains the X and Y is filled same across the rows + let bit_selector_val = bit_selector.unwrap_or(P::ONES); + for i in 0..12 { + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + MULTIPLICATION_SELECTOR_OFFSET] + * (local_values[start_col + X_INPUT_OFFSET + i] + - next_values[start_col + X_INPUT_OFFSET + i]), + ); + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + MULTIPLICATION_SELECTOR_OFFSET] + * (local_values[start_col + Y_INPUT_OFFSET + i] + - next_values[start_col + Y_INPUT_OFFSET + i]), + ); + } + + // Constrain that multiplication happens correctly at each level + for i in 0..12 { + for j in 0..12 { + if j == 0 { + yield_constr.constraint_transition( + //local_values[start_col + MULTIPLICATION_SELECTOR_OFFSET] * + bit_selector_val + * local_values[start_col + SELECTOR_OFFSET + i] + * (local_values[start_col + X_INPUT_OFFSET + j] + * local_values[start_col + Y_INPUT_OFFSET + i] + - local_values[start_col + XY_OFFSET + j] + - (local_values[start_col + XY_CARRIES_OFFSET + j] + * FE::from_canonical_u64(1 << 32))), + ) + } else { + yield_constr.constraint_transition( + //local_values[start_col + MULTIPLICATION_SELECTOR_OFFSET] * + bit_selector_val + * local_values[start_col + SELECTOR_OFFSET + i] + * (local_values[start_col + X_INPUT_OFFSET + j] + * local_values[start_col + Y_INPUT_OFFSET + i] + + local_values[start_col + XY_CARRIES_OFFSET + j - 1] + - local_values[start_col + XY_OFFSET + j] + - (local_values[start_col + XY_CARRIES_OFFSET + j] + * FE::from_canonical_u64(1 << 32))), + ) + } + } + } + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + MULTIPLICATION_SELECTOR_OFFSET] + * (local_values[start_col + XY_OFFSET + 12] + - local_values[start_col + XY_CARRIES_OFFSET + 11]), + ); + + // Constrain XY SHIFTING + for i in 0..12 { + // shift is decided by selector + for j in 0..13 { + 
yield_constr.constraint_transition( + //local_values[start_col + MULTIPLICATION_SELECTOR_OFFSET] * + bit_selector_val + * local_values[start_col + SELECTOR_OFFSET + i] + * (local_values[start_col + SHIFTED_XY_OFFSET + j + i] + - local_values[start_col + XY_OFFSET + j]), + ) + } + } + + // Constrain addition at each row + // 1. Constrain XY_SUM at row 0 is same as XY_SHIFTED + // 2. Constrain XY_SUM_CARRIES at row 0 are all 0 + for j in 0..24 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + MULTIPLICATION_FIRST_ROW_OFFSET] + * (local_values[start_col + SUM_OFFSET + j] + - local_values[start_col + SHIFTED_XY_OFFSET + j]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + MULTIPLICATION_FIRST_ROW_OFFSET] + * local_values[start_col + SUM_CARRIES_OFFSET + j], + ) + } + // yield_constr.constraint_first_row(//local_values[start_col + MULTIPLICATION_SELECTOR_OFFSET] * + // local_values[start_col + SUM_OFFSET + 24]); + + // 3. Constrain addition + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + MULTIPLICATION_SELECTOR_OFFSET] + * (next_values[start_col + SUM_OFFSET] + + (next_values[start_col + SUM_CARRIES_OFFSET] * FE::from_canonical_u64(1 << 32)) + - next_values[start_col + SHIFTED_XY_OFFSET] + - local_values[start_col + SUM_OFFSET]), + ); + + for j in 1..24 { + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + MULTIPLICATION_SELECTOR_OFFSET] + * (next_values[start_col + SUM_OFFSET + j] + + (next_values[start_col + SUM_CARRIES_OFFSET + j] + * FE::from_canonical_u64(1 << 32)) + - next_values[start_col + SHIFTED_XY_OFFSET + j] + - local_values[start_col + SUM_OFFSET + j] + - next_values[start_col + SUM_CARRIES_OFFSET + j - 1]), + ) + } + // yield_constr.constraint_transition(local_values[start_col + MULTIPLICATION_SELECTOR_OFFSET] * (next_values[start_col + SUM_OFFSET + 24] - next_values[start_col + SUM_CARRIES_OFFSET + 23])); +} + +pub fn 
add_multiplication_constraints_ext_circuit, const D: usize>( + builder: &mut CircuitBuilder, + yield_constr: &mut RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + next_values: &[ExtensionTarget], + start_col: usize, + bit_selector: Option>, +) { + let constant = builder.constant_extension(F::Extension::from_canonical_u64(1 << 32)); + let bit_selector_val = bit_selector.unwrap_or(builder.constant_extension(F::Extension::ONE)); + + for i in 0..12 { + let tmp = builder.mul_extension( + bit_selector_val, + local_values[start_col + MULTIPLICATION_SELECTOR_OFFSET], + ); + let c1 = builder.sub_extension( + local_values[start_col + X_INPUT_OFFSET + i], + next_values[start_col + X_INPUT_OFFSET + i], + ); + let c1 = builder.mul_extension(tmp, c1); + yield_constr.constraint_transition(builder, c1); + let c2 = builder.sub_extension( + local_values[start_col + Y_INPUT_OFFSET + i], + next_values[start_col + Y_INPUT_OFFSET + i], + ); + let c2 = builder.mul_extension(tmp, c2); + yield_constr.constraint_transition(builder, c2); + } + + for i in 0..12 { + for j in 0..12 { + if j == 0 { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + SELECTOR_OFFSET + i], + ); + let mul_tmp2 = builder.mul_extension( + local_values[start_col + X_INPUT_OFFSET + j], + local_values[start_col + Y_INPUT_OFFSET + i], + ); + let mul_tmp3 = builder + .mul_extension(local_values[start_col + XY_CARRIES_OFFSET + j], constant); + + let sub_tmp1 = builder.sub_extension(mul_tmp2, mul_tmp3); + let sub_tmp2 = + builder.sub_extension(sub_tmp1, local_values[start_col + XY_OFFSET + j]); + + let c = builder.mul_extension(mul_tmp1, sub_tmp2); + yield_constr.constraint_transition(builder, c); + } else { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + SELECTOR_OFFSET + i], + ); + let mul_tmp2 = builder.mul_extension( + local_values[start_col + X_INPUT_OFFSET + j], + local_values[start_col + Y_INPUT_OFFSET + i], + ); + + let 
mul_tmp3 = builder + .mul_extension(local_values[start_col + XY_CARRIES_OFFSET + j], constant); + + let sub_tmp1 = builder.sub_extension(mul_tmp2, mul_tmp3); + let sub_tmp2 = + builder.sub_extension(sub_tmp1, local_values[start_col + XY_OFFSET + j]); + + let add_tmp1 = builder.add_extension( + sub_tmp2, + local_values[start_col + XY_CARRIES_OFFSET + j - 1], + ); + + let c = builder.mul_extension(mul_tmp1, add_tmp1); + yield_constr.constraint_transition(builder, c); + } + } + } + + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + MULTIPLICATION_SELECTOR_OFFSET], + ); + let sub_tmp1 = builder.sub_extension( + local_values[start_col + XY_OFFSET + 12], + local_values[start_col + XY_CARRIES_OFFSET + 11], + ); + + let c = builder.mul_extension(mul_tmp1, sub_tmp1); + yield_constr.constraint_transition(builder, c); + + for i in 0..12 { + for j in 0..13 { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + SELECTOR_OFFSET + i], + ); + let sub_tmp1 = builder.sub_extension( + local_values[start_col + SHIFTED_XY_OFFSET + j + i], + local_values[start_col + XY_OFFSET + j], + ); + + let c = builder.mul_extension(mul_tmp1, sub_tmp1); + yield_constr.constraint_transition(builder, c); + } + } + + for j in 0..24 { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + MULTIPLICATION_FIRST_ROW_OFFSET], + ); + let sub_tmp1 = builder.sub_extension( + local_values[start_col + SUM_OFFSET + j], + local_values[start_col + SHIFTED_XY_OFFSET + j], + ); + + let c1 = builder.mul_extension(mul_tmp1, sub_tmp1); + yield_constr.constraint(builder, c1); + + let c2 = builder.mul_extension(mul_tmp1, local_values[start_col + SUM_CARRIES_OFFSET + j]); + yield_constr.constraint(builder, c2); + } + + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + MULTIPLICATION_SELECTOR_OFFSET], + ); + let mul_tmp2 = builder.mul_extension(next_values[start_col + SUM_CARRIES_OFFSET], 
constant); + + let sub_tmp1 = builder.sub_extension(mul_tmp2, next_values[start_col + SHIFTED_XY_OFFSET]); + let sub_tmp2 = builder.sub_extension(sub_tmp1, local_values[start_col + SUM_OFFSET]); + + let add_tmp1 = builder.add_extension(sub_tmp2, next_values[start_col + SUM_OFFSET]); + + let c = builder.mul_extension(mul_tmp1, add_tmp1); + yield_constr.constraint_transition(builder, c); + + for j in 1..24 { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + MULTIPLICATION_SELECTOR_OFFSET], + ); + let mul_tmp2 = + builder.mul_extension(next_values[start_col + SUM_CARRIES_OFFSET + j], constant); + + let sub_tmp1 = + builder.sub_extension(mul_tmp2, next_values[start_col + SHIFTED_XY_OFFSET + j]); + let sub_tmp2 = builder.sub_extension(sub_tmp1, local_values[start_col + SUM_OFFSET + j]); + let sub_tmp3 = builder.sub_extension( + sub_tmp2, + next_values[start_col + SUM_CARRIES_OFFSET + j - 1], + ); + + let add_tmp1 = builder.add_extension(sub_tmp3, next_values[start_col + SUM_OFFSET + j]); + + let c = builder.mul_extension(mul_tmp1, add_tmp1); + yield_constr.constraint_transition(builder, c); + } +} + +/// Constraints the addition for addition of two \[u32; 24\]. +/// Constraints the following for every limb -> `sum[i] + carries[i]*(2**32) == x[i] + y[i] + carries[i-1]`. +pub fn add_addition_constraints< + F: RichField + Extendable, + const D: usize, + FE, + P, + const D2: usize, +>( + local_values: &[P], + yield_constr: &mut ConstraintConsumer

, + start_col: usize, // Starting column of your multiplication trace + bit_selector: Option

, +) where + FE: FieldExtension, + P: PackedField, +{ + let bit_selector_val = bit_selector.unwrap_or(P::ONES); + + for j in 0..24 { + if j == 0 { + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + ADDITION_CHECK_OFFSET] + * (local_values[start_col + ADDITION_SUM_OFFSET + j] + + (local_values[start_col + ADDITION_CARRY_OFFSET + j] + * FE::from_canonical_u64(1 << 32)) + - local_values[start_col + ADDITION_X_OFFSET + j] + - local_values[start_col + ADDITION_Y_OFFSET + j]), + ) + } else { + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + ADDITION_CHECK_OFFSET] + * (local_values[start_col + ADDITION_SUM_OFFSET + j] + + (local_values[start_col + ADDITION_CARRY_OFFSET + j] + * FE::from_canonical_u64(1 << 32)) + - local_values[start_col + ADDITION_X_OFFSET + j] + - local_values[start_col + ADDITION_Y_OFFSET + j] + - local_values[start_col + ADDITION_CARRY_OFFSET + j - 1]), + ) + } + } +} +pub fn add_addition_constraints_ext_circuit, const D: usize>( + builder: &mut CircuitBuilder, + yield_constr: &mut RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + start_col: usize, + bit_selector: Option>, +) { + let constant = builder.constant_extension(F::Extension::from_canonical_u64(1 << 32)); + let bit_selector_val = bit_selector.unwrap_or(builder.constant_extension(F::Extension::ONE)); + + for j in 0..24 { + if j == 0 { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + ADDITION_CHECK_OFFSET], + ); + let mul_tmp2 = builder.mul_extension( + local_values[start_col + ADDITION_CARRY_OFFSET + j], + constant, + ); + + let sub_tmp1 = + builder.sub_extension(mul_tmp2, local_values[start_col + ADDITION_X_OFFSET + j]); + let sub_tmp2 = + builder.sub_extension(sub_tmp1, local_values[start_col + ADDITION_Y_OFFSET + j]); + + let add_tmp1 = + builder.add_extension(sub_tmp2, local_values[start_col + ADDITION_SUM_OFFSET + j]); + + let c = builder.mul_extension(mul_tmp1, 
add_tmp1); + yield_constr.constraint_transition(builder, c); + } else { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + ADDITION_CHECK_OFFSET], + ); + let mul_tmp2 = builder.mul_extension( + local_values[start_col + ADDITION_CARRY_OFFSET + j], + constant, + ); + + let sub_tmp1 = + builder.sub_extension(mul_tmp2, local_values[start_col + ADDITION_X_OFFSET + j]); + let sub_tmp2 = + builder.sub_extension(sub_tmp1, local_values[start_col + ADDITION_Y_OFFSET + j]); + let sub_tmp3 = builder.sub_extension( + sub_tmp2, + local_values[start_col + ADDITION_CARRY_OFFSET + j - 1], + ); + + let add_tmp1 = + builder.add_extension(sub_tmp3, local_values[start_col + ADDITION_SUM_OFFSET + j]); + + let c = builder.mul_extension(mul_tmp1, add_tmp1); + yield_constr.constraint_transition(builder, c); + } + } +} + +/// Constraints the operation for addition of two \[u32; 12\]. +/// Constraints the following for every limb -> `sum[i] + carries[i]*(2**32) == x[i] + y[i] + carries[i-1]`. +pub fn add_addition_fp_constraints< + F: RichField + Extendable, + const D: usize, + FE, + P, + const D2: usize, +>( + local_values: &[P], + yield_constr: &mut ConstraintConsumer

, + start_col: usize, // Starting column of your multiplication trace + bit_selector: Option

, +) where + FE: FieldExtension, + P: PackedField, +{ + let bit_selector_val = bit_selector.unwrap_or(P::ONES); + + for j in 0..12 { + if j == 0 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + FP_ADDITION_SUM_OFFSET + j] + + (local_values[start_col + FP_ADDITION_CARRY_OFFSET + j] + * FE::from_canonical_u64(1 << 32)) + - local_values[start_col + FP_ADDITION_X_OFFSET + j] + - local_values[start_col + FP_ADDITION_Y_OFFSET + j]), + ) + } else { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + FP_ADDITION_SUM_OFFSET + j] + + (local_values[start_col + FP_ADDITION_CARRY_OFFSET + j] + * FE::from_canonical_u64(1 << 32)) + - local_values[start_col + FP_ADDITION_X_OFFSET + j] + - local_values[start_col + FP_ADDITION_Y_OFFSET + j] + - local_values[start_col + FP_ADDITION_CARRY_OFFSET + j - 1]), + ) + } + } +} + +pub fn add_addition_fp_constraints_ext_circuit, const D: usize>( + builder: &mut CircuitBuilder, + yield_constr: &mut RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + start_col: usize, + bit_selector: Option>, +) { + let constant = builder.constant_extension(F::Extension::from_canonical_u64(1 << 32)); + let bit_selector_val = bit_selector.unwrap_or(builder.constant_extension(F::Extension::ONE)); + + for j in 0..12 { + if j == 0 { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + FP_ADDITION_CHECK_OFFSET], + ); + let mul_tmp2 = builder.mul_extension( + local_values[start_col + FP_ADDITION_CARRY_OFFSET + j], + constant, + ); + + let sub_tmp1 = + builder.sub_extension(mul_tmp2, local_values[start_col + FP_ADDITION_X_OFFSET + j]); + let sub_tmp2 = + builder.sub_extension(sub_tmp1, local_values[start_col + FP_ADDITION_Y_OFFSET + j]); + + let add_tmp1 = builder.add_extension( + sub_tmp2, + local_values[start_col + FP_ADDITION_SUM_OFFSET + j], + ); + + let c 
= builder.mul_extension(mul_tmp1, add_tmp1); + yield_constr.constraint(builder, c); + } else { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + FP_ADDITION_CHECK_OFFSET], + ); + let mul_tmp2 = builder.mul_extension( + local_values[start_col + FP_ADDITION_CARRY_OFFSET + j], + constant, + ); + + let sub_tmp1 = + builder.sub_extension(mul_tmp2, local_values[start_col + FP_ADDITION_X_OFFSET + j]); + let sub_tmp2 = + builder.sub_extension(sub_tmp1, local_values[start_col + FP_ADDITION_Y_OFFSET + j]); + let sub_tmp3 = builder.sub_extension( + sub_tmp2, + local_values[start_col + FP_ADDITION_CARRY_OFFSET + j - 1], + ); + + let add_tmp1 = builder.add_extension( + sub_tmp3, + local_values[start_col + FP_ADDITION_SUM_OFFSET + j], + ); + + let c = builder.mul_extension(mul_tmp1, add_tmp1); + yield_constr.constraint(builder, c); + } + } +} + +/// Constraints the operation for subtraction of two \[u32; 12\]. +/// Constraints the following for every limb -> `diff[i] - borrows[i]*(2**32) == x[i] - y[i] - borrows[i-1]`. +pub fn add_subtraction_fp_constraints< + F: RichField + Extendable, + const D: usize, + FE, + P, + const D2: usize, +>( + local_values: &[P], + yield_constr: &mut ConstraintConsumer

, + start_col: usize, // Starting column of your multiplication trace + bit_selector: Option

, +) where + FE: FieldExtension, + P: PackedField, +{ + let bit_selector_val = bit_selector.unwrap_or(P::ONES); + + for j in 0..12 { + if j == 0 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + FP_SUBTRACTION_DIFF_OFFSET + j] + + local_values[start_col + FP_SUBTRACTION_Y_OFFSET + j] + - (local_values[start_col + FP_SUBTRACTION_BORROW_OFFSET + j] + * FE::from_canonical_u64(1 << 32)) + - local_values[start_col + FP_SUBTRACTION_X_OFFSET + j]), + ) + } else { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + FP_SUBTRACTION_DIFF_OFFSET + j] + + local_values[start_col + FP_SUBTRACTION_Y_OFFSET + j] + + local_values[start_col + FP_SUBTRACTION_BORROW_OFFSET + j - 1] + - (local_values[start_col + FP_SUBTRACTION_BORROW_OFFSET + j] + * FE::from_canonical_u64(1 << 32)) + - local_values[start_col + FP_SUBTRACTION_X_OFFSET + j]), + ) + } + } +} + +pub fn add_subtraction_fp_constraints_ext_circuit, const D: usize>( + builder: &mut CircuitBuilder, + yield_constr: &mut RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + start_col: usize, + bit_selector: Option>, +) { + let constant = builder.constant_extension(F::Extension::from_canonical_u64(1 << 32)); + let bit_selector_val = bit_selector.unwrap_or(builder.constant_extension(F::Extension::ONE)); + + for j in 0..12 { + if j == 0 { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + FP_SUBTRACTION_CHECK_OFFSET], + ); + let mul_tmp2 = builder.mul_extension( + local_values[start_col + FP_SUBTRACTION_BORROW_OFFSET + j], + constant, + ); + + let add_tmp1 = builder.add_extension( + local_values[start_col + FP_SUBTRACTION_DIFF_OFFSET + j], + local_values[start_col + FP_SUBTRACTION_Y_OFFSET + j], + ); + + let sub_tmp1 = builder.sub_extension(add_tmp1, mul_tmp2); + let sub_tmp2 = builder.sub_extension( + sub_tmp1, + 
local_values[start_col + FP_SUBTRACTION_X_OFFSET + j], + ); + + let c = builder.mul_extension(mul_tmp1, sub_tmp2); + yield_constr.constraint(builder, c); + } else { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + FP_SUBTRACTION_CHECK_OFFSET], + ); + let mul_tmp2 = builder.mul_extension( + local_values[start_col + FP_SUBTRACTION_BORROW_OFFSET + j], + constant, + ); + + let add_tmp1 = builder.add_extension( + local_values[start_col + FP_SUBTRACTION_DIFF_OFFSET + j], + local_values[start_col + FP_SUBTRACTION_Y_OFFSET + j], + ); + let add_tmp2 = builder.add_extension( + add_tmp1, + local_values[start_col + FP_SUBTRACTION_BORROW_OFFSET + j - 1], + ); + + let sub_tmp1 = builder.sub_extension(add_tmp2, mul_tmp2); + let sub_tmp2 = builder.sub_extension( + sub_tmp1, + local_values[start_col + FP_SUBTRACTION_X_OFFSET + j], + ); + + let c = builder.mul_extension(mul_tmp1, sub_tmp2); + yield_constr.constraint(builder, c); + } + } +} + +/// Constraints the negation operation for \[u32; 12\]. +/// Constraints an addition operation, following by constraining `result == p`, where p is the field prime. +pub fn add_negate_fp_constraints< + F: RichField + Extendable, + const D: usize, + FE, + P, + const D2: usize, +>( + local_values: &[P], + yield_constr: &mut ConstraintConsumer

, + start_col: usize, // Starting column of your multiplication trace + bit_selector: Option

, +) where + FE: FieldExtension, + P: PackedField, +{ + let bit_selector_val = bit_selector.unwrap_or(P::ONES); + + add_addition_fp_constraints(local_values, yield_constr, start_col, bit_selector); + let mod_u32 = get_u32_vec_from_literal(modulus()); + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + FP_ADDITION_SUM_OFFSET + i] + - FE::from_canonical_u32(mod_u32[i])), + ); + } +} + +pub fn add_negate_fp_constraints_ext_circuit, const D: usize>( + builder: &mut CircuitBuilder, + yield_constr: &mut RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + start_col: usize, + bit_selector: Option>, +) { + add_addition_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col, + bit_selector, + ); + let bit_selector_val = bit_selector.unwrap_or(builder.constant_extension(F::Extension::ONE)); + + let mod_u32 = get_u32_vec_from_literal(modulus()); + + for i in 0..12 { + let constant = builder.constant_extension(F::Extension::from_canonical_u32(mod_u32[i])); + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + FP_ADDITION_CHECK_OFFSET], + ); + let sub_tmp1 = builder.sub_extension( + local_values[start_col + FP_ADDITION_SUM_OFFSET + i], + constant, + ); + + let c = builder.mul_extension(mul_tmp1, sub_tmp1); + yield_constr.constraint_transition(builder, c); + } +} +/// Constraints the operation for multiplication of \[u32; 12\] with a u32. +/// Constraints the following for every limb -> `product[i] + carries[i]*(2**32) == x[i] * y + carries[i-1]`. +pub fn add_fp_single_multiply_constraints< + F: RichField + Extendable, + const D: usize, + FE, + P, + const D2: usize, +>( + local_values: &[P], + yield_constr: &mut ConstraintConsumer

, + start_col: usize, // Starting column of your multiplication trace + bit_selector: Option

, +) where + FE: FieldExtension, + P: PackedField, +{ + let bit_selector_val = bit_selector.unwrap_or(P::ONES); + + for j in 0..12 { + if j == 0 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP_MULTIPLY_SINGLE_CHECK_OFFSET] + * (local_values[start_col + FP_MULTIPLY_SINGLE_SUM_OFFSET + j] + + (local_values[start_col + FP_MULTIPLY_SINGLE_CARRY_OFFSET + j] + * FE::from_canonical_u64(1 << 32)) + - local_values[start_col + FP_MULTIPLY_SINGLE_X_OFFSET + j] + * local_values[start_col + FP_MULTIPLY_SINGLE_Y_OFFSET]), + ) + } else { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP_MULTIPLY_SINGLE_CHECK_OFFSET] + * (local_values[start_col + FP_MULTIPLY_SINGLE_SUM_OFFSET + j] + + (local_values[start_col + FP_MULTIPLY_SINGLE_CARRY_OFFSET + j] + * FE::from_canonical_u64(1 << 32)) + - local_values[start_col + FP_MULTIPLY_SINGLE_X_OFFSET + j] + * local_values[start_col + FP_MULTIPLY_SINGLE_Y_OFFSET] + - local_values[start_col + FP_MULTIPLY_SINGLE_CARRY_OFFSET + j - 1]), + ) + } + } +} + +pub fn add_fp_single_multiply_constraints_ext_circuit< + F: RichField + Extendable, + const D: usize, +>( + builder: &mut CircuitBuilder, + yield_constr: &mut RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + start_col: usize, + bit_selector: Option>, +) { + let constant = builder.constant_extension(F::Extension::from_canonical_u64(1 << 32)); + let bit_selector_val = bit_selector.unwrap_or(builder.constant_extension(F::Extension::ONE)); + + for j in 0..12 { + if j == 0 { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + FP_MULTIPLY_SINGLE_CHECK_OFFSET], + ); + let mul_tmp2 = builder.mul_extension( + local_values[start_col + FP_MULTIPLY_SINGLE_CARRY_OFFSET + j], + constant, + ); + let mul_tmp3 = builder.mul_extension( + local_values[start_col + FP_MULTIPLY_SINGLE_X_OFFSET + j], + local_values[start_col + FP_MULTIPLY_SINGLE_Y_OFFSET], + ); + + let sub_tmp1 = 
builder.sub_extension(mul_tmp2, mul_tmp3); + + let add_tmp1 = builder.add_extension( + sub_tmp1, + local_values[start_col + FP_MULTIPLY_SINGLE_SUM_OFFSET + j], + ); + + let c = builder.mul_extension(mul_tmp1, add_tmp1); + yield_constr.constraint(builder, c); + } else { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + FP_MULTIPLY_SINGLE_CHECK_OFFSET], + ); + let mul_tmp2 = builder.mul_extension( + local_values[start_col + FP_MULTIPLY_SINGLE_CARRY_OFFSET + j], + constant, + ); + let mul_tmp3 = builder.mul_extension( + local_values[start_col + FP_MULTIPLY_SINGLE_X_OFFSET + j], + local_values[start_col + FP_MULTIPLY_SINGLE_Y_OFFSET], + ); + + let sub_tmp1 = builder.sub_extension(mul_tmp2, mul_tmp3); + let sub_tmp2 = builder.sub_extension( + sub_tmp1, + local_values[start_col + FP_MULTIPLY_SINGLE_CARRY_OFFSET + j - 1], + ); + + let add_tmp1 = builder.add_extension( + sub_tmp2, + local_values[start_col + FP_MULTIPLY_SINGLE_SUM_OFFSET + j], + ); + + let c = builder.mul_extension(mul_tmp1, add_tmp1); + yield_constr.constraint(builder, c); + } + } +} + +/// Constraints the reduction operation for \[u32; 12\]. +/// Constraints a single multiplication operation with `p` as `x` input. Then constraints an addition operation with the result of the previous multiplication and the reduced answer as inputs. Then constraints the result of the addition with the input of reduction operation. +pub fn add_fp_reduce_single_constraints< + F: RichField + Extendable, + const D: usize, + FE, + P, + const D2: usize, +>( + local_values: &[P], + yield_constr: &mut ConstraintConsumer

, + start_col: usize, // Starting column of your multiplication trace + bit_selector: Option

, +) where + FE: FieldExtension, + P: PackedField, +{ + let bit_selector_val = bit_selector.unwrap_or(P::ONES); + + let modulus = modulus(); + let modulus_u32 = get_u32_vec_from_literal(modulus); + for i in 0..12 { + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + + FP_SINGLE_REDUCE_MULTIPLICATION_OFFSET + + FP_MULTIPLY_SINGLE_CHECK_OFFSET] + * (local_values[start_col + + FP_SINGLE_REDUCE_MULTIPLICATION_OFFSET + + FP_MULTIPLY_SINGLE_X_OFFSET + + i] + - FE::from_canonical_u32(modulus_u32[i])), + ); + } + + add_fp_single_multiply_constraints( + local_values, + yield_constr, + start_col + FP_SINGLE_REDUCE_MULTIPLICATION_OFFSET, + bit_selector, + ); + + for i in 0..12 { + yield_constr.constraint_transition( + bit_selector_val + * local_values + [start_col + FP_SINGLE_REDUCTION_ADDITION_OFFSET + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP_SINGLE_REDUCE_MULTIPLICATION_OFFSET + + FP_MULTIPLY_SINGLE_SUM_OFFSET + + i] + - local_values[start_col + + FP_SINGLE_REDUCTION_ADDITION_OFFSET + + FP_ADDITION_X_OFFSET + + i]), + ); + } + + add_addition_fp_constraints( + local_values, + yield_constr, + start_col + FP_SINGLE_REDUCTION_ADDITION_OFFSET, + bit_selector, + ); + + for i in 0..12 { + yield_constr.constraint_transition( + bit_selector_val + * local_values + [start_col + FP_SINGLE_REDUCTION_ADDITION_OFFSET + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + FP_SINGLE_REDUCED_OFFSET + i] + - local_values[start_col + + FP_SINGLE_REDUCTION_ADDITION_OFFSET + + FP_ADDITION_Y_OFFSET + + i]), + ); + } + + for i in 0..12 { + yield_constr.constraint_transition( + bit_selector_val + * local_values + [start_col + FP_SINGLE_REDUCTION_ADDITION_OFFSET + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + FP_SINGLE_REDUCE_X_OFFSET + i] + - local_values[start_col + + FP_SINGLE_REDUCTION_ADDITION_OFFSET + + FP_ADDITION_SUM_OFFSET + + i]), + ) + } +} + +pub fn add_fp_reduce_single_constraints_ext_circuit< + F: RichField + 
Extendable, + const D: usize, +>( + builder: &mut CircuitBuilder, + yield_constr: &mut RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + start_col: usize, + bit_selector: Option>, +) { + let modulus = modulus(); + let modulus_u32 = get_u32_vec_from_literal(modulus); + let bit_selector_val = bit_selector.unwrap_or(builder.constant_extension(F::Extension::ONE)); + + for i in 0..12 { + let lc = builder.constant_extension(F::Extension::from_canonical_u32(modulus_u32[i])); + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + + FP_SINGLE_REDUCE_MULTIPLICATION_OFFSET + + FP_MULTIPLY_SINGLE_CHECK_OFFSET], + ); + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + FP_SINGLE_REDUCE_MULTIPLICATION_OFFSET + + FP_MULTIPLY_SINGLE_X_OFFSET + + i], + lc, + ); + + let c = builder.mul_extension(mul_tmp1, sub_tmp1); + yield_constr.constraint_transition(builder, c); + } + + add_fp_single_multiply_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP_SINGLE_REDUCE_MULTIPLICATION_OFFSET, + bit_selector, + ); + + for i in 0..12 { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values + [start_col + FP_SINGLE_REDUCTION_ADDITION_OFFSET + FP_ADDITION_CHECK_OFFSET], + ); + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + FP_SINGLE_REDUCE_MULTIPLICATION_OFFSET + + FP_MULTIPLY_SINGLE_SUM_OFFSET + + i], + local_values + [start_col + FP_SINGLE_REDUCTION_ADDITION_OFFSET + FP_ADDITION_X_OFFSET + i], + ); + + let c = builder.mul_extension(mul_tmp1, sub_tmp1); + yield_constr.constraint_transition(builder, c); + } + + add_addition_fp_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP_SINGLE_REDUCTION_ADDITION_OFFSET, + bit_selector, + ); + + for i in 0..12 { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values + [start_col + FP_SINGLE_REDUCTION_ADDITION_OFFSET + FP_ADDITION_CHECK_OFFSET], + ); + let sub_tmp1 = 
builder.sub_extension( + local_values[start_col + FP_SINGLE_REDUCED_OFFSET + i], + local_values + [start_col + FP_SINGLE_REDUCTION_ADDITION_OFFSET + FP_ADDITION_Y_OFFSET + i], + ); + + let c = builder.mul_extension(mul_tmp1, sub_tmp1); + yield_constr.constraint_transition(builder, c); + } + + for i in 0..12 { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values + [start_col + FP_SINGLE_REDUCTION_ADDITION_OFFSET + FP_ADDITION_CHECK_OFFSET], + ); + let sub_tmp1 = builder.sub_extension( + local_values[start_col + FP_SINGLE_REDUCE_X_OFFSET + i], + local_values + [start_col + FP_SINGLE_REDUCTION_ADDITION_OFFSET + FP_ADDITION_SUM_OFFSET + i], + ); + + let c = builder.mul_extension(mul_tmp1, sub_tmp1); + yield_constr.constraint_transition(builder, c); + } +} + +/// Constraints the operation for subtraction of two \[u32; 24\]. +/// Constraints the following for every limb -> `diff[i] - borrows[i]*(2**32) == x[i] - y[i] - borrows[i-1]`. +pub fn add_subtraction_constraints< + F: RichField + Extendable, + const D: usize, + FE, + P, + const D2: usize, +>( + local_values: &[P], + yield_constr: &mut ConstraintConsumer

, + start_col: usize, // Starting column of your multiplication trace + bit_selector: Option

, +) where + FE: FieldExtension, + P: PackedField, +{ + let bit_selector_val = bit_selector.unwrap_or(P::ONES); + + for j in 0..24 { + if j == 0 { + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + SUBTRACTION_DIFF_OFFSET + j] + + local_values[start_col + SUBTRACTION_Y_OFFSET + j] + - (local_values[start_col + SUBTRACTION_BORROW_OFFSET + j] + * FE::from_canonical_u64(1 << 32)) + - local_values[start_col + SUBTRACTION_X_OFFSET + j]), + ) + } else { + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + SUBTRACTION_DIFF_OFFSET + j] + + local_values[start_col + SUBTRACTION_Y_OFFSET + j] + + local_values[start_col + SUBTRACTION_BORROW_OFFSET + j - 1] + - (local_values[start_col + SUBTRACTION_BORROW_OFFSET + j] + * FE::from_canonical_u64(1 << 32)) + - local_values[start_col + SUBTRACTION_X_OFFSET + j]), + ) + } + } +} + +pub fn add_subtraction_constraints_ext_circuit, const D: usize>( + builder: &mut CircuitBuilder, + yield_constr: &mut RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + start_col: usize, + bit_selector: Option>, +) { + let constant = builder.constant_extension(F::Extension::from_canonical_u64(1 << 32)); + let bit_selector_val = bit_selector.unwrap_or(builder.constant_extension(F::Extension::ONE)); + + for j in 0..24 { + if j == 0 { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + SUBTRACTION_CHECK_OFFSET], + ); + let mul_tmp2 = builder.mul_extension( + local_values[start_col + SUBTRACTION_BORROW_OFFSET + j], + constant, + ); + + let add_tmp1 = builder.add_extension( + local_values[start_col + SUBTRACTION_DIFF_OFFSET + j], + local_values[start_col + SUBTRACTION_Y_OFFSET + j], + ); + + let sub_tmp1 = builder.sub_extension(add_tmp1, mul_tmp2); + let sub_tmp2 = + builder.sub_extension(sub_tmp1, local_values[start_col + 
SUBTRACTION_X_OFFSET + j]); + + let c = builder.mul_extension(mul_tmp1, sub_tmp2); + yield_constr.constraint_transition(builder, c); + } else { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + SUBTRACTION_CHECK_OFFSET], + ); + let mul_tmp2 = builder.mul_extension( + local_values[start_col + SUBTRACTION_BORROW_OFFSET + j], + constant, + ); + + let add_tmp1 = builder.add_extension( + local_values[start_col + SUBTRACTION_DIFF_OFFSET + j], + local_values[start_col + SUBTRACTION_Y_OFFSET + j], + ); + let add_tmp2 = builder.add_extension( + add_tmp1, + local_values[start_col + SUBTRACTION_BORROW_OFFSET + j - 1], + ); + + let sub_tmp1 = builder.sub_extension(add_tmp2, mul_tmp2); + let sub_tmp2 = + builder.sub_extension(sub_tmp1, local_values[start_col + SUBTRACTION_X_OFFSET + j]); + + let c = builder.mul_extension(mul_tmp1, sub_tmp2); + yield_constr.constraint_transition(builder, c); + } + } +} +/// Constraints the range check operation of a \[u23; 12\]. +/// Constraints the addition of the input and (2**382)-p. Then constraints the bit decomposition of the most significant limb of the result of the previous addition. Then constraints the 30th bit of the decomposition (which is overall 382nd bit of the result) to zero. +pub fn add_range_check_constraints< + F: RichField + Extendable, + const D: usize, + FE, + P, + const D2: usize, +>( + local_values: &[P], + yield_constr: &mut ConstraintConsumer

, + start_col: usize, // Starting column of your multiplication trace + bit_selector: Option

, +) where + FE: FieldExtension, + P: PackedField, +{ + let bit_selector_val = bit_selector.unwrap_or(P::ONES); + + let y = (BigUint::from(1u32) << 382) - modulus(); + let y_u32 = get_u32_vec_from_literal(y); + + for i in 0..12 { + if i == 0 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + RANGE_CHECK_SELECTOR_OFFSET] + * (local_values[start_col + RANGE_CHECK_SUM_OFFSET + i] + + (local_values[start_col + RANGE_CHECK_SUM_CARRY_OFFSET + i] + * FE::from_canonical_u64(1 << 32)) + - FE::from_canonical_u32(y_u32[i]) + - local_values[start_col - 12 + i]), + ); + } else if i < 12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + RANGE_CHECK_SELECTOR_OFFSET] + * (local_values[start_col + RANGE_CHECK_SUM_OFFSET + i] + + (local_values[start_col + RANGE_CHECK_SUM_CARRY_OFFSET + i] + * FE::from_canonical_u64(1 << 32)) + - FE::from_canonical_u32(y_u32[i]) + - local_values[start_col - 12 + i] + - local_values[start_col + RANGE_CHECK_SUM_CARRY_OFFSET + i - 1]), + ); + } + let bit_col: usize = start_col + RANGE_CHECK_BIT_DECOMP_OFFSET; + let val_reconstructed = bit_decomp_32!(local_values, bit_col, FE, P); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + RANGE_CHECK_SELECTOR_OFFSET] + * (val_reconstructed - local_values[start_col + RANGE_CHECK_SUM_OFFSET + 11]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + RANGE_CHECK_SELECTOR_OFFSET] + * local_values[bit_col + 30], + ); + } +} + +pub fn add_range_check_constraints_ext_circuit, const D: usize>( + builder: &mut CircuitBuilder, + yield_constr: &mut RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + start_col: usize, + bit_selector: Option>, +) { + let constant = builder.constant_extension(F::Extension::from_canonical_u64(1 << 32)); + let bit_selector_val = bit_selector.unwrap_or(builder.constant_extension(F::Extension::ONE)); + + let y = (BigUint::from(1u32) << 382) - modulus(); + let y_u32 = 
get_u32_vec_from_literal(y); + + for i in 0..12 { + if i == 0 { + let lc = builder.constant_extension(F::Extension::from_canonical_u32(y_u32[i])); + + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + RANGE_CHECK_SELECTOR_OFFSET], + ); + let mul_tmp2 = builder.mul_extension( + local_values[start_col + RANGE_CHECK_SUM_CARRY_OFFSET + i], + constant, + ); + + let sub_tmp1 = builder.sub_extension(mul_tmp2, lc); + let sub_tmp2 = builder.sub_extension(sub_tmp1, local_values[start_col - 12 + i]); + + let add_tmp1 = builder.add_extension( + sub_tmp2, + local_values[start_col + RANGE_CHECK_SUM_OFFSET + i], + ); + + let c = builder.mul_extension(mul_tmp1, add_tmp1); + + yield_constr.constraint(builder, c); + } else if i < 12 { + let lc = builder.constant_extension(F::Extension::from_canonical_u32(y_u32[i])); + + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + RANGE_CHECK_SELECTOR_OFFSET], + ); + let mul_tmp2 = builder.mul_extension( + local_values[start_col + RANGE_CHECK_SUM_CARRY_OFFSET + i], + constant, + ); + + let sub_tmp1 = builder.sub_extension(mul_tmp2, lc); + let sub_tmp2 = builder.sub_extension(sub_tmp1, local_values[start_col - 12 + i]); + let sub_tmp3 = builder.sub_extension( + sub_tmp2, + local_values[start_col + RANGE_CHECK_SUM_CARRY_OFFSET + i - 1], + ); + + let add_tmp1 = builder.add_extension( + sub_tmp3, + local_values[start_col + RANGE_CHECK_SUM_OFFSET + i], + ); + + let c = builder.mul_extension(mul_tmp1, add_tmp1); + + yield_constr.constraint(builder, c); + } + + let bit_col: usize = start_col + RANGE_CHECK_BIT_DECOMP_OFFSET; + let val_reconstructed = bit_decomp_32_circuit!(builder, local_values, bit_col, F); + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + RANGE_CHECK_SELECTOR_OFFSET], + ); + let sub_tmp1 = builder.sub_extension( + val_reconstructed, + local_values[start_col + RANGE_CHECK_SUM_OFFSET + 11], + ); + + let c1 = 
builder.mul_extension(mul_tmp1, sub_tmp1); + yield_constr.constraint(builder, c1); + + let c2 = builder.mul_extension(mul_tmp1, local_values[bit_col + 30]); + yield_constr.constraint(builder, c2); + } +} + +/// Constraints the reduction operation for \[u32; 24\]. +/// Constraints that input and result is same across this row and next row wherever the selector is on. +/// Constraints a multiplication operation with `p` as `x` input. Then constraints an addition operation with the result of the previous multiplication and the reduced answer as inputs. Then constraints the result of the addition with the input of reduction operation. +pub fn add_reduce_constraints< + F: RichField + Extendable, + const D: usize, + FE, + P, + const D2: usize, +>( + local_values: &[P], + next_values: &[P], + yield_constr: &mut ConstraintConsumer

, + start_col: usize, // Starting column of your multiplication trace + selector_col: usize, + bit_selector: Option

, +) where + FE: FieldExtension, + P: PackedField, +{ + let bit_selector_val = bit_selector.unwrap_or(P::ONES); + + let modulus = modulus(); + let modulus_u32 = get_u32_vec_from_literal(modulus); + for i in 0..12 { + yield_constr.constraint_transition( + bit_selector_val + * local_values[selector_col] + * (local_values[start_col + REDUCE_MULTIPLICATION_OFFSET + Y_INPUT_OFFSET + i] + - FE::from_canonical_u32(modulus_u32[i])), + ); + } + + add_multiplication_constraints( + local_values, + next_values, + yield_constr, + start_col + REDUCE_MULTIPLICATION_OFFSET, + bit_selector, + ); + + for i in 0..24 { + yield_constr.constraint_transition( + bit_selector_val + * local_values[selector_col] + * (local_values[start_col + REDUCE_X_OFFSET + i] + - next_values[start_col + REDUCE_X_OFFSET + i]), + ); + } + + for i in 0..12 { + yield_constr.constraint_transition( + bit_selector_val + * local_values[selector_col] + * (local_values[start_col + REDUCED_OFFSET + i] + - next_values[start_col + REDUCED_OFFSET + i]), + ); + } + + for i in 0..24 { + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + REDUCTION_ADDITION_OFFSET + ADDITION_CHECK_OFFSET] + * (local_values[start_col + REDUCE_MULTIPLICATION_OFFSET + SUM_OFFSET + i] + - local_values[start_col + REDUCTION_ADDITION_OFFSET + ADDITION_X_OFFSET + i]), + ); + } + + add_addition_constraints( + local_values, + yield_constr, + start_col + REDUCTION_ADDITION_OFFSET, + bit_selector, + ); + + for i in 0..24 { + if i < 12 { + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + REDUCTION_ADDITION_OFFSET + ADDITION_CHECK_OFFSET] + * (local_values[start_col + REDUCED_OFFSET + i] + - local_values + [start_col + REDUCTION_ADDITION_OFFSET + ADDITION_Y_OFFSET + i]), + ); + } else { + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + REDUCTION_ADDITION_OFFSET + ADDITION_CHECK_OFFSET] + * local_values[start_col + REDUCTION_ADDITION_OFFSET + 
ADDITION_Y_OFFSET + i], + ); + } + } + + for i in 0..24 { + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + REDUCTION_ADDITION_OFFSET + ADDITION_CHECK_OFFSET] + * (local_values[start_col + REDUCE_X_OFFSET + i] + - local_values + [start_col + REDUCTION_ADDITION_OFFSET + ADDITION_SUM_OFFSET + i]), + ) + } +} + +pub fn add_reduce_constraints_ext_circuit, const D: usize>( + builder: &mut CircuitBuilder, + yield_constr: &mut RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + next_values: &[ExtensionTarget], + start_col: usize, + selector_col: usize, + bit_selector: Option>, +) { + let bit_selector_val = bit_selector.unwrap_or(builder.constant_extension(F::Extension::ONE)); + + let modulus = modulus(); + let modulus_u32 = get_u32_vec_from_literal(modulus); + + for i in 0..12 { + let lc = builder.constant_extension(F::Extension::from_canonical_u32(modulus_u32[i])); + + let mul_tmp1 = builder.mul_extension(bit_selector_val, local_values[selector_col]); + let sub_tmp1 = builder.sub_extension( + local_values[start_col + REDUCE_MULTIPLICATION_OFFSET + Y_INPUT_OFFSET + i], + lc, + ); + + let c = builder.mul_extension(mul_tmp1, sub_tmp1); + yield_constr.constraint_transition(builder, c); + } + + add_multiplication_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + REDUCE_MULTIPLICATION_OFFSET, + bit_selector, + ); + for i in 0..24 { + let mul_tmp1 = builder.mul_extension(bit_selector_val, local_values[selector_col]); + let sub_tmp1 = builder.sub_extension( + local_values[start_col + REDUCE_X_OFFSET + i], + next_values[start_col + REDUCE_X_OFFSET + i], + ); + + let c = builder.mul_extension(mul_tmp1, sub_tmp1); + yield_constr.constraint_transition(builder, c); + } + + for i in 0..12 { + let mul_tmp1 = builder.mul_extension(bit_selector_val, local_values[selector_col]); + let sub_tmp1 = builder.sub_extension( + local_values[start_col + REDUCED_OFFSET + i], + next_values[start_col + 
REDUCED_OFFSET + i], + ); + + let c = builder.mul_extension(mul_tmp1, sub_tmp1); + yield_constr.constraint_transition(builder, c); + } + + for i in 0..24 { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + REDUCTION_ADDITION_OFFSET + ADDITION_CHECK_OFFSET], + ); + let sub_tmp1 = builder.sub_extension( + local_values[start_col + REDUCE_MULTIPLICATION_OFFSET + SUM_OFFSET + i], + local_values[start_col + REDUCTION_ADDITION_OFFSET + ADDITION_X_OFFSET + i], + ); + + let c = builder.mul_extension(mul_tmp1, sub_tmp1); + yield_constr.constraint_transition(builder, c); + } + + add_addition_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + REDUCTION_ADDITION_OFFSET, + bit_selector, + ); + + for i in 0..24 { + if i < 12 { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + REDUCTION_ADDITION_OFFSET + ADDITION_CHECK_OFFSET], + ); + let sub_tmp1 = builder.sub_extension( + local_values[start_col + REDUCED_OFFSET + i], + local_values[start_col + REDUCTION_ADDITION_OFFSET + ADDITION_Y_OFFSET + i], + ); + + let c = builder.mul_extension(mul_tmp1, sub_tmp1); + yield_constr.constraint_transition(builder, c); + } else { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + REDUCTION_ADDITION_OFFSET + ADDITION_CHECK_OFFSET], + ); + + let c = builder.mul_extension( + mul_tmp1, + local_values[start_col + REDUCTION_ADDITION_OFFSET + ADDITION_Y_OFFSET + i], + ); + yield_constr.constraint_transition(builder, c); + } + } + + for i in 0..24 { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + REDUCTION_ADDITION_OFFSET + ADDITION_CHECK_OFFSET], + ); + let sub_tmp1 = builder.sub_extension( + local_values[start_col + REDUCE_X_OFFSET + i], + local_values[start_col + REDUCTION_ADDITION_OFFSET + ADDITION_SUM_OFFSET + i], + ); + + let c = builder.mul_extension(mul_tmp1, sub_tmp1); + yield_constr.constraint_transition(builder, c); 
+ } +} diff --git a/casper-finality-proofs/src/verification/fields/starky/fp12.rs b/casper-finality-proofs/src/verification/fields/starky/fp12.rs new file mode 100644 index 000000000..577068cd6 --- /dev/null +++ b/casper-finality-proofs/src/verification/fields/starky/fp12.rs @@ -0,0 +1,6470 @@ +use crate::verification::{ + fields::starky::{fp::*, fp2::*,fp6::*}, + utils::{ + native_bls::{ + fp4_square, get_bls_12_381_parameter, mul_by_nonresidue, Fp, Fp12, Fp2, Fp6 + }, + starky_utils::*, + }, +}; +use num_bigint::BigUint; +use plonky2::{ + field::{ + extension::{Extendable, FieldExtension}, + packed::PackedField, + types::Field, + }, + hash::hash_types::RichField, + iop::ext_target::ExtensionTarget, + plonk::circuit_builder::CircuitBuilder, +}; +use starky::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer}; + +// MultiplyBy014 +/* + These trace offsets are for multiplyBy014 (super::native::Fp12::multiplyBy014) function. The Ti's are defined in the native function definition. It needs 12 rows. 
+*/ +pub const MULTIPLY_BY_014_SELECTOR_OFFSET: usize = 0; +pub const MULTIPLY_BY_014_INPUT_OFFSET: usize = MULTIPLY_BY_014_SELECTOR_OFFSET + 1; +pub const MULTIPLY_BY_014_O0_OFFSET: usize = MULTIPLY_BY_014_INPUT_OFFSET + 24 * 3 * 2; +pub const MULTIPLY_BY_014_O1_OFFSET: usize = MULTIPLY_BY_014_O0_OFFSET + 24; +pub const MULTIPLY_BY_014_O4_OFFSET: usize = MULTIPLY_BY_014_O1_OFFSET + 24; +pub const MULTIPLY_BY_014_T0_CALC_OFFSET: usize = MULTIPLY_BY_014_O4_OFFSET + 24; +pub const MULTIPLY_BY_014_T1_CALC_OFFSET: usize = + MULTIPLY_BY_014_T0_CALC_OFFSET + MULTIPLY_BY_01_TOTAL; +pub const MULTIPLY_BY_014_T2_CALC_OFFSET: usize = + MULTIPLY_BY_014_T1_CALC_OFFSET + MULTIPLY_BY_1_TOTAL; +pub const MULTIPLY_BY_014_X_CALC_OFFSET: usize = + MULTIPLY_BY_014_T2_CALC_OFFSET + FP6_NON_RESIDUE_MUL_TOTAL; +pub const MULTIPLY_BY_014_T3_CALC_OFFSET: usize = MULTIPLY_BY_014_X_CALC_OFFSET + + FP6_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 6; +pub const MULTIPLY_BY_014_T4_CALC_OFFSET: usize = MULTIPLY_BY_014_T3_CALC_OFFSET + + FP6_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 6; +pub const MULTIPLY_BY_014_T5_CALC_OFFSET: usize = MULTIPLY_BY_014_T4_CALC_OFFSET + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 2; +pub const MULTIPLY_BY_014_T6_CALC_OFFSET: usize = + MULTIPLY_BY_014_T5_CALC_OFFSET + MULTIPLY_BY_01_TOTAL; +pub const MULTIPLY_BY_014_Y_CALC_OFFSET: usize = MULTIPLY_BY_014_T6_CALC_OFFSET + + FP6_ADDITION_TOTAL + + FP6_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 6; +pub const MULTIPLY_BY_014_TOTAL: usize = MULTIPLY_BY_014_Y_CALC_OFFSET + + FP6_ADDITION_TOTAL + + FP6_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 6; + +// FP12 multiplication offsets +/* + These trace offsets are for fp12 multiplication. It needs 12 rows. The Ti's are defined in (super::native::mul_fp_12). 
+*/ +pub const FP12_MUL_SELECTOR_OFFSET: usize = 0; +pub const FP12_MUL_X_INPUT_OFFSET: usize = FP12_MUL_SELECTOR_OFFSET + 1; +pub const FP12_MUL_Y_INPUT_OFFSET: usize = FP12_MUL_X_INPUT_OFFSET + 24 * 3 * 2; +pub const FP12_MUL_T0_CALC_OFFSET: usize = FP12_MUL_Y_INPUT_OFFSET + 24 * 3 * 2; +pub const FP12_MUL_T1_CALC_OFFSET: usize = FP12_MUL_T0_CALC_OFFSET + FP6_MUL_TOTAL_COLUMNS; +pub const FP12_MUL_T2_CALC_OFFSET: usize = FP12_MUL_T1_CALC_OFFSET + FP6_MUL_TOTAL_COLUMNS; +pub const FP12_MUL_X_CALC_OFFSET: usize = FP12_MUL_T2_CALC_OFFSET + FP6_NON_RESIDUE_MUL_TOTAL; +pub const FP12_MUL_T3_CALC_OFFSET: usize = + FP12_MUL_X_CALC_OFFSET + FP6_ADDITION_TOTAL + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 6; +pub const FP12_MUL_T4_CALC_OFFSET: usize = + FP12_MUL_T3_CALC_OFFSET + FP6_ADDITION_TOTAL + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 6; +pub const FP12_MUL_T5_CALC_OFFSET: usize = + FP12_MUL_T4_CALC_OFFSET + FP6_ADDITION_TOTAL + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 6; +pub const FP12_MUL_T6_CALC_OFFSET: usize = FP12_MUL_T5_CALC_OFFSET + FP6_MUL_TOTAL_COLUMNS; +pub const FP12_MUL_Y_CALC_OFFSET: usize = FP12_MUL_T6_CALC_OFFSET + + FP6_ADDITION_TOTAL + + FP6_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 6; +pub const FP12_MUL_TOTAL_COLUMNS: usize = FP12_MUL_Y_CALC_OFFSET + + FP6_ADDITION_TOTAL + + FP6_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 6; + +// Cyclotomic square offsets +/* + These trace offsets are for cyclotomicSquare function (super::native::Fp12::cyclotomicSquare). It needs 12 rows. The Ti's are defined in native function. 
+*/ +pub const CYCLOTOMIC_SQ_SELECTOR_OFFSET: usize = 0; +pub const CYCLOTOMIC_SQ_INPUT_OFFSET: usize = CYCLOTOMIC_SQ_SELECTOR_OFFSET + 1; +pub const CYCLOTOMIC_SQ_T0_CALC_OFFSET: usize = CYCLOTOMIC_SQ_INPUT_OFFSET + 24 * 3 * 2; +pub const CYCLOTOMIC_SQ_T1_CALC_OFFSET: usize = CYCLOTOMIC_SQ_T0_CALC_OFFSET + FP4_SQ_TOTAL_COLUMNS; +pub const CYCLOTOMIC_SQ_T2_CALC_OFFSET: usize = CYCLOTOMIC_SQ_T1_CALC_OFFSET + FP4_SQ_TOTAL_COLUMNS; +pub const CYCLOTOMIC_SQ_T3_CALC_OFFSET: usize = CYCLOTOMIC_SQ_T2_CALC_OFFSET + FP4_SQ_TOTAL_COLUMNS; +pub const CYCLOTOMIC_SQ_T4_CALC_OFFSET: usize = + CYCLOTOMIC_SQ_T3_CALC_OFFSET + FP2_NON_RESIDUE_MUL_TOTAL; +pub const CYCLOTOMIC_SQ_T5_CALC_OFFSET: usize = CYCLOTOMIC_SQ_T4_CALC_OFFSET + + FP2_SUBTRACTION_TOTAL + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 2; +pub const CYCLOTOMIC_SQ_C0_CALC_OFFSET: usize = CYCLOTOMIC_SQ_T5_CALC_OFFSET + FP2_FP_TOTAL_COLUMNS; +pub const CYCLOTOMIC_SQ_T6_CALC_OFFSET: usize = CYCLOTOMIC_SQ_C0_CALC_OFFSET + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 2; +pub const CYCLOTOMIC_SQ_T7_CALC_OFFSET: usize = CYCLOTOMIC_SQ_T6_CALC_OFFSET + + FP2_SUBTRACTION_TOTAL + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 2; +pub const CYCLOTOMIC_SQ_C1_CALC_OFFSET: usize = CYCLOTOMIC_SQ_T7_CALC_OFFSET + FP2_FP_TOTAL_COLUMNS; +pub const CYCLOTOMIC_SQ_T8_CALC_OFFSET: usize = CYCLOTOMIC_SQ_C1_CALC_OFFSET + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 2; +pub const CYCLOTOMIC_SQ_T9_CALC_OFFSET: usize = CYCLOTOMIC_SQ_T8_CALC_OFFSET + + FP2_SUBTRACTION_TOTAL + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 2; +pub const CYCLOTOMIC_SQ_C2_CALC_OFFSET: usize = CYCLOTOMIC_SQ_T9_CALC_OFFSET + FP2_FP_TOTAL_COLUMNS; +pub const CYCLOTOMIC_SQ_T10_CALC_OFFSET: usize = CYCLOTOMIC_SQ_C2_CALC_OFFSET + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 2; +pub const 
CYCLOTOMIC_SQ_T11_CALC_OFFSET: usize = CYCLOTOMIC_SQ_T10_CALC_OFFSET + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 2; +pub const CYCLOTOMIC_SQ_C3_CALC_OFFSET: usize = + CYCLOTOMIC_SQ_T11_CALC_OFFSET + FP2_FP_TOTAL_COLUMNS; +pub const CYCLOTOMIC_SQ_T12_CALC_OFFSET: usize = CYCLOTOMIC_SQ_C3_CALC_OFFSET + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 2; +pub const CYCLOTOMIC_SQ_T13_CALC_OFFSET: usize = CYCLOTOMIC_SQ_T12_CALC_OFFSET + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 2; +pub const CYCLOTOMIC_SQ_C4_CALC_OFFSET: usize = + CYCLOTOMIC_SQ_T13_CALC_OFFSET + FP2_FP_TOTAL_COLUMNS; +pub const CYCLOTOMIC_SQ_T14_CALC_OFFSET: usize = CYCLOTOMIC_SQ_C4_CALC_OFFSET + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 2; +pub const CYCLOTOMIC_SQ_T15_CALC_OFFSET: usize = CYCLOTOMIC_SQ_T14_CALC_OFFSET + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 2; +pub const CYCLOTOMIC_SQ_C5_CALC_OFFSET: usize = + CYCLOTOMIC_SQ_T15_CALC_OFFSET + FP2_FP_TOTAL_COLUMNS; +pub const CYCLOTOMIC_SQ_TOTAL_COLUMNS: usize = CYCLOTOMIC_SQ_C5_CALC_OFFSET + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 2; + +// Cyclotomic exponent offsets +/* + These offsets are for cyclotomicExponent (super::native::Fp12::cyclotomicExponent) function. Needs 12*70 rows. The offsets are defined such that each 0 bit of the bls12-381 parameter takes 12 rows (one operation, cyclotomicSquare) and each 1 bit takes 12*2 rows (two operations, cyclotomicSquare and fp12 multiplication). + CYCLOTOMIC_EXP_START_ROW -> selector which is 1 for the first row of the trace. + FIRST_ROW_SELECTOR_OFFSET -> selector which is 1 for the starting row for each operation. Hence, every 12th row, it is set 1. + RES_ROW_SELECTOR_OFFSET -> selector which is 1 for the row which contains the final result of cyclotomicExponent. 
+ BIT1_SELECTOR_OFFSET -> selector which is 1 for each 1 bit of bls12-381 parameter. It is set 1 for 12 rows continous rows. + INPUT_OFFSET -> offset where input for the function is set. + Z_OFFSET -> offset where result of the previous computation is stored. + Z_CYCLOTOMIC_SQ_OFFSET -> offset containing the computation for cyclotomicSquare function. + Z_MUL_INPUT_OFFSET -> offset containing the computation for fp12 multiplication. + + Z_CYCLOTMIC_SQ_OFFSET and Z_MUL_INPUT_OFFSET are equal because both the operations are never done in the same rows. In a single row, either cyclotomic square is being computed or fp12 multiplication is being computed. +*/ +pub const CYCLOTOMIC_EXP_SELECTOR_OFFSET: usize = 0; +pub const CYCLOTOMIC_EXP_START_ROW: usize = CYCLOTOMIC_EXP_SELECTOR_OFFSET + 1; +pub const FIRST_ROW_SELECTOR_OFFSET: usize = CYCLOTOMIC_EXP_START_ROW + 1; +pub const BIT1_SELECTOR_OFFSET: usize = FIRST_ROW_SELECTOR_OFFSET + 1; +pub const RES_ROW_SELECTOR_OFFSET: usize = BIT1_SELECTOR_OFFSET + 1; +pub const INPUT_OFFSET: usize = RES_ROW_SELECTOR_OFFSET + 1; +pub const Z_OFFSET: usize = INPUT_OFFSET + 24 * 3 * 2; +pub const Z_CYCLOTOMIC_SQ_OFFSET: usize = Z_OFFSET + 24 * 3 * 2; +pub const Z_MUL_INPUT_OFFSET: usize = Z_OFFSET + 24 * 3 * 2; +pub const CYCLOTOMIC_EXP_TOTAL_COLUMNS: usize = Z_MUL_INPUT_OFFSET + FP12_MUL_TOTAL_COLUMNS; + +// Forbenius map Fp12 +/* + These trace offsets are for forbenius_map (super::native::Fp12::forbenius_map) function. It needs 12 rows. + FP12_FORBENIUS_MAP_DIV_OFFSET -> offset which stores integer division power/12. + FP12_FORBENIUS_MAP_REM_OFFSET -> offset which stores power%12. + FP12_FORBENIUS_MAP_BIT0_OFFSET, FP12_FORBENIUS_MAP_BIT1_OFFSET, FP12_FORBENIUS_MAP_BIT2_OFFSET, FP12_FORBENIUS_MAP_BIT3_OFFSET -> offsets which store the bit decomposition of remainder (power%12). 
+*/ +pub const FP12_FORBENIUS_MAP_SELECTOR_OFFSET: usize = 0; +pub const FP12_FORBENIUS_MAP_INPUT_OFFSET: usize = FP12_FORBENIUS_MAP_SELECTOR_OFFSET + 1; +pub const FP12_FORBENIUS_MAP_POW_OFFSET: usize = FP12_FORBENIUS_MAP_INPUT_OFFSET + 24 * 3 * 2; +pub const FP12_FORBENIUS_MAP_DIV_OFFSET: usize = FP12_FORBENIUS_MAP_POW_OFFSET + 1; +pub const FP12_FORBENIUS_MAP_REM_OFFSET: usize = FP12_FORBENIUS_MAP_DIV_OFFSET + 1; +pub const FP12_FORBENIUS_MAP_BIT0_OFFSET: usize = FP12_FORBENIUS_MAP_REM_OFFSET + 1; +pub const FP12_FORBENIUS_MAP_BIT1_OFFSET: usize = FP12_FORBENIUS_MAP_BIT0_OFFSET + 1; +pub const FP12_FORBENIUS_MAP_BIT2_OFFSET: usize = FP12_FORBENIUS_MAP_BIT1_OFFSET + 1; +pub const FP12_FORBENIUS_MAP_BIT3_OFFSET: usize = FP12_FORBENIUS_MAP_BIT2_OFFSET + 1; +pub const FP12_FORBENIUS_MAP_R0_CALC_OFFSET: usize = FP12_FORBENIUS_MAP_BIT3_OFFSET + 1; +pub const FP12_FORBENIUS_MAP_C0C1C2_CALC_OFFSET: usize = + FP12_FORBENIUS_MAP_R0_CALC_OFFSET + FP6_FORBENIUS_MAP_TOTAL_COLUMNS; +pub const FP12_FORBENIUS_MAP_C0_CALC_OFFSET: usize = + FP12_FORBENIUS_MAP_C0C1C2_CALC_OFFSET + FP6_FORBENIUS_MAP_TOTAL_COLUMNS; +pub const FP12_FORBENIUS_MAP_C1_CALC_OFFSET: usize = + FP12_FORBENIUS_MAP_C0_CALC_OFFSET + TOTAL_COLUMNS_FP2_MULTIPLICATION; +pub const FP12_FORBENIUS_MAP_C2_CALC_OFFSET: usize = + FP12_FORBENIUS_MAP_C1_CALC_OFFSET + TOTAL_COLUMNS_FP2_MULTIPLICATION; +pub const FP12_FORBENIUS_MAP_TOTAL_COLUMNS: usize = + FP12_FORBENIUS_MAP_C2_CALC_OFFSET + TOTAL_COLUMNS_FP2_MULTIPLICATION; + +// Fp12 conjugate +/* + These trace offsets are for fp12 conjugate (super::native::Fp12::conjugate). It needs 1 row. 
+*/ +pub const FP12_CONJUGATE_INPUT_OFFSET: usize = 0; +pub const FP12_CONJUGATE_OUTPUT_OFFSET: usize = FP12_CONJUGATE_INPUT_OFFSET + 24 * 3 * 2; +pub const FP12_CONJUGATE_ADDITIION_OFFSET: usize = FP12_CONJUGATE_OUTPUT_OFFSET + 24 * 3 * 2; +pub const FP12_CONJUGATE_TOTAL: usize = FP12_CONJUGATE_ADDITIION_OFFSET + FP6_ADDITION_TOTAL; + +/// Fills trace of [multiplyBy014](super::native::Fp12::multiplyBy014) function. Input is 12\*12 limbs and three 12\*2 limbs. Needs 12 rows. +pub fn fill_trace_multiply_by_014, const D: usize, const C: usize>( + trace: &mut Vec<[F; C]>, + x: &Fp12, + o0: &Fp2, + o1: &Fp2, + o4: &Fp2, + start_row: usize, + end_row: usize, + start_col: usize, +) { + for row in start_row..end_row + 1 { + for i in 0..12 { + assign_u32_in_series( + trace, + row, + start_col + MULTIPLY_BY_014_INPUT_OFFSET + i * 12, + &x.0[i].0, + ); + } + for i in 0..2 { + assign_u32_in_series( + trace, + row, + start_col + MULTIPLY_BY_014_O0_OFFSET + i * 12, + &o0.0[i].0, + ); + assign_u32_in_series( + trace, + row, + start_col + MULTIPLY_BY_014_O1_OFFSET + i * 12, + &o1.0[i].0, + ); + assign_u32_in_series( + trace, + row, + start_col + MULTIPLY_BY_014_O4_OFFSET + i * 12, + &o4.0[i].0, + ); + } + trace[row][start_col + MULTIPLY_BY_014_SELECTOR_OFFSET] = F::ONE; + } + trace[end_row][start_col + MULTIPLY_BY_014_SELECTOR_OFFSET] = F::ZERO; + + let c0 = Fp6(x.0[..6].try_into().unwrap()); + let c1 = Fp6(x.0[6..].try_into().unwrap()); + + let t0 = c0.multiply_by_01(*o0, *o1); + fill_trace_multiply_by_01( + trace, + &c0, + o0, + o1, + start_row, + end_row, + start_col + MULTIPLY_BY_014_T0_CALC_OFFSET, + ); + let t1 = c1.multiply_by_1(*o4); + fill_trace_multiply_by_1( + trace, + &c1, + o4, + start_row, + end_row, + start_col + MULTIPLY_BY_014_T1_CALC_OFFSET, + ); + let t2 = mul_by_nonresidue(t1.0); + for row in start_row..end_row + 1 { + fill_trace_non_residue_multiplication_fp6( + trace, + &t1, + row, + start_col + MULTIPLY_BY_014_T2_CALC_OFFSET, + ); + } + let _x = t2 + t0; + 
for row in start_row..end_row + 1 { + fill_trace_addition_with_reduction_fp6( + trace, + &t2, + &t0, + row, + start_col + MULTIPLY_BY_014_X_CALC_OFFSET, + ); + } + + let t3 = c0 + c1; + for row in start_row..end_row + 1 { + fill_trace_addition_with_reduction_fp6( + trace, + &c0, + &c1, + row, + start_col + MULTIPLY_BY_014_T3_CALC_OFFSET, + ); + } + let t4 = (*o1) + (*o4); + for row in start_row..end_row + 1 { + fill_trace_addition_with_reduction( + trace, + &o1.get_u32_slice(), + &o4.get_u32_slice(), + row, + start_col + MULTIPLY_BY_014_T4_CALC_OFFSET, + ); + } + let t5 = t3.multiply_by_01(*o0, t4); + fill_trace_multiply_by_01( + trace, + &t3, + o0, + &t4, + start_row, + end_row, + start_col + MULTIPLY_BY_014_T5_CALC_OFFSET, + ); + let t6 = t5 - t0; + for row in start_row..end_row + 1 { + fill_trace_subtraction_with_reduction_fp6( + trace, + &t5, + &t0, + row, + start_col + MULTIPLY_BY_014_T6_CALC_OFFSET, + ); + } + let _y = t6 - t1; + for row in start_row..end_row + 1 { + fill_trace_subtraction_with_reduction_fp6( + trace, + &t6, + &t1, + row, + start_col + MULTIPLY_BY_014_Y_CALC_OFFSET, + ); + } +} + +/// Fills stark trace for fp12 multiplication. Inputs are 12*12 limbs each. Needs 12 rows. 
+pub fn fill_trace_fp12_multiplication< + F: RichField + Extendable, + const D: usize, + const C: usize, +>( + trace: &mut Vec<[F; C]>, + x: &Fp12, + y: &Fp12, + start_row: usize, + end_row: usize, + start_col: usize, +) { + for row in start_row..end_row + 1 { + for i in 0..12 { + assign_u32_in_series( + trace, + row, + start_col + FP12_MUL_X_INPUT_OFFSET + i * 12, + &x.0[i].0, + ); + assign_u32_in_series( + trace, + row, + start_col + FP12_MUL_Y_INPUT_OFFSET + i * 12, + &y.0[i].0, + ); + } + trace[row][start_col + FP12_MUL_SELECTOR_OFFSET] = F::ONE; + } + trace[end_row][start_col + FP12_MUL_SELECTOR_OFFSET] = F::ZERO; + let (c0, c1) = ( + Fp6(x.0[0..6].try_into().unwrap()), + Fp6(x.0[6..12].try_into().unwrap()), + ); + let (r0, r1) = ( + Fp6(y.0[0..6].try_into().unwrap()), + Fp6(y.0[6..12].try_into().unwrap()), + ); + let t0 = c0 * r0; + fill_trace_fp6_multiplication( + trace, + &c0, + &r0, + start_row, + end_row, + start_col + FP12_MUL_T0_CALC_OFFSET, + ); + let t1 = c1 * r1; + fill_trace_fp6_multiplication( + trace, + &c1, + &r1, + start_row, + end_row, + start_col + FP12_MUL_T1_CALC_OFFSET, + ); + let t2 = mul_by_nonresidue(t1.0); + for row in start_row..end_row + 1 { + fill_trace_non_residue_multiplication_fp6( + trace, + &t1, + row, + start_col + FP12_MUL_T2_CALC_OFFSET, + ); + } + let _x = t0 + t2; + for row in start_row..end_row + 1 { + fill_trace_addition_with_reduction_fp6( + trace, + &t0, + &t2, + row, + start_col + FP12_MUL_X_CALC_OFFSET, + ); + } + + let t3 = c0 + c1; + for row in start_row..end_row + 1 { + fill_trace_addition_with_reduction_fp6( + trace, + &c0, + &c1, + row, + start_col + FP12_MUL_T3_CALC_OFFSET, + ); + } + let t4 = r0 + r1; + for row in start_row..end_row + 1 { + fill_trace_addition_with_reduction_fp6( + trace, + &r0, + &r1, + row, + start_col + FP12_MUL_T4_CALC_OFFSET, + ); + } + let t5 = t3 * t4; + fill_trace_fp6_multiplication( + trace, + &t3, + &t4, + start_row, + end_row, + start_col + FP12_MUL_T5_CALC_OFFSET, + ); + let t6 = t5 
- t0; + for row in start_row..end_row + 1 { + fill_trace_subtraction_with_reduction_fp6( + trace, + &t5, + &t0, + row, + start_col + FP12_MUL_T6_CALC_OFFSET, + ); + } + let _y = t6 - t1; + for row in start_row..end_row + 1 { + fill_trace_subtraction_with_reduction_fp6( + trace, + &t6, + &t1, + row, + start_col + FP12_MUL_Y_CALC_OFFSET, + ); + } +} + +/// Fills trace of [cyclotomicSquare](super::native::Fp12::cyclotomicSquare) function. Input is 12*12 limbs. Needs 12 rows. +pub fn fill_trace_cyclotomic_sq, const D: usize, const C: usize>( + trace: &mut Vec<[F; C]>, + x: &Fp12, + start_row: usize, + end_row: usize, + start_col: usize, +) { + for row in start_row..end_row + 1 { + assign_u32_in_series( + trace, + row, + start_col + CYCLOTOMIC_SQ_INPUT_OFFSET, + &x.get_u32_slice().concat(), + ); + trace[row][start_col + CYCLOTOMIC_SQ_SELECTOR_OFFSET] = F::ONE; + } + trace[end_row][start_col + CYCLOTOMIC_SQ_SELECTOR_OFFSET] = F::ZERO; + let c0c0 = Fp2(x.0[0..2].try_into().unwrap()); + let c0c1 = Fp2(x.0[2..4].try_into().unwrap()); + let c0c2 = Fp2(x.0[4..6].try_into().unwrap()); + let c1c0 = Fp2(x.0[6..8].try_into().unwrap()); + let c1c1 = Fp2(x.0[8..10].try_into().unwrap()); + let c1c2 = Fp2(x.0[10..12].try_into().unwrap()); + let two = Fp::get_fp_from_biguint(BigUint::from(2 as u32)); + + let t0 = fp4_square(c0c0, c1c1); + fill_trace_fp4_sq( + trace, + &c0c0, + &c1c1, + start_row, + end_row, + start_col + CYCLOTOMIC_SQ_T0_CALC_OFFSET, + ); + + let t1 = fp4_square(c1c0, c0c2); + fill_trace_fp4_sq( + trace, + &c1c0, + &c0c2, + start_row, + end_row, + start_col + CYCLOTOMIC_SQ_T1_CALC_OFFSET, + ); + + let t2 = fp4_square(c0c1, c1c2); + fill_trace_fp4_sq( + trace, + &c0c1, + &c1c2, + start_row, + end_row, + start_col + CYCLOTOMIC_SQ_T2_CALC_OFFSET, + ); + + let t3 = t2.1.mul_by_nonresidue(); + for row in start_row..end_row + 1 { + fill_trace_non_residue_multiplication( + trace, + &t2.1.get_u32_slice(), + row, + start_col + CYCLOTOMIC_SQ_T3_CALC_OFFSET, + ); + } + + let t4 
= t0.0 - c0c0; + for row in start_row..end_row + 1 { + fill_trace_subtraction_with_reduction( + trace, + &t0.0.get_u32_slice(), + &c0c0.get_u32_slice(), + row, + start_col + CYCLOTOMIC_SQ_T4_CALC_OFFSET, + ); + } + let t5 = t4 * two; + fill_trace_fp2_fp_mul( + trace, + &t4.get_u32_slice(), + &two.0, + start_row, + end_row, + start_col + CYCLOTOMIC_SQ_T5_CALC_OFFSET, + ); + let _c0 = t5 + t0.0; + for row in start_row..end_row + 1 { + fill_trace_addition_with_reduction( + trace, + &t5.get_u32_slice(), + &t0.0.get_u32_slice(), + row, + start_col + CYCLOTOMIC_SQ_C0_CALC_OFFSET, + ); + } + + let t6 = t1.0 - c0c1; + for row in start_row..end_row + 1 { + fill_trace_subtraction_with_reduction( + trace, + &t1.0.get_u32_slice(), + &c0c1.get_u32_slice(), + row, + start_col + CYCLOTOMIC_SQ_T6_CALC_OFFSET, + ); + } + let t7 = t6 * two; + fill_trace_fp2_fp_mul( + trace, + &t6.get_u32_slice(), + &two.0, + start_row, + end_row, + start_col + CYCLOTOMIC_SQ_T7_CALC_OFFSET, + ); + let _c1 = t7 + t1.0; + for row in start_row..end_row + 1 { + fill_trace_addition_with_reduction( + trace, + &t7.get_u32_slice(), + &t1.0.get_u32_slice(), + row, + start_col + CYCLOTOMIC_SQ_C1_CALC_OFFSET, + ); + } + + let t8 = t2.0 - c0c2; + for row in start_row..end_row + 1 { + fill_trace_subtraction_with_reduction( + trace, + &t2.0.get_u32_slice(), + &c0c2.get_u32_slice(), + row, + start_col + CYCLOTOMIC_SQ_T8_CALC_OFFSET, + ); + } + let t9 = t8 * two; + fill_trace_fp2_fp_mul( + trace, + &t8.get_u32_slice(), + &two.0, + start_row, + end_row, + start_col + CYCLOTOMIC_SQ_T9_CALC_OFFSET, + ); + let _c2 = t9 + t2.0; + for row in start_row..end_row + 1 { + fill_trace_addition_with_reduction( + trace, + &t9.get_u32_slice(), + &t2.0.get_u32_slice(), + row, + start_col + CYCLOTOMIC_SQ_C2_CALC_OFFSET, + ); + } + + let t10 = t3 + c1c0; + for row in start_row..end_row + 1 { + fill_trace_addition_with_reduction( + trace, + &t3.get_u32_slice(), + &c1c0.get_u32_slice(), + row, + start_col + 
CYCLOTOMIC_SQ_T10_CALC_OFFSET, + ); + } + let t11 = t10 * two; + fill_trace_fp2_fp_mul( + trace, + &t10.get_u32_slice(), + &two.0, + start_row, + end_row, + start_col + CYCLOTOMIC_SQ_T11_CALC_OFFSET, + ); + let _c3 = t11 + t3; + for row in start_row..end_row + 1 { + fill_trace_addition_with_reduction( + trace, + &t11.get_u32_slice(), + &t3.get_u32_slice(), + row, + start_col + CYCLOTOMIC_SQ_C3_CALC_OFFSET, + ); + } + + let t12 = t0.1 + c1c1; + for row in start_row..end_row + 1 { + fill_trace_addition_with_reduction( + trace, + &t0.1.get_u32_slice(), + &c1c1.get_u32_slice(), + row, + start_col + CYCLOTOMIC_SQ_T12_CALC_OFFSET, + ); + } + let t13 = t12 * two; + fill_trace_fp2_fp_mul( + trace, + &t12.get_u32_slice(), + &two.0, + start_row, + end_row, + start_col + CYCLOTOMIC_SQ_T13_CALC_OFFSET, + ); + let _c4 = t13 + t0.1; + for row in start_row..end_row + 1 { + fill_trace_addition_with_reduction( + trace, + &t13.get_u32_slice(), + &t0.1.get_u32_slice(), + row, + start_col + CYCLOTOMIC_SQ_C4_CALC_OFFSET, + ); + } + + let t14 = t1.1 + c1c2; + for row in start_row..end_row + 1 { + fill_trace_addition_with_reduction( + trace, + &t1.1.get_u32_slice(), + &c1c2.get_u32_slice(), + row, + start_col + CYCLOTOMIC_SQ_T14_CALC_OFFSET, + ); + } + let t15 = t14 * two; + fill_trace_fp2_fp_mul( + trace, + &t14.get_u32_slice(), + &two.0, + start_row, + end_row, + start_col + CYCLOTOMIC_SQ_T15_CALC_OFFSET, + ); + let _c5 = t15 + t1.1; + for row in start_row..end_row + 1 { + fill_trace_addition_with_reduction( + trace, + &t15.get_u32_slice(), + &t1.1.get_u32_slice(), + row, + start_col + CYCLOTOMIC_SQ_C5_CALC_OFFSET, + ); + } +} + +/// Fills trace of [cyclotomicExponent](super::native::Fp12::cyclotocmicExponent) function. Input is 12\*12 limbs. Needs 12\*70 rows. For each bit 0 of bls12-381 parameter, fills the trace for cyclotomicSquare computation. 
For each bit 1 of the bls12-381 parameter, fills trace for cyclotomic square computation in 12 rows, then fills the trace for fp12 multiplication computation in the next 12 rows and also sets `trace[row][start_col + BIT1_SELECTOR_OFFSET]` to 1 for these rows. After going through all bits of the bls12-381 parameter, fills the result in the next row's Z_OFFSET, while also setting RES_ROW_SELECTOR to 1. +pub fn fill_trace_cyclotomic_exp, const D: usize, const C: usize>( + trace: &mut Vec<[F; C]>, + x: &Fp12, + start_row: usize, + end_row: usize, + start_col: usize, +) { + for row in start_row..end_row + 1 { + assign_u32_in_series( + trace, + row, + start_col + INPUT_OFFSET, + &x.get_u32_slice().concat(), + ); + trace[row][start_col + CYCLOTOMIC_EXP_SELECTOR_OFFSET] = F::ONE; + } + trace[end_row][start_col + CYCLOTOMIC_EXP_SELECTOR_OFFSET] = F::ZERO; + trace[start_row][start_col + CYCLOTOMIC_EXP_START_ROW] = F::ONE; + let mut z = Fp12::one(); + let mut i = get_bls_12_381_parameter().bits() - 1; + let mut bitone = false; + assert_eq!(end_row + 1 - start_row, 70 * 12 + 1); + + for j in 0..70 { + let s_row = start_row + j * 12; + let e_row = s_row + 11; + for row in s_row..e_row + 1 { + if bitone { + trace[row][start_col + BIT1_SELECTOR_OFFSET] = F::ONE; + } + assign_u32_in_series( + trace, + row, + start_col + Z_OFFSET, + &z.get_u32_slice().concat(), + ); + } + trace[s_row][start_col + FIRST_ROW_SELECTOR_OFFSET] = F::ONE; + if bitone { + fill_trace_fp12_multiplication( + trace, + &z, + &x, + s_row, + e_row, + start_col + Z_MUL_INPUT_OFFSET, + ); + z = z * (*x); + } else { + fill_trace_cyclotomic_sq(trace, &z, s_row, e_row, start_col + Z_CYCLOTOMIC_SQ_OFFSET); + z = z.cyclotomic_square(); + } + if get_bls_12_381_parameter().bit(i) && !bitone { + bitone = true; + } else if j < 69 { + i -= 1; + bitone = false; + } + } + trace[start_row + 70 * 12][start_col + RES_ROW_SELECTOR_OFFSET] = F::ONE; + assign_u32_in_series( + trace, + start_row + 70 * 12, + start_col + Z_OFFSET, + 
&z.get_u32_slice().concat(), + ); +} + +/// Fills trace of [forbenius_map](super::native::Fp12::forbenius_map) function. Input is 12*12 limbs and usize. Needs 12 rows. +pub fn fill_trace_fp12_forbenius_map< + F: RichField + Extendable, + const D: usize, + const C: usize, +>( + trace: &mut Vec<[F; C]>, + x: &Fp12, + pow: usize, + start_row: usize, + end_row: usize, + start_col: usize, +) { + let div = pow / 12; + let rem = pow % 12; + for row in start_row..end_row + 1 { + assign_u32_in_series( + trace, + row, + start_col + FP12_FORBENIUS_MAP_INPUT_OFFSET, + &x.get_u32_slice().concat(), + ); + trace[row][start_col + FP12_FORBENIUS_MAP_SELECTOR_OFFSET] = F::ONE; + trace[row][start_col + FP12_FORBENIUS_MAP_POW_OFFSET] = F::from_canonical_usize(pow); + trace[row][start_col + FP12_FORBENIUS_MAP_DIV_OFFSET] = F::from_canonical_usize(div); + trace[row][start_col + FP12_FORBENIUS_MAP_REM_OFFSET] = F::from_canonical_usize(rem); + trace[row][start_col + FP12_FORBENIUS_MAP_BIT0_OFFSET] = F::from_canonical_usize(rem & 1); + trace[row][start_col + FP12_FORBENIUS_MAP_BIT1_OFFSET] = + F::from_canonical_usize((rem >> 1) & 1); + trace[row][start_col + FP12_FORBENIUS_MAP_BIT2_OFFSET] = + F::from_canonical_usize((rem >> 2) & 1); + trace[row][start_col + FP12_FORBENIUS_MAP_BIT3_OFFSET] = F::from_canonical_usize(rem >> 3); + } + trace[end_row][start_col + FP12_FORBENIUS_MAP_SELECTOR_OFFSET] = F::ZERO; + let r0 = Fp6(x.0[0..6].to_vec().try_into().unwrap()); + let r1 = Fp6(x.0[6..12].to_vec().try_into().unwrap()); + let _x = r0.forbenius_map(pow); + fill_trace_fp6_forbenius_map( + trace, + &r0, + pow, + start_row, + end_row, + start_col + FP12_FORBENIUS_MAP_R0_CALC_OFFSET, + ); + let c0c1c2 = r1.forbenius_map(pow); + fill_trace_fp6_forbenius_map( + trace, + &r1, + pow, + start_row, + end_row, + start_col + FP12_FORBENIUS_MAP_C0C1C2_CALC_OFFSET, + ); + let c0 = Fp2(c0c1c2.0[0..2].to_vec().try_into().unwrap()); + let c1 = Fp2(c0c1c2.0[2..4].to_vec().try_into().unwrap()); + let c2 = 
Fp2(c0c1c2.0[4..6].to_vec().try_into().unwrap()); + let forbenius_coefficients = Fp12::forbenius_coefficients(); + let coeff = forbenius_coefficients[pow % 12]; + generate_trace_fp2_mul( + trace, + c0.get_u32_slice(), + coeff.get_u32_slice(), + start_row, + end_row, + start_col + FP12_FORBENIUS_MAP_C0_CALC_OFFSET, + ); + generate_trace_fp2_mul( + trace, + c1.get_u32_slice(), + coeff.get_u32_slice(), + start_row, + end_row, + start_col + FP12_FORBENIUS_MAP_C1_CALC_OFFSET, + ); + generate_trace_fp2_mul( + trace, + c2.get_u32_slice(), + coeff.get_u32_slice(), + start_row, + end_row, + start_col + FP12_FORBENIUS_MAP_C2_CALC_OFFSET, + ); +} + +/// Fill trace of [conjugate](super::native::Fp12::conjugate) function. Input is 12*12 limbs. Needs 1 row. +pub fn fill_trace_fp12_conjugate, const D: usize, const C: usize>( + trace: &mut Vec<[F; C]>, + x: &Fp12, + row: usize, + start_col: usize, +) { + assign_u32_in_series( + trace, + row, + start_col + FP12_CONJUGATE_INPUT_OFFSET, + &x.get_u32_slice().concat(), + ); + let conjugate = x.conjugate(); + assign_u32_in_series( + trace, + row, + start_col + FP12_CONJUGATE_OUTPUT_OFFSET, + &conjugate.get_u32_slice().concat(), + ); + let x_fp6 = Fp6(x.0[6..12].try_into().unwrap()); + let conjugat_fp6 = Fp6(conjugate.0[6..12].try_into().unwrap()); + fill_trace_addition_fp6( + trace, + &x_fp6.get_u32_slice(), + &conjugat_fp6.get_u32_slice(), + row, + start_col + FP12_CONJUGATE_ADDITIION_OFFSET, + ); +} + +/// Constraints [multiplyBy014](super::native::Fp12::multiplyBy014) function. +/// +/// Constraints inputs across this and next row, wherever selector is set to on. Constraints all the Ti's (defined in the native function) accordinng to their respective operations. +pub fn add_multiply_by_014_constraints< + F: RichField + Extendable, + const D: usize, + FE, + P, + const D2: usize, +>( + local_values: &[P], + next_values: &[P], + yield_constr: &mut ConstraintConsumer

, + start_col: usize, + bit_selector: Option

, +) where + FE: FieldExtension, + P: PackedField, +{ + let bit_selector_val = bit_selector.unwrap_or(P::ONES); + + for i in 0..12 { + for j in 0..12 { + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + MULTIPLY_BY_014_SELECTOR_OFFSET] + * (local_values[start_col + MULTIPLY_BY_014_INPUT_OFFSET + j * 12 + i] + - next_values[start_col + MULTIPLY_BY_014_INPUT_OFFSET + j * 12 + i]), + ); + } + for j in 0..2 { + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + MULTIPLY_BY_014_SELECTOR_OFFSET] + * (local_values[start_col + MULTIPLY_BY_014_O0_OFFSET + j * 12 + i] + - next_values[start_col + MULTIPLY_BY_014_O0_OFFSET + j * 12 + i]), + ); + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + MULTIPLY_BY_014_SELECTOR_OFFSET] + * (local_values[start_col + MULTIPLY_BY_014_O1_OFFSET + j * 12 + i] + - next_values[start_col + MULTIPLY_BY_014_O1_OFFSET + j * 12 + i]), + ); + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + MULTIPLY_BY_014_SELECTOR_OFFSET] + * (local_values[start_col + MULTIPLY_BY_014_O4_OFFSET + j * 12 + i] + - next_values[start_col + MULTIPLY_BY_014_O4_OFFSET + j * 12 + i]), + ); + } + } + + // T0 + for i in 0..12 { + for j in 0..6 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + MULTIPLY_BY_014_T0_CALC_OFFSET + + MULTIPLY_BY_01_SELECTOR_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_014_T0_CALC_OFFSET + + MULTIPLY_BY_01_INPUT_OFFSET + + j * 12 + + i] + - local_values[start_col + MULTIPLY_BY_014_INPUT_OFFSET + j * 12 + i]), + ); + } + for j in 0..2 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + MULTIPLY_BY_014_T0_CALC_OFFSET + + MULTIPLY_BY_01_SELECTOR_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_014_T0_CALC_OFFSET + + MULTIPLY_BY_01_B0_OFFSET + + j * 12 + + i] + - local_values[start_col + MULTIPLY_BY_014_O0_OFFSET + j * 12 + i]), + ); + 
yield_constr.constraint( + bit_selector_val + * local_values[start_col + + MULTIPLY_BY_014_T0_CALC_OFFSET + + MULTIPLY_BY_01_SELECTOR_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_014_T0_CALC_OFFSET + + MULTIPLY_BY_01_B1_OFFSET + + j * 12 + + i] + - local_values[start_col + MULTIPLY_BY_014_O1_OFFSET + j * 12 + i]), + ); + } + } + add_multiply_by_01_constraints( + local_values, + next_values, + yield_constr, + start_col + MULTIPLY_BY_014_T0_CALC_OFFSET, + bit_selector, + ); + + // T1 + for i in 0..12 { + for j in 0..6 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + MULTIPLY_BY_014_T1_CALC_OFFSET + + MULTIPLY_BY_1_SELECTOR_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_014_T1_CALC_OFFSET + + MULTIPLY_BY_1_INPUT_OFFSET + + j * 12 + + i] + - local_values + [start_col + MULTIPLY_BY_014_INPUT_OFFSET + j * 12 + i + 24 * 3]), + ); + } + for j in 0..2 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + MULTIPLY_BY_014_T1_CALC_OFFSET + + MULTIPLY_BY_1_SELECTOR_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_014_T1_CALC_OFFSET + + MULTIPLY_BY_1_B1_OFFSET + + j * 12 + + i] + - local_values[start_col + MULTIPLY_BY_014_O4_OFFSET + j * 12 + i]), + ); + } + } + add_multiply_by_1_constraints( + local_values, + next_values, + yield_constr, + start_col + MULTIPLY_BY_014_T1_CALC_OFFSET, + bit_selector, + ); + + // T2 + for j in 0..2 { + let (x_offset, yz_offset) = if j == 0 { + (FP2_NON_RESIDUE_MUL_Z0_REDUCE_OFFSET, Z1_REDUCE_OFFSET) + } else { + (FP2_NON_RESIDUE_MUL_Z1_REDUCE_OFFSET, Z2_REDUCE_OFFSET) + }; + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + MULTIPLY_BY_014_T2_CALC_OFFSET + + FP6_NON_RESIDUE_MUL_CHECK_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_014_T2_CALC_OFFSET + + FP6_NON_RESIDUE_MUL_INPUT_OFFSET + + i + + j * 12] + - local_values[start_col + + MULTIPLY_BY_014_T1_CALC_OFFSET + + MULTIPLY_BY_1_X_CALC_OFFSET + + x_offset + + 
FP_SINGLE_REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + MULTIPLY_BY_014_T2_CALC_OFFSET + + FP6_NON_RESIDUE_MUL_CHECK_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_014_T2_CALC_OFFSET + + FP6_NON_RESIDUE_MUL_INPUT_OFFSET + + i + + j * 12 + + 24] + - local_values[start_col + + MULTIPLY_BY_014_T1_CALC_OFFSET + + MULTIPLY_BY_1_Y_CALC_OFFSET + + yz_offset + + REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + MULTIPLY_BY_014_T2_CALC_OFFSET + + FP6_NON_RESIDUE_MUL_CHECK_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_014_T2_CALC_OFFSET + + FP6_NON_RESIDUE_MUL_INPUT_OFFSET + + i + + j * 12 + + 48] + - local_values[start_col + + MULTIPLY_BY_014_T1_CALC_OFFSET + + MULTIPLY_BY_1_Z_CALC_OFFSET + + yz_offset + + REDUCED_OFFSET + + i]), + ); + } + } + add_non_residue_multiplication_fp6_constraints( + local_values, + yield_constr, + start_col + MULTIPLY_BY_014_T2_CALC_OFFSET, + bit_selector, + ); + + // X + for j in 0..6 { + let (addition_offset, x_offset, y_offset) = if j == 0 { + ( + FP6_ADDITION_0_OFFSET + FP2_ADDITION_0_OFFSET, + FP6_NON_RESIDUE_MUL_C2 + + FP2_NON_RESIDUE_MUL_Z0_REDUCE_OFFSET + + FP_SINGLE_REDUCED_OFFSET, + MULTIPLY_BY_01_X_CALC_OFFSET + FP2_ADDITION_TOTAL, + ) + } else if j == 1 { + ( + FP6_ADDITION_0_OFFSET + FP2_ADDITION_1_OFFSET, + FP6_NON_RESIDUE_MUL_C2 + + FP2_NON_RESIDUE_MUL_Z1_REDUCE_OFFSET + + FP_SINGLE_REDUCED_OFFSET, + MULTIPLY_BY_01_X_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP_SINGLE_REDUCE_TOTAL + + RANGE_CHECK_TOTAL, + ) + } else if j == 2 { + ( + FP6_ADDITION_1_OFFSET + FP2_ADDITION_0_OFFSET, + FP6_NON_RESIDUE_MUL_INPUT_OFFSET, + MULTIPLY_BY_01_Y_CALC_OFFSET + FP2_ADDITION_TOTAL + FP2_SUBTRACTION_TOTAL, + ) + } else if j == 3 { + ( + FP6_ADDITION_1_OFFSET + FP2_ADDITION_1_OFFSET, + FP6_NON_RESIDUE_MUL_INPUT_OFFSET + 12, + MULTIPLY_BY_01_Y_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + 
FP_SINGLE_REDUCE_TOTAL + + RANGE_CHECK_TOTAL, + ) + } else if j == 4 { + ( + FP6_ADDITION_2_OFFSET + FP2_ADDITION_0_OFFSET, + FP6_NON_RESIDUE_MUL_INPUT_OFFSET + 24, + MULTIPLY_BY_01_Z_CALC_OFFSET + FP2_ADDITION_TOTAL, + ) + } else { + ( + FP6_ADDITION_2_OFFSET + FP2_ADDITION_1_OFFSET, + FP6_NON_RESIDUE_MUL_INPUT_OFFSET + 36, + MULTIPLY_BY_01_Z_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP_SINGLE_REDUCE_TOTAL + + RANGE_CHECK_TOTAL, + ) + }; + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + MULTIPLY_BY_014_X_CALC_OFFSET + + addition_offset + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_014_X_CALC_OFFSET + + addition_offset + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + MULTIPLY_BY_014_T2_CALC_OFFSET + x_offset + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + MULTIPLY_BY_014_X_CALC_OFFSET + + addition_offset + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_014_X_CALC_OFFSET + + addition_offset + + FP_ADDITION_Y_OFFSET + + i] + - local_values[start_col + + MULTIPLY_BY_014_T0_CALC_OFFSET + + y_offset + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + } + } + add_addition_with_reduction_constranints_fp6( + local_values, + yield_constr, + start_col + MULTIPLY_BY_014_X_CALC_OFFSET, + bit_selector, + ); + + // T3 + for j in 0..3 { + let mut addition_offset = if j == 0 { + FP6_ADDITION_0_OFFSET + } else if j == 1 { + FP6_ADDITION_1_OFFSET + } else { + FP6_ADDITION_2_OFFSET + }; + for k in 0..2 { + addition_offset += if k == 0 { + FP2_ADDITION_0_OFFSET + } else { + FP2_ADDITION_1_OFFSET + }; + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + MULTIPLY_BY_014_T3_CALC_OFFSET + + addition_offset + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_014_T3_CALC_OFFSET + + addition_offset + + FP_ADDITION_X_OFFSET + + i] + - local_values + [start_col + 
MULTIPLY_BY_014_INPUT_OFFSET + j * 24 + k * 12 + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + MULTIPLY_BY_014_T3_CALC_OFFSET + + addition_offset + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_014_T3_CALC_OFFSET + + addition_offset + + FP_ADDITION_Y_OFFSET + + i] + - local_values[start_col + + MULTIPLY_BY_014_INPUT_OFFSET + + j * 24 + + k * 12 + + i + + 24 * 3]), + ); + } + } + } + add_addition_with_reduction_constranints_fp6( + local_values, + yield_constr, + start_col + MULTIPLY_BY_014_T3_CALC_OFFSET, + bit_selector, + ); + + // T4 + for j in 0..2 { + let addition_offset = if j == 0 { + FP2_ADDITION_0_OFFSET + } else { + FP2_ADDITION_1_OFFSET + }; + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + MULTIPLY_BY_014_T4_CALC_OFFSET + + addition_offset + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_014_T4_CALC_OFFSET + + addition_offset + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + MULTIPLY_BY_014_O1_OFFSET + j * 12 + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + MULTIPLY_BY_014_T4_CALC_OFFSET + + addition_offset + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_014_T4_CALC_OFFSET + + addition_offset + + FP_ADDITION_Y_OFFSET + + i] + - local_values[start_col + MULTIPLY_BY_014_O4_OFFSET + j * 12 + i]), + ); + } + } + add_addition_with_reduction_constranints( + local_values, + yield_constr, + start_col + MULTIPLY_BY_014_T4_CALC_OFFSET, + bit_selector, + ); + + // T5 + for i in 0..12 { + for j in 0..6 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + MULTIPLY_BY_014_T5_CALC_OFFSET + + MULTIPLY_BY_01_SELECTOR_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_014_T5_CALC_OFFSET + + MULTIPLY_BY_01_INPUT_OFFSET + + j * 12 + + i] + - local_values[start_col + + MULTIPLY_BY_014_T3_CALC_OFFSET + + FP6_ADDITION_TOTAL + + 
(FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + } + for j in 0..2 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + MULTIPLY_BY_014_T5_CALC_OFFSET + + MULTIPLY_BY_01_SELECTOR_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_014_T5_CALC_OFFSET + + MULTIPLY_BY_01_B0_OFFSET + + j * 12 + + i] + - local_values[start_col + MULTIPLY_BY_014_O0_OFFSET + j * 12 + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + MULTIPLY_BY_014_T5_CALC_OFFSET + + MULTIPLY_BY_01_SELECTOR_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_014_T5_CALC_OFFSET + + MULTIPLY_BY_01_B1_OFFSET + + j * 12 + + i] + - local_values[start_col + + MULTIPLY_BY_014_T4_CALC_OFFSET + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + } + } + add_multiply_by_01_constraints( + local_values, + next_values, + yield_constr, + start_col + MULTIPLY_BY_014_T5_CALC_OFFSET, + bit_selector, + ); + + // T6 + for j in 0..3 { + let (mut addition_offset, mut subtraction_offset, input_offset) = if j == 0 { + ( + FP6_ADDITION_0_OFFSET, + FP6_SUBTRACTION_0_OFFSET, + MULTIPLY_BY_01_X_CALC_OFFSET + FP2_ADDITION_TOTAL, + ) + } else if j == 1 { + ( + FP6_ADDITION_1_OFFSET, + FP6_SUBTRACTION_1_OFFSET, + MULTIPLY_BY_01_Y_CALC_OFFSET + FP2_ADDITION_TOTAL + FP2_SUBTRACTION_TOTAL, + ) + } else { + ( + FP6_ADDITION_2_OFFSET, + FP6_SUBTRACTION_2_OFFSET, + MULTIPLY_BY_01_Z_CALC_OFFSET + FP2_ADDITION_TOTAL, + ) + }; + for k in 0..2 { + addition_offset += if k == 0 { + FP2_ADDITION_0_OFFSET + } else { + FP2_ADDITION_1_OFFSET + }; + subtraction_offset += if k == 0 { + FP2_SUBTRACTION_0_OFFSET + } else { + FP2_SUBTRACTION_1_OFFSET + }; + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + MULTIPLY_BY_014_T6_CALC_OFFSET + + addition_offset + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + 
MULTIPLY_BY_014_T6_CALC_OFFSET + + addition_offset + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + + MULTIPLY_BY_014_T5_CALC_OFFSET + + input_offset + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * k + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + MULTIPLY_BY_014_T6_CALC_OFFSET + + FP6_ADDITION_TOTAL + + subtraction_offset + + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_014_T6_CALC_OFFSET + + FP6_ADDITION_TOTAL + + subtraction_offset + + FP_SUBTRACTION_Y_OFFSET + + i] + - local_values[start_col + + MULTIPLY_BY_014_T0_CALC_OFFSET + + input_offset + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * k + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + } + } + } + add_subtraction_with_reduction_constranints_fp6( + local_values, + yield_constr, + start_col + MULTIPLY_BY_014_T6_CALC_OFFSET, + bit_selector, + ); + + // Y + for j in 0..6 { + let (addition_offset, subtraction_offset, input_offset) = if j == 0 { + ( + FP6_ADDITION_0_OFFSET + FP2_ADDITION_0_OFFSET, + FP6_SUBTRACTION_0_OFFSET + FP2_SUBTRACTION_0_OFFSET, + MULTIPLY_BY_1_X_CALC_OFFSET + + FP2_NON_RESIDUE_MUL_Z0_REDUCE_OFFSET + + FP_SINGLE_REDUCED_OFFSET, + ) + } else if j == 1 { + ( + FP6_ADDITION_0_OFFSET + FP2_ADDITION_1_OFFSET, + FP6_SUBTRACTION_0_OFFSET + FP2_SUBTRACTION_1_OFFSET, + MULTIPLY_BY_1_X_CALC_OFFSET + + FP2_NON_RESIDUE_MUL_Z1_REDUCE_OFFSET + + FP_SINGLE_REDUCED_OFFSET, + ) + } else if j == 2 { + ( + FP6_ADDITION_1_OFFSET + FP2_ADDITION_0_OFFSET, + FP6_SUBTRACTION_1_OFFSET + FP2_SUBTRACTION_0_OFFSET, + MULTIPLY_BY_1_Y_CALC_OFFSET + Z1_REDUCE_OFFSET + REDUCED_OFFSET, + ) + } else if j == 3 { + ( + FP6_ADDITION_1_OFFSET + FP2_ADDITION_1_OFFSET, + FP6_SUBTRACTION_1_OFFSET + FP2_SUBTRACTION_1_OFFSET, + MULTIPLY_BY_1_Y_CALC_OFFSET + Z2_REDUCE_OFFSET + REDUCED_OFFSET, + ) + } else if j == 4 { + ( + FP6_ADDITION_2_OFFSET + FP2_ADDITION_0_OFFSET, + FP6_SUBTRACTION_2_OFFSET + 
FP2_SUBTRACTION_0_OFFSET, + MULTIPLY_BY_1_Z_CALC_OFFSET + Z1_REDUCE_OFFSET + REDUCED_OFFSET, + ) + } else { + ( + FP6_ADDITION_2_OFFSET + FP2_ADDITION_1_OFFSET, + FP6_SUBTRACTION_2_OFFSET + FP2_SUBTRACTION_1_OFFSET, + MULTIPLY_BY_1_Z_CALC_OFFSET + Z2_REDUCE_OFFSET + REDUCED_OFFSET, + ) + }; + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + MULTIPLY_BY_014_Y_CALC_OFFSET + + addition_offset + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_014_Y_CALC_OFFSET + + addition_offset + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + + MULTIPLY_BY_014_T6_CALC_OFFSET + + FP6_ADDITION_TOTAL + + FP6_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + MULTIPLY_BY_014_Y_CALC_OFFSET + + FP6_ADDITION_TOTAL + + subtraction_offset + + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_014_Y_CALC_OFFSET + + FP6_ADDITION_TOTAL + + subtraction_offset + + FP_SUBTRACTION_Y_OFFSET + + i] + - local_values + [start_col + MULTIPLY_BY_014_T1_CALC_OFFSET + input_offset + i]), + ); + } + } + add_subtraction_with_reduction_constranints_fp6( + local_values, + yield_constr, + start_col + MULTIPLY_BY_014_Y_CALC_OFFSET, + bit_selector, + ); +} + +pub fn add_multiply_by_014_constraints_ext_circuit, const D: usize>( + builder: &mut CircuitBuilder, + yield_constr: &mut RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + next_values: &[ExtensionTarget], + start_col: usize, + bit_selector: Option>, +) { + let bit_selector_val = bit_selector.unwrap_or(builder.constant_extension(F::Extension::ONE)); + + for i in 0..12 { + let mul_tmp = local_values[start_col + MULTIPLY_BY_014_SELECTOR_OFFSET]; + for j in 0..12 { + let sub_tmp = builder.sub_extension( + local_values[start_col + MULTIPLY_BY_014_INPUT_OFFSET + j * 12 + i], + next_values[start_col + 
MULTIPLY_BY_014_INPUT_OFFSET + j * 12 + i], + ); + let c = builder.mul_extension(sub_tmp, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c); + yield_constr.constraint_transition(builder, c); + } + for j in 0..2 { + let sub_tmp1 = builder.sub_extension( + local_values[start_col + MULTIPLY_BY_014_O0_OFFSET + j * 12 + i], + next_values[start_col + MULTIPLY_BY_014_O0_OFFSET + j * 12 + i], + ); + let c1 = builder.mul_extension(sub_tmp1, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint_transition(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + MULTIPLY_BY_014_O1_OFFSET + j * 12 + i], + next_values[start_col + MULTIPLY_BY_014_O1_OFFSET + j * 12 + i], + ); + let c2 = builder.mul_extension(sub_tmp2, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint_transition(builder, c); + + let sub_tmp3 = builder.sub_extension( + local_values[start_col + MULTIPLY_BY_014_O4_OFFSET + j * 12 + i], + next_values[start_col + MULTIPLY_BY_014_O4_OFFSET + j * 12 + i], + ); + let c3 = builder.mul_extension(sub_tmp3, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c3); + yield_constr.constraint_transition(builder, c); + } + } + + // T0 + for i in 0..12 { + let mul_tmp = local_values + [start_col + MULTIPLY_BY_014_T0_CALC_OFFSET + MULTIPLY_BY_01_SELECTOR_OFFSET]; + for j in 0..6 { + let sub_tmp = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_014_T0_CALC_OFFSET + + MULTIPLY_BY_01_INPUT_OFFSET + + j * 12 + + i], + local_values[start_col + MULTIPLY_BY_014_INPUT_OFFSET + j * 12 + i], + ); + let c = builder.mul_extension(sub_tmp, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c); + yield_constr.constraint(builder, c); + } + for j in 0..2 { + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_014_T0_CALC_OFFSET + + MULTIPLY_BY_01_B0_OFFSET + + j * 12 + + i], + local_values[start_col + 
MULTIPLY_BY_014_O0_OFFSET + j * 12 + i], + ); + let c1 = builder.mul_extension(sub_tmp1, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_014_T0_CALC_OFFSET + + MULTIPLY_BY_01_B1_OFFSET + + j * 12 + + i], + local_values[start_col + MULTIPLY_BY_014_O1_OFFSET + j * 12 + i], + ); + let c2 = builder.mul_extension(sub_tmp2, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + } + } + add_multiply_by_01_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + MULTIPLY_BY_014_T0_CALC_OFFSET, + bit_selector, + ); + + // T1 + for i in 0..12 { + let mul_tmp = local_values + [start_col + MULTIPLY_BY_014_T1_CALC_OFFSET + MULTIPLY_BY_1_SELECTOR_OFFSET]; + for j in 0..6 { + let sub_tmp = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_014_T1_CALC_OFFSET + + MULTIPLY_BY_1_INPUT_OFFSET + + j * 12 + + i], + local_values[start_col + MULTIPLY_BY_014_INPUT_OFFSET + j * 12 + i + 24 * 3], + ); + let c = builder.mul_extension(sub_tmp, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c); + yield_constr.constraint(builder, c); + } + for j in 0..2 { + let sub_tmp = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_014_T1_CALC_OFFSET + + MULTIPLY_BY_1_B1_OFFSET + + j * 12 + + i], + local_values[start_col + MULTIPLY_BY_014_O4_OFFSET + j * 12 + i], + ); + let c = builder.mul_extension(sub_tmp, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c); + yield_constr.constraint(builder, c); + } + } + add_multiply_by_1_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + MULTIPLY_BY_014_T1_CALC_OFFSET, + bit_selector, + ); + + // T2 + for j in 0..2 { + let (x_offset, yz_offset) = if j == 0 { + (FP2_NON_RESIDUE_MUL_Z0_REDUCE_OFFSET, Z1_REDUCE_OFFSET) + } else { + 
(FP2_NON_RESIDUE_MUL_Z1_REDUCE_OFFSET, Z2_REDUCE_OFFSET) + }; + for i in 0..12 { + let mul_tmp = local_values + [start_col + MULTIPLY_BY_014_T2_CALC_OFFSET + FP6_NON_RESIDUE_MUL_CHECK_OFFSET]; + + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_014_T2_CALC_OFFSET + + FP6_NON_RESIDUE_MUL_INPUT_OFFSET + + i + + j * 12], + local_values[start_col + + MULTIPLY_BY_014_T1_CALC_OFFSET + + MULTIPLY_BY_1_X_CALC_OFFSET + + x_offset + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c1 = builder.mul_extension(sub_tmp1, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_014_T2_CALC_OFFSET + + FP6_NON_RESIDUE_MUL_INPUT_OFFSET + + i + + j * 12 + + 24], + local_values[start_col + + MULTIPLY_BY_014_T1_CALC_OFFSET + + MULTIPLY_BY_1_Y_CALC_OFFSET + + yz_offset + + REDUCED_OFFSET + + i], + ); + let c2 = builder.mul_extension(sub_tmp2, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + + let sub_tmp3 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_014_T2_CALC_OFFSET + + FP6_NON_RESIDUE_MUL_INPUT_OFFSET + + i + + j * 12 + + 48], + local_values[start_col + + MULTIPLY_BY_014_T1_CALC_OFFSET + + MULTIPLY_BY_1_Z_CALC_OFFSET + + yz_offset + + REDUCED_OFFSET + + i], + ); + let c3 = builder.mul_extension(sub_tmp3, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c3); + yield_constr.constraint(builder, c); + } + } + add_non_residue_multiplication_fp6_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + MULTIPLY_BY_014_T2_CALC_OFFSET, + bit_selector, + ); + + // X + for j in 0..6 { + let (addition_offset, x_offset, y_offset) = if j == 0 { + ( + FP6_ADDITION_0_OFFSET + FP2_ADDITION_0_OFFSET, + FP6_NON_RESIDUE_MUL_C2 + + FP2_NON_RESIDUE_MUL_Z0_REDUCE_OFFSET + + FP_SINGLE_REDUCED_OFFSET, + MULTIPLY_BY_01_X_CALC_OFFSET + 
FP2_ADDITION_TOTAL, + ) + } else if j == 1 { + ( + FP6_ADDITION_0_OFFSET + FP2_ADDITION_1_OFFSET, + FP6_NON_RESIDUE_MUL_C2 + + FP2_NON_RESIDUE_MUL_Z1_REDUCE_OFFSET + + FP_SINGLE_REDUCED_OFFSET, + MULTIPLY_BY_01_X_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP_SINGLE_REDUCE_TOTAL + + RANGE_CHECK_TOTAL, + ) + } else if j == 2 { + ( + FP6_ADDITION_1_OFFSET + FP2_ADDITION_0_OFFSET, + FP6_NON_RESIDUE_MUL_INPUT_OFFSET, + MULTIPLY_BY_01_Y_CALC_OFFSET + FP2_ADDITION_TOTAL + FP2_SUBTRACTION_TOTAL, + ) + } else if j == 3 { + ( + FP6_ADDITION_1_OFFSET + FP2_ADDITION_1_OFFSET, + FP6_NON_RESIDUE_MUL_INPUT_OFFSET + 12, + MULTIPLY_BY_01_Y_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + FP_SINGLE_REDUCE_TOTAL + + RANGE_CHECK_TOTAL, + ) + } else if j == 4 { + ( + FP6_ADDITION_2_OFFSET + FP2_ADDITION_0_OFFSET, + FP6_NON_RESIDUE_MUL_INPUT_OFFSET + 24, + MULTIPLY_BY_01_Z_CALC_OFFSET + FP2_ADDITION_TOTAL, + ) + } else { + ( + FP6_ADDITION_2_OFFSET + FP2_ADDITION_1_OFFSET, + FP6_NON_RESIDUE_MUL_INPUT_OFFSET + 36, + MULTIPLY_BY_01_Z_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP_SINGLE_REDUCE_TOTAL + + RANGE_CHECK_TOTAL, + ) + }; + for i in 0..12 { + let mul_tmp = local_values[start_col + + MULTIPLY_BY_014_X_CALC_OFFSET + + addition_offset + + FP_ADDITION_CHECK_OFFSET]; + + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_014_X_CALC_OFFSET + + addition_offset + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + MULTIPLY_BY_014_T2_CALC_OFFSET + x_offset + i], + ); + let c1 = builder.mul_extension(sub_tmp1, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_014_X_CALC_OFFSET + + addition_offset + + FP_ADDITION_Y_OFFSET + + i], + local_values[start_col + + MULTIPLY_BY_014_T0_CALC_OFFSET + + y_offset + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c2 = builder.mul_extension(sub_tmp2, mul_tmp); + let c = 
builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + } + } + add_addition_with_reduction_constraints_fp6_ext_circuit( + builder, + yield_constr, + local_values, + start_col + MULTIPLY_BY_014_X_CALC_OFFSET, + bit_selector, + ); + + // T3 + for j in 0..3 { + let mut addition_offset = if j == 0 { + FP6_ADDITION_0_OFFSET + } else if j == 1 { + FP6_ADDITION_1_OFFSET + } else { + FP6_ADDITION_2_OFFSET + }; + for k in 0..2 { + addition_offset += if k == 0 { + FP2_ADDITION_0_OFFSET + } else { + FP2_ADDITION_1_OFFSET + }; + for i in 0..12 { + let mul_tmp = local_values[start_col + + MULTIPLY_BY_014_T3_CALC_OFFSET + + addition_offset + + FP_ADDITION_CHECK_OFFSET]; + + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_014_T3_CALC_OFFSET + + addition_offset + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + MULTIPLY_BY_014_INPUT_OFFSET + j * 24 + k * 12 + i], + ); + let c1 = builder.mul_extension(sub_tmp1, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_014_T3_CALC_OFFSET + + addition_offset + + FP_ADDITION_Y_OFFSET + + i], + local_values + [start_col + MULTIPLY_BY_014_INPUT_OFFSET + j * 24 + k * 12 + i + 24 * 3], + ); + let c2 = builder.mul_extension(sub_tmp2, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + } + } + } + add_addition_with_reduction_constraints_fp6_ext_circuit( + builder, + yield_constr, + local_values, + start_col + MULTIPLY_BY_014_T3_CALC_OFFSET, + bit_selector, + ); + + // T4 + for j in 0..2 { + let addition_offset = if j == 0 { + FP2_ADDITION_0_OFFSET + } else { + FP2_ADDITION_1_OFFSET + }; + for i in 0..12 { + let mul_tmp = local_values[start_col + + MULTIPLY_BY_014_T4_CALC_OFFSET + + addition_offset + + FP_ADDITION_CHECK_OFFSET]; + + let sub_tmp1 = builder.sub_extension( + 
local_values[start_col + + MULTIPLY_BY_014_T4_CALC_OFFSET + + addition_offset + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + MULTIPLY_BY_014_O1_OFFSET + j * 12 + i], + ); + let c1 = builder.mul_extension(sub_tmp1, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_014_T4_CALC_OFFSET + + addition_offset + + FP_ADDITION_Y_OFFSET + + i], + local_values[start_col + MULTIPLY_BY_014_O4_OFFSET + j * 12 + i], + ); + let c2 = builder.mul_extension(sub_tmp2, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + } + } + add_addition_with_reduction_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + MULTIPLY_BY_014_T4_CALC_OFFSET, + bit_selector, + ); + + // T5 + for i in 0..12 { + let mul_tmp = local_values + [start_col + MULTIPLY_BY_014_T5_CALC_OFFSET + MULTIPLY_BY_01_SELECTOR_OFFSET]; + for j in 0..6 { + let sub_tmp = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_014_T5_CALC_OFFSET + + MULTIPLY_BY_01_INPUT_OFFSET + + j * 12 + + i], + local_values[start_col + + MULTIPLY_BY_014_T3_CALC_OFFSET + + FP6_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c = builder.mul_extension(sub_tmp, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c); + yield_constr.constraint(builder, c); + } + for j in 0..2 { + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_014_T5_CALC_OFFSET + + MULTIPLY_BY_01_B0_OFFSET + + j * 12 + + i], + local_values[start_col + MULTIPLY_BY_014_O0_OFFSET + j * 12 + i], + ); + let c1 = builder.mul_extension(sub_tmp1, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_014_T5_CALC_OFFSET 
+ + MULTIPLY_BY_01_B1_OFFSET + + j * 12 + + i], + local_values[start_col + + MULTIPLY_BY_014_T4_CALC_OFFSET + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c2 = builder.mul_extension(sub_tmp2, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + } + } + add_multiply_by_01_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + MULTIPLY_BY_014_T5_CALC_OFFSET, + bit_selector, + ); + + // T6 + for j in 0..3 { + let (mut addition_offset, mut subtraction_offset, input_offset) = if j == 0 { + ( + FP6_ADDITION_0_OFFSET, + FP6_SUBTRACTION_0_OFFSET, + MULTIPLY_BY_01_X_CALC_OFFSET + FP2_ADDITION_TOTAL, + ) + } else if j == 1 { + ( + FP6_ADDITION_1_OFFSET, + FP6_SUBTRACTION_1_OFFSET, + MULTIPLY_BY_01_Y_CALC_OFFSET + FP2_ADDITION_TOTAL + FP2_SUBTRACTION_TOTAL, + ) + } else { + ( + FP6_ADDITION_2_OFFSET, + FP6_SUBTRACTION_2_OFFSET, + MULTIPLY_BY_01_Z_CALC_OFFSET + FP2_ADDITION_TOTAL, + ) + }; + for k in 0..2 { + addition_offset += if k == 0 { + FP2_ADDITION_0_OFFSET + } else { + FP2_ADDITION_1_OFFSET + }; + subtraction_offset += if k == 0 { + FP2_SUBTRACTION_0_OFFSET + } else { + FP2_SUBTRACTION_1_OFFSET + }; + for i in 0..12 { + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_014_T6_CALC_OFFSET + + addition_offset + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + + MULTIPLY_BY_014_T5_CALC_OFFSET + + input_offset + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * k + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c1 = builder.mul_extension( + sub_tmp1, + local_values[start_col + + MULTIPLY_BY_014_T6_CALC_OFFSET + + addition_offset + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_014_T6_CALC_OFFSET + + 
FP6_ADDITION_TOTAL + + subtraction_offset + + FP_SUBTRACTION_Y_OFFSET + + i], + local_values[start_col + + MULTIPLY_BY_014_T0_CALC_OFFSET + + input_offset + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * k + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c2 = builder.mul_extension( + sub_tmp2, + local_values[start_col + + MULTIPLY_BY_014_T6_CALC_OFFSET + + FP6_ADDITION_TOTAL + + subtraction_offset + + FP_SUBTRACTION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + } + } + } + add_subtraction_with_reduction_constraints_fp6_ext_circuit( + builder, + yield_constr, + local_values, + start_col + MULTIPLY_BY_014_T6_CALC_OFFSET, + bit_selector, + ); + + // Y + for j in 0..6 { + let (addition_offset, subtraction_offset, input_offset) = if j == 0 { + ( + FP6_ADDITION_0_OFFSET + FP2_ADDITION_0_OFFSET, + FP6_SUBTRACTION_0_OFFSET + FP2_SUBTRACTION_0_OFFSET, + MULTIPLY_BY_1_X_CALC_OFFSET + + FP2_NON_RESIDUE_MUL_Z0_REDUCE_OFFSET + + FP_SINGLE_REDUCED_OFFSET, + ) + } else if j == 1 { + ( + FP6_ADDITION_0_OFFSET + FP2_ADDITION_1_OFFSET, + FP6_SUBTRACTION_0_OFFSET + FP2_SUBTRACTION_1_OFFSET, + MULTIPLY_BY_1_X_CALC_OFFSET + + FP2_NON_RESIDUE_MUL_Z1_REDUCE_OFFSET + + FP_SINGLE_REDUCED_OFFSET, + ) + } else if j == 2 { + ( + FP6_ADDITION_1_OFFSET + FP2_ADDITION_0_OFFSET, + FP6_SUBTRACTION_1_OFFSET + FP2_SUBTRACTION_0_OFFSET, + MULTIPLY_BY_1_Y_CALC_OFFSET + Z1_REDUCE_OFFSET + REDUCED_OFFSET, + ) + } else if j == 3 { + ( + FP6_ADDITION_1_OFFSET + FP2_ADDITION_1_OFFSET, + FP6_SUBTRACTION_1_OFFSET + FP2_SUBTRACTION_1_OFFSET, + MULTIPLY_BY_1_Y_CALC_OFFSET + Z2_REDUCE_OFFSET + REDUCED_OFFSET, + ) + } else if j == 4 { + ( + FP6_ADDITION_2_OFFSET + FP2_ADDITION_0_OFFSET, + FP6_SUBTRACTION_2_OFFSET + FP2_SUBTRACTION_0_OFFSET, + MULTIPLY_BY_1_Z_CALC_OFFSET + Z1_REDUCE_OFFSET + REDUCED_OFFSET, + ) + } else { + ( + FP6_ADDITION_2_OFFSET + FP2_ADDITION_1_OFFSET, + FP6_SUBTRACTION_2_OFFSET + FP2_SUBTRACTION_1_OFFSET, + 
MULTIPLY_BY_1_Z_CALC_OFFSET + Z2_REDUCE_OFFSET + REDUCED_OFFSET, + ) + }; + for i in 0..12 { + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_014_Y_CALC_OFFSET + + addition_offset + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + + MULTIPLY_BY_014_T6_CALC_OFFSET + + FP6_ADDITION_TOTAL + + FP6_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c1 = builder.mul_extension( + sub_tmp1, + local_values[start_col + + MULTIPLY_BY_014_Y_CALC_OFFSET + + addition_offset + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_014_Y_CALC_OFFSET + + FP6_ADDITION_TOTAL + + subtraction_offset + + FP_SUBTRACTION_Y_OFFSET + + i], + local_values[start_col + MULTIPLY_BY_014_T1_CALC_OFFSET + input_offset + i], + ); + let c2 = builder.mul_extension( + sub_tmp2, + local_values[start_col + + MULTIPLY_BY_014_Y_CALC_OFFSET + + FP6_ADDITION_TOTAL + + subtraction_offset + + FP_SUBTRACTION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + } + } + add_subtraction_with_reduction_constraints_fp6_ext_circuit( + builder, + yield_constr, + local_values, + start_col + MULTIPLY_BY_014_Y_CALC_OFFSET, + bit_selector, + ); +} + +/// Constraints fp12 multiplication. +/// +/// Constraints inputs across this and next row, wherever selector is set to on. Constraints all the Ti's (defined in the [function](super::native::mul_fp_12)) accordinng to their respective operations. +pub fn add_fp12_multiplication_constraints< + F: RichField + Extendable, + const D: usize, + FE, + P, + const D2: usize, +>( + local_values: &[P], + next_values: &[P], + yield_constr: &mut ConstraintConsumer

, + start_col: usize, + bit_selector: Option

, +) where + FE: FieldExtension, + P: PackedField, +{ + let bit_selector_val = bit_selector.unwrap_or(P::ONES); + + for i in 0..24 * 3 * 2 { + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + FP12_MUL_SELECTOR_OFFSET] + * (local_values[start_col + FP12_MUL_X_INPUT_OFFSET + i] + - next_values[start_col + FP12_MUL_X_INPUT_OFFSET + i]), + ); + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + FP12_MUL_SELECTOR_OFFSET] + * (local_values[start_col + FP12_MUL_Y_INPUT_OFFSET + i] + - next_values[start_col + FP12_MUL_Y_INPUT_OFFSET + i]), + ); + } + + // T0 + for i in 0..24 * 3 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP12_MUL_T0_CALC_OFFSET + FP6_MUL_SELECTOR_OFFSET] + * (local_values[start_col + FP12_MUL_T0_CALC_OFFSET + FP6_MUL_X_INPUT_OFFSET + i] + - local_values[start_col + FP12_MUL_X_INPUT_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP12_MUL_T0_CALC_OFFSET + FP6_MUL_SELECTOR_OFFSET] + * (local_values[start_col + FP12_MUL_T0_CALC_OFFSET + FP6_MUL_Y_INPUT_OFFSET + i] + - local_values[start_col + FP12_MUL_Y_INPUT_OFFSET + i]), + ); + } + add_fp6_multiplication_constraints( + local_values, + next_values, + yield_constr, + start_col + FP12_MUL_T0_CALC_OFFSET, + bit_selector, + ); + + // T1 + for i in 0..24 * 3 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP12_MUL_T1_CALC_OFFSET + FP6_MUL_SELECTOR_OFFSET] + * (local_values[start_col + FP12_MUL_T1_CALC_OFFSET + FP6_MUL_X_INPUT_OFFSET + i] + - local_values[start_col + FP12_MUL_X_INPUT_OFFSET + i + 24 * 3]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP12_MUL_T1_CALC_OFFSET + FP6_MUL_SELECTOR_OFFSET] + * (local_values[start_col + FP12_MUL_T1_CALC_OFFSET + FP6_MUL_Y_INPUT_OFFSET + i] + - local_values[start_col + FP12_MUL_Y_INPUT_OFFSET + i + 24 * 3]), + ); + } + add_fp6_multiplication_constraints( + 
local_values, + next_values, + yield_constr, + start_col + FP12_MUL_T1_CALC_OFFSET, + bit_selector, + ); + + // T2 + for i in 0..6 { + let fp2_offset = if i < 2 { + FP6_MUL_X_CALC_OFFSET + } else if i < 4 { + FP6_MUL_Y_CALC_OFFSET + } else { + FP6_MUL_Z_CALC_OFFSET + }; + let fp_offset = i % 2; + for j in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values + [start_col + FP12_MUL_T2_CALC_OFFSET + FP6_NON_RESIDUE_MUL_CHECK_OFFSET] + * (local_values[start_col + + FP12_MUL_T2_CALC_OFFSET + + FP6_NON_RESIDUE_MUL_INPUT_OFFSET + + i * 12 + + j] + - local_values[start_col + + FP12_MUL_T1_CALC_OFFSET + + fp2_offset + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * fp_offset + + FP_SINGLE_REDUCED_OFFSET + + j]), + ); + } + } + add_non_residue_multiplication_fp6_constraints( + local_values, + yield_constr, + start_col + FP12_MUL_T2_CALC_OFFSET, + bit_selector, + ); + + // X + for i in 0..6 { + let (fp2_offset_l, fp2_offset_r) = if i < 2 { + (FP6_ADDITION_0_OFFSET, FP6_MUL_X_CALC_OFFSET) + } else if i < 4 { + (FP6_ADDITION_1_OFFSET, FP6_MUL_Y_CALC_OFFSET) + } else { + (FP6_ADDITION_2_OFFSET, FP6_MUL_Z_CALC_OFFSET) + }; + let (fp_offset, num_redn) = if i % 2 == 0 { + (FP2_ADDITION_0_OFFSET, 0) + } else { + (FP2_ADDITION_1_OFFSET, 1) + }; + for j in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP12_MUL_X_CALC_OFFSET + + fp2_offset_l + + fp_offset + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP12_MUL_X_CALC_OFFSET + + fp2_offset_l + + fp_offset + + FP_ADDITION_X_OFFSET + + j] + - local_values[start_col + + FP12_MUL_T0_CALC_OFFSET + + fp2_offset_r + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * num_redn + + FP_SINGLE_REDUCED_OFFSET + + j]), + ); + if i < 2 { + let y_offset = if i == 0 { + FP2_NON_RESIDUE_MUL_Z0_REDUCE_OFFSET + } else { + FP2_NON_RESIDUE_MUL_Z1_REDUCE_OFFSET + }; + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + 
FP12_MUL_X_CALC_OFFSET + + fp2_offset_l + + fp_offset + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP12_MUL_X_CALC_OFFSET + + fp2_offset_l + + fp_offset + + FP_ADDITION_Y_OFFSET + + j] + - local_values[start_col + + FP12_MUL_T2_CALC_OFFSET + + FP6_NON_RESIDUE_MUL_C2 + + y_offset + + FP_SINGLE_REDUCED_OFFSET + + j]), + ) + } else { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP12_MUL_X_CALC_OFFSET + + fp2_offset_l + + fp_offset + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP12_MUL_X_CALC_OFFSET + + fp2_offset_l + + fp_offset + + FP_ADDITION_Y_OFFSET + + j] + - local_values[start_col + + FP12_MUL_T2_CALC_OFFSET + + FP6_NON_RESIDUE_MUL_INPUT_OFFSET + + (i - 2) * 12 + + j]), + ); + } + } + } + add_addition_with_reduction_constranints_fp6( + local_values, + yield_constr, + start_col + FP12_MUL_X_CALC_OFFSET, + bit_selector, + ); + + // T3 + for i in 0..6 { + let fp2_offset = if i < 2 { + FP6_ADDITION_0_OFFSET + } else if i < 4 { + FP6_ADDITION_1_OFFSET + } else { + FP6_ADDITION_2_OFFSET + }; + let fp_offset = if i % 2 == 0 { + FP2_ADDITION_0_OFFSET + } else { + FP2_ADDITION_1_OFFSET + }; + for j in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP12_MUL_T3_CALC_OFFSET + + fp2_offset + + fp_offset + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP12_MUL_T3_CALC_OFFSET + + fp2_offset + + fp_offset + + FP_ADDITION_X_OFFSET + + j] + - local_values[start_col + FP12_MUL_X_INPUT_OFFSET + i * 12 + j]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP12_MUL_T3_CALC_OFFSET + + fp2_offset + + fp_offset + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP12_MUL_T3_CALC_OFFSET + + fp2_offset + + fp_offset + + FP_ADDITION_Y_OFFSET + + j] + - local_values[start_col + FP12_MUL_X_INPUT_OFFSET + i * 12 + j + 24 * 3]), + ); + } + } + add_addition_with_reduction_constranints_fp6( + local_values, + yield_constr, + start_col + 
FP12_MUL_T3_CALC_OFFSET, + bit_selector, + ); + + // T4 + for i in 0..6 { + let fp2_offset = if i < 2 { + FP6_ADDITION_0_OFFSET + } else if i < 4 { + FP6_ADDITION_1_OFFSET + } else { + FP6_ADDITION_2_OFFSET + }; + let fp_offset = if i % 2 == 0 { + FP2_ADDITION_0_OFFSET + } else { + FP2_ADDITION_1_OFFSET + }; + for j in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP12_MUL_T4_CALC_OFFSET + + fp2_offset + + fp_offset + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP12_MUL_T4_CALC_OFFSET + + fp2_offset + + fp_offset + + FP_ADDITION_X_OFFSET + + j] + - local_values[start_col + FP12_MUL_Y_INPUT_OFFSET + i * 12 + j]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP12_MUL_T4_CALC_OFFSET + + fp2_offset + + fp_offset + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP12_MUL_T4_CALC_OFFSET + + fp2_offset + + fp_offset + + FP_ADDITION_Y_OFFSET + + j] + - local_values[start_col + FP12_MUL_Y_INPUT_OFFSET + i * 12 + j + 24 * 3]), + ); + } + } + add_addition_with_reduction_constranints_fp6( + local_values, + yield_constr, + start_col + FP12_MUL_T4_CALC_OFFSET, + bit_selector, + ); + + // T5 + for i in 0..6 { + for j in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP12_MUL_T5_CALC_OFFSET + FP6_MUL_SELECTOR_OFFSET] + * (local_values[start_col + + FP12_MUL_T5_CALC_OFFSET + + FP6_MUL_X_INPUT_OFFSET + + i * 12 + + j] + - local_values[start_col + + FP12_MUL_T3_CALC_OFFSET + + FP6_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * i + + FP_SINGLE_REDUCED_OFFSET + + j]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP12_MUL_T5_CALC_OFFSET + FP6_MUL_SELECTOR_OFFSET] + * (local_values[start_col + + FP12_MUL_T5_CALC_OFFSET + + FP6_MUL_Y_INPUT_OFFSET + + i * 12 + + j] + - local_values[start_col + + FP12_MUL_T4_CALC_OFFSET + + FP6_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * i + + 
FP_SINGLE_REDUCED_OFFSET + + j]), + ); + } + } + add_fp6_multiplication_constraints( + local_values, + next_values, + yield_constr, + start_col + FP12_MUL_T5_CALC_OFFSET, + bit_selector, + ); + + // T6 + for i in 0..6 { + let (fp2_offset_lx, fp2_offset_ly, fp2_offset_r) = if i < 2 { + ( + FP6_ADDITION_0_OFFSET, + FP6_SUBTRACTION_0_OFFSET, + FP6_MUL_X_CALC_OFFSET, + ) + } else if i < 4 { + ( + FP6_ADDITION_1_OFFSET, + FP6_SUBTRACTION_1_OFFSET, + FP6_MUL_Y_CALC_OFFSET, + ) + } else { + ( + FP6_ADDITION_2_OFFSET, + FP6_SUBTRACTION_2_OFFSET, + FP6_MUL_Z_CALC_OFFSET, + ) + }; + let (fp_offset_x, fp_offset_y, num_redn) = if i % 2 == 0 { + (FP2_ADDITION_0_OFFSET, FP2_SUBTRACTION_0_OFFSET, 0) + } else { + (FP2_ADDITION_1_OFFSET, FP2_SUBTRACTION_1_OFFSET, 1) + }; + for j in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP12_MUL_T6_CALC_OFFSET + + fp2_offset_lx + + fp_offset_x + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP12_MUL_T6_CALC_OFFSET + + fp2_offset_lx + + fp_offset_x + + FP_ADDITION_X_OFFSET + + j] + - local_values[start_col + + FP12_MUL_T5_CALC_OFFSET + + fp2_offset_r + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * num_redn + + FP_SINGLE_REDUCED_OFFSET + + j]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP12_MUL_T6_CALC_OFFSET + + FP6_ADDITION_TOTAL + + fp2_offset_ly + + fp_offset_y + + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + + FP12_MUL_T6_CALC_OFFSET + + FP6_ADDITION_TOTAL + + fp2_offset_ly + + fp_offset_y + + FP_SUBTRACTION_Y_OFFSET + + j] + - local_values[start_col + + FP12_MUL_T0_CALC_OFFSET + + fp2_offset_r + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * num_redn + + FP_SINGLE_REDUCED_OFFSET + + j]), + ); + } + } + add_subtraction_with_reduction_constranints_fp6( + local_values, + yield_constr, + start_col + FP12_MUL_T6_CALC_OFFSET, + bit_selector, + ); + + // Y + for i in 0..6 { + let 
(fp2_offset_lx, fp2_offset_ly, fp2_offset_r) = if i < 2 { + ( + FP6_ADDITION_0_OFFSET, + FP6_SUBTRACTION_0_OFFSET, + FP6_MUL_X_CALC_OFFSET, + ) + } else if i < 4 { + ( + FP6_ADDITION_1_OFFSET, + FP6_SUBTRACTION_1_OFFSET, + FP6_MUL_Y_CALC_OFFSET, + ) + } else { + ( + FP6_ADDITION_2_OFFSET, + FP6_SUBTRACTION_2_OFFSET, + FP6_MUL_Z_CALC_OFFSET, + ) + }; + let (fp_offset_x, fp_offset_y, num_redn) = if i % 2 == 0 { + (FP2_ADDITION_0_OFFSET, FP2_SUBTRACTION_0_OFFSET, 0) + } else { + (FP2_ADDITION_1_OFFSET, FP2_SUBTRACTION_1_OFFSET, 1) + }; + for j in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP12_MUL_Y_CALC_OFFSET + + fp2_offset_lx + + fp_offset_x + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP12_MUL_Y_CALC_OFFSET + + fp2_offset_lx + + fp_offset_x + + FP_ADDITION_X_OFFSET + + j] + - local_values[start_col + + FP12_MUL_T6_CALC_OFFSET + + FP6_ADDITION_TOTAL + + FP6_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * i + + FP_SINGLE_REDUCED_OFFSET + + j]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP12_MUL_Y_CALC_OFFSET + + FP6_ADDITION_TOTAL + + fp2_offset_ly + + fp_offset_y + + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + + FP12_MUL_Y_CALC_OFFSET + + FP6_ADDITION_TOTAL + + fp2_offset_ly + + fp_offset_y + + FP_SUBTRACTION_Y_OFFSET + + j] + - local_values[start_col + + FP12_MUL_T1_CALC_OFFSET + + fp2_offset_r + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * num_redn + + FP_SINGLE_REDUCED_OFFSET + + j]), + ); + } + } + add_subtraction_with_reduction_constranints_fp6( + local_values, + yield_constr, + start_col + FP12_MUL_Y_CALC_OFFSET, + bit_selector, + ); +} + +pub fn add_fp12_multiplication_constraints_ext_circuit< + F: RichField + Extendable, + const D: usize, +>( + builder: &mut CircuitBuilder, + yield_constr: &mut RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + next_values: &[ExtensionTarget], + 
start_col: usize, + bit_selector: Option>, +) { + let bit_selector_val = bit_selector.unwrap_or(builder.constant_extension(F::Extension::ONE)); + + for i in 0..24 * 3 * 2 { + let mul_tmp = local_values[start_col + FP12_MUL_SELECTOR_OFFSET]; + + let sub_tmp1 = builder.sub_extension( + local_values[start_col + FP12_MUL_X_INPUT_OFFSET + i], + next_values[start_col + FP12_MUL_X_INPUT_OFFSET + i], + ); + let c1 = builder.mul_extension(sub_tmp1, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint_transition(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + FP12_MUL_Y_INPUT_OFFSET + i], + next_values[start_col + FP12_MUL_Y_INPUT_OFFSET + i], + ); + let c2 = builder.mul_extension(sub_tmp2, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint_transition(builder, c); + } + + // T0 + for i in 0..24 * 3 { + let mul_tmp = local_values[start_col + FP12_MUL_T0_CALC_OFFSET + FP6_MUL_SELECTOR_OFFSET]; + + let sub_tmp1 = builder.sub_extension( + local_values[start_col + FP12_MUL_T0_CALC_OFFSET + FP6_MUL_X_INPUT_OFFSET + i], + local_values[start_col + FP12_MUL_X_INPUT_OFFSET + i], + ); + let c1 = builder.mul_extension(sub_tmp1, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + FP12_MUL_T0_CALC_OFFSET + FP6_MUL_Y_INPUT_OFFSET + i], + local_values[start_col + FP12_MUL_Y_INPUT_OFFSET + i], + ); + let c2 = builder.mul_extension(sub_tmp2, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + } + add_fp6_multiplication_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + FP12_MUL_T0_CALC_OFFSET, + bit_selector, + ); + + // T1 + for i in 0..24 * 3 { + let mul_tmp = local_values[start_col + FP12_MUL_T1_CALC_OFFSET + FP6_MUL_SELECTOR_OFFSET]; + + let sub_tmp1 = 
builder.sub_extension( + local_values[start_col + FP12_MUL_T1_CALC_OFFSET + FP6_MUL_X_INPUT_OFFSET + i], + local_values[start_col + FP12_MUL_X_INPUT_OFFSET + i + 24 * 3], + ); + let c1 = builder.mul_extension(sub_tmp1, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + FP12_MUL_T1_CALC_OFFSET + FP6_MUL_Y_INPUT_OFFSET + i], + local_values[start_col + FP12_MUL_Y_INPUT_OFFSET + i + 24 * 3], + ); + let c2 = builder.mul_extension(sub_tmp2, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + } + add_fp6_multiplication_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + FP12_MUL_T1_CALC_OFFSET, + bit_selector, + ); + + // T2 + for i in 0..6 { + let fp2_offset = if i < 2 { + FP6_MUL_X_CALC_OFFSET + } else if i < 4 { + FP6_MUL_Y_CALC_OFFSET + } else { + FP6_MUL_Z_CALC_OFFSET + }; + let fp_offset = i % 2; + for j in 0..12 { + let sub_tmp = builder.sub_extension( + local_values[start_col + + FP12_MUL_T2_CALC_OFFSET + + FP6_NON_RESIDUE_MUL_INPUT_OFFSET + + i * 12 + + j], + local_values[start_col + + FP12_MUL_T1_CALC_OFFSET + + fp2_offset + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * fp_offset + + FP_SINGLE_REDUCED_OFFSET + + j], + ); + let c = builder.mul_extension( + sub_tmp, + local_values + [start_col + FP12_MUL_T2_CALC_OFFSET + FP6_NON_RESIDUE_MUL_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c); + yield_constr.constraint(builder, c); + } + } + add_non_residue_multiplication_fp6_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP12_MUL_T2_CALC_OFFSET, + bit_selector, + ); + + // X + for i in 0..6 { + let (fp2_offset_l, fp2_offset_r) = if i < 2 { + (FP6_ADDITION_0_OFFSET, FP6_MUL_X_CALC_OFFSET) + } else if i < 4 { + (FP6_ADDITION_1_OFFSET, FP6_MUL_Y_CALC_OFFSET) + } else { 
+ (FP6_ADDITION_2_OFFSET, FP6_MUL_Z_CALC_OFFSET) + }; + let (fp_offset, num_redn) = if i % 2 == 0 { + (FP2_ADDITION_0_OFFSET, 0) + } else { + (FP2_ADDITION_1_OFFSET, 1) + }; + for j in 0..12 { + let mul_tmp = local_values[start_col + + FP12_MUL_X_CALC_OFFSET + + fp2_offset_l + + fp_offset + + FP_ADDITION_CHECK_OFFSET]; + + let sub_tmp = builder.sub_extension( + local_values[start_col + + FP12_MUL_X_CALC_OFFSET + + fp2_offset_l + + fp_offset + + FP_ADDITION_X_OFFSET + + j], + local_values[start_col + + FP12_MUL_T0_CALC_OFFSET + + fp2_offset_r + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * num_redn + + FP_SINGLE_REDUCED_OFFSET + + j], + ); + let c = builder.mul_extension(sub_tmp, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c); + yield_constr.constraint(builder, c); + + if i < 2 { + let y_offset = if i == 0 { + FP2_NON_RESIDUE_MUL_Z0_REDUCE_OFFSET + } else { + FP2_NON_RESIDUE_MUL_Z1_REDUCE_OFFSET + }; + + let sub_tmp = builder.sub_extension( + local_values[start_col + + FP12_MUL_X_CALC_OFFSET + + fp2_offset_l + + fp_offset + + FP_ADDITION_Y_OFFSET + + j], + local_values[start_col + + FP12_MUL_T2_CALC_OFFSET + + FP6_NON_RESIDUE_MUL_C2 + + y_offset + + FP_SINGLE_REDUCED_OFFSET + + j], + ); + let c = builder.mul_extension(sub_tmp, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c); + yield_constr.constraint(builder, c); + } else { + let sub_tmp = builder.sub_extension( + local_values[start_col + + FP12_MUL_X_CALC_OFFSET + + fp2_offset_l + + fp_offset + + FP_ADDITION_Y_OFFSET + + j], + local_values[start_col + + FP12_MUL_T2_CALC_OFFSET + + FP6_NON_RESIDUE_MUL_INPUT_OFFSET + + (i - 2) * 12 + + j], + ); + let c = builder.mul_extension(sub_tmp, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c); + yield_constr.constraint(builder, c); + } + } + } + add_addition_with_reduction_constraints_fp6_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP12_MUL_X_CALC_OFFSET, + bit_selector, + ); + + 
// T3 + for i in 0..6 { + let fp2_offset = if i < 2 { + FP6_ADDITION_0_OFFSET + } else if i < 4 { + FP6_ADDITION_1_OFFSET + } else { + FP6_ADDITION_2_OFFSET + }; + let fp_offset = if i % 2 == 0 { + FP2_ADDITION_0_OFFSET + } else { + FP2_ADDITION_1_OFFSET + }; + for j in 0..12 { + let mul_tmp = local_values[start_col + + FP12_MUL_T3_CALC_OFFSET + + fp2_offset + + fp_offset + + FP_ADDITION_CHECK_OFFSET]; + + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + FP12_MUL_T3_CALC_OFFSET + + fp2_offset + + fp_offset + + FP_ADDITION_X_OFFSET + + j], + local_values[start_col + FP12_MUL_X_INPUT_OFFSET + i * 12 + j], + ); + let c1 = builder.mul_extension(sub_tmp1, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + FP12_MUL_T3_CALC_OFFSET + + fp2_offset + + fp_offset + + FP_ADDITION_Y_OFFSET + + j], + local_values[start_col + FP12_MUL_X_INPUT_OFFSET + i * 12 + j + 24 * 3], + ); + let c2 = builder.mul_extension(sub_tmp2, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + } + } + add_addition_with_reduction_constraints_fp6_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP12_MUL_T3_CALC_OFFSET, + bit_selector, + ); + + // T4 + for i in 0..6 { + let fp2_offset = if i < 2 { + FP6_ADDITION_0_OFFSET + } else if i < 4 { + FP6_ADDITION_1_OFFSET + } else { + FP6_ADDITION_2_OFFSET + }; + let fp_offset = if i % 2 == 0 { + FP2_ADDITION_0_OFFSET + } else { + FP2_ADDITION_1_OFFSET + }; + for j in 0..12 { + let mul_tmp = local_values[start_col + + FP12_MUL_T4_CALC_OFFSET + + fp2_offset + + fp_offset + + FP_ADDITION_CHECK_OFFSET]; + + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + FP12_MUL_T4_CALC_OFFSET + + fp2_offset + + fp_offset + + FP_ADDITION_X_OFFSET + + j], + local_values[start_col + FP12_MUL_Y_INPUT_OFFSET + i * 12 + j], + ); + let c1 = 
builder.mul_extension(sub_tmp1, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + FP12_MUL_T4_CALC_OFFSET + + fp2_offset + + fp_offset + + FP_ADDITION_Y_OFFSET + + j], + local_values[start_col + FP12_MUL_Y_INPUT_OFFSET + i * 12 + j + 24 * 3], + ); + let c2 = builder.mul_extension(sub_tmp2, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + } + } + add_addition_with_reduction_constraints_fp6_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP12_MUL_T4_CALC_OFFSET, + bit_selector, + ); + + // T5 + for i in 0..6 { + for j in 0..12 { + let mul_tmp = + local_values[start_col + FP12_MUL_T5_CALC_OFFSET + FP6_MUL_SELECTOR_OFFSET]; + + let sub_tmp1 = builder.sub_extension( + local_values + [start_col + FP12_MUL_T5_CALC_OFFSET + FP6_MUL_X_INPUT_OFFSET + i * 12 + j], + local_values[start_col + + FP12_MUL_T3_CALC_OFFSET + + FP6_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * i + + FP_SINGLE_REDUCED_OFFSET + + j], + ); + let c1 = builder.mul_extension(sub_tmp1, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values + [start_col + FP12_MUL_T5_CALC_OFFSET + FP6_MUL_Y_INPUT_OFFSET + i * 12 + j], + local_values[start_col + + FP12_MUL_T4_CALC_OFFSET + + FP6_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * i + + FP_SINGLE_REDUCED_OFFSET + + j], + ); + let c2 = builder.mul_extension(sub_tmp2, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + } + } + add_fp6_multiplication_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + FP12_MUL_T5_CALC_OFFSET, + bit_selector, + ); + + // T6 + for i in 0..6 { + let (fp2_offset_lx, fp2_offset_ly, fp2_offset_r) = if 
i < 2 { + ( + FP6_ADDITION_0_OFFSET, + FP6_SUBTRACTION_0_OFFSET, + FP6_MUL_X_CALC_OFFSET, + ) + } else if i < 4 { + ( + FP6_ADDITION_1_OFFSET, + FP6_SUBTRACTION_1_OFFSET, + FP6_MUL_Y_CALC_OFFSET, + ) + } else { + ( + FP6_ADDITION_2_OFFSET, + FP6_SUBTRACTION_2_OFFSET, + FP6_MUL_Z_CALC_OFFSET, + ) + }; + let (fp_offset_x, fp_offset_y, num_redn) = if i % 2 == 0 { + (FP2_ADDITION_0_OFFSET, FP2_SUBTRACTION_0_OFFSET, 0) + } else { + (FP2_ADDITION_1_OFFSET, FP2_SUBTRACTION_1_OFFSET, 1) + }; + for j in 0..12 { + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + FP12_MUL_T6_CALC_OFFSET + + fp2_offset_lx + + fp_offset_x + + FP_ADDITION_X_OFFSET + + j], + local_values[start_col + + FP12_MUL_T5_CALC_OFFSET + + fp2_offset_r + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * num_redn + + FP_SINGLE_REDUCED_OFFSET + + j], + ); + let c1 = builder.mul_extension( + sub_tmp1, + local_values[start_col + + FP12_MUL_T6_CALC_OFFSET + + fp2_offset_lx + + fp_offset_x + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + FP12_MUL_T6_CALC_OFFSET + + FP6_ADDITION_TOTAL + + fp2_offset_ly + + fp_offset_y + + FP_SUBTRACTION_Y_OFFSET + + j], + local_values[start_col + + FP12_MUL_T0_CALC_OFFSET + + fp2_offset_r + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * num_redn + + FP_SINGLE_REDUCED_OFFSET + + j], + ); + let c2 = builder.mul_extension( + sub_tmp2, + local_values[start_col + + FP12_MUL_T6_CALC_OFFSET + + FP6_ADDITION_TOTAL + + fp2_offset_ly + + fp_offset_y + + FP_SUBTRACTION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + } + } + add_subtraction_with_reduction_constraints_fp6_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP12_MUL_T6_CALC_OFFSET, + bit_selector, + ); + + // Y + for i in 0..6 { + 
let (fp2_offset_lx, fp2_offset_ly, fp2_offset_r) = if i < 2 { + ( + FP6_ADDITION_0_OFFSET, + FP6_SUBTRACTION_0_OFFSET, + FP6_MUL_X_CALC_OFFSET, + ) + } else if i < 4 { + ( + FP6_ADDITION_1_OFFSET, + FP6_SUBTRACTION_1_OFFSET, + FP6_MUL_Y_CALC_OFFSET, + ) + } else { + ( + FP6_ADDITION_2_OFFSET, + FP6_SUBTRACTION_2_OFFSET, + FP6_MUL_Z_CALC_OFFSET, + ) + }; + let (fp_offset_x, fp_offset_y, num_redn) = if i % 2 == 0 { + (FP2_ADDITION_0_OFFSET, FP2_SUBTRACTION_0_OFFSET, 0) + } else { + (FP2_ADDITION_1_OFFSET, FP2_SUBTRACTION_1_OFFSET, 1) + }; + for j in 0..12 { + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + FP12_MUL_Y_CALC_OFFSET + + fp2_offset_lx + + fp_offset_x + + FP_ADDITION_X_OFFSET + + j], + local_values[start_col + + FP12_MUL_T6_CALC_OFFSET + + FP6_ADDITION_TOTAL + + FP6_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * i + + FP_SINGLE_REDUCED_OFFSET + + j], + ); + let c1 = builder.mul_extension( + sub_tmp1, + local_values[start_col + + FP12_MUL_Y_CALC_OFFSET + + fp2_offset_lx + + fp_offset_x + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + FP12_MUL_Y_CALC_OFFSET + + FP6_ADDITION_TOTAL + + fp2_offset_ly + + fp_offset_y + + FP_SUBTRACTION_Y_OFFSET + + j], + local_values[start_col + + FP12_MUL_T1_CALC_OFFSET + + fp2_offset_r + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * num_redn + + FP_SINGLE_REDUCED_OFFSET + + j], + ); + let c2 = builder.mul_extension( + sub_tmp2, + local_values[start_col + + FP12_MUL_Y_CALC_OFFSET + + FP6_ADDITION_TOTAL + + fp2_offset_ly + + fp_offset_y + + FP_SUBTRACTION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + } + } + add_subtraction_with_reduction_constraints_fp6_ext_circuit( + builder, + yield_constr, + local_values, + start_col + 
FP12_MUL_Y_CALC_OFFSET, + bit_selector, + ); +} + +/// Constraints for [cyclotomicSquare](super::native::Fp12::cyclotomicSquare) function. +/// +/// Constraints inputs across this and next row, wherever selector is set to on. Constraints all the Ti's (defined in the native function) accordinng to their respective operations. +pub fn add_cyclotomic_sq_constraints< + F: RichField + Extendable, + const D: usize, + FE, + P, + const D2: usize, +>( + local_values: &[P], + next_values: &[P], + yield_constr: &mut ConstraintConsumer

, + start_col: usize, + bit_selector: Option

, +) where + FE: FieldExtension, + P: PackedField, +{ + for i in 0..24 * 3 * 2 { + yield_constr.constraint_transition( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + CYCLOTOMIC_SQ_SELECTOR_OFFSET] + * (local_values[start_col + CYCLOTOMIC_SQ_INPUT_OFFSET + i] + - next_values[start_col + CYCLOTOMIC_SQ_INPUT_OFFSET + i]), + ); + } + + for i in 0..24 { + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + CYCLOTOMIC_SQ_T0_CALC_OFFSET + FP4_SQ_SELECTOR_OFFSET] + * (local_values + [start_col + CYCLOTOMIC_SQ_T0_CALC_OFFSET + FP4_SQ_INPUT_X_OFFSET + i] + - local_values[start_col + CYCLOTOMIC_SQ_INPUT_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + CYCLOTOMIC_SQ_T0_CALC_OFFSET + FP4_SQ_SELECTOR_OFFSET] + * (local_values + [start_col + CYCLOTOMIC_SQ_T0_CALC_OFFSET + FP4_SQ_INPUT_Y_OFFSET + i] + - local_values[start_col + CYCLOTOMIC_SQ_INPUT_OFFSET + 24 * 4 + i]), + ); + } + add_fp4_sq_constraints( + local_values, + next_values, + yield_constr, + start_col + CYCLOTOMIC_SQ_T0_CALC_OFFSET, + bit_selector, + ); + + for i in 0..24 { + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + CYCLOTOMIC_SQ_T1_CALC_OFFSET + FP4_SQ_SELECTOR_OFFSET] + * (local_values + [start_col + CYCLOTOMIC_SQ_T1_CALC_OFFSET + FP4_SQ_INPUT_X_OFFSET + i] + - local_values[start_col + CYCLOTOMIC_SQ_INPUT_OFFSET + 24 * 3 + i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + CYCLOTOMIC_SQ_T1_CALC_OFFSET + FP4_SQ_SELECTOR_OFFSET] + * (local_values + [start_col + CYCLOTOMIC_SQ_T1_CALC_OFFSET + FP4_SQ_INPUT_Y_OFFSET + i] + - local_values[start_col + CYCLOTOMIC_SQ_INPUT_OFFSET + 24 * 2 + i]), + ); + } + add_fp4_sq_constraints( + local_values, + next_values, + yield_constr, + start_col + CYCLOTOMIC_SQ_T1_CALC_OFFSET, + bit_selector, + ); + + for i in 0..24 { + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + 
* local_values[start_col + CYCLOTOMIC_SQ_T2_CALC_OFFSET + FP4_SQ_SELECTOR_OFFSET] + * (local_values + [start_col + CYCLOTOMIC_SQ_T2_CALC_OFFSET + FP4_SQ_INPUT_X_OFFSET + i] + - local_values[start_col + CYCLOTOMIC_SQ_INPUT_OFFSET + 24 * 1 + i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + CYCLOTOMIC_SQ_T2_CALC_OFFSET + FP4_SQ_SELECTOR_OFFSET] + * (local_values + [start_col + CYCLOTOMIC_SQ_T2_CALC_OFFSET + FP4_SQ_INPUT_Y_OFFSET + i] + - local_values[start_col + CYCLOTOMIC_SQ_INPUT_OFFSET + 24 * 5 + i]), + ); + } + add_fp4_sq_constraints( + local_values, + next_values, + yield_constr, + start_col + CYCLOTOMIC_SQ_T2_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values + [start_col + CYCLOTOMIC_SQ_T3_CALC_OFFSET + FP2_NON_RESIDUE_MUL_CHECK_OFFSET] + * (local_values[start_col + + CYCLOTOMIC_SQ_T3_CALC_OFFSET + + FP2_NON_RESIDUE_MUL_INPUT_OFFSET + + i] + - local_values[start_col + + CYCLOTOMIC_SQ_T2_CALC_OFFSET + + FP4_SQ_Y_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values + [start_col + CYCLOTOMIC_SQ_T3_CALC_OFFSET + FP2_NON_RESIDUE_MUL_CHECK_OFFSET] + * (local_values[start_col + + CYCLOTOMIC_SQ_T3_CALC_OFFSET + + FP2_NON_RESIDUE_MUL_INPUT_OFFSET + + i + + 12] + - local_values[start_col + + CYCLOTOMIC_SQ_T2_CALC_OFFSET + + FP4_SQ_Y_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + } + add_non_residue_multiplication_constraints( + local_values, + yield_constr, + start_col + CYCLOTOMIC_SQ_T3_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + for j in 0..2 { + let (fp_add_offset, fp_sub_offset) = if j == 0 { + (FP2_ADDITION_0_OFFSET, FP2_SUBTRACTION_0_OFFSET) + } else { + (FP2_ADDITION_1_OFFSET, 
FP2_SUBTRACTION_1_OFFSET) + }; + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + CYCLOTOMIC_SQ_T4_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + CYCLOTOMIC_SQ_T4_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + + CYCLOTOMIC_SQ_T0_CALC_OFFSET + + FP4_SQ_X_CALC_OFFSET + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + CYCLOTOMIC_SQ_T4_CALC_OFFSET + + FP2_ADDITION_TOTAL + + fp_sub_offset + + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + + CYCLOTOMIC_SQ_T4_CALC_OFFSET + + FP2_ADDITION_TOTAL + + fp_sub_offset + + FP_SUBTRACTION_Y_OFFSET + + i] + - local_values[start_col + CYCLOTOMIC_SQ_INPUT_OFFSET + j * 12 + i]), + ) + } + } + add_subtraction_with_reduction_constranints( + local_values, + yield_constr, + start_col + CYCLOTOMIC_SQ_T4_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + for j in 0..2 { + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values + [start_col + CYCLOTOMIC_SQ_T5_CALC_OFFSET + FP2_FP_MUL_SELECTOR_OFFSET] + * (local_values[start_col + + CYCLOTOMIC_SQ_T5_CALC_OFFSET + + FP2_FP_X_INPUT_OFFSET + + j * 12 + + i] + - local_values[start_col + + CYCLOTOMIC_SQ_T4_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + } + let val = if i == 0 { FE::TWO } else { FE::ZERO }; + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values + [start_col + CYCLOTOMIC_SQ_T5_CALC_OFFSET + FP2_FP_MUL_SELECTOR_OFFSET] + * (local_values + [start_col + CYCLOTOMIC_SQ_T5_CALC_OFFSET + FP2_FP_Y_INPUT_OFFSET + i] + - val), + ); + } + add_fp2_fp_mul_constraints( + local_values, + next_values, + yield_constr, + start_col + 
CYCLOTOMIC_SQ_T5_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + for j in 0..2 { + let (x_offset, fp_add_offset) = if j == 0 { + (X0_Y_REDUCE_OFFSET, FP2_ADDITION_0_OFFSET) + } else { + (X1_Y_REDUCE_OFFSET, FP2_ADDITION_1_OFFSET) + }; + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + CYCLOTOMIC_SQ_C0_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + CYCLOTOMIC_SQ_C0_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + + CYCLOTOMIC_SQ_T5_CALC_OFFSET + + x_offset + + REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + CYCLOTOMIC_SQ_C0_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + CYCLOTOMIC_SQ_C0_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_Y_OFFSET + + i] + - local_values[start_col + + CYCLOTOMIC_SQ_T0_CALC_OFFSET + + FP4_SQ_X_CALC_OFFSET + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + } + } + add_addition_with_reduction_constranints( + local_values, + yield_constr, + start_col + CYCLOTOMIC_SQ_C0_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + for j in 0..2 { + let (fp_add_offset, fp_sub_offset) = if j == 0 { + (FP2_ADDITION_0_OFFSET, FP2_SUBTRACTION_0_OFFSET) + } else { + (FP2_ADDITION_1_OFFSET, FP2_SUBTRACTION_1_OFFSET) + }; + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + CYCLOTOMIC_SQ_T6_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + CYCLOTOMIC_SQ_T6_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + + CYCLOTOMIC_SQ_T1_CALC_OFFSET + + FP4_SQ_X_CALC_OFFSET + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + 
bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + CYCLOTOMIC_SQ_T6_CALC_OFFSET + + FP2_ADDITION_TOTAL + + fp_sub_offset + + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + + CYCLOTOMIC_SQ_T6_CALC_OFFSET + + FP2_ADDITION_TOTAL + + fp_sub_offset + + FP_SUBTRACTION_Y_OFFSET + + i] + - local_values[start_col + CYCLOTOMIC_SQ_INPUT_OFFSET + j * 12 + i + 24]), + ) + } + } + add_subtraction_with_reduction_constranints( + local_values, + yield_constr, + start_col + CYCLOTOMIC_SQ_T6_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + for j in 0..2 { + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values + [start_col + CYCLOTOMIC_SQ_T7_CALC_OFFSET + FP2_FP_MUL_SELECTOR_OFFSET] + * (local_values[start_col + + CYCLOTOMIC_SQ_T7_CALC_OFFSET + + FP2_FP_X_INPUT_OFFSET + + j * 12 + + i] + - local_values[start_col + + CYCLOTOMIC_SQ_T6_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + } + let val = if i == 0 { FE::TWO } else { FE::ZERO }; + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values + [start_col + CYCLOTOMIC_SQ_T7_CALC_OFFSET + FP2_FP_MUL_SELECTOR_OFFSET] + * (local_values + [start_col + CYCLOTOMIC_SQ_T7_CALC_OFFSET + FP2_FP_Y_INPUT_OFFSET + i] + - val), + ); + } + add_fp2_fp_mul_constraints( + local_values, + next_values, + yield_constr, + start_col + CYCLOTOMIC_SQ_T7_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + for j in 0..2 { + let (x_offset, fp_add_offset) = if j == 0 { + (X0_Y_REDUCE_OFFSET, FP2_ADDITION_0_OFFSET) + } else { + (X1_Y_REDUCE_OFFSET, FP2_ADDITION_1_OFFSET) + }; + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + CYCLOTOMIC_SQ_C1_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + CYCLOTOMIC_SQ_C1_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + 
+ CYCLOTOMIC_SQ_T7_CALC_OFFSET + + x_offset + + REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + CYCLOTOMIC_SQ_C1_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + CYCLOTOMIC_SQ_C1_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_Y_OFFSET + + i] + - local_values[start_col + + CYCLOTOMIC_SQ_T1_CALC_OFFSET + + FP4_SQ_X_CALC_OFFSET + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + } + } + add_addition_with_reduction_constranints( + local_values, + yield_constr, + start_col + CYCLOTOMIC_SQ_C1_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + for j in 0..2 { + let (fp_add_offset, fp_sub_offset) = if j == 0 { + (FP2_ADDITION_0_OFFSET, FP2_SUBTRACTION_0_OFFSET) + } else { + (FP2_ADDITION_1_OFFSET, FP2_SUBTRACTION_1_OFFSET) + }; + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + CYCLOTOMIC_SQ_T8_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + CYCLOTOMIC_SQ_T8_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + + CYCLOTOMIC_SQ_T2_CALC_OFFSET + + FP4_SQ_X_CALC_OFFSET + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + CYCLOTOMIC_SQ_T8_CALC_OFFSET + + FP2_ADDITION_TOTAL + + fp_sub_offset + + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + + CYCLOTOMIC_SQ_T8_CALC_OFFSET + + FP2_ADDITION_TOTAL + + fp_sub_offset + + FP_SUBTRACTION_Y_OFFSET + + i] + - local_values + [start_col + CYCLOTOMIC_SQ_INPUT_OFFSET + j * 12 + i + 24 * 2]), + ) + } + } + add_subtraction_with_reduction_constranints( + local_values, + yield_constr, + start_col + CYCLOTOMIC_SQ_T8_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + 
for j in 0..2 { + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values + [start_col + CYCLOTOMIC_SQ_T9_CALC_OFFSET + FP2_FP_MUL_SELECTOR_OFFSET] + * (local_values[start_col + + CYCLOTOMIC_SQ_T9_CALC_OFFSET + + FP2_FP_X_INPUT_OFFSET + + j * 12 + + i] + - local_values[start_col + + CYCLOTOMIC_SQ_T8_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + } + let val = if i == 0 { FE::TWO } else { FE::ZERO }; + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values + [start_col + CYCLOTOMIC_SQ_T9_CALC_OFFSET + FP2_FP_MUL_SELECTOR_OFFSET] + * (local_values + [start_col + CYCLOTOMIC_SQ_T9_CALC_OFFSET + FP2_FP_Y_INPUT_OFFSET + i] + - val), + ); + } + add_fp2_fp_mul_constraints( + local_values, + next_values, + yield_constr, + start_col + CYCLOTOMIC_SQ_T9_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + for j in 0..2 { + let (x_offset, fp_add_offset) = if j == 0 { + (X0_Y_REDUCE_OFFSET, FP2_ADDITION_0_OFFSET) + } else { + (X1_Y_REDUCE_OFFSET, FP2_ADDITION_1_OFFSET) + }; + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + CYCLOTOMIC_SQ_C2_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + CYCLOTOMIC_SQ_C2_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + + CYCLOTOMIC_SQ_T9_CALC_OFFSET + + x_offset + + REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + CYCLOTOMIC_SQ_C2_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + CYCLOTOMIC_SQ_C2_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_Y_OFFSET + + i] + - local_values[start_col + + CYCLOTOMIC_SQ_T2_CALC_OFFSET + + FP4_SQ_X_CALC_OFFSET + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i]), + 
); + } + } + add_addition_with_reduction_constranints( + local_values, + yield_constr, + start_col + CYCLOTOMIC_SQ_C2_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + for j in 0..2 { + let (fp_add_offset, x_offset) = if j == 0 { + (FP2_ADDITION_0_OFFSET, FP2_NON_RESIDUE_MUL_Z0_REDUCE_OFFSET) + } else { + (FP2_ADDITION_1_OFFSET, FP2_NON_RESIDUE_MUL_Z1_REDUCE_OFFSET) + }; + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + CYCLOTOMIC_SQ_T10_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + CYCLOTOMIC_SQ_T10_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + + CYCLOTOMIC_SQ_T3_CALC_OFFSET + + x_offset + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + CYCLOTOMIC_SQ_T10_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + CYCLOTOMIC_SQ_T10_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_Y_OFFSET + + i] + - local_values + [start_col + CYCLOTOMIC_SQ_INPUT_OFFSET + j * 12 + i + 24 * 3]), + ) + } + } + add_addition_with_reduction_constranints( + local_values, + yield_constr, + start_col + CYCLOTOMIC_SQ_T10_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + for j in 0..2 { + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values + [start_col + CYCLOTOMIC_SQ_T11_CALC_OFFSET + FP2_FP_MUL_SELECTOR_OFFSET] + * (local_values[start_col + + CYCLOTOMIC_SQ_T11_CALC_OFFSET + + FP2_FP_X_INPUT_OFFSET + + j * 12 + + i] + - local_values[start_col + + CYCLOTOMIC_SQ_T10_CALC_OFFSET + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + } + let val = if i == 0 { FE::TWO } else { FE::ZERO }; + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values + [start_col + CYCLOTOMIC_SQ_T11_CALC_OFFSET + FP2_FP_MUL_SELECTOR_OFFSET] + * 
(local_values + [start_col + CYCLOTOMIC_SQ_T11_CALC_OFFSET + FP2_FP_Y_INPUT_OFFSET + i] + - val), + ); + } + add_fp2_fp_mul_constraints( + local_values, + next_values, + yield_constr, + start_col + CYCLOTOMIC_SQ_T11_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + for j in 0..2 { + let (x_offset, y_offset, fp_add_offset) = if j == 0 { + ( + X0_Y_REDUCE_OFFSET, + FP2_NON_RESIDUE_MUL_Z0_REDUCE_OFFSET, + FP2_ADDITION_0_OFFSET, + ) + } else { + ( + X1_Y_REDUCE_OFFSET, + FP2_NON_RESIDUE_MUL_Z1_REDUCE_OFFSET, + FP2_ADDITION_1_OFFSET, + ) + }; + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + CYCLOTOMIC_SQ_C3_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + CYCLOTOMIC_SQ_C3_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + + CYCLOTOMIC_SQ_T11_CALC_OFFSET + + x_offset + + REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + CYCLOTOMIC_SQ_C3_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + CYCLOTOMIC_SQ_C3_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_Y_OFFSET + + i] + - local_values[start_col + + CYCLOTOMIC_SQ_T3_CALC_OFFSET + + y_offset + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + } + } + add_addition_with_reduction_constranints( + local_values, + yield_constr, + start_col + CYCLOTOMIC_SQ_C3_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + for j in 0..2 { + let fp_add_offset = if j == 0 { + FP2_ADDITION_0_OFFSET + } else { + FP2_ADDITION_1_OFFSET + }; + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + CYCLOTOMIC_SQ_T12_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + CYCLOTOMIC_SQ_T12_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + + CYCLOTOMIC_SQ_T0_CALC_OFFSET + + FP4_SQ_Y_CALC_OFFSET + + 
FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + CYCLOTOMIC_SQ_T12_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + CYCLOTOMIC_SQ_T12_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_Y_OFFSET + + i] + - local_values + [start_col + CYCLOTOMIC_SQ_INPUT_OFFSET + j * 12 + i + 24 * 4]), + ) + } + } + add_addition_with_reduction_constranints( + local_values, + yield_constr, + start_col + CYCLOTOMIC_SQ_T12_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + for j in 0..2 { + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values + [start_col + CYCLOTOMIC_SQ_T13_CALC_OFFSET + FP2_FP_MUL_SELECTOR_OFFSET] + * (local_values[start_col + + CYCLOTOMIC_SQ_T13_CALC_OFFSET + + FP2_FP_X_INPUT_OFFSET + + j * 12 + + i] + - local_values[start_col + + CYCLOTOMIC_SQ_T12_CALC_OFFSET + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + } + let val = if i == 0 { FE::TWO } else { FE::ZERO }; + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values + [start_col + CYCLOTOMIC_SQ_T13_CALC_OFFSET + FP2_FP_MUL_SELECTOR_OFFSET] + * (local_values + [start_col + CYCLOTOMIC_SQ_T13_CALC_OFFSET + FP2_FP_Y_INPUT_OFFSET + i] + - val), + ); + } + add_fp2_fp_mul_constraints( + local_values, + next_values, + yield_constr, + start_col + CYCLOTOMIC_SQ_T13_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + for j in 0..2 { + let (x_offset, fp_add_offset) = if j == 0 { + (X0_Y_REDUCE_OFFSET, FP2_ADDITION_0_OFFSET) + } else { + (X1_Y_REDUCE_OFFSET, FP2_ADDITION_1_OFFSET) + }; + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + CYCLOTOMIC_SQ_C4_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + 
CYCLOTOMIC_SQ_C4_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + + CYCLOTOMIC_SQ_T13_CALC_OFFSET + + x_offset + + REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + CYCLOTOMIC_SQ_C4_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + CYCLOTOMIC_SQ_C4_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_Y_OFFSET + + i] + - local_values[start_col + + CYCLOTOMIC_SQ_T0_CALC_OFFSET + + FP4_SQ_Y_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + } + } + add_addition_with_reduction_constranints( + local_values, + yield_constr, + start_col + CYCLOTOMIC_SQ_C4_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + for j in 0..2 { + let fp_add_offset = if j == 0 { + FP2_ADDITION_0_OFFSET + } else { + FP2_ADDITION_1_OFFSET + }; + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + CYCLOTOMIC_SQ_T14_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + CYCLOTOMIC_SQ_T14_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + + CYCLOTOMIC_SQ_T1_CALC_OFFSET + + FP4_SQ_Y_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + CYCLOTOMIC_SQ_T14_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + CYCLOTOMIC_SQ_T14_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_Y_OFFSET + + i] + - local_values + [start_col + CYCLOTOMIC_SQ_INPUT_OFFSET + j * 12 + i + 24 * 5]), + ) + } + } + add_addition_with_reduction_constranints( + local_values, + yield_constr, + start_col + CYCLOTOMIC_SQ_T14_CALC_OFFSET, + 
bit_selector, + ); + + for i in 0..12 { + for j in 0..2 { + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values + [start_col + CYCLOTOMIC_SQ_T15_CALC_OFFSET + FP2_FP_MUL_SELECTOR_OFFSET] + * (local_values[start_col + + CYCLOTOMIC_SQ_T15_CALC_OFFSET + + FP2_FP_X_INPUT_OFFSET + + j * 12 + + i] + - local_values[start_col + + CYCLOTOMIC_SQ_T14_CALC_OFFSET + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + } + let val = if i == 0 { FE::TWO } else { FE::ZERO }; + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values + [start_col + CYCLOTOMIC_SQ_T15_CALC_OFFSET + FP2_FP_MUL_SELECTOR_OFFSET] + * (local_values + [start_col + CYCLOTOMIC_SQ_T15_CALC_OFFSET + FP2_FP_Y_INPUT_OFFSET + i] + - val), + ); + } + add_fp2_fp_mul_constraints( + local_values, + next_values, + yield_constr, + start_col + CYCLOTOMIC_SQ_T15_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + for j in 0..2 { + let (x_offset, fp_add_offset) = if j == 0 { + (X0_Y_REDUCE_OFFSET, FP2_ADDITION_0_OFFSET) + } else { + (X1_Y_REDUCE_OFFSET, FP2_ADDITION_1_OFFSET) + }; + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + CYCLOTOMIC_SQ_C5_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + CYCLOTOMIC_SQ_C5_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + + CYCLOTOMIC_SQ_T15_CALC_OFFSET + + x_offset + + REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + CYCLOTOMIC_SQ_C5_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + CYCLOTOMIC_SQ_C5_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_Y_OFFSET + + i] + - local_values[start_col + + CYCLOTOMIC_SQ_T1_CALC_OFFSET + + FP4_SQ_Y_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + 
RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + } + } + add_addition_with_reduction_constranints( + local_values, + yield_constr, + start_col + CYCLOTOMIC_SQ_C5_CALC_OFFSET, + bit_selector, + ); +} + +pub fn add_cyclotomic_sq_constraints_ext_circuit, const D: usize>( + builder: &mut CircuitBuilder, + yield_constr: &mut RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + next_values: &[ExtensionTarget], + start_col: usize, + bit_selector: Option>, +) { + let bit_selector_val = bit_selector.unwrap_or(builder.constant_extension(F::Extension::ONE)); + + for i in 0..24 * 3 * 2 { + let tmp = builder.mul_extension( + bit_selector_val, + local_values[start_col + CYCLOTOMIC_SQ_SELECTOR_OFFSET], + ); + let c = builder.sub_extension( + local_values[start_col + CYCLOTOMIC_SQ_INPUT_OFFSET + i], + next_values[start_col + CYCLOTOMIC_SQ_INPUT_OFFSET + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint_transition(builder, c); + } + + for i in 0..24 { + let tmp = builder.mul_extension( + bit_selector_val, + local_values[start_col + CYCLOTOMIC_SQ_T0_CALC_OFFSET + FP4_SQ_SELECTOR_OFFSET], + ); + + let c = builder.sub_extension( + local_values[start_col + CYCLOTOMIC_SQ_T0_CALC_OFFSET + FP4_SQ_INPUT_X_OFFSET + i], + local_values[start_col + CYCLOTOMIC_SQ_INPUT_OFFSET + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values[start_col + CYCLOTOMIC_SQ_T0_CALC_OFFSET + FP4_SQ_INPUT_Y_OFFSET + i], + local_values[start_col + CYCLOTOMIC_SQ_INPUT_OFFSET + 24 * 4 + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + } + add_fp4_sq_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + CYCLOTOMIC_SQ_T0_CALC_OFFSET, + bit_selector, + ); + + for i in 0..24 { + let tmp = builder.mul_extension( + bit_selector_val, + local_values[start_col + CYCLOTOMIC_SQ_T1_CALC_OFFSET + 
FP4_SQ_SELECTOR_OFFSET], + ); + + let c = builder.sub_extension( + local_values[start_col + CYCLOTOMIC_SQ_T1_CALC_OFFSET + FP4_SQ_INPUT_X_OFFSET + i], + local_values[start_col + CYCLOTOMIC_SQ_INPUT_OFFSET + 24 * 3 + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values[start_col + CYCLOTOMIC_SQ_T1_CALC_OFFSET + FP4_SQ_INPUT_Y_OFFSET + i], + local_values[start_col + CYCLOTOMIC_SQ_INPUT_OFFSET + 24 * 2 + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + } + add_fp4_sq_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + CYCLOTOMIC_SQ_T1_CALC_OFFSET, + bit_selector, + ); + + for i in 0..24 { + let tmp = builder.mul_extension( + bit_selector_val, + local_values[start_col + CYCLOTOMIC_SQ_T2_CALC_OFFSET + FP4_SQ_SELECTOR_OFFSET], + ); + + let c = builder.sub_extension( + local_values[start_col + CYCLOTOMIC_SQ_T2_CALC_OFFSET + FP4_SQ_INPUT_X_OFFSET + i], + local_values[start_col + CYCLOTOMIC_SQ_INPUT_OFFSET + 24 * 1 + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values[start_col + CYCLOTOMIC_SQ_T2_CALC_OFFSET + FP4_SQ_INPUT_Y_OFFSET + i], + local_values[start_col + CYCLOTOMIC_SQ_INPUT_OFFSET + 24 * 5 + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + } + add_fp4_sq_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + CYCLOTOMIC_SQ_T2_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + let tmp = builder.mul_extension( + bit_selector_val, + local_values + [start_col + CYCLOTOMIC_SQ_T3_CALC_OFFSET + FP2_NON_RESIDUE_MUL_CHECK_OFFSET], + ); + + let c = builder.sub_extension( + local_values + [start_col + CYCLOTOMIC_SQ_T3_CALC_OFFSET + FP2_NON_RESIDUE_MUL_INPUT_OFFSET + i], + local_values[start_col + + CYCLOTOMIC_SQ_T2_CALC_OFFSET + + 
FP4_SQ_Y_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values[start_col + + CYCLOTOMIC_SQ_T3_CALC_OFFSET + + FP2_NON_RESIDUE_MUL_INPUT_OFFSET + + i + + 12], + local_values[start_col + + CYCLOTOMIC_SQ_T2_CALC_OFFSET + + FP4_SQ_Y_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + } + add_non_residue_multiplication_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + CYCLOTOMIC_SQ_T3_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + for j in 0..2 { + let (fp_add_offset, fp_sub_offset) = if j == 0 { + (FP2_ADDITION_0_OFFSET, FP2_SUBTRACTION_0_OFFSET) + } else { + (FP2_ADDITION_1_OFFSET, FP2_SUBTRACTION_1_OFFSET) + }; + let tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + + CYCLOTOMIC_SQ_T4_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_CHECK_OFFSET], + ); + let tmp2 = builder.mul_extension( + bit_selector_val, + local_values[start_col + + CYCLOTOMIC_SQ_T4_CALC_OFFSET + + FP2_ADDITION_TOTAL + + fp_sub_offset + + FP_SUBTRACTION_CHECK_OFFSET], + ); + + let c = builder.sub_extension( + local_values[start_col + + CYCLOTOMIC_SQ_T4_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + + CYCLOTOMIC_SQ_T0_CALC_OFFSET + + FP4_SQ_X_CALC_OFFSET + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c = builder.mul_extension(tmp1, c); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values[start_col + + CYCLOTOMIC_SQ_T4_CALC_OFFSET + + FP2_ADDITION_TOTAL + + fp_sub_offset + + FP_SUBTRACTION_Y_OFFSET + + i], + 
local_values[start_col + CYCLOTOMIC_SQ_INPUT_OFFSET + j * 12 + i], + ); + let c = builder.mul_extension(tmp2, c); + yield_constr.constraint(builder, c); + } + } + add_subtraction_with_reduction_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + CYCLOTOMIC_SQ_T4_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + let tmp = builder.mul_extension( + bit_selector_val, + local_values[start_col + CYCLOTOMIC_SQ_T5_CALC_OFFSET + FP2_FP_MUL_SELECTOR_OFFSET], + ); + for j in 0..2 { + let c = builder.sub_extension( + local_values + [start_col + CYCLOTOMIC_SQ_T5_CALC_OFFSET + FP2_FP_X_INPUT_OFFSET + j * 12 + i], + local_values[start_col + + CYCLOTOMIC_SQ_T4_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + } + let val = if i == 0 { + builder.constant_extension(F::Extension::TWO) + } else { + builder.constant_extension(F::Extension::ZERO) + }; + let c = builder.sub_extension( + local_values[start_col + CYCLOTOMIC_SQ_T5_CALC_OFFSET + FP2_FP_Y_INPUT_OFFSET + i], + val, + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + } + add_fp2_fp_mul_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + CYCLOTOMIC_SQ_T5_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + for j in 0..2 { + let (x_offset, fp_add_offset) = if j == 0 { + (X0_Y_REDUCE_OFFSET, FP2_ADDITION_0_OFFSET) + } else { + (X1_Y_REDUCE_OFFSET, FP2_ADDITION_1_OFFSET) + }; + let tmp = builder.mul_extension( + bit_selector_val, + local_values[start_col + + CYCLOTOMIC_SQ_C0_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_CHECK_OFFSET], + ); + + let c = builder.sub_extension( + local_values[start_col + + CYCLOTOMIC_SQ_C0_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_X_OFFSET + + i], + local_values + [start_col + 
CYCLOTOMIC_SQ_T5_CALC_OFFSET + x_offset + REDUCED_OFFSET + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values[start_col + + CYCLOTOMIC_SQ_C0_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_Y_OFFSET + + i], + local_values[start_col + + CYCLOTOMIC_SQ_T0_CALC_OFFSET + + FP4_SQ_X_CALC_OFFSET + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + } + } + add_addition_with_reduction_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + CYCLOTOMIC_SQ_C0_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + for j in 0..2 { + let (fp_add_offset, fp_sub_offset) = if j == 0 { + (FP2_ADDITION_0_OFFSET, FP2_SUBTRACTION_0_OFFSET) + } else { + (FP2_ADDITION_1_OFFSET, FP2_SUBTRACTION_1_OFFSET) + }; + let tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + + CYCLOTOMIC_SQ_T6_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_CHECK_OFFSET], + ); + let tmp2 = builder.mul_extension( + bit_selector_val, + local_values[start_col + + CYCLOTOMIC_SQ_T6_CALC_OFFSET + + FP2_ADDITION_TOTAL + + fp_sub_offset + + FP_SUBTRACTION_CHECK_OFFSET], + ); + + let c = builder.sub_extension( + local_values[start_col + + CYCLOTOMIC_SQ_T6_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + + CYCLOTOMIC_SQ_T1_CALC_OFFSET + + FP4_SQ_X_CALC_OFFSET + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c = builder.mul_extension(tmp1, c); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values[start_col + + CYCLOTOMIC_SQ_T6_CALC_OFFSET + + FP2_ADDITION_TOTAL + + fp_sub_offset + + FP_SUBTRACTION_Y_OFFSET + + i], + local_values[start_col + CYCLOTOMIC_SQ_INPUT_OFFSET + j * 12 + i + 24], + ); + let c 
= builder.mul_extension(tmp2, c); + yield_constr.constraint(builder, c); + } + } + add_subtraction_with_reduction_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + CYCLOTOMIC_SQ_T6_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + let tmp = builder.mul_extension( + bit_selector_val, + local_values[start_col + CYCLOTOMIC_SQ_T7_CALC_OFFSET + FP2_FP_MUL_SELECTOR_OFFSET], + ); + for j in 0..2 { + let c = builder.sub_extension( + local_values + [start_col + CYCLOTOMIC_SQ_T7_CALC_OFFSET + FP2_FP_X_INPUT_OFFSET + j * 12 + i], + local_values[start_col + + CYCLOTOMIC_SQ_T6_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + } + let val = if i == 0 { + builder.constant_extension(F::Extension::TWO) + } else { + builder.constant_extension(F::Extension::ZERO) + }; + let c = builder.sub_extension( + local_values[start_col + CYCLOTOMIC_SQ_T7_CALC_OFFSET + FP2_FP_Y_INPUT_OFFSET + i], + val, + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + } + add_fp2_fp_mul_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + CYCLOTOMIC_SQ_T7_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + for j in 0..2 { + let (x_offset, fp_add_offset) = if j == 0 { + (X0_Y_REDUCE_OFFSET, FP2_ADDITION_0_OFFSET) + } else { + (X1_Y_REDUCE_OFFSET, FP2_ADDITION_1_OFFSET) + }; + let tmp = builder.mul_extension( + bit_selector_val, + local_values[start_col + + CYCLOTOMIC_SQ_C1_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_CHECK_OFFSET], + ); + + let c = builder.sub_extension( + local_values[start_col + + CYCLOTOMIC_SQ_C1_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_X_OFFSET + + i], + local_values + [start_col + CYCLOTOMIC_SQ_T7_CALC_OFFSET + x_offset + REDUCED_OFFSET + i], + ); + let c = 
builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values[start_col + + CYCLOTOMIC_SQ_C1_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_Y_OFFSET + + i], + local_values[start_col + + CYCLOTOMIC_SQ_T1_CALC_OFFSET + + FP4_SQ_X_CALC_OFFSET + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + } + } + add_addition_with_reduction_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + CYCLOTOMIC_SQ_C1_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + for j in 0..2 { + let (fp_add_offset, fp_sub_offset) = if j == 0 { + (FP2_ADDITION_0_OFFSET, FP2_SUBTRACTION_0_OFFSET) + } else { + (FP2_ADDITION_1_OFFSET, FP2_SUBTRACTION_1_OFFSET) + }; + let tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + + CYCLOTOMIC_SQ_T8_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_CHECK_OFFSET], + ); + let tmp2 = builder.mul_extension( + bit_selector_val, + local_values[start_col + + CYCLOTOMIC_SQ_T8_CALC_OFFSET + + FP2_ADDITION_TOTAL + + fp_sub_offset + + FP_SUBTRACTION_CHECK_OFFSET], + ); + + let c = builder.sub_extension( + local_values[start_col + + CYCLOTOMIC_SQ_T8_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + + CYCLOTOMIC_SQ_T2_CALC_OFFSET + + FP4_SQ_X_CALC_OFFSET + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c = builder.mul_extension(tmp1, c); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values[start_col + + CYCLOTOMIC_SQ_T8_CALC_OFFSET + + FP2_ADDITION_TOTAL + + fp_sub_offset + + FP_SUBTRACTION_Y_OFFSET + + i], + local_values[start_col + CYCLOTOMIC_SQ_INPUT_OFFSET + j * 12 + i + 24 * 2], + ); + let c = builder.mul_extension(tmp2, c); + yield_constr.constraint(builder, c); + 
} + } + add_subtraction_with_reduction_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + CYCLOTOMIC_SQ_T8_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + let tmp = builder.mul_extension( + bit_selector_val, + local_values[start_col + CYCLOTOMIC_SQ_T9_CALC_OFFSET + FP2_FP_MUL_SELECTOR_OFFSET], + ); + for j in 0..2 { + let c = builder.sub_extension( + local_values + [start_col + CYCLOTOMIC_SQ_T9_CALC_OFFSET + FP2_FP_X_INPUT_OFFSET + j * 12 + i], + local_values[start_col + + CYCLOTOMIC_SQ_T8_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + } + let val = if i == 0 { + builder.constant_extension(F::Extension::TWO) + } else { + builder.constant_extension(F::Extension::ZERO) + }; + let c = builder.sub_extension( + local_values[start_col + CYCLOTOMIC_SQ_T9_CALC_OFFSET + FP2_FP_Y_INPUT_OFFSET + i], + val, + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + } + add_fp2_fp_mul_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + CYCLOTOMIC_SQ_T9_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + for j in 0..2 { + let (x_offset, fp_add_offset) = if j == 0 { + (X0_Y_REDUCE_OFFSET, FP2_ADDITION_0_OFFSET) + } else { + (X1_Y_REDUCE_OFFSET, FP2_ADDITION_1_OFFSET) + }; + let tmp = builder.mul_extension( + bit_selector_val, + local_values[start_col + + CYCLOTOMIC_SQ_C2_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_CHECK_OFFSET], + ); + + let c = builder.sub_extension( + local_values[start_col + + CYCLOTOMIC_SQ_C2_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_X_OFFSET + + i], + local_values + [start_col + CYCLOTOMIC_SQ_T9_CALC_OFFSET + x_offset + REDUCED_OFFSET + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + + let c = 
builder.sub_extension( + local_values[start_col + + CYCLOTOMIC_SQ_C2_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_Y_OFFSET + + i], + local_values[start_col + + CYCLOTOMIC_SQ_T2_CALC_OFFSET + + FP4_SQ_X_CALC_OFFSET + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + } + } + add_addition_with_reduction_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + CYCLOTOMIC_SQ_C2_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + for j in 0..2 { + let (fp_add_offset, x_offset) = if j == 0 { + (FP2_ADDITION_0_OFFSET, FP2_NON_RESIDUE_MUL_Z0_REDUCE_OFFSET) + } else { + (FP2_ADDITION_1_OFFSET, FP2_NON_RESIDUE_MUL_Z1_REDUCE_OFFSET) + }; + let tmp = builder.mul_extension( + bit_selector_val, + local_values[start_col + + CYCLOTOMIC_SQ_T10_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_CHECK_OFFSET], + ); + + let c = builder.sub_extension( + local_values[start_col + + CYCLOTOMIC_SQ_T10_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + + CYCLOTOMIC_SQ_T3_CALC_OFFSET + + x_offset + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values[start_col + + CYCLOTOMIC_SQ_T10_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_Y_OFFSET + + i], + local_values[start_col + CYCLOTOMIC_SQ_INPUT_OFFSET + j * 12 + i + 24 * 3], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + } + } + add_addition_with_reduction_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + CYCLOTOMIC_SQ_T10_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + let tmp = builder.mul_extension( + bit_selector_val, + local_values[start_col + CYCLOTOMIC_SQ_T11_CALC_OFFSET + FP2_FP_MUL_SELECTOR_OFFSET], + ); + for j in 0..2 { + let c = 
builder.sub_extension( + local_values[start_col + + CYCLOTOMIC_SQ_T11_CALC_OFFSET + + FP2_FP_X_INPUT_OFFSET + + j * 12 + + i], + local_values[start_col + + CYCLOTOMIC_SQ_T10_CALC_OFFSET + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + } + let val = if i == 0 { + builder.constant_extension(F::Extension::TWO) + } else { + builder.constant_extension(F::Extension::ZERO) + }; + let c = builder.sub_extension( + local_values[start_col + CYCLOTOMIC_SQ_T11_CALC_OFFSET + FP2_FP_Y_INPUT_OFFSET + i], + val, + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + } + add_fp2_fp_mul_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + CYCLOTOMIC_SQ_T11_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + for j in 0..2 { + let (x_offset, y_offset, fp_add_offset) = if j == 0 { + ( + X0_Y_REDUCE_OFFSET, + FP2_NON_RESIDUE_MUL_Z0_REDUCE_OFFSET, + FP2_ADDITION_0_OFFSET, + ) + } else { + ( + X1_Y_REDUCE_OFFSET, + FP2_NON_RESIDUE_MUL_Z1_REDUCE_OFFSET, + FP2_ADDITION_1_OFFSET, + ) + }; + let tmp = builder.mul_extension( + bit_selector_val, + local_values[start_col + + CYCLOTOMIC_SQ_C3_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_CHECK_OFFSET], + ); + + let c = builder.sub_extension( + local_values[start_col + + CYCLOTOMIC_SQ_C3_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_X_OFFSET + + i], + local_values + [start_col + CYCLOTOMIC_SQ_T11_CALC_OFFSET + x_offset + REDUCED_OFFSET + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values[start_col + + CYCLOTOMIC_SQ_C3_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_Y_OFFSET + + i], + local_values[start_col + + CYCLOTOMIC_SQ_T3_CALC_OFFSET + + y_offset + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c = builder.mul_extension(tmp, c); + 
yield_constr.constraint(builder, c); + } + } + add_addition_with_reduction_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + CYCLOTOMIC_SQ_C3_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + for j in 0..2 { + let fp_add_offset = if j == 0 { + FP2_ADDITION_0_OFFSET + } else { + FP2_ADDITION_1_OFFSET + }; + let tmp = builder.mul_extension( + bit_selector_val, + local_values[start_col + + CYCLOTOMIC_SQ_T12_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_CHECK_OFFSET], + ); + + let c = builder.sub_extension( + local_values[start_col + + CYCLOTOMIC_SQ_T12_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + + CYCLOTOMIC_SQ_T0_CALC_OFFSET + + FP4_SQ_Y_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values[start_col + + CYCLOTOMIC_SQ_T12_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_Y_OFFSET + + i], + local_values[start_col + CYCLOTOMIC_SQ_INPUT_OFFSET + j * 12 + i + 24 * 4], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + } + } + add_addition_with_reduction_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + CYCLOTOMIC_SQ_T12_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + let tmp = builder.mul_extension( + bit_selector_val, + local_values[start_col + CYCLOTOMIC_SQ_T13_CALC_OFFSET + FP2_FP_MUL_SELECTOR_OFFSET], + ); + for j in 0..2 { + let c = builder.sub_extension( + local_values[start_col + + CYCLOTOMIC_SQ_T13_CALC_OFFSET + + FP2_FP_X_INPUT_OFFSET + + j * 12 + + i], + local_values[start_col + + CYCLOTOMIC_SQ_T12_CALC_OFFSET + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c = builder.mul_extension(tmp, c); + 
yield_constr.constraint(builder, c); + } + let val = if i == 0 { + builder.constant_extension(F::Extension::TWO) + } else { + builder.constant_extension(F::Extension::ZERO) + }; + let c = builder.sub_extension( + local_values[start_col + CYCLOTOMIC_SQ_T13_CALC_OFFSET + FP2_FP_Y_INPUT_OFFSET + i], + val, + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + } + add_fp2_fp_mul_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + CYCLOTOMIC_SQ_T13_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + for j in 0..2 { + let (x_offset, fp_add_offset) = if j == 0 { + (X0_Y_REDUCE_OFFSET, FP2_ADDITION_0_OFFSET) + } else { + (X1_Y_REDUCE_OFFSET, FP2_ADDITION_1_OFFSET) + }; + let tmp = builder.mul_extension( + bit_selector_val, + local_values[start_col + + CYCLOTOMIC_SQ_C4_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_CHECK_OFFSET], + ); + + let c = builder.sub_extension( + local_values[start_col + + CYCLOTOMIC_SQ_C4_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_X_OFFSET + + i], + local_values + [start_col + CYCLOTOMIC_SQ_T13_CALC_OFFSET + x_offset + REDUCED_OFFSET + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values[start_col + + CYCLOTOMIC_SQ_C4_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_Y_OFFSET + + i], + local_values[start_col + + CYCLOTOMIC_SQ_T0_CALC_OFFSET + + FP4_SQ_Y_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + } + } + add_addition_with_reduction_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + CYCLOTOMIC_SQ_C4_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + for j in 0..2 { + let fp_add_offset = if j == 0 { + FP2_ADDITION_0_OFFSET + } else { + FP2_ADDITION_1_OFFSET + 
}; + let tmp = builder.mul_extension( + bit_selector_val, + local_values[start_col + + CYCLOTOMIC_SQ_T14_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_CHECK_OFFSET], + ); + + let c = builder.sub_extension( + local_values[start_col + + CYCLOTOMIC_SQ_T14_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + + CYCLOTOMIC_SQ_T1_CALC_OFFSET + + FP4_SQ_Y_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values[start_col + + CYCLOTOMIC_SQ_T14_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_Y_OFFSET + + i], + local_values[start_col + CYCLOTOMIC_SQ_INPUT_OFFSET + j * 12 + i + 24 * 5], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + } + } + add_addition_with_reduction_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + CYCLOTOMIC_SQ_T14_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + let tmp = builder.mul_extension( + bit_selector_val, + local_values[start_col + CYCLOTOMIC_SQ_T15_CALC_OFFSET + FP2_FP_MUL_SELECTOR_OFFSET], + ); + for j in 0..2 { + let c = builder.sub_extension( + local_values[start_col + + CYCLOTOMIC_SQ_T15_CALC_OFFSET + + FP2_FP_X_INPUT_OFFSET + + j * 12 + + i], + local_values[start_col + + CYCLOTOMIC_SQ_T14_CALC_OFFSET + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + } + let val = if i == 0 { + builder.constant_extension(F::Extension::TWO) + } else { + builder.constant_extension(F::Extension::ZERO) + }; + let c = builder.sub_extension( + local_values[start_col + CYCLOTOMIC_SQ_T15_CALC_OFFSET + FP2_FP_Y_INPUT_OFFSET + i], + val, + ); + let c = builder.mul_extension(tmp, c); + 
yield_constr.constraint(builder, c); + } + add_fp2_fp_mul_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + CYCLOTOMIC_SQ_T15_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + for j in 0..2 { + let (x_offset, fp_add_offset) = if j == 0 { + (X0_Y_REDUCE_OFFSET, FP2_ADDITION_0_OFFSET) + } else { + (X1_Y_REDUCE_OFFSET, FP2_ADDITION_1_OFFSET) + }; + let tmp = builder.mul_extension( + bit_selector_val, + local_values[start_col + + CYCLOTOMIC_SQ_C5_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_CHECK_OFFSET], + ); + + let c = builder.sub_extension( + local_values[start_col + + CYCLOTOMIC_SQ_C5_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_X_OFFSET + + i], + local_values + [start_col + CYCLOTOMIC_SQ_T15_CALC_OFFSET + x_offset + REDUCED_OFFSET + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values[start_col + + CYCLOTOMIC_SQ_C5_CALC_OFFSET + + fp_add_offset + + FP_ADDITION_Y_OFFSET + + i], + local_values[start_col + + CYCLOTOMIC_SQ_T1_CALC_OFFSET + + FP4_SQ_Y_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + } + } + add_addition_with_reduction_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + CYCLOTOMIC_SQ_C5_CALC_OFFSET, + bit_selector, + ); +} + +/// Constraints for [cyclotomicExponent](super::native::Fp12::cyclotocmicExponent) function. +/// +/// Constraints inputs across this and next row, wherever selector is set to on. When `CYCLOTOMIC_EXP_START_ROW` is set, constraints z to be 1. Creates two `bit_selector` values from `BIT1_SELECTOR`. Constraints cyclotomicSquare function with `bit0` and constraints fp12 multiplication with `bit1`. 
What it does is switch on the constraints of cyclotomicSquare when `BIT1_SELECTOR` is off and switch on the constraints of fp12 multiplication when `BIT1_SELECTOR` is on. When `FIRST_ROW_SELECTOR` is on in the next row, constraints z value of the next row with result of cyclotmicSquare function and `bit0` of current row and constraints z value of the next row with result of fp12 multiplication and `bit1` of current row. +pub fn add_cyclotomic_exp_constraints< + F: RichField + Extendable, + const D: usize, + FE, + P, + const D2: usize, +>( + local_values: &[P], + next_values: &[P], + yield_constr: &mut ConstraintConsumer

, + start_col: usize, + op_selector: Option

, +) where + FE: FieldExtension, + P: PackedField, +{ + for i in 0..24 * 3 * 2 { + yield_constr.constraint_transition( + op_selector.unwrap_or(P::ONES) + * local_values[start_col + CYCLOTOMIC_EXP_SELECTOR_OFFSET] + * (local_values[start_col + INPUT_OFFSET + i] + - next_values[start_col + INPUT_OFFSET + i]), + ); + } + for i in 0..24 * 3 * 2 { + let val = if i == 0 { P::ONES } else { P::ZEROS }; + yield_constr.constraint( + op_selector.unwrap_or(P::ONES) + * local_values[start_col + CYCLOTOMIC_EXP_START_ROW] + * (local_values[start_col + Z_OFFSET + i] - val), + ); + } + + let bit1 = (local_values[start_col + BIT1_SELECTOR_OFFSET]) * op_selector.unwrap_or(P::ONES); + let bit0 = + (P::ONES - local_values[start_col + BIT1_SELECTOR_OFFSET]) * op_selector.unwrap_or(P::ONES); + + for i in 0..12 { + for j in 0..6 { + let c_offset = if j == 0 { + CYCLOTOMIC_SQ_C0_CALC_OFFSET + } else if j == 1 { + CYCLOTOMIC_SQ_C1_CALC_OFFSET + } else if j == 2 { + CYCLOTOMIC_SQ_C2_CALC_OFFSET + } else if j == 3 { + CYCLOTOMIC_SQ_C3_CALC_OFFSET + } else if j == 4 { + CYCLOTOMIC_SQ_C4_CALC_OFFSET + } else { + CYCLOTOMIC_SQ_C5_CALC_OFFSET + }; + for k in 0..2 { + yield_constr.constraint_transition( + bit0 * local_values[start_col + CYCLOTOMIC_EXP_SELECTOR_OFFSET] + * next_values[start_col + FIRST_ROW_SELECTOR_OFFSET] + * (next_values[start_col + Z_OFFSET + j * 24 + k * 12 + i] + - local_values[start_col + + Z_CYCLOTOMIC_SQ_OFFSET + + c_offset + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * k + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + } + } + } + + for i in 0..12 { + for j in 0..6 { + yield_constr.constraint_transition( + bit1 * local_values[start_col + CYCLOTOMIC_EXP_SELECTOR_OFFSET] + * next_values[start_col + FIRST_ROW_SELECTOR_OFFSET] + * (next_values[start_col + Z_OFFSET + j * 12 + i] + - local_values[start_col + + Z_MUL_INPUT_OFFSET + + FP12_MUL_X_CALC_OFFSET + + FP6_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + 
FP_SINGLE_REDUCED_OFFSET + + i]), + ); + yield_constr.constraint_transition( + bit1 * local_values[start_col + CYCLOTOMIC_EXP_SELECTOR_OFFSET] + * next_values[start_col + FIRST_ROW_SELECTOR_OFFSET] + * (next_values[start_col + Z_OFFSET + j * 12 + i + 24 * 3] + - local_values[start_col + + Z_MUL_INPUT_OFFSET + + FP12_MUL_Y_CALC_OFFSET + + FP6_ADDITION_TOTAL + + FP6_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + } + } + + for i in 0..24 * 3 * 2 { + yield_constr.constraint( + bit0 * local_values[start_col + Z_CYCLOTOMIC_SQ_OFFSET + CYCLOTOMIC_SQ_SELECTOR_OFFSET] + * (local_values + [start_col + Z_CYCLOTOMIC_SQ_OFFSET + CYCLOTOMIC_SQ_INPUT_OFFSET + i] + - local_values[start_col + Z_OFFSET + i]), + ); + } + add_cyclotomic_sq_constraints( + local_values, + next_values, + yield_constr, + start_col + Z_CYCLOTOMIC_SQ_OFFSET, + Some(bit0), + ); + + for i in 0..24 * 3 * 2 { + yield_constr.constraint( + bit1 * local_values[start_col + Z_MUL_INPUT_OFFSET + FP12_MUL_SELECTOR_OFFSET] + * (local_values[start_col + Z_MUL_INPUT_OFFSET + FP12_MUL_X_INPUT_OFFSET + i] + - local_values[start_col + Z_OFFSET + i]), + ); + yield_constr.constraint( + bit1 * local_values[start_col + Z_MUL_INPUT_OFFSET + FP12_MUL_SELECTOR_OFFSET] + * (local_values[start_col + Z_MUL_INPUT_OFFSET + FP12_MUL_Y_INPUT_OFFSET + i] + - local_values[start_col + INPUT_OFFSET + i]), + ); + } + add_fp12_multiplication_constraints( + local_values, + next_values, + yield_constr, + start_col + Z_MUL_INPUT_OFFSET, + Some(bit1), + ); + + for i in 0..12 { + for j in 0..6 { + let c_offset = if j == 0 { + CYCLOTOMIC_SQ_C0_CALC_OFFSET + } else if j == 1 { + CYCLOTOMIC_SQ_C1_CALC_OFFSET + } else if j == 2 { + CYCLOTOMIC_SQ_C2_CALC_OFFSET + } else if j == 3 { + CYCLOTOMIC_SQ_C3_CALC_OFFSET + } else if j == 4 { + CYCLOTOMIC_SQ_C4_CALC_OFFSET + } else { + CYCLOTOMIC_SQ_C5_CALC_OFFSET + }; + for k in 0..2 { + yield_constr.constraint_transition( + 
op_selector.unwrap_or(P::ONES) + * local_values[start_col + CYCLOTOMIC_EXP_SELECTOR_OFFSET] + * next_values[start_col + RES_ROW_SELECTOR_OFFSET] + * (next_values[start_col + Z_OFFSET + j * 24 + k * 12 + i] + - local_values[start_col + + Z_CYCLOTOMIC_SQ_OFFSET + + c_offset + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * k + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + } + } + } +} + +pub fn add_cyclotomic_exp_constraints_ext_circuit, const D: usize>( + builder: &mut CircuitBuilder, + yield_constr: &mut RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + next_values: &[ExtensionTarget], + start_col: usize, + op_selector: Option>, +) { + let one = builder.constant_extension(F::Extension::ONE); + let op_selector_val = op_selector.unwrap_or(one); + + for i in 0..24 * 3 * 2 { + let tmp = builder.mul_extension( + op_selector_val, + local_values[start_col + CYCLOTOMIC_EXP_SELECTOR_OFFSET], + ); + + let c = builder.sub_extension( + local_values[start_col + INPUT_OFFSET + i], + next_values[start_col + INPUT_OFFSET + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint_transition(builder, c); + } + + for i in 0..24 * 3 * 2 { + let val = if i == 0 { + one + } else { + builder.constant_extension(F::Extension::ZERO) + }; + let tmp = builder.mul_extension( + op_selector_val, + local_values[start_col + CYCLOTOMIC_EXP_START_ROW], + ); + + let c = builder.sub_extension(local_values[start_col + Z_OFFSET + i], val); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + } + + let bit1 = builder.mul_extension( + op_selector_val, + local_values[start_col + BIT1_SELECTOR_OFFSET], + ); + let bit0 = builder.sub_extension(one, local_values[start_col + BIT1_SELECTOR_OFFSET]); + let bit0 = builder.mul_extension(op_selector_val, bit0); + + for i in 0..12 { + for j in 0..6 { + let c_offset = if j == 0 { + CYCLOTOMIC_SQ_C0_CALC_OFFSET + } else if j == 1 { + CYCLOTOMIC_SQ_C1_CALC_OFFSET + } else if j == 2 { + 
CYCLOTOMIC_SQ_C2_CALC_OFFSET + } else if j == 3 { + CYCLOTOMIC_SQ_C3_CALC_OFFSET + } else if j == 4 { + CYCLOTOMIC_SQ_C4_CALC_OFFSET + } else { + CYCLOTOMIC_SQ_C5_CALC_OFFSET + }; + let mul = builder.mul_extension( + bit0, + local_values[start_col + CYCLOTOMIC_EXP_SELECTOR_OFFSET], + ); + let mul = + builder.mul_extension(mul, next_values[start_col + FIRST_ROW_SELECTOR_OFFSET]); + for k in 0..2 { + let c = builder.sub_extension( + next_values[start_col + Z_OFFSET + j * 24 + k * 12 + i], + local_values[start_col + + Z_CYCLOTOMIC_SQ_OFFSET + + c_offset + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * k + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c = builder.mul_extension(mul, c); + yield_constr.constraint_transition(builder, c); + } + } + } + + for i in 0..12 { + let mul = builder.mul_extension( + bit1, + local_values[start_col + CYCLOTOMIC_EXP_SELECTOR_OFFSET], + ); + let mul = builder.mul_extension(mul, next_values[start_col + FIRST_ROW_SELECTOR_OFFSET]); + for j in 0..6 { + let c = builder.sub_extension( + next_values[start_col + Z_OFFSET + j * 12 + i], + local_values[start_col + + Z_MUL_INPUT_OFFSET + + FP12_MUL_X_CALC_OFFSET + + FP6_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c = builder.mul_extension(mul, c); + yield_constr.constraint_transition(builder, c); + + let c = builder.sub_extension( + next_values[start_col + Z_OFFSET + j * 12 + i + 24 * 3], + local_values[start_col + + Z_MUL_INPUT_OFFSET + + FP12_MUL_Y_CALC_OFFSET + + FP6_ADDITION_TOTAL + + FP6_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c = builder.mul_extension(mul, c); + yield_constr.constraint_transition(builder, c); + } + } + + for i in 0..24 * 3 * 2 { + let tmp = builder.mul_extension( + bit0, + local_values[start_col + Z_CYCLOTOMIC_SQ_OFFSET + CYCLOTOMIC_SQ_SELECTOR_OFFSET], + ); + + let c = builder.sub_extension( + 
local_values[start_col + Z_CYCLOTOMIC_SQ_OFFSET + CYCLOTOMIC_SQ_INPUT_OFFSET + i], + local_values[start_col + Z_OFFSET + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + } + add_cyclotomic_sq_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + Z_CYCLOTOMIC_SQ_OFFSET, + Some(bit0), + ); + + for i in 0..24 * 3 * 2 { + let tmp = builder.mul_extension( + bit1, + local_values[start_col + Z_MUL_INPUT_OFFSET + FP12_MUL_SELECTOR_OFFSET], + ); + + let c = builder.sub_extension( + local_values[start_col + Z_MUL_INPUT_OFFSET + FP12_MUL_X_INPUT_OFFSET + i], + local_values[start_col + Z_OFFSET + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values[start_col + Z_MUL_INPUT_OFFSET + FP12_MUL_Y_INPUT_OFFSET + i], + local_values[start_col + INPUT_OFFSET + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + } + add_fp12_multiplication_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + Z_MUL_INPUT_OFFSET, + Some(bit1), + ); + + for i in 0..12 { + let mul = builder.mul_extension( + op_selector_val, + local_values[start_col + CYCLOTOMIC_EXP_SELECTOR_OFFSET], + ); + let mul = builder.mul_extension(mul, next_values[start_col + RES_ROW_SELECTOR_OFFSET]); + for j in 0..6 { + let c_offset = if j == 0 { + CYCLOTOMIC_SQ_C0_CALC_OFFSET + } else if j == 1 { + CYCLOTOMIC_SQ_C1_CALC_OFFSET + } else if j == 2 { + CYCLOTOMIC_SQ_C2_CALC_OFFSET + } else if j == 3 { + CYCLOTOMIC_SQ_C3_CALC_OFFSET + } else if j == 4 { + CYCLOTOMIC_SQ_C4_CALC_OFFSET + } else { + CYCLOTOMIC_SQ_C5_CALC_OFFSET + }; + for k in 0..2 { + let c = builder.sub_extension( + next_values[start_col + Z_OFFSET + j * 24 + k * 12 + i], + local_values[start_col + + Z_CYCLOTOMIC_SQ_OFFSET + + c_offset + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * k + + 
FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c = builder.mul_extension(mul, c); + yield_constr.constraint_transition(builder, c); + } + } + } +} + +/// Constraints for [forbenius_map](super::native::Fp12::forbenius_map) function. +/// +/// Constraints both input and power across this and next row, wherever selector is set to on. Constraint the divisor and remainder with power for `power == divisor*12 + remainder`. Constraints the bit decomposition as `remainder == bit0 + bit1*2 + bit2*4 + bit3*8`. Selects the forbenius constant using mupliplexer logic. Then constraints fp6 forbenius map, multiplication, reduction and range check operations. +pub fn add_fp12_forbenius_map_constraints< + F: RichField + Extendable, + const D: usize, + FE, + P, + const D2: usize, +>( + local_values: &[P], + next_values: &[P], + yield_constr: &mut ConstraintConsumer

, + start_col: usize, + bit_selector: Option

, +) where + FE: FieldExtension, + P: PackedField, +{ + for i in 0..24 * 3 * 2 { + yield_constr.constraint_transition( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + FP12_FORBENIUS_MAP_SELECTOR_OFFSET] + * (local_values[start_col + FP12_FORBENIUS_MAP_INPUT_OFFSET + i] + - next_values[start_col + FP12_FORBENIUS_MAP_INPUT_OFFSET + i]), + ); + } + yield_constr.constraint_transition( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + FP12_FORBENIUS_MAP_SELECTOR_OFFSET] + * (local_values[start_col + FP12_FORBENIUS_MAP_POW_OFFSET] + - next_values[start_col + FP12_FORBENIUS_MAP_POW_OFFSET]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + FP12_FORBENIUS_MAP_SELECTOR_OFFSET] + * (local_values[start_col + FP12_FORBENIUS_MAP_DIV_OFFSET] + * FE::from_canonical_usize(12) + + local_values[start_col + FP12_FORBENIUS_MAP_REM_OFFSET] + - local_values[start_col + FP12_FORBENIUS_MAP_POW_OFFSET]), + ); + let bit0 = local_values[start_col + FP12_FORBENIUS_MAP_BIT0_OFFSET]; + let bit1 = local_values[start_col + FP12_FORBENIUS_MAP_BIT1_OFFSET]; + let bit2 = local_values[start_col + FP12_FORBENIUS_MAP_BIT2_OFFSET]; + let bit3 = local_values[start_col + FP12_FORBENIUS_MAP_BIT3_OFFSET]; + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + FP12_FORBENIUS_MAP_SELECTOR_OFFSET] + * (bit0 + + bit1 * FE::TWO + + bit2 * FE::from_canonical_usize(4) + + bit3 * FE::from_canonical_usize(8) + - local_values[start_col + FP12_FORBENIUS_MAP_REM_OFFSET]), + ); + let forbenius_coefficients = Fp12::forbenius_coefficients() + .iter() + .map(|fp2| fp2.get_u32_slice().concat().try_into().unwrap()) + .collect::>(); + let y = (0..24) + .map(|i| { + (P::ONES - bit0) + * (P::ONES - bit1) + * (P::ONES - bit2) + * FE::from_canonical_u32(forbenius_coefficients[0][i]) + + (bit0) + * (P::ONES - bit1) + * (P::ONES - bit2) + * FE::from_canonical_u32(forbenius_coefficients[1][i]) + + (P::ONES - bit0) + * (bit1) 
+ * (P::ONES - bit2) + * FE::from_canonical_u32(forbenius_coefficients[2][i]) + + (bit0) + * (bit1) + * (P::ONES - bit2) + * FE::from_canonical_u32(forbenius_coefficients[3][i]) + + (P::ONES - bit0) + * (P::ONES - bit1) + * (bit2) + * FE::from_canonical_u32(forbenius_coefficients[4][i]) + + (bit0) + * (P::ONES - bit1) + * (bit2) + * FE::from_canonical_u32(forbenius_coefficients[5][i]) + + (P::ONES - bit0) + * (bit1) + * (bit2) + * FE::from_canonical_u32(forbenius_coefficients[6][i]) + }) + .collect::>(); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values + [start_col + FP12_FORBENIUS_MAP_R0_CALC_OFFSET + FP6_FORBENIUS_MAP_SELECTOR_OFFSET] + * (local_values + [start_col + FP12_FORBENIUS_MAP_R0_CALC_OFFSET + FP6_FORBENIUS_MAP_POW_OFFSET] + - local_values[start_col + FP12_FORBENIUS_MAP_POW_OFFSET]), + ); + for i in 0..24 * 3 { + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + FP12_FORBENIUS_MAP_R0_CALC_OFFSET + + FP6_FORBENIUS_MAP_SELECTOR_OFFSET] + * (local_values[start_col + + FP12_FORBENIUS_MAP_R0_CALC_OFFSET + + FP6_FORBENIUS_MAP_INPUT_OFFSET + + i] + - local_values[start_col + FP12_FORBENIUS_MAP_INPUT_OFFSET + i]), + ); + } + add_fp6_forbenius_map_constraints( + local_values, + next_values, + yield_constr, + start_col + FP12_FORBENIUS_MAP_R0_CALC_OFFSET, + bit_selector, + ); + + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + FP12_FORBENIUS_MAP_C0C1C2_CALC_OFFSET + + FP6_FORBENIUS_MAP_SELECTOR_OFFSET] + * (local_values + [start_col + FP12_FORBENIUS_MAP_C0C1C2_CALC_OFFSET + FP6_FORBENIUS_MAP_POW_OFFSET] + - local_values[start_col + FP12_FORBENIUS_MAP_POW_OFFSET]), + ); + for i in 0..24 * 3 { + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + FP12_FORBENIUS_MAP_C0C1C2_CALC_OFFSET + + FP6_FORBENIUS_MAP_SELECTOR_OFFSET] + * (local_values[start_col + + FP12_FORBENIUS_MAP_C0C1C2_CALC_OFFSET + + 
FP6_FORBENIUS_MAP_INPUT_OFFSET + + i] + - local_values[start_col + FP12_FORBENIUS_MAP_INPUT_OFFSET + i + 24 * 3]), + ); + } + add_fp6_forbenius_map_constraints( + local_values, + next_values, + yield_constr, + start_col + FP12_FORBENIUS_MAP_C0C1C2_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + for j in 0..2 { + let offset = if j == 0 { + FP6_FORBENIUS_MAP_X_CALC_OFFSET + FP2_FORBENIUS_MAP_INPUT_OFFSET + } else { + FP6_FORBENIUS_MAP_X_CALC_OFFSET + + FP2_FORBENIUS_MAP_T0_CALC_OFFSET + + FP_MULTIPLICATION_TOTAL_COLUMNS + + REDUCED_OFFSET + }; + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values + [start_col + FP12_FORBENIUS_MAP_C0_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + + FP12_FORBENIUS_MAP_C0_CALC_OFFSET + + FP2_FP2_X_INPUT_OFFSET + + j * 12 + + i] + - local_values + [start_col + FP12_FORBENIUS_MAP_C0C1C2_CALC_OFFSET + offset + i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values + [start_col + FP12_FORBENIUS_MAP_C0_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + + FP12_FORBENIUS_MAP_C0_CALC_OFFSET + + FP2_FP2_Y_INPUT_OFFSET + + j * 12 + + i] + - y[j * 12 + i]), + ); + } + } + add_fp2_mul_constraints( + local_values, + next_values, + yield_constr, + start_col + FP12_FORBENIUS_MAP_C0_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + for j in 0..2 { + let offset = if j == 0 { + FP6_FORBENIUS_MAP_Y_CALC_OFFSET + Z1_REDUCE_OFFSET + REDUCED_OFFSET + } else { + FP6_FORBENIUS_MAP_Y_CALC_OFFSET + Z2_REDUCE_OFFSET + REDUCED_OFFSET + }; + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values + [start_col + FP12_FORBENIUS_MAP_C1_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + + FP12_FORBENIUS_MAP_C1_CALC_OFFSET + + FP2_FP2_X_INPUT_OFFSET + + j * 12 + + i] + - local_values + [start_col + FP12_FORBENIUS_MAP_C0C1C2_CALC_OFFSET + offset + i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * 
local_values + [start_col + FP12_FORBENIUS_MAP_C1_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + + FP12_FORBENIUS_MAP_C1_CALC_OFFSET + + FP2_FP2_Y_INPUT_OFFSET + + j * 12 + + i] + - y[j * 12 + i]), + ); + } + } + add_fp2_mul_constraints( + local_values, + next_values, + yield_constr, + start_col + FP12_FORBENIUS_MAP_C1_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + for j in 0..2 { + let offset = if j == 0 { + FP6_FORBENIUS_MAP_Z_CALC_OFFSET + Z1_REDUCE_OFFSET + REDUCED_OFFSET + } else { + FP6_FORBENIUS_MAP_Z_CALC_OFFSET + Z2_REDUCE_OFFSET + REDUCED_OFFSET + }; + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values + [start_col + FP12_FORBENIUS_MAP_C2_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + + FP12_FORBENIUS_MAP_C2_CALC_OFFSET + + FP2_FP2_X_INPUT_OFFSET + + j * 12 + + i] + - local_values + [start_col + FP12_FORBENIUS_MAP_C0C1C2_CALC_OFFSET + offset + i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values + [start_col + FP12_FORBENIUS_MAP_C2_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + + FP12_FORBENIUS_MAP_C2_CALC_OFFSET + + FP2_FP2_Y_INPUT_OFFSET + + j * 12 + + i] + - y[j * 12 + i]), + ); + } + } + add_fp2_mul_constraints( + local_values, + next_values, + yield_constr, + start_col + FP12_FORBENIUS_MAP_C2_CALC_OFFSET, + bit_selector, + ); +} + +pub fn add_fp12_forbenius_map_constraints_ext_circuit< + F: RichField + Extendable, + const D: usize, +>( + builder: &mut CircuitBuilder, + yield_constr: &mut RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + next_values: &[ExtensionTarget], + start_col: usize, + bit_selector: Option>, +) { + let one = builder.constant_extension(F::Extension::ONE); + let bit_selector_val = bit_selector.unwrap_or(one); + + let tmp = builder.mul_extension( + bit_selector_val, + local_values[start_col + FP12_FORBENIUS_MAP_SELECTOR_OFFSET], + ); + + for i in 0..24 * 3 * 2 { + let c = 
builder.sub_extension( + local_values[start_col + FP12_FORBENIUS_MAP_INPUT_OFFSET + i], + next_values[start_col + FP12_FORBENIUS_MAP_INPUT_OFFSET + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint_transition(builder, c); + } + + let c = builder.sub_extension( + local_values[start_col + FP12_FORBENIUS_MAP_POW_OFFSET], + next_values[start_col + FP12_FORBENIUS_MAP_POW_OFFSET], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint_transition(builder, c); + + let twelve = builder.constant_extension(F::Extension::from_canonical_u32(12)); + let c = builder.mul_extension( + local_values[start_col + FP12_FORBENIUS_MAP_DIV_OFFSET], + twelve, + ); + let c = builder.add_extension(c, local_values[start_col + FP12_FORBENIUS_MAP_REM_OFFSET]); + let c = builder.sub_extension(c, local_values[start_col + FP12_FORBENIUS_MAP_POW_OFFSET]); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + + let bit0 = local_values[start_col + FP12_FORBENIUS_MAP_BIT0_OFFSET]; + let bit1 = local_values[start_col + FP12_FORBENIUS_MAP_BIT1_OFFSET]; + let bit2 = local_values[start_col + FP12_FORBENIUS_MAP_BIT2_OFFSET]; + let bit3 = local_values[start_col + FP12_FORBENIUS_MAP_BIT3_OFFSET]; + let one_bit0 = builder.sub_extension(one, bit0); + let one_bit1 = builder.sub_extension(one, bit1); + let one_bit2 = builder.sub_extension(one, bit2); + + let two = builder.constant_extension(F::Extension::TWO); + let four = builder.constant_extension(F::Extension::from_canonical_u32(4)); + let eight = builder.constant_extension(F::Extension::from_canonical_u32(8)); + let c = builder.mul_add_extension(bit1, two, bit0); + let c = builder.mul_add_extension(bit2, four, c); + let c = builder.mul_add_extension(bit3, eight, c); + let c = builder.sub_extension(c, local_values[start_col + FP12_FORBENIUS_MAP_REM_OFFSET]); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + + let forbenius_coefficients = 
Fp12::forbenius_coefficients() + .iter() + .map(|fp2| fp2.get_u32_slice().concat().try_into().unwrap()) + .collect::>(); + let y = (0..24) + .map(|i| { + let fc0 = builder.constant_extension(F::Extension::from_canonical_u32( + forbenius_coefficients[0][i], + )); + let fc1 = builder.constant_extension(F::Extension::from_canonical_u32( + forbenius_coefficients[1][i], + )); + let fc2 = builder.constant_extension(F::Extension::from_canonical_u32( + forbenius_coefficients[2][i], + )); + let fc3 = builder.constant_extension(F::Extension::from_canonical_u32( + forbenius_coefficients[3][i], + )); + let fc4 = builder.constant_extension(F::Extension::from_canonical_u32( + forbenius_coefficients[4][i], + )); + let fc5 = builder.constant_extension(F::Extension::from_canonical_u32( + forbenius_coefficients[5][i], + )); + let fc6 = builder.constant_extension(F::Extension::from_canonical_u32( + forbenius_coefficients[6][i], + )); + + let val_zero = builder.mul_many_extension([one_bit0, one_bit1, one_bit2, fc0]); + let val_one = builder.mul_many_extension([bit0, one_bit1, one_bit2, fc1]); + let val_two = builder.mul_many_extension([one_bit0, bit1, one_bit2, fc2]); + let val_three = builder.mul_many_extension([bit0, bit1, one_bit2, fc3]); + let val_four = builder.mul_many_extension([one_bit0, one_bit1, bit2, fc4]); + let val_five = builder.mul_many_extension([bit0, one_bit1, bit2, fc5]); + let val_six = builder.mul_many_extension([one_bit0, bit1, bit2, fc6]); + + let c = builder.add_many_extension([ + val_zero, val_one, val_two, val_three, val_four, val_five, val_six, + ]); + c + }) + .collect::>>(); + + let tmp = builder.mul_extension( + bit_selector_val, + local_values + [start_col + FP12_FORBENIUS_MAP_R0_CALC_OFFSET + FP6_FORBENIUS_MAP_SELECTOR_OFFSET], + ); + + let c = builder.sub_extension( + local_values[start_col + FP12_FORBENIUS_MAP_R0_CALC_OFFSET + FP6_FORBENIUS_MAP_POW_OFFSET], + local_values[start_col + FP12_FORBENIUS_MAP_POW_OFFSET], + ); + let c = 
builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + + for i in 0..24 * 3 { + let c = builder.sub_extension( + local_values[start_col + + FP12_FORBENIUS_MAP_R0_CALC_OFFSET + + FP6_FORBENIUS_MAP_INPUT_OFFSET + + i], + local_values[start_col + FP12_FORBENIUS_MAP_INPUT_OFFSET + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + } + add_fp6_forbenius_map_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + FP12_FORBENIUS_MAP_R0_CALC_OFFSET, + bit_selector, + ); + + let tmp = builder.mul_extension( + bit_selector_val, + local_values + [start_col + FP12_FORBENIUS_MAP_C0C1C2_CALC_OFFSET + FP6_FORBENIUS_MAP_SELECTOR_OFFSET], + ); + + let c = builder.sub_extension( + local_values + [start_col + FP12_FORBENIUS_MAP_C0C1C2_CALC_OFFSET + FP6_FORBENIUS_MAP_POW_OFFSET], + local_values[start_col + FP12_FORBENIUS_MAP_POW_OFFSET], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + + for i in 0..24 * 3 { + let c = builder.sub_extension( + local_values[start_col + + FP12_FORBENIUS_MAP_C0C1C2_CALC_OFFSET + + FP6_FORBENIUS_MAP_INPUT_OFFSET + + i], + local_values[start_col + FP12_FORBENIUS_MAP_INPUT_OFFSET + i + 24 * 3], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + } + add_fp6_forbenius_map_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + FP12_FORBENIUS_MAP_C0C1C2_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + for j in 0..2 { + let offset = if j == 0 { + FP6_FORBENIUS_MAP_X_CALC_OFFSET + FP2_FORBENIUS_MAP_INPUT_OFFSET + } else { + FP6_FORBENIUS_MAP_X_CALC_OFFSET + + FP2_FORBENIUS_MAP_T0_CALC_OFFSET + + FP_MULTIPLICATION_TOTAL_COLUMNS + + REDUCED_OFFSET + }; + let tmp = builder.mul_extension( + bit_selector_val, + local_values + [start_col + FP12_FORBENIUS_MAP_C0_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET], + ); + + let c = builder.sub_extension( + 
local_values[start_col + + FP12_FORBENIUS_MAP_C0_CALC_OFFSET + + FP2_FP2_X_INPUT_OFFSET + + j * 12 + + i], + local_values[start_col + FP12_FORBENIUS_MAP_C0C1C2_CALC_OFFSET + offset + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values[start_col + + FP12_FORBENIUS_MAP_C0_CALC_OFFSET + + FP2_FP2_Y_INPUT_OFFSET + + j * 12 + + i], + y[j * 12 + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + } + } + add_fp2_mul_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + FP12_FORBENIUS_MAP_C0_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + for j in 0..2 { + let offset = if j == 0 { + FP6_FORBENIUS_MAP_Y_CALC_OFFSET + Z1_REDUCE_OFFSET + REDUCED_OFFSET + } else { + FP6_FORBENIUS_MAP_Y_CALC_OFFSET + Z2_REDUCE_OFFSET + REDUCED_OFFSET + }; + let tmp = builder.mul_extension( + bit_selector_val, + local_values + [start_col + FP12_FORBENIUS_MAP_C1_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET], + ); + + let c = builder.sub_extension( + local_values[start_col + + FP12_FORBENIUS_MAP_C1_CALC_OFFSET + + FP2_FP2_X_INPUT_OFFSET + + j * 12 + + i], + local_values[start_col + FP12_FORBENIUS_MAP_C0C1C2_CALC_OFFSET + offset + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values[start_col + + FP12_FORBENIUS_MAP_C1_CALC_OFFSET + + FP2_FP2_Y_INPUT_OFFSET + + j * 12 + + i], + y[j * 12 + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + } + } + add_fp2_mul_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + FP12_FORBENIUS_MAP_C1_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + for j in 0..2 { + let offset = if j == 0 { + FP6_FORBENIUS_MAP_Z_CALC_OFFSET + Z1_REDUCE_OFFSET + REDUCED_OFFSET + } else { + FP6_FORBENIUS_MAP_Z_CALC_OFFSET + Z2_REDUCE_OFFSET + 
REDUCED_OFFSET + }; + let tmp = builder.mul_extension( + bit_selector_val, + local_values + [start_col + FP12_FORBENIUS_MAP_C2_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET], + ); + + let c = builder.sub_extension( + local_values[start_col + + FP12_FORBENIUS_MAP_C2_CALC_OFFSET + + FP2_FP2_X_INPUT_OFFSET + + j * 12 + + i], + local_values[start_col + FP12_FORBENIUS_MAP_C0C1C2_CALC_OFFSET + offset + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values[start_col + + FP12_FORBENIUS_MAP_C2_CALC_OFFSET + + FP2_FP2_Y_INPUT_OFFSET + + j * 12 + + i], + y[j * 12 + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + } + } + add_fp2_mul_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + FP12_FORBENIUS_MAP_C2_CALC_OFFSET, + bit_selector, + ); +} + +/// Constraints for [conjugate](super::native::Fp12::conjugate) function. +pub fn add_fp12_conjugate_constraints< + F: RichField + Extendable, + const D: usize, + FE, + P, + const D2: usize, +>( + local_values: &[P], + yield_constr: &mut ConstraintConsumer

, + start_col: usize, + bit_selector: Option

, +) where + FE: FieldExtension, + P: PackedField, +{ + for i in 0..12 { + for j in 0..3 { + let fp2_offset = if j == 0 { + FP6_ADDITION_0_OFFSET + } else if j == 1 { + FP6_ADDITION_1_OFFSET + } else { + FP6_ADDITION_2_OFFSET + }; + for k in 0..2 { + let fp_offset = if k == 0 { + FP2_ADDITION_0_OFFSET + } else { + FP2_ADDITION_1_OFFSET + }; + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + FP12_CONJUGATE_ADDITIION_OFFSET + + fp2_offset + + fp_offset + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP12_CONJUGATE_ADDITIION_OFFSET + + fp2_offset + + fp_offset + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + + FP12_CONJUGATE_INPUT_OFFSET + + (j + 3) * 24 + + k * 12 + + i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + FP12_CONJUGATE_ADDITIION_OFFSET + + fp2_offset + + fp_offset + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP12_CONJUGATE_ADDITIION_OFFSET + + fp2_offset + + fp_offset + + FP_ADDITION_Y_OFFSET + + i] + - local_values[start_col + + FP12_CONJUGATE_OUTPUT_OFFSET + + (j + 3) * 24 + + k * 12 + + i]), + ); + } + } + } + add_negate_fp6_constraints( + local_values, + yield_constr, + start_col + FP12_CONJUGATE_ADDITIION_OFFSET, + bit_selector, + ); +} + +pub fn add_fp12_conjugate_constraints_ext_circuit, const D: usize>( + builder: &mut CircuitBuilder, + yield_constr: &mut RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + start_col: usize, + bit_selector: Option>, +) { + let bit_selector_val = bit_selector.unwrap_or(builder.constant_extension(F::Extension::ONE)); + + for i in 0..12 { + for j in 0..3 { + let fp2_offset = if j == 0 { + FP6_ADDITION_0_OFFSET + } else if j == 1 { + FP6_ADDITION_1_OFFSET + } else { + FP6_ADDITION_2_OFFSET + }; + for k in 0..2 { + let fp_offset = if k == 0 { + FP2_ADDITION_0_OFFSET + } else { + FP2_ADDITION_1_OFFSET + }; + let tmp = builder.mul_extension( + bit_selector_val, + 
local_values[start_col + + FP12_CONJUGATE_ADDITIION_OFFSET + + fp2_offset + + fp_offset + + FP_ADDITION_CHECK_OFFSET], + ); + + let c = builder.sub_extension( + local_values[start_col + + FP12_CONJUGATE_ADDITIION_OFFSET + + fp2_offset + + fp_offset + + FP_ADDITION_X_OFFSET + + i], + local_values + [start_col + FP12_CONJUGATE_INPUT_OFFSET + (j + 3) * 24 + k * 12 + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values[start_col + + FP12_CONJUGATE_ADDITIION_OFFSET + + fp2_offset + + fp_offset + + FP_ADDITION_Y_OFFSET + + i], + local_values + [start_col + FP12_CONJUGATE_OUTPUT_OFFSET + (j + 3) * 24 + k * 12 + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + } + } + } + add_negate_fp6_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP12_CONJUGATE_ADDITIION_OFFSET, + bit_selector, + ); +} diff --git a/casper-finality-proofs/src/verification/fields/starky/fp2.rs b/casper-finality-proofs/src/verification/fields/starky/fp2.rs new file mode 100644 index 000000000..67b825d13 --- /dev/null +++ b/casper-finality-proofs/src/verification/fields/starky/fp2.rs @@ -0,0 +1,4477 @@ +//! This module contains functions for filling the stark trace and adding constraints for the corresponding trace for some Fp2 operations (multiplication, addition, subtraction, etc). One Fp2 element is represented as \[u32; 24\] inside the trace. But most of the time, Fp2 elements are broken up into two Fp elements, hence represented as two \[u32; 12\]. 
+use crate::verification::{ + fields::starky::fp::*, + utils::{ + native_bls::{ + get_u32_vec_from_literal, get_u32_vec_from_literal_24, modulus, Fp, Fp2 + }, + starky_utils::*, + }, +}; +use num_bigint::BigUint; +use plonky2::{ + field::{ + extension::{Extendable, FieldExtension}, + packed::PackedField, + types::Field, + }, + hash::hash_types::RichField, + iop::ext_target::ExtensionTarget, + plonk::circuit_builder::CircuitBuilder, +}; +use starky::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer}; + +// Fp2 Multiplication layout offsets +/* + These trace offsets are for Fp2 multiplication. It needs 12 rows. + [x0, x1] * [y0, y1] = [x0*y0 - x1*y1, x0*y1 + x1*y0] + FP2_FP2_SELECTOR_OFFSET -> Selector to ensure that the input is same across all rows. Set 1 in all rows except last one. + FP2_FP2_X_INPUT_OFFSET -> offset where input x is set. + FP2_FP2_Y_INPUT_OFFSET -> offset where input y is set. + X_0_Y_0_MULTIPLICATION_OFFSET -> offset where x0*y0 multiplication is set. + X_1_Y_1_MULTIPLICATION_OFFSET -> offset where x1*y1 multiplication is set. + Z1_ADD_MODULUS_OFFSET -> Addition operation to add x0*y0 + p*p (required because we don't know if x0*y0 - x1*y1 will overflow). + Z1_SUBTRACTION_OFFSET -> Subtraction operation for x0*y0 + p*p - x1*y1. + Z1_REDUCE_OFFSET -> Reduction operation for Z1 (z1 is the real part of the result). + Z1_RANGECHECK_OFFSET -> Range check the result of Z1 reduction. + X_0_Y_1_MULTIPLICATION_OFFSET -> offset where x0*y1 multiplication is set. + X_1_Y_0_MULTIPLICATION_OFFSET -> offset where x1*y0 multiplication is set. + Z2_ADDITION_OFFSET -> Addition operation for x0*y1 + x1*y0. + Z2_REDUCE_OFFSET -> Reduction operation for Z2 (z2 is the imaginary part of the result). + Z2_RANGECHECK_OFFSET -> Range check the result of Z2 reduction. 
+*/ +pub const FP2_FP2_SELECTOR_OFFSET: usize = 0; +pub const FP2_FP2_X_INPUT_OFFSET: usize = FP2_FP2_SELECTOR_OFFSET + 1; +pub const FP2_FP2_Y_INPUT_OFFSET: usize = FP2_FP2_X_INPUT_OFFSET + 24; +pub const X_0_Y_0_MULTIPLICATION_OFFSET: usize = FP2_FP2_Y_INPUT_OFFSET + 24; +pub const X_1_Y_1_MULTIPLICATION_OFFSET: usize = + X_0_Y_0_MULTIPLICATION_OFFSET + FP_MULTIPLICATION_TOTAL_COLUMNS; + +pub const Z1_ADD_MODULUS_OFFSET: usize = + X_1_Y_1_MULTIPLICATION_OFFSET + FP_MULTIPLICATION_TOTAL_COLUMNS; +pub const Z1_SUBTRACTION_OFFSET: usize = Z1_ADD_MODULUS_OFFSET + ADDITION_TOTAL; +pub const Z1_REDUCE_OFFSET: usize = Z1_SUBTRACTION_OFFSET + SUBTRACTION_TOTAL; +pub const Z1_RANGECHECK_OFFSET: usize = Z1_REDUCE_OFFSET + REDUCTION_TOTAL; + +pub const X_0_Y_1_MULTIPLICATION_OFFSET: usize = Z1_RANGECHECK_OFFSET + RANGE_CHECK_TOTAL; +pub const X_1_Y_0_MULTIPLICATION_OFFSET: usize = + X_0_Y_1_MULTIPLICATION_OFFSET + FP_MULTIPLICATION_TOTAL_COLUMNS; + +pub const Z2_ADDITION_OFFSET: usize = + X_1_Y_0_MULTIPLICATION_OFFSET + FP_MULTIPLICATION_TOTAL_COLUMNS; +pub const Z2_REDUCE_OFFSET: usize = Z2_ADDITION_OFFSET + ADDITION_TOTAL; +pub const Z2_RANGECHECK_OFFSET: usize = Z2_REDUCE_OFFSET + REDUCTION_TOTAL; + +pub const TOTAL_COLUMNS_FP2_MULTIPLICATION: usize = Z2_RANGECHECK_OFFSET + RANGE_CHECK_TOTAL; + +// Fp2 * Fp multiplication layout offsets +/* + These trace offsets are for multiplication of Fp2 with Fp. It needs 12 rows. + [x0, x1] * y = [x0y, x1y] + FP2_FP_MUL_SELECTOR_OFFSET -> Selector to ensure that the input is same across all rows. Set 1 in all rows except last one. + X0_Y_REDUCE_OFFSET -> Reduction operation for x0y. + X0_Y_RANGECHECK_OFFSET -> Range check for result of x0y reduction. + X1_Y_REDUCE_OFFSET -> Reduction operation for x1y. + X1_Y_RANGECHECK_OFFSET -> Range check for result of x1y reduction. 
+*/ +pub const FP2_FP_MUL_SELECTOR_OFFSET: usize = 0; +pub const FP2_FP_X_INPUT_OFFSET: usize = FP2_FP_MUL_SELECTOR_OFFSET + 1; +pub const FP2_FP_Y_INPUT_OFFSET: usize = FP2_FP_X_INPUT_OFFSET + 24; +pub const X0_Y_MULTIPLICATION_OFFSET: usize = FP2_FP_Y_INPUT_OFFSET + 12; +pub const X0_Y_REDUCE_OFFSET: usize = X0_Y_MULTIPLICATION_OFFSET + FP_MULTIPLICATION_TOTAL_COLUMNS; +pub const X0_Y_RANGECHECK_OFFSET: usize = X0_Y_REDUCE_OFFSET + REDUCTION_TOTAL; +pub const X1_Y_MULTIPLICATION_OFFSET: usize = X0_Y_RANGECHECK_OFFSET + RANGE_CHECK_TOTAL; +pub const X1_Y_REDUCE_OFFSET: usize = X1_Y_MULTIPLICATION_OFFSET + FP_MULTIPLICATION_TOTAL_COLUMNS; +pub const X1_Y_RANGECHECK_OFFSET: usize = X1_Y_REDUCE_OFFSET + REDUCTION_TOTAL; +pub const FP2_FP_TOTAL_COLUMNS: usize = X1_Y_RANGECHECK_OFFSET + RANGE_CHECK_TOTAL; + +// Multiply by B layout offsets +/* + These trace offsets are for `multiply_by_b` function (super::native::Fp2::multiply_by_B). It needs 12 rows. + MULTIPLY_BY_B_SELECTOR_OFFSET -> Selector to ensure that the input is same across all rows. Set 1 in all rows except last one. + MULTIPLY_BY_B_ADD_MODSQ_OFFSET -> Addition operation to add x0*4 + p*p (required because we don't know if x0*4 - x1*4 will overflow). + MULTIPLY_BY_B_Z0_REDUCE_OFFSET -> Reduction operation for Z0 (z0 is the real part of the result). + MULTIPLY_BY_B_Z0_RANGECHECK_OFFSET -> Range check for result of Z0 reduction. + MULTIPLY_BY_B_Z1_REDUCE_OFFSET -> Reduction operation for Z1 (z1 is the imaginary part of the result). + MULTIPLY_BY_B_Z1_RANGECHECK_OFFSET -> Range check for result of Z1 reduction. 
+*/ +pub const MULTIPLY_B_SELECTOR_OFFSET: usize = 0; +pub const MULTIPLY_B_X_OFFSET: usize = MULTIPLY_B_SELECTOR_OFFSET + 1; +pub const MULTIPLY_B_X0_B_MUL_OFFSET: usize = MULTIPLY_B_X_OFFSET + 24; +pub const MULTIPLY_B_X1_B_MUL_OFFSET: usize = + MULTIPLY_B_X0_B_MUL_OFFSET + FP_MULTIPLICATION_TOTAL_COLUMNS; +pub const MULTIPLY_B_ADD_MODSQ_OFFSET: usize = + MULTIPLY_B_X1_B_MUL_OFFSET + FP_MULTIPLICATION_TOTAL_COLUMNS; +pub const MULTIPLY_B_SUB_OFFSET: usize = MULTIPLY_B_ADD_MODSQ_OFFSET + ADDITION_TOTAL; +pub const MULTIPLY_B_Z0_REDUCE_OFFSET: usize = MULTIPLY_B_SUB_OFFSET + SUBTRACTION_TOTAL; +pub const MULTIPLY_B_Z0_RANGECHECK_OFFSET: usize = MULTIPLY_B_Z0_REDUCE_OFFSET + REDUCTION_TOTAL; +pub const MULTIPLY_B_ADD_OFFSET: usize = MULTIPLY_B_Z0_RANGECHECK_OFFSET + RANGE_CHECK_TOTAL; +pub const MULTIPLY_B_Z1_REDUCE_OFFSET: usize = MULTIPLY_B_ADD_OFFSET + ADDITION_TOTAL; +pub const MULTIPLY_B_Z1_RANGECHECK_OFFSET: usize = MULTIPLY_B_Z1_REDUCE_OFFSET + REDUCTION_TOTAL; +pub const MULTIPLY_B_TOTAL_COLUMS: usize = MULTIPLY_B_Z1_RANGECHECK_OFFSET + RANGE_CHECK_TOTAL; + +// Fp2 addition layout offsets +/* + These trace offsets are for addition for two Fp2 elements. In essence it's two concatenated Fp additions. It needs 1 row. +*/ +pub const FP2_ADDITION_0_OFFSET: usize = 0; +pub const FP2_ADDITION_1_OFFSET: usize = FP2_ADDITION_0_OFFSET + FP_ADDITION_TOTAL; +pub const FP2_ADDITION_TOTAL: usize = FP2_ADDITION_1_OFFSET + FP_ADDITION_TOTAL; + +// Fp2 subtraction layout offsets +/* + These trace offsets are for subtraction for two Fp2 elements. In essence it's two concatenated Fp subtractions. It needs 1 row. +*/ +pub const FP2_SUBTRACTION_0_OFFSET: usize = 0; +pub const FP2_SUBTRACTION_1_OFFSET: usize = FP2_SUBTRACTION_0_OFFSET + FP_SUBTRACTION_TOTAL; +pub const FP2_SUBTRACTION_TOTAL: usize = FP2_SUBTRACTION_1_OFFSET + FP_SUBTRACTION_TOTAL; + +// Fp2 multiply single +/* + These trace offsets are for multiply by single for two Fp2 elements. 
In essence it's two concatenated Fp multiply by single. It needs 1 row. +*/ +pub const FP2_MULTIPLY_SINGLE_0_OFFSET: usize = 0; +pub const FP2_MULTIPLY_SINGLE_1_OFFSET: usize = + FP2_MULTIPLY_SINGLE_0_OFFSET + FP_MULTIPLY_SINGLE_TOTAL; +pub const FP2_MULTIPLY_SINGLE_TOTAL: usize = + FP2_MULTIPLY_SINGLE_1_OFFSET + FP_MULTIPLY_SINGLE_TOTAL; + +// FP2 non residue multiplication +/* + These trace offsets are for Fp2 non residue multiplication (super::native::Fp2::mul_by_nonresidue). It needs 1 row. + FP2_NON_RESIDUE_MUL_CHECK_OFFSET -> Selector to indicate the operation is on. + FP2_NON_RESIDUE_MUL_C0_C1_SUB_OFFSET -> This offset is for two operations in one. First is addition with bls12-381 field prime, followed by subtraction. + FP2_NON_RESIDUE_MUL_Z0_REDUCE_OFFSET -> Reduction operation for Z0 (z0 is the real part of the result). + FP2_NON_RESIDUE_MUL_Z0_RANGECHECK_OFFSET -> Range check for result of Z0 reduction. + FP2_NON_RESIDUE_MUL_Z1_REDUCE_OFFSET -> Reduction operation for Z1 (z1 is the imaginary part of the result). + FP2_NON_RESIDUE_MUL_Z1_RANGECHECK_OFFSET -> Range check for result of Z1 reduction. 
+*/ +pub const FP2_NON_RESIDUE_MUL_CHECK_OFFSET: usize = 0; +pub const FP2_NON_RESIDUE_MUL_INPUT_OFFSET: usize = FP2_NON_RESIDUE_MUL_CHECK_OFFSET + 1; +pub const FP2_NON_RESIDUE_MUL_C0_C1_SUB_OFFSET: usize = FP2_NON_RESIDUE_MUL_INPUT_OFFSET + 24; +pub const FP2_NON_RESIDUE_MUL_Z0_REDUCE_OFFSET: usize = + FP2_NON_RESIDUE_MUL_C0_C1_SUB_OFFSET + FP_ADDITION_TOTAL + FP_SUBTRACTION_TOTAL; +pub const FP2_NON_RESIDUE_MUL_Z0_RANGECHECK_OFFSET: usize = + FP2_NON_RESIDUE_MUL_Z0_REDUCE_OFFSET + FP_SINGLE_REDUCE_TOTAL; +pub const FP2_NON_RESIDUE_MUL_C0_C1_ADD_OFFSET: usize = + FP2_NON_RESIDUE_MUL_Z0_RANGECHECK_OFFSET + RANGE_CHECK_TOTAL; +pub const FP2_NON_RESIDUE_MUL_Z1_REDUCE_OFFSET: usize = + FP2_NON_RESIDUE_MUL_C0_C1_ADD_OFFSET + FP_ADDITION_TOTAL; +pub const FP2_NON_RESIDUE_MUL_Z1_RANGECHECK_OFFSET: usize = + FP2_NON_RESIDUE_MUL_Z1_REDUCE_OFFSET + FP_SINGLE_REDUCE_TOTAL; +pub const FP2_NON_RESIDUE_MUL_TOTAL: usize = + FP2_NON_RESIDUE_MUL_Z1_RANGECHECK_OFFSET + RANGE_CHECK_TOTAL; + +// FP4 Sq +/* + These trace offsets are for Fp4 square function (super::native::fp4_square). It needs 12 rows. + FP4_SQ_SELECTOR_OFFSET -> Selector to ensure that the input is same across all rows. Set 1 in all rows except last one. + T0 -> a*a + T1 -> b*b + T2 -> mul_by_nonresidue(T1) + X -> T2 + T0 + T3 -> a+b + T4 -> T3*T3 + T5 -> T4 - T0 + Y -> T5 - T1 + FP4_SQ_X_CALC_OFFSET, FP4_SQ_T3_CALC_OFFSET -> offset including 3 operations (fp2 addition, reduction of both real and imaginary parts of the result, range check of both real and imaginary parts of the result). + FP4_SQ_T5_CALC_OFFSET, FP4_SQ_Y_CALC_OFFSET -> offset including 4 operations (fp2 addition (adding bls12-381 field prime to mitigate overflow), fp2 subtraction, reduction of both real and imaginary parts of the result, range check of both real and imaginary parts of the result). 
+*/ +pub const FP4_SQ_SELECTOR_OFFSET: usize = 0; +pub const FP4_SQ_INPUT_X_OFFSET: usize = FP4_SQ_SELECTOR_OFFSET + 1; +pub const FP4_SQ_INPUT_Y_OFFSET: usize = FP4_SQ_INPUT_X_OFFSET + 24; +pub const FP4_SQ_T0_CALC_OFFSET: usize = FP4_SQ_INPUT_Y_OFFSET + 24; +pub const FP4_SQ_T1_CALC_OFFSET: usize = FP4_SQ_T0_CALC_OFFSET + TOTAL_COLUMNS_FP2_MULTIPLICATION; +pub const FP4_SQ_T2_CALC_OFFSET: usize = FP4_SQ_T1_CALC_OFFSET + TOTAL_COLUMNS_FP2_MULTIPLICATION; +pub const FP4_SQ_X_CALC_OFFSET: usize = FP4_SQ_T2_CALC_OFFSET + FP2_NON_RESIDUE_MUL_TOTAL; +pub const FP4_SQ_T3_CALC_OFFSET: usize = + FP4_SQ_X_CALC_OFFSET + FP2_ADDITION_TOTAL + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 2; +pub const FP4_SQ_T4_CALC_OFFSET: usize = + FP4_SQ_T3_CALC_OFFSET + FP2_ADDITION_TOTAL + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 2; +pub const FP4_SQ_T5_CALC_OFFSET: usize = FP4_SQ_T4_CALC_OFFSET + TOTAL_COLUMNS_FP2_MULTIPLICATION; +pub const FP4_SQ_Y_CALC_OFFSET: usize = FP4_SQ_T5_CALC_OFFSET + + FP2_SUBTRACTION_TOTAL + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 2; +pub const FP4_SQ_TOTAL_COLUMNS: usize = FP4_SQ_Y_CALC_OFFSET + + FP2_SUBTRACTION_TOTAL + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 2; + +// Forbenius map Fp2 +/* + These trace offsets are for fp2 forbenius map (super::native::Fp2::forbenius_map). It needs 12 rows. + FP2_FORBENIUS_MAP_SELECTOR_OFFSET -> Selector to ensure that the input is same across all rows. Set 1 in all rows except last one. + FP2_FORBENIUS_MAP_POW_OFFSET -> offset where power is set. + FP2_FORBENIUS_MAP_DIV_OFFSET -> offset of integer division power/2. + FP2_FORBENIUS_MAP_REM_OFFSET -> offset of power%2. + T0 -> x1 * forbenius_constant + FP2_FORBENIUS_MAP_T0_CALC_OFFSET -> offset including 3 operations (multiplication, reduction of the result, range check of the result). + FP2_FORBENIUS_MAP_MUL_RES_ROW -> Selector indicating which row contains result of the multiplication. 
Set 1 on the 11th row. +*/ +pub const FP2_FORBENIUS_MAP_SELECTOR_OFFSET: usize = 0; +pub const FP2_FORBENIUS_MAP_INPUT_OFFSET: usize = FP2_FORBENIUS_MAP_SELECTOR_OFFSET + 1; +pub const FP2_FORBENIUS_MAP_POW_OFFSET: usize = FP2_FORBENIUS_MAP_INPUT_OFFSET + 24; +pub const FP2_FORBENIUS_MAP_DIV_OFFSET: usize = FP2_FORBENIUS_MAP_POW_OFFSET + 1; +pub const FP2_FORBENIUS_MAP_REM_OFFSET: usize = FP2_FORBENIUS_MAP_DIV_OFFSET + 1; +pub const FP2_FORBENIUS_MAP_T0_CALC_OFFSET: usize = FP2_FORBENIUS_MAP_REM_OFFSET + 1; +pub const FP2_FORBENIUS_MAP_MUL_RES_ROW: usize = FP2_FORBENIUS_MAP_T0_CALC_OFFSET + + FP_MULTIPLICATION_TOTAL_COLUMNS + + REDUCTION_TOTAL + + RANGE_CHECK_TOTAL; +pub const FP2_FORBENIUS_MAP_TOTAL_COLUMNS: usize = FP2_FORBENIUS_MAP_MUL_RES_ROW + 1; + +/// Fills the stark trace of fp2 addition. Inputs are 12*2 limbs each. Needs 1 row. +pub fn fill_trace_addition_fp2, const D: usize, const C: usize>( + trace: &mut Vec<[F; C]>, + x: &[[u32; 12]; 2], + y: &[[u32; 12]; 2], + row: usize, + start_col: usize, +) { + fill_trace_addition_fp(trace, &x[0], &y[0], row, start_col + FP2_ADDITION_0_OFFSET); + fill_trace_addition_fp(trace, &x[1], &y[1], row, start_col + FP2_ADDITION_1_OFFSET); +} + +/// Fills the stark trace of fp2 subtraction. Inputs are 12*2 limbs each. Needs 1 row. Assume x > y. +pub fn fill_trace_subtraction_fp2, const D: usize, const C: usize>( + trace: &mut Vec<[F; C]>, + x: &[[u32; 12]; 2], + y: &[[u32; 12]; 2], + row: usize, + start_col: usize, +) { + fill_trace_subtraction_fp( + trace, + &x[0], + &y[0], + row, + start_col + FP2_SUBTRACTION_0_OFFSET, + ); + fill_trace_subtraction_fp( + trace, + &x[1], + &y[1], + row, + start_col + FP2_SUBTRACTION_1_OFFSET, + ); +} + +/// Fills the stark trace of multiplication following long multiplication. Inputs are 12\*2 limbs and 1\*2 limbs respectively. Needs 1 row. 
+pub fn fill_trace_multiply_single_fp2< + F: RichField + Extendable, + const D: usize, + const C: usize, +>( + trace: &mut Vec<[F; C]>, + x: &[[u32; 12]; 2], + y: &[u32; 2], + row: usize, + start_col: usize, +) { + fill_trace_multiply_single_fp( + trace, + &x[0], + y[0], + row, + start_col + FP2_SUBTRACTION_0_OFFSET, + ); + fill_trace_multiply_single_fp( + trace, + &x[1], + y[1], + row, + start_col + FP2_SUBTRACTION_1_OFFSET, + ); +} + +/// Fills the stark trace of negation. Input is 12*2 limbs. Needs 1 row. In essence, it fills an addition trace with inputs as `x` and `-x`. +pub fn fill_trace_negate_fp2, const D: usize, const C: usize>( + trace: &mut Vec<[F; C]>, + x: &[[u32; 12]; 2], + row: usize, + start_col: usize, +) { + let minus_x: [[u32; 12]; 2] = (-Fp2([Fp(x[0].to_owned()), Fp(x[1].to_owned())])) + .0 + .iter() + .map(|x| x.0) + .collect::>() + .try_into() + .unwrap(); + fill_trace_addition_fp2(trace, x, &minus_x, row, start_col); +} + +/// Fills stark trace for fp2 multiplication. Inputs are 12*2 limbs each. Needs 12 rows. Sets addition and subtraction selectors to 1 only in 11th row, becuase that's where multiplication result is set. 
+pub fn generate_trace_fp2_mul, const D: usize, const C: usize>( + trace: &mut Vec<[F; C]>, + x: [[u32; 12]; 2], + y: [[u32; 12]; 2], + start_row: usize, + end_row: usize, + start_col: usize, +) { + let modulus = modulus(); + + for i in start_row..end_row + 1 { + trace[i][start_col + FP2_FP2_SELECTOR_OFFSET] = F::ONE; + assign_u32_in_series(trace, i, start_col + FP2_FP2_X_INPUT_OFFSET, &x[0]); + assign_u32_in_series(trace, i, start_col + FP2_FP2_X_INPUT_OFFSET + 12, &x[1]); + assign_u32_in_series(trace, i, start_col + FP2_FP2_Y_INPUT_OFFSET, &y[0]); + assign_u32_in_series(trace, i, start_col + FP2_FP2_Y_INPUT_OFFSET + 12, &y[1]); + } + trace[end_row][start_col + FP2_FP2_SELECTOR_OFFSET] = F::ZERO; + // filling trace for X0*Y0 - X1*Y1 + fill_multiplication_trace_no_mod_reduction( + trace, + &x[0], + &y[0], + start_row, + end_row, + start_col + X_0_Y_0_MULTIPLICATION_OFFSET, + ); + fill_multiplication_trace_no_mod_reduction( + trace, + &x[1], + &y[1], + start_row, + end_row, + start_col + X_1_Y_1_MULTIPLICATION_OFFSET, + ); + + let x0y0 = + get_u32_vec_from_literal_24(BigUint::new(x[0].to_vec()) * BigUint::new(y[0].to_vec())); + let modulus_sq = get_u32_vec_from_literal_24(modulus.clone() * modulus.clone()); + fill_addition_trace( + trace, + &x0y0, + &modulus_sq, + start_row + 11, + start_col + Z1_ADD_MODULUS_OFFSET, + ); + + let x0y0_add_modsq = + get_u32_vec_from_literal_24(BigUint::new(x0y0.to_vec()) + modulus.clone() * modulus); + let x1y1 = + get_u32_vec_from_literal_24(BigUint::new(x[1].to_vec()) * BigUint::new(y[1].to_vec())); + fill_subtraction_trace( + trace, + &x0y0_add_modsq, + &x1y1, + start_row + 11, + start_col + Z1_SUBTRACTION_OFFSET, + ); + + let x0y0_x1y1 = get_u32_vec_from_literal_24( + BigUint::new(x0y0_add_modsq.to_vec()) - BigUint::new(x1y1.to_vec()), + ); + let rem = fill_reduction_trace( + trace, + &x0y0_x1y1, + start_row, + end_row, + start_col + Z1_REDUCE_OFFSET, + ); + fill_range_check_trace(trace, &rem, start_row, start_col + 
Z1_RANGECHECK_OFFSET); + + // filling trace for X0*Y1 + X1*Y0 + fill_multiplication_trace_no_mod_reduction( + trace, + &x[0], + &y[1], + start_row, + end_row, + start_col + X_0_Y_1_MULTIPLICATION_OFFSET, + ); + fill_multiplication_trace_no_mod_reduction( + trace, + &x[1], + &y[0], + start_row, + end_row, + start_col + X_1_Y_0_MULTIPLICATION_OFFSET, + ); + + let x0y1 = + get_u32_vec_from_literal_24(BigUint::new(x[0].to_vec()) * BigUint::new(y[1].to_vec())); + let x1y0 = + get_u32_vec_from_literal_24(BigUint::new(x[1].to_vec()) * BigUint::new(y[0].to_vec())); + fill_addition_trace( + trace, + &x0y1, + &x1y0, + start_row + 11, + start_col + Z2_ADDITION_OFFSET, + ); + + let x0y1_x1y0 = + get_u32_vec_from_literal_24(BigUint::new(x0y1.to_vec()) + BigUint::new(x1y0.to_vec())); + let rem = fill_reduction_trace( + trace, + &x0y1_x1y0, + start_row, + end_row, + start_col + Z2_REDUCE_OFFSET, + ); + fill_range_check_trace(trace, &rem, start_row, start_col + Z2_RANGECHECK_OFFSET); +} + +/// Fill trace of fp2 fp multiplication. Inputs are 12*2 limbs and 12 limbs respectively. Needs 12 rows. 
+pub fn fill_trace_fp2_fp_mul, const D: usize, const C: usize>( + trace: &mut Vec<[F; C]>, + x: &[[u32; 12]; 2], + y: &[u32; 12], + start_row: usize, + end_row: usize, + start_col: usize, +) { + for i in start_row..end_row + 1 { + trace[i][start_col + FP2_FP_MUL_SELECTOR_OFFSET] = F::ONE; + assign_u32_in_series(trace, i, start_col + FP2_FP_X_INPUT_OFFSET, &x[0]); + assign_u32_in_series(trace, i, start_col + FP2_FP_X_INPUT_OFFSET + 12, &x[1]); + assign_u32_in_series(trace, i, start_col + FP2_FP_Y_INPUT_OFFSET, y); + } + trace[end_row][start_col + FP2_FP_MUL_SELECTOR_OFFSET] = F::ZERO; + fill_multiplication_trace_no_mod_reduction( + trace, + &x[0], + y, + start_row, + end_row, + start_col + X0_Y_MULTIPLICATION_OFFSET, + ); + let x0y = get_u32_vec_from_literal_24(BigUint::new(x[0].to_vec()) * BigUint::new(y.to_vec())); + let rem = fill_reduction_trace( + trace, + &x0y, + start_row, + end_row, + start_col + X0_Y_REDUCE_OFFSET, + ); + fill_range_check_trace(trace, &rem, start_row, start_col + X0_Y_RANGECHECK_OFFSET); + fill_multiplication_trace_no_mod_reduction( + trace, + &x[1], + y, + start_row, + end_row, + start_col + X1_Y_MULTIPLICATION_OFFSET, + ); + let x1y = get_u32_vec_from_literal_24(BigUint::new(x[1].to_vec()) * BigUint::new(y.to_vec())); + let rem = fill_reduction_trace( + trace, + &x1y, + start_row, + end_row, + start_col + X1_Y_REDUCE_OFFSET, + ); + fill_range_check_trace(trace, &rem, start_row, start_col + X1_Y_RANGECHECK_OFFSET); +} + +/// Fills trace of fp2 subtraction combined with reduction and range check. Inputs are 12*2 limbs each. Needs 1 row. Fills trace of adding field prime p to x first, and then the trace for subtraction with y. 
+pub fn fill_trace_subtraction_with_reduction< + F: RichField + Extendable, + const D: usize, + const C: usize, +>( + trace: &mut Vec<[F; C]>, + x: &[[u32; 12]; 2], + y: &[[u32; 12]; 2], + row: usize, + start_col: usize, +) { + let modulus = get_u32_vec_from_literal(modulus()); + fill_trace_addition_fp2(trace, x, &[modulus, modulus], row, start_col); + let x0_modulus = + get_u32_vec_from_literal(BigUint::new(x[0].to_vec()) + BigUint::new(modulus.to_vec())); + let x1_modulus = + get_u32_vec_from_literal(BigUint::new(x[1].to_vec()) + BigUint::new(modulus.to_vec())); + fill_trace_subtraction_fp2( + trace, + &[x0_modulus, x1_modulus], + y, + row, + start_col + FP2_ADDITION_TOTAL, + ); + let x0_y0 = + get_u32_vec_from_literal(BigUint::new(x0_modulus.to_vec()) - BigUint::new(y[0].to_vec())); + let x1_y1 = + get_u32_vec_from_literal(BigUint::new(x1_modulus.to_vec()) - BigUint::new(y[1].to_vec())); + let rem = fill_trace_reduce_single( + trace, + &x0_y0, + row, + start_col + FP2_ADDITION_TOTAL + FP2_SUBTRACTION_TOTAL, + ); + fill_range_check_trace( + trace, + &rem, + row, + start_col + FP2_ADDITION_TOTAL + FP2_SUBTRACTION_TOTAL + FP_SINGLE_REDUCE_TOTAL, + ); + let rem = fill_trace_reduce_single( + trace, + &x1_y1, + row, + start_col + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + FP_SINGLE_REDUCE_TOTAL + + RANGE_CHECK_TOTAL, + ); + fill_range_check_trace( + trace, + &rem, + row, + start_col + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + FP_SINGLE_REDUCE_TOTAL * 2 + + RANGE_CHECK_TOTAL, + ); +} + +/// Fills trace of [multiply_by_b](super::native::Fp2::multiply_by_B) function. Input is 12*2 limbs. Needs 12 rows. Sets addition and subtraction selectors to 1 only in 11th row, becuase that's where multiplication result is set. 
+pub fn fill_multiply_by_b_trace, const D: usize, const C: usize>( + trace: &mut Vec<[F; C]>, + x: &[[u32; 12]; 2], + start_row: usize, + end_row: usize, + start_col: usize, +) { + for i in start_row..end_row + 1 { + trace[i][start_col + MULTIPLY_B_SELECTOR_OFFSET] = F::ONE; + assign_u32_in_series(trace, i, start_col + MULTIPLY_B_X_OFFSET, &x[0]); + assign_u32_in_series(trace, i, start_col + MULTIPLY_B_X_OFFSET + 12, &x[1]); + } + trace[end_row][start_col + MULTIPLY_B_SELECTOR_OFFSET] = F::ZERO; + let y = Fp::get_fp_from_biguint(BigUint::from(4 as u32)).0; + fill_multiplication_trace_no_mod_reduction( + trace, + &x[0], + &y, + start_row, + end_row, + start_col + MULTIPLY_B_X0_B_MUL_OFFSET, + ); + fill_multiplication_trace_no_mod_reduction( + trace, + &x[1], + &y, + start_row, + end_row, + start_col + MULTIPLY_B_X1_B_MUL_OFFSET, + ); + let x0y = get_u32_vec_from_literal_24(BigUint::new(x[0].to_vec()) * BigUint::new(y.to_vec())); + let x1y = get_u32_vec_from_literal_24(BigUint::new(x[1].to_vec()) * BigUint::new(y.to_vec())); + let modulus = modulus(); + let modulus_sq = get_u32_vec_from_literal_24(modulus.clone() * modulus.clone()); + fill_addition_trace( + trace, + &x0y, + &modulus_sq, + start_row + 11, + start_col + MULTIPLY_B_ADD_MODSQ_OFFSET, + ); + let x0y_add_modsq = + get_u32_vec_from_literal_24(BigUint::new(x0y.to_vec()) + BigUint::new(modulus_sq.to_vec())); + fill_subtraction_trace( + trace, + &x0y_add_modsq, + &x1y, + start_row + 11, + start_col + MULTIPLY_B_SUB_OFFSET, + ); + let x0y_x1y = get_u32_vec_from_literal_24( + BigUint::new(x0y_add_modsq.to_vec()) - BigUint::new(x1y.to_vec()), + ); + let rem = fill_reduction_trace( + trace, + &x0y_x1y, + start_row, + end_row, + start_col + MULTIPLY_B_Z0_REDUCE_OFFSET, + ); + fill_range_check_trace( + trace, + &rem, + start_row, + start_col + MULTIPLY_B_Z0_RANGECHECK_OFFSET, + ); + + fill_addition_trace( + trace, + &x0y, + &x1y, + start_row + 11, + start_col + MULTIPLY_B_ADD_OFFSET, + ); + let x0y_x1y = + 
get_u32_vec_from_literal_24(BigUint::new(x0y.to_vec()) + BigUint::new(x1y.to_vec())); + let rem = fill_reduction_trace( + trace, + &x0y_x1y, + start_row, + end_row, + start_col + MULTIPLY_B_Z1_REDUCE_OFFSET, + ); + fill_range_check_trace( + trace, + &rem, + start_row, + start_col + MULTIPLY_B_Z1_RANGECHECK_OFFSET, + ); +} + +/// Fills trace of fp2 addition combined with reduction and range check. Inputs are 12*2 limbs each. Needs 1 row. +pub fn fill_trace_addition_with_reduction< + F: RichField + Extendable, + const D: usize, + const C: usize, +>( + trace: &mut Vec<[F; C]>, + x: &[[u32; 12]; 2], + y: &[[u32; 12]; 2], + row: usize, + start_col: usize, +) { + fill_trace_addition_fp2(trace, x, y, row, start_col); + let x0_y0 = get_u32_vec_from_literal(BigUint::new(x[0].to_vec()) + BigUint::new(y[0].to_vec())); + let x1_y1 = get_u32_vec_from_literal(BigUint::new(x[1].to_vec()) + BigUint::new(y[1].to_vec())); + let rem = fill_trace_reduce_single(trace, &x0_y0, row, start_col + FP2_ADDITION_TOTAL); + fill_range_check_trace( + trace, + &rem, + row, + start_col + FP2_ADDITION_TOTAL + FP_SINGLE_REDUCE_TOTAL, + ); + let rem = fill_trace_reduce_single( + trace, + &x1_y1, + row, + start_col + FP2_ADDITION_TOTAL + FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL, + ); + fill_range_check_trace( + trace, + &rem, + row, + start_col + FP2_ADDITION_TOTAL + FP_SINGLE_REDUCE_TOTAL * 2 + RANGE_CHECK_TOTAL, + ); +} + +/// Fills trace of [mul_by_nonresidue](super::native::Fp2::mul_by_nonresidue) function. Input is 12*2 limbs. Needs 1 row. 
+pub fn fill_trace_non_residue_multiplication< + F: RichField + Extendable, + const D: usize, + const C: usize, +>( + trace: &mut Vec<[F; C]>, + x: &[[u32; 12]; 2], + row: usize, + start_col: usize, +) { + trace[row][start_col + FP2_NON_RESIDUE_MUL_CHECK_OFFSET] = F::ONE; + assign_u32_in_series( + trace, + row, + start_col + FP2_NON_RESIDUE_MUL_INPUT_OFFSET, + &x.concat(), + ); + fill_trace_addition_fp( + trace, + &x[0], + &get_u32_vec_from_literal(modulus()), + row, + start_col + FP2_NON_RESIDUE_MUL_C0_C1_SUB_OFFSET, + ); + let add_modulus = get_u32_vec_from_literal(BigUint::new(x[0].to_vec()) + modulus()); + fill_trace_subtraction_fp( + trace, + &add_modulus, + &x[1], + row, + start_col + FP2_NON_RESIDUE_MUL_C0_C1_SUB_OFFSET + FP_ADDITION_TOTAL, + ); + let c0_c1_sub = + get_u32_vec_from_literal(BigUint::new(add_modulus.to_vec()) - BigUint::new(x[1].to_vec())); + let rem = fill_trace_reduce_single( + trace, + &c0_c1_sub, + row, + start_col + FP2_NON_RESIDUE_MUL_Z0_REDUCE_OFFSET, + ); + fill_range_check_trace( + trace, + &rem, + row, + start_col + FP2_NON_RESIDUE_MUL_Z0_RANGECHECK_OFFSET, + ); + fill_trace_addition_fp( + trace, + &x[0], + &x[1], + row, + start_col + FP2_NON_RESIDUE_MUL_C0_C1_ADD_OFFSET, + ); + let c0_c1_add = + get_u32_vec_from_literal(BigUint::new(x[0].to_vec()) + BigUint::new(x[1].to_vec())); + let rem = fill_trace_reduce_single( + trace, + &c0_c1_add, + row, + start_col + FP2_NON_RESIDUE_MUL_Z1_REDUCE_OFFSET, + ); + fill_range_check_trace( + trace, + &rem, + row, + start_col + FP2_NON_RESIDUE_MUL_Z1_RANGECHECK_OFFSET, + ); +} + +/// Fills trace of [fp4_sqaure](super::native::fp4_square) function. Inputs are 12*2 limbs each. Needs 12 rows. 
+pub fn fill_trace_fp4_sq, const D: usize, const C: usize>( + trace: &mut Vec<[F; C]>, + x: &Fp2, + y: &Fp2, + start_row: usize, + end_row: usize, + start_col: usize, +) { + for row in start_row..end_row + 1 { + assign_u32_in_series( + trace, + row, + start_col + FP4_SQ_INPUT_X_OFFSET, + &x.get_u32_slice().concat(), + ); + assign_u32_in_series( + trace, + row, + start_col + FP4_SQ_INPUT_Y_OFFSET, + &y.get_u32_slice().concat(), + ); + trace[row][start_col + FP4_SQ_SELECTOR_OFFSET] = F::ONE; + } + trace[end_row][start_col + FP4_SQ_SELECTOR_OFFSET] = F::ZERO; + + let t0 = (*x) * (*x); + generate_trace_fp2_mul( + trace, + x.get_u32_slice(), + x.get_u32_slice(), + start_row, + end_row, + start_col + FP4_SQ_T0_CALC_OFFSET, + ); + + let t1 = (*y) * (*y); + generate_trace_fp2_mul( + trace, + y.get_u32_slice(), + y.get_u32_slice(), + start_row, + end_row, + start_col + FP4_SQ_T1_CALC_OFFSET, + ); + + let t2 = t1.mul_by_nonresidue(); + for row in start_row..end_row + 1 { + fill_trace_non_residue_multiplication( + trace, + &t1.get_u32_slice(), + row, + start_col + FP4_SQ_T2_CALC_OFFSET, + ); + } + + let _x = t2 + t0; + for row in start_row..end_row + 1 { + fill_trace_addition_with_reduction( + trace, + &t2.get_u32_slice(), + &t0.get_u32_slice(), + row, + start_col + FP4_SQ_X_CALC_OFFSET, + ); + } + + let t3 = (*x) + (*y); + for row in start_row..end_row + 1 { + fill_trace_addition_with_reduction( + trace, + &x.get_u32_slice(), + &y.get_u32_slice(), + row, + start_col + FP4_SQ_T3_CALC_OFFSET, + ); + } + + let t4 = t3 * t3; + generate_trace_fp2_mul( + trace, + t3.get_u32_slice(), + t3.get_u32_slice(), + start_row, + end_row, + start_col + FP4_SQ_T4_CALC_OFFSET, + ); + + let t5 = t4 - t0; + for row in start_row..end_row + 1 { + fill_trace_subtraction_with_reduction( + trace, + &t4.get_u32_slice(), + &t0.get_u32_slice(), + row, + start_col + FP4_SQ_T5_CALC_OFFSET, + ); + } + + let _y = t5 - t1; + for row in start_row..end_row + 1 { + fill_trace_subtraction_with_reduction( + 
trace, + &t5.get_u32_slice(), + &t1.get_u32_slice(), + row, + start_col + FP4_SQ_Y_CALC_OFFSET, + ); + } +} + +/// Fills trace of [forbenius_map](super::native::Fp2::forbenius_map) function. Input is 12*2 limbs and usize. Needs 12 rows. +pub fn fill_trace_fp2_forbenius_map< + F: RichField + Extendable, + const D: usize, + const C: usize, +>( + trace: &mut Vec<[F; C]>, + x: &Fp2, + pow: usize, + start_row: usize, + end_row: usize, + start_col: usize, +) { + let div = pow / 2; + let rem = pow % 2; + for row in start_row..end_row + 1 { + assign_u32_in_series( + trace, + row, + start_col + FP2_FORBENIUS_MAP_INPUT_OFFSET, + &x.get_u32_slice().concat(), + ); + trace[row][start_col + FP2_FORBENIUS_MAP_SELECTOR_OFFSET] = F::ONE; + trace[row][start_col + FP2_FORBENIUS_MAP_POW_OFFSET] = F::from_canonical_usize(pow); + trace[row][start_col + FP2_FORBENIUS_MAP_DIV_OFFSET] = F::from_canonical_usize(div); + trace[row][start_col + FP2_FORBENIUS_MAP_REM_OFFSET] = F::from_canonical_usize(rem); + } + trace[end_row][start_col + FP2_FORBENIUS_MAP_SELECTOR_OFFSET] = F::ZERO; + let forbenius_coefficients = Fp2::forbenius_coefficients(); + fill_multiplication_trace_no_mod_reduction( + trace, + &x.0[1].0, + &forbenius_coefficients[rem].0, + start_row, + end_row, + start_col + FP2_FORBENIUS_MAP_T0_CALC_OFFSET, + ); + trace[start_row + 11][start_col + FP2_FORBENIUS_MAP_MUL_RES_ROW] = F::ONE; + let x_y = + get_u32_vec_from_literal_24(x.0[1].to_biguint() * forbenius_coefficients[rem].to_biguint()); + let res = fill_reduction_trace( + trace, + &x_y, + start_row, + end_row, + start_col + FP2_FORBENIUS_MAP_T0_CALC_OFFSET + FP_MULTIPLICATION_TOTAL_COLUMNS, + ); + for row in start_row..end_row + 1 { + fill_range_check_trace( + trace, + &res, + row, + start_col + + FP2_FORBENIUS_MAP_T0_CALC_OFFSET + + FP_MULTIPLICATION_TOTAL_COLUMNS + + REDUCTION_TOTAL, + ); + } + let res = Fp2([x.0[0], Fp(res)]); + assert_eq!(res, x.forbenius_map(pow)); +} + +/// Constraints fp2 addition. 
In essence, constraints two Fp addititons. +pub fn add_addition_fp2_constraints< + F: RichField + Extendable, + const D: usize, + FE, + P, + const D2: usize, +>( + local_values: &[P], + yield_constr: &mut ConstraintConsumer

, + start_col: usize, // Starting column of your multiplication trace + bit_selector: Option

, +) where + FE: FieldExtension, + P: PackedField, +{ + add_addition_fp_constraints( + local_values, + yield_constr, + start_col + FP2_ADDITION_0_OFFSET, + bit_selector, + ); + add_addition_fp_constraints( + local_values, + yield_constr, + start_col + FP2_ADDITION_1_OFFSET, + bit_selector, + ); +} + +pub fn add_addition_fp2_constraints_ext_circuit, const D: usize>( + builder: &mut CircuitBuilder, + yield_constr: &mut RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + start_col: usize, + bit_selector: Option>, +) { + add_addition_fp_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP2_ADDITION_0_OFFSET, + bit_selector, + ); + add_addition_fp_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP2_ADDITION_1_OFFSET, + bit_selector, + ); +} + +/// Constraints fp2 subtraction. In essence, constraints two Fp subtractions. +pub fn add_subtraction_fp2_constraints< + F: RichField + Extendable, + const D: usize, + FE, + P, + const D2: usize, +>( + local_values: &[P], + yield_constr: &mut ConstraintConsumer

, + start_col: usize, // Starting column of your multiplication trace + bit_selector: Option

, +) where + FE: FieldExtension, + P: PackedField, +{ + add_subtraction_fp_constraints( + local_values, + yield_constr, + start_col + FP2_SUBTRACTION_0_OFFSET, + bit_selector, + ); + add_subtraction_fp_constraints( + local_values, + yield_constr, + start_col + FP2_SUBTRACTION_1_OFFSET, + bit_selector, + ); +} + +pub fn add_subtraction_fp2_constraints_ext_circuit, const D: usize>( + builder: &mut CircuitBuilder, + yield_constr: &mut RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + start_col: usize, + bit_selector: Option>, +) { + add_subtraction_fp_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP2_SUBTRACTION_0_OFFSET, + bit_selector, + ); + add_subtraction_fp_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP2_SUBTRACTION_1_OFFSET, + bit_selector, + ); +} + +/// Constraints fp2 multiply by single. In essence, constraints two Fp multiply by single. +pub fn add_fp2_single_multiply_constraints< + F: RichField + Extendable, + const D: usize, + FE, + P, + const D2: usize, +>( + local_values: &[P], + yield_constr: &mut ConstraintConsumer

, + start_col: usize, // Starting column of your multiplication trace + bit_selector: Option

, +) where + FE: FieldExtension, + P: PackedField, +{ + add_fp_single_multiply_constraints( + local_values, + yield_constr, + start_col + FP2_MULTIPLY_SINGLE_0_OFFSET, + bit_selector, + ); + add_fp_single_multiply_constraints( + local_values, + yield_constr, + start_col + FP2_MULTIPLY_SINGLE_1_OFFSET, + bit_selector, + ); +} + +pub fn add_fp2_single_multiply_constraints_ext_circuit< + F: RichField + Extendable, + const D: usize, +>( + builder: &mut CircuitBuilder, + yield_constr: &mut RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + start_col: usize, + bit_selector: Option>, +) { + add_fp_single_multiply_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP2_MULTIPLY_SINGLE_0_OFFSET, + bit_selector, + ); + add_fp_single_multiply_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP2_MULTIPLY_SINGLE_1_OFFSET, + bit_selector, + ); +} + +/// Constraints fp2 negation. First add constraints for fp2 addition. Followed by constraining the result of the addition with bls12-381 field prime p. +pub fn add_negate_fp2_constraints< + F: RichField + Extendable, + const D: usize, + FE, + P, + const D2: usize, +>( + local_values: &[P], + yield_constr: &mut ConstraintConsumer

, + start_col: usize, // Starting column of your multiplication trace + bit_selector: Option

, +) where + FE: FieldExtension, + P: PackedField, +{ + add_addition_fp2_constraints(local_values, yield_constr, start_col, bit_selector); + let bit_selector_val = bit_selector.unwrap_or(P::ONES); + + let mod_u32 = get_u32_vec_from_literal(modulus()); + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP2_ADDITION_0_OFFSET + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + FP2_ADDITION_0_OFFSET + FP_ADDITION_SUM_OFFSET + i] + - FE::from_canonical_u32(mod_u32[i])), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP2_ADDITION_1_OFFSET + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + FP2_ADDITION_1_OFFSET + FP_ADDITION_SUM_OFFSET + i] + - FE::from_canonical_u32(mod_u32[i])), + ); + } +} + +pub fn add_negate_fp2_constraints_ext_circuit, const D: usize>( + builder: &mut CircuitBuilder, + yield_constr: &mut RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + start_col: usize, + bit_selector: Option>, +) { + let bit_selector_val = bit_selector.unwrap_or(builder.constant_extension(F::Extension::ONE)); + + add_addition_fp2_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col, + bit_selector, + ); + let mod_u32 = get_u32_vec_from_literal(modulus()); + for i in 0..12 { + let lc = builder.constant_extension(F::Extension::from_canonical_u32(mod_u32[i])); + + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + FP2_ADDITION_0_OFFSET + FP_ADDITION_CHECK_OFFSET], + ); + let sub_tmp1 = builder.sub_extension( + local_values[start_col + FP2_ADDITION_0_OFFSET + FP_ADDITION_SUM_OFFSET + i], + lc, + ); + + let c1 = builder.mul_extension(mul_tmp1, sub_tmp1); + yield_constr.constraint(builder, c1); + + let mul_tmp2 = builder.mul_extension( + bit_selector_val, + local_values[start_col + FP2_ADDITION_1_OFFSET + FP_ADDITION_CHECK_OFFSET], + ); + let sub_tmp2 = builder.sub_extension( + local_values[start_col + 
FP2_ADDITION_1_OFFSET + FP_ADDITION_SUM_OFFSET + i], + lc, + ); + + let c2 = builder.mul_extension(mul_tmp2, sub_tmp2); + yield_constr.constraint(builder, c2); + } +} + +/// Constraints fp2 multiplication. +/// +/// Constraints inputs across this and next row, wherever selector is set to on. Constraints x0\*y0, x1\*y1, x0\*y1, x1\*y0 multiplication operations. Then constraints the x0\*y0 + p^2 operation, followed by x0\*y0 + p^2 - x1\*y1 operation. Constraints the reduction of result of the previous subtraction, followed by a range check operation. Constraints x0\*y1 + x1\*y0. Constraints the reduction of result of the previous addition, followed by a range check operation. +pub fn add_fp2_mul_constraints< + F: RichField + Extendable, + const D: usize, + FE, + P, + const D2: usize, +>( + local_values: &[P], + next_values: &[P], + yield_constr: &mut ConstraintConsumer

, + start_col: usize, // Starting column of your multiplication trace + bit_selector: Option

, +) where + FE: FieldExtension, + P: PackedField, +{ + // for i in 0..12 { + // yield_constr.constraint_transition(local_values[start_col + X_0_Y_0_MULTIPLICATION_OFFSET + X_INPUT_OFFSET + i]) + // } + let bit_selector_val = bit_selector.unwrap_or(P::ONES); + + for i in 0..24 { + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + FP2_FP2_X_INPUT_OFFSET + i] + - next_values[start_col + FP2_FP2_X_INPUT_OFFSET + i]), + ); + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + FP2_FP2_Y_INPUT_OFFSET + i] + - next_values[start_col + FP2_FP2_Y_INPUT_OFFSET + i]), + ); + } + + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + X_0_Y_0_MULTIPLICATION_OFFSET + X_INPUT_OFFSET + i] + - local_values[start_col + FP2_FP2_X_INPUT_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + X_0_Y_0_MULTIPLICATION_OFFSET + Y_INPUT_OFFSET + i] + - local_values[start_col + FP2_FP2_Y_INPUT_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + X_0_Y_1_MULTIPLICATION_OFFSET + X_INPUT_OFFSET + i] + - local_values[start_col + FP2_FP2_X_INPUT_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + X_0_Y_1_MULTIPLICATION_OFFSET + Y_INPUT_OFFSET + i] + - local_values[start_col + FP2_FP2_Y_INPUT_OFFSET + i + 12]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + X_1_Y_0_MULTIPLICATION_OFFSET + X_INPUT_OFFSET + i] + - local_values[start_col + FP2_FP2_X_INPUT_OFFSET + i + 
12]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + X_1_Y_0_MULTIPLICATION_OFFSET + Y_INPUT_OFFSET + i] + - local_values[start_col + FP2_FP2_Y_INPUT_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + X_1_Y_1_MULTIPLICATION_OFFSET + X_INPUT_OFFSET + i] + - local_values[start_col + FP2_FP2_X_INPUT_OFFSET + i + 12]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + X_1_Y_1_MULTIPLICATION_OFFSET + Y_INPUT_OFFSET + i] + - local_values[start_col + FP2_FP2_Y_INPUT_OFFSET + i + 12]), + ); + } + + // constrain X_0*Y_0 + add_multiplication_constraints( + local_values, + next_values, + yield_constr, + start_col + X_0_Y_0_MULTIPLICATION_OFFSET, + bit_selector, + ); + + // constrain X_1*Y_1 + add_multiplication_constraints( + local_values, + next_values, + yield_constr, + start_col + X_1_Y_1_MULTIPLICATION_OFFSET, + bit_selector, + ); + + // constrain X0*Y0 with X0*Y0 + modulus^2 + for i in 0..24 { + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + Z1_ADD_MODULUS_OFFSET + ADDITION_CHECK_OFFSET] + * (local_values[start_col + Z1_ADD_MODULUS_OFFSET + ADDITION_X_OFFSET + i] + - local_values[start_col + X_0_Y_0_MULTIPLICATION_OFFSET + SUM_OFFSET + i]), + ); + } + + // constrain modulus^2 with X0*Y0 + modulus^2 + let modulus = modulus(); + let modulus_sq_u32 = get_u32_vec_from_literal_24(modulus.clone() * modulus); + for i in 0..24 { + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + Z1_ADD_MODULUS_OFFSET + ADDITION_CHECK_OFFSET] + * (local_values[start_col + Z1_ADD_MODULUS_OFFSET + ADDITION_Y_OFFSET + i] + - FE::from_canonical_u32(modulus_sq_u32[i])), + ); + } + + // constrain X0*Y0 + modulus^2 + add_addition_constraints( + local_values, + 
yield_constr, + start_col + Z1_ADD_MODULUS_OFFSET, + bit_selector, + ); + + // constrain X0*Y0 + modulus^2 with X0*Y0 + modulus^2 - X1Y1 + for i in 0..24 { + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + Z1_SUBTRACTION_OFFSET + SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + Z1_SUBTRACTION_OFFSET + SUBTRACTION_X_OFFSET + i] + - local_values[start_col + Z1_ADD_MODULUS_OFFSET + ADDITION_SUM_OFFSET + i]), + ); + } + + // constrain X1*Y1 + modulus^2 with X0*Y0 + modulus^2 - X1Y1 + for i in 0..24 { + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + Z1_SUBTRACTION_OFFSET + SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + Z1_SUBTRACTION_OFFSET + SUBTRACTION_Y_OFFSET + i] + - local_values[start_col + X_1_Y_1_MULTIPLICATION_OFFSET + SUM_OFFSET + i]), + ); + } + + // constrain X0*Y0 + modulus^2 - X1Y1 + add_subtraction_constraints( + local_values, + yield_constr, + start_col + Z1_SUBTRACTION_OFFSET, + bit_selector, + ); + + // constrain X0*Y0 + modulus^2 - X1Y1 with reduction + for i in 0..24 { + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + Z1_SUBTRACTION_OFFSET + SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + Z1_SUBTRACTION_OFFSET + SUBTRACTION_DIFF_OFFSET + i] + - local_values[start_col + Z1_REDUCE_OFFSET + REDUCE_X_OFFSET + i]), + ); + } + + // constrain reduction + add_reduce_constraints( + local_values, + next_values, + yield_constr, + start_col + Z1_REDUCE_OFFSET, + start_col + FP2_FP2_SELECTOR_OFFSET, + bit_selector, + ); + add_range_check_constraints( + local_values, + yield_constr, + start_col + Z1_RANGECHECK_OFFSET, + bit_selector, + ); + + // constrain X_1*Y_0 + add_multiplication_constraints( + local_values, + next_values, + yield_constr, + start_col + X_0_Y_1_MULTIPLICATION_OFFSET, + bit_selector, + ); + + // constrain X_1*Y_0 + add_multiplication_constraints( + local_values, + next_values, + yield_constr, + start_col + 
X_1_Y_0_MULTIPLICATION_OFFSET, + bit_selector, + ); + + // constrain X0*Y1 with X0*Y1 + X1*Y0 + for i in 0..24 { + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + Z2_ADDITION_OFFSET + ADDITION_CHECK_OFFSET] + * (local_values[start_col + Z2_ADDITION_OFFSET + ADDITION_X_OFFSET + i] + - local_values[start_col + X_0_Y_1_MULTIPLICATION_OFFSET + SUM_OFFSET + i]), + ); + } + + // constrain X1*Y0 with X0*Y1 + X1*Y0 + for i in 0..24 { + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + Z2_ADDITION_OFFSET + ADDITION_CHECK_OFFSET] + * (local_values[start_col + Z2_ADDITION_OFFSET + ADDITION_Y_OFFSET + i] + - local_values[start_col + X_1_Y_0_MULTIPLICATION_OFFSET + SUM_OFFSET + i]), + ); + } + + // constrain X0*Y1 + X1*Y0 + add_addition_constraints( + local_values, + yield_constr, + start_col + Z2_ADDITION_OFFSET, + bit_selector, + ); + + // constrain X0*Y1 + X1*Y0 with reduction + for i in 0..24 { + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + Z2_ADDITION_OFFSET + ADDITION_CHECK_OFFSET] + * (local_values[start_col + Z2_ADDITION_OFFSET + ADDITION_SUM_OFFSET + i] + - local_values[start_col + Z2_REDUCE_OFFSET + REDUCE_X_OFFSET + i]), + ); + } + + // constrain reduction + add_reduce_constraints( + local_values, + next_values, + yield_constr, + start_col + Z2_REDUCE_OFFSET, + start_col + FP2_FP2_SELECTOR_OFFSET, + bit_selector, + ); + add_range_check_constraints( + local_values, + yield_constr, + start_col + Z2_RANGECHECK_OFFSET, + bit_selector, + ); +} + +pub fn add_fp2_mul_constraints_ext_circuit, const D: usize>( + builder: &mut CircuitBuilder, + yield_constr: &mut RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + next_values: &[ExtensionTarget], + start_col: usize, + bit_selector: Option>, +) { + // let constant = builder.constant_extension(F::Extension::from_canonical_u64(1<<32)); + let bit_selector_val = 
bit_selector.unwrap_or(builder.constant_extension(F::Extension::ONE)); + + for i in 0..24 { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + FP2_FP2_SELECTOR_OFFSET], + ); + let sub_tmp1 = builder.sub_extension( + local_values[start_col + FP2_FP2_X_INPUT_OFFSET + i], + next_values[start_col + FP2_FP2_X_INPUT_OFFSET + i], + ); + + let c1 = builder.mul_extension(mul_tmp1, sub_tmp1); + yield_constr.constraint_transition(builder, c1); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + FP2_FP2_Y_INPUT_OFFSET + i], + next_values[start_col + FP2_FP2_Y_INPUT_OFFSET + i], + ); + + let c2 = builder.mul_extension(mul_tmp1, sub_tmp2); + yield_constr.constraint_transition(builder, c2); + } + + for i in 0..12 { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + FP2_FP2_SELECTOR_OFFSET], + ); + + let sub_tmp1 = builder.sub_extension( + local_values[start_col + X_0_Y_0_MULTIPLICATION_OFFSET + X_INPUT_OFFSET + i], + local_values[start_col + FP2_FP2_X_INPUT_OFFSET + i], + ); + let c1 = builder.mul_extension(mul_tmp1, sub_tmp1); + yield_constr.constraint(builder, c1); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + X_0_Y_0_MULTIPLICATION_OFFSET + Y_INPUT_OFFSET + i], + local_values[start_col + FP2_FP2_Y_INPUT_OFFSET + i], + ); + let c2 = builder.mul_extension(mul_tmp1, sub_tmp2); + yield_constr.constraint(builder, c2); + + let sub_tmp3 = builder.sub_extension( + local_values[start_col + X_0_Y_1_MULTIPLICATION_OFFSET + X_INPUT_OFFSET + i], + local_values[start_col + FP2_FP2_X_INPUT_OFFSET + i], + ); + let c3 = builder.mul_extension(mul_tmp1, sub_tmp3); + yield_constr.constraint(builder, c3); + + let sub_tmp4 = builder.sub_extension( + local_values[start_col + X_0_Y_1_MULTIPLICATION_OFFSET + Y_INPUT_OFFSET + i], + local_values[start_col + FP2_FP2_Y_INPUT_OFFSET + i + 12], + ); + let c4 = builder.mul_extension(mul_tmp1, sub_tmp4); + yield_constr.constraint(builder, c4); + + 
let sub_tmp5 = builder.sub_extension( + local_values[start_col + X_1_Y_0_MULTIPLICATION_OFFSET + X_INPUT_OFFSET + i], + local_values[start_col + FP2_FP2_X_INPUT_OFFSET + i + 12], + ); + let c5 = builder.mul_extension(mul_tmp1, sub_tmp5); + yield_constr.constraint(builder, c5); + + let sub_tmp6 = builder.sub_extension( + local_values[start_col + X_1_Y_0_MULTIPLICATION_OFFSET + Y_INPUT_OFFSET + i], + local_values[start_col + FP2_FP2_Y_INPUT_OFFSET + i], + ); + let c6 = builder.mul_extension(mul_tmp1, sub_tmp6); + yield_constr.constraint(builder, c6); + + let sub_tmp7 = builder.sub_extension( + local_values[start_col + X_1_Y_1_MULTIPLICATION_OFFSET + X_INPUT_OFFSET + i], + local_values[start_col + FP2_FP2_X_INPUT_OFFSET + i + 12], + ); + let c7 = builder.mul_extension(mul_tmp1, sub_tmp7); + yield_constr.constraint(builder, c7); + + let sub_tmp8 = builder.sub_extension( + local_values[start_col + X_1_Y_1_MULTIPLICATION_OFFSET + Y_INPUT_OFFSET + i], + local_values[start_col + FP2_FP2_Y_INPUT_OFFSET + i + 12], + ); + let c8 = builder.mul_extension(mul_tmp1, sub_tmp8); + yield_constr.constraint(builder, c8); + } + + add_multiplication_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + X_0_Y_0_MULTIPLICATION_OFFSET, + bit_selector, + ); + add_multiplication_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + X_1_Y_1_MULTIPLICATION_OFFSET, + bit_selector, + ); + + for i in 0..24 { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + Z1_ADD_MODULUS_OFFSET + ADDITION_CHECK_OFFSET], + ); + let sub_tmp1 = builder.sub_extension( + local_values[start_col + Z1_ADD_MODULUS_OFFSET + ADDITION_X_OFFSET + i], + local_values[start_col + X_0_Y_0_MULTIPLICATION_OFFSET + SUM_OFFSET + i], + ); + + let c = builder.mul_extension(mul_tmp1, sub_tmp1); + yield_constr.constraint_transition(builder, c); + } + + let modulus = modulus(); + let modulus_sq_u32 = 
get_u32_vec_from_literal_24(modulus.clone() * modulus); + for i in 0..24 { + let lc = builder.constant_extension(F::Extension::from_canonical_u32(modulus_sq_u32[i])); + + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + Z1_ADD_MODULUS_OFFSET + ADDITION_CHECK_OFFSET], + ); + let sub_tmp1 = builder.sub_extension( + local_values[start_col + Z1_ADD_MODULUS_OFFSET + ADDITION_Y_OFFSET + i], + lc, + ); + + let c = builder.mul_extension(mul_tmp1, sub_tmp1); + yield_constr.constraint_transition(builder, c); + } + + add_addition_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + Z1_ADD_MODULUS_OFFSET, + bit_selector, + ); + + for i in 0..24 { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + Z1_SUBTRACTION_OFFSET + SUBTRACTION_CHECK_OFFSET], + ); + let sub_tmp1 = builder.sub_extension( + local_values[start_col + Z1_SUBTRACTION_OFFSET + SUBTRACTION_X_OFFSET + i], + local_values[start_col + Z1_ADD_MODULUS_OFFSET + ADDITION_SUM_OFFSET + i], + ); + + let c = builder.mul_extension(mul_tmp1, sub_tmp1); + yield_constr.constraint_transition(builder, c); + } + + for i in 0..24 { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + Z1_SUBTRACTION_OFFSET + SUBTRACTION_CHECK_OFFSET], + ); + let sub_tmp1 = builder.sub_extension( + local_values[start_col + Z1_SUBTRACTION_OFFSET + SUBTRACTION_Y_OFFSET + i], + local_values[start_col + X_1_Y_1_MULTIPLICATION_OFFSET + SUM_OFFSET + i], + ); + + let c = builder.mul_extension(mul_tmp1, sub_tmp1); + yield_constr.constraint_transition(builder, c); + } + + add_subtraction_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + Z1_SUBTRACTION_OFFSET, + bit_selector, + ); + + for i in 0..24 { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + Z1_SUBTRACTION_OFFSET + SUBTRACTION_CHECK_OFFSET], + ); + let sub_tmp1 = builder.sub_extension( + 
local_values[start_col + Z1_SUBTRACTION_OFFSET + SUBTRACTION_DIFF_OFFSET + i], + local_values[start_col + Z1_REDUCE_OFFSET + REDUCE_X_OFFSET + i], + ); + + let c = builder.mul_extension(mul_tmp1, sub_tmp1); + yield_constr.constraint_transition(builder, c); + } + + add_reduce_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + Z1_REDUCE_OFFSET, + start_col + FP2_FP2_SELECTOR_OFFSET, + bit_selector, + ); + + add_range_check_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + Z1_RANGECHECK_OFFSET, + bit_selector, + ); + + add_multiplication_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + X_0_Y_1_MULTIPLICATION_OFFSET, + bit_selector, + ); + + add_multiplication_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + X_1_Y_0_MULTIPLICATION_OFFSET, + bit_selector, + ); + + for i in 0..24 { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + Z2_ADDITION_OFFSET + ADDITION_CHECK_OFFSET], + ); + let sub_tmp1 = builder.sub_extension( + local_values[start_col + Z2_ADDITION_OFFSET + ADDITION_X_OFFSET + i], + local_values[start_col + X_0_Y_1_MULTIPLICATION_OFFSET + SUM_OFFSET + i], + ); + + let c = builder.mul_extension(mul_tmp1, sub_tmp1); + yield_constr.constraint_transition(builder, c); + } + + for i in 0..24 { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + Z2_ADDITION_OFFSET + ADDITION_CHECK_OFFSET], + ); + let sub_tmp1 = builder.sub_extension( + local_values[start_col + Z2_ADDITION_OFFSET + ADDITION_Y_OFFSET + i], + local_values[start_col + X_1_Y_0_MULTIPLICATION_OFFSET + SUM_OFFSET + i], + ); + + let c = builder.mul_extension(mul_tmp1, sub_tmp1); + yield_constr.constraint_transition(builder, c); + } + + add_addition_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + Z2_ADDITION_OFFSET, + bit_selector, + ); 
+ + for i in 0..24 { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + Z2_ADDITION_OFFSET + ADDITION_CHECK_OFFSET], + ); + let sub_tmp1 = builder.sub_extension( + local_values[start_col + Z2_ADDITION_OFFSET + ADDITION_SUM_OFFSET + i], + local_values[start_col + Z2_REDUCE_OFFSET + REDUCE_X_OFFSET + i], + ); + + let c = builder.mul_extension(mul_tmp1, sub_tmp1); + yield_constr.constraint_transition(builder, c); + } + + add_reduce_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + Z2_REDUCE_OFFSET, + start_col + FP2_FP2_SELECTOR_OFFSET, + bit_selector, + ); + + add_range_check_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + Z2_RANGECHECK_OFFSET, + bit_selector, + ); +} + +/// Constraints fp2 fp multiplication. +/// +/// Constraints inputs across this and next row, wherever selector is set to on. Constraints x0\*y, x1\*y multiplication operations. Constraints the reduction of result of the previous multiplications, followed by a range check operations. +pub fn add_fp2_fp_mul_constraints< + F: RichField + Extendable, + const D: usize, + FE, + P, + const D2: usize, +>( + local_values: &[P], + next_values: &[P], + yield_constr: &mut ConstraintConsumer

, + start_col: usize, // Starting column of your multiplication trace + bit_selector: Option

, +) where + FE: FieldExtension, + P: PackedField, +{ + let bit_selector_val = bit_selector.unwrap_or(P::ONES); + + for i in 0..12 { + for j in 0..2 { + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + FP2_FP_MUL_SELECTOR_OFFSET] + * (local_values[start_col + FP2_FP_X_INPUT_OFFSET + j * 12 + i] + - next_values[start_col + FP2_FP_X_INPUT_OFFSET + j * 12 + i]), + ); + } + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + FP2_FP_MUL_SELECTOR_OFFSET] + * (local_values[start_col + FP2_FP_Y_INPUT_OFFSET + i] + - next_values[start_col + FP2_FP_Y_INPUT_OFFSET + i]), + ); + } + // constrain inputs to multiplication + for i in 0..12 { + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + FP2_FP_MUL_SELECTOR_OFFSET] + * (local_values[start_col + FP2_FP_X_INPUT_OFFSET + i] + - local_values[start_col + X0_Y_MULTIPLICATION_OFFSET + X_INPUT_OFFSET + i]), + ); + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + FP2_FP_MUL_SELECTOR_OFFSET] + * (local_values[start_col + FP2_FP_X_INPUT_OFFSET + 12 + i] + - local_values[start_col + X1_Y_MULTIPLICATION_OFFSET + X_INPUT_OFFSET + i]), + ); + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + FP2_FP_MUL_SELECTOR_OFFSET] + * (local_values[start_col + FP2_FP_Y_INPUT_OFFSET + i] + - local_values[start_col + X0_Y_MULTIPLICATION_OFFSET + Y_INPUT_OFFSET + i]), + ); + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + FP2_FP_MUL_SELECTOR_OFFSET] + * (local_values[start_col + FP2_FP_Y_INPUT_OFFSET + i] + - local_values[start_col + X1_Y_MULTIPLICATION_OFFSET + Y_INPUT_OFFSET + i]), + ); + } + add_multiplication_constraints( + local_values, + next_values, + yield_constr, + start_col + X0_Y_MULTIPLICATION_OFFSET, + bit_selector, + ); + for i in 0..24 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + X0_Y_REDUCE_OFFSET + + 
REDUCTION_ADDITION_OFFSET + + ADDITION_CHECK_OFFSET] + * (local_values[start_col + + X0_Y_REDUCE_OFFSET + + REDUCTION_ADDITION_OFFSET + + ADDITION_SUM_OFFSET + + i] + - local_values[start_col + X0_Y_MULTIPLICATION_OFFSET + SUM_OFFSET + i]), + ); + } + add_reduce_constraints( + local_values, + next_values, + yield_constr, + start_col + X0_Y_REDUCE_OFFSET, + start_col + FP2_FP_MUL_SELECTOR_OFFSET, + bit_selector, + ); + add_range_check_constraints( + local_values, + yield_constr, + start_col + X0_Y_RANGECHECK_OFFSET, + bit_selector, + ); + add_multiplication_constraints( + local_values, + next_values, + yield_constr, + start_col + X1_Y_MULTIPLICATION_OFFSET, + bit_selector, + ); + for i in 0..24 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + X1_Y_REDUCE_OFFSET + + REDUCTION_ADDITION_OFFSET + + ADDITION_CHECK_OFFSET] + * (local_values[start_col + + X1_Y_REDUCE_OFFSET + + REDUCTION_ADDITION_OFFSET + + ADDITION_SUM_OFFSET + + i] + - local_values[start_col + X1_Y_MULTIPLICATION_OFFSET + SUM_OFFSET + i]), + ); + } + add_reduce_constraints( + local_values, + next_values, + yield_constr, + start_col + X1_Y_REDUCE_OFFSET, + start_col + FP2_FP_MUL_SELECTOR_OFFSET, + bit_selector, + ); + add_range_check_constraints( + local_values, + yield_constr, + start_col + X1_Y_RANGECHECK_OFFSET, + bit_selector, + ); +} + +pub fn add_fp2_fp_mul_constraints_ext_circuit, const D: usize>( + builder: &mut CircuitBuilder, + yield_constr: &mut RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + next_values: &[ExtensionTarget], + start_col: usize, + bit_selector: Option>, +) { + let bit_selector_val = bit_selector.unwrap_or(builder.constant_extension(F::Extension::ONE)); + + for i in 0..12 { + for j in 0..2 { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + FP2_FP_MUL_SELECTOR_OFFSET], + ); + let sub_tmp1 = builder.sub_extension( + local_values[start_col + FP2_FP_X_INPUT_OFFSET + j * 12 + i], + 
next_values[start_col + FP2_FP_X_INPUT_OFFSET + j * 12 + i], + ); + + let c = builder.mul_extension(mul_tmp1, sub_tmp1); + yield_constr.constraint_transition(builder, c); + } + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + FP2_FP_MUL_SELECTOR_OFFSET], + ); + let sub_tmp1 = builder.sub_extension( + local_values[start_col + FP2_FP_Y_INPUT_OFFSET + i], + next_values[start_col + FP2_FP_Y_INPUT_OFFSET + i], + ); + + let c = builder.mul_extension(mul_tmp1, sub_tmp1); + yield_constr.constraint_transition(builder, c); + } + + for i in 0..12 { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + FP2_FP_MUL_SELECTOR_OFFSET], + ); + + let sub_tmp1 = builder.sub_extension( + local_values[start_col + FP2_FP_X_INPUT_OFFSET + i], + local_values[start_col + X0_Y_MULTIPLICATION_OFFSET + X_INPUT_OFFSET + i], + ); + let c1 = builder.mul_extension(mul_tmp1, sub_tmp1); + yield_constr.constraint_transition(builder, c1); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + FP2_FP_X_INPUT_OFFSET + 12 + i], + local_values[start_col + X1_Y_MULTIPLICATION_OFFSET + X_INPUT_OFFSET + i], + ); + let c2 = builder.mul_extension(mul_tmp1, sub_tmp2); + yield_constr.constraint_transition(builder, c2); + + let sub_tmp3 = builder.sub_extension( + local_values[start_col + FP2_FP_Y_INPUT_OFFSET + i], + local_values[start_col + X0_Y_MULTIPLICATION_OFFSET + Y_INPUT_OFFSET + i], + ); + let c3 = builder.mul_extension(mul_tmp1, sub_tmp3); + yield_constr.constraint_transition(builder, c3); + + let sub_tmp4 = builder.sub_extension( + local_values[start_col + FP2_FP_Y_INPUT_OFFSET + i], + local_values[start_col + X1_Y_MULTIPLICATION_OFFSET + Y_INPUT_OFFSET + i], + ); + let c4 = builder.mul_extension(mul_tmp1, sub_tmp4); + yield_constr.constraint_transition(builder, c4); + } + + add_multiplication_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + X0_Y_MULTIPLICATION_OFFSET, + 
bit_selector, + ); + for i in 0..24 { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + + X0_Y_REDUCE_OFFSET + + REDUCTION_ADDITION_OFFSET + + ADDITION_CHECK_OFFSET], + ); + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + X0_Y_REDUCE_OFFSET + + REDUCTION_ADDITION_OFFSET + + ADDITION_SUM_OFFSET + + i], + local_values[start_col + X0_Y_MULTIPLICATION_OFFSET + SUM_OFFSET + i], + ); + + let c = builder.mul_extension(mul_tmp1, sub_tmp1); + yield_constr.constraint(builder, c); + } + + add_reduce_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + X0_Y_REDUCE_OFFSET, + start_col + FP2_FP_MUL_SELECTOR_OFFSET, + bit_selector, + ); + add_range_check_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + X0_Y_RANGECHECK_OFFSET, + bit_selector, + ); + add_multiplication_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + X1_Y_MULTIPLICATION_OFFSET, + bit_selector, + ); + + for i in 0..24 { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + + X1_Y_REDUCE_OFFSET + + REDUCTION_ADDITION_OFFSET + + ADDITION_CHECK_OFFSET], + ); + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + X1_Y_REDUCE_OFFSET + + REDUCTION_ADDITION_OFFSET + + ADDITION_SUM_OFFSET + + i], + local_values[start_col + X1_Y_MULTIPLICATION_OFFSET + SUM_OFFSET + i], + ); + + let c = builder.mul_extension(mul_tmp1, sub_tmp1); + yield_constr.constraint(builder, c); + } + + add_reduce_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + X1_Y_REDUCE_OFFSET, + start_col + FP2_FP_MUL_SELECTOR_OFFSET, + bit_selector, + ); + add_range_check_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + X1_Y_RANGECHECK_OFFSET, + bit_selector, + ); +} + +/// Constraints for [multiply_by_b](super::native::Fp2::multiply_by_B) function. 
+/// +/// Constraints inputs across this and next row, wherever selector is set to on. Constraints x0\*4, x1\*4 multiplications. Constraints y input of the multiplications to 4. Constraints respective addition and subtraction operations followed by reduction and range check constraints. +pub fn add_multiply_by_b_constraints< + F: RichField + Extendable, + const D: usize, + FE, + P, + const D2: usize, +>( + local_values: &[P], + next_values: &[P], + yield_constr: &mut ConstraintConsumer

, + start_col: usize, // Starting column of your multiplication trace + bit_selector: Option

, +) where + FE: FieldExtension, + P: PackedField, +{ + let bit_selector_val = bit_selector.unwrap_or(P::ONES); + + for i in 0..24 { + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + MULTIPLY_B_SELECTOR_OFFSET] + * (local_values[start_col + MULTIPLY_B_X_OFFSET + i] + - next_values[start_col + MULTIPLY_B_X_OFFSET + i]), + ); + } + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + MULTIPLY_B_SELECTOR_OFFSET] + * (local_values[start_col + MULTIPLY_B_X_OFFSET + i] + - local_values[start_col + MULTIPLY_B_X0_B_MUL_OFFSET + X_INPUT_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + MULTIPLY_B_SELECTOR_OFFSET] + * (local_values[start_col + MULTIPLY_B_X_OFFSET + 12 + i] + - local_values[start_col + MULTIPLY_B_X1_B_MUL_OFFSET + X_INPUT_OFFSET + i]), + ); + if i == 0 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + MULTIPLY_B_SELECTOR_OFFSET] + * (local_values[start_col + MULTIPLY_B_X0_B_MUL_OFFSET + Y_INPUT_OFFSET + i] + - FE::from_canonical_u32(4)), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + MULTIPLY_B_SELECTOR_OFFSET] + * (local_values[start_col + MULTIPLY_B_X1_B_MUL_OFFSET + Y_INPUT_OFFSET + i] + - FE::from_canonical_u32(4)), + ); + } else { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + MULTIPLY_B_SELECTOR_OFFSET] + * local_values[start_col + MULTIPLY_B_X0_B_MUL_OFFSET + Y_INPUT_OFFSET + i], + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + MULTIPLY_B_SELECTOR_OFFSET] + * local_values[start_col + MULTIPLY_B_X1_B_MUL_OFFSET + Y_INPUT_OFFSET + i], + ); + } + } + add_multiplication_constraints( + local_values, + next_values, + yield_constr, + start_col + MULTIPLY_B_X0_B_MUL_OFFSET, + bit_selector, + ); + add_multiplication_constraints( + local_values, + next_values, + yield_constr, + start_col + MULTIPLY_B_X1_B_MUL_OFFSET, + 
bit_selector, + ); + let modulus = modulus(); + let modulus_sq_u32 = get_u32_vec_from_literal_24(modulus.clone() * modulus); + for i in 0..24 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + MULTIPLY_B_ADD_MODSQ_OFFSET + ADDITION_CHECK_OFFSET] + * (local_values[start_col + MULTIPLY_B_ADD_MODSQ_OFFSET + ADDITION_X_OFFSET + i] + - local_values[start_col + MULTIPLY_B_X0_B_MUL_OFFSET + SUM_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + MULTIPLY_B_ADD_MODSQ_OFFSET + ADDITION_CHECK_OFFSET] + * (local_values[start_col + MULTIPLY_B_ADD_MODSQ_OFFSET + ADDITION_Y_OFFSET + i] + - FE::from_canonical_u32(modulus_sq_u32[i])), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + MULTIPLY_B_SUB_OFFSET + SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + MULTIPLY_B_SUB_OFFSET + SUBTRACTION_X_OFFSET + i] + - local_values + [start_col + MULTIPLY_B_ADD_MODSQ_OFFSET + ADDITION_SUM_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + MULTIPLY_B_SUB_OFFSET + SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + MULTIPLY_B_SUB_OFFSET + SUBTRACTION_Y_OFFSET + i] + - local_values[start_col + MULTIPLY_B_X1_B_MUL_OFFSET + SUM_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + MULTIPLY_B_ADD_OFFSET + ADDITION_CHECK_OFFSET] + * (local_values[start_col + MULTIPLY_B_ADD_OFFSET + ADDITION_X_OFFSET + i] + - local_values[start_col + MULTIPLY_B_X0_B_MUL_OFFSET + SUM_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + MULTIPLY_B_ADD_OFFSET + ADDITION_CHECK_OFFSET] + * (local_values[start_col + MULTIPLY_B_ADD_OFFSET + ADDITION_Y_OFFSET + i] + - local_values[start_col + MULTIPLY_B_X1_B_MUL_OFFSET + SUM_OFFSET + i]), + ); + } + add_addition_constraints( + local_values, + yield_constr, + start_col + MULTIPLY_B_ADD_MODSQ_OFFSET, + bit_selector, + ); + 
add_subtraction_constraints( + local_values, + yield_constr, + start_col + MULTIPLY_B_SUB_OFFSET, + bit_selector, + ); + add_addition_constraints( + local_values, + yield_constr, + start_col + MULTIPLY_B_ADD_OFFSET, + bit_selector, + ); + for i in 0..24 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + MULTIPLY_B_SUB_OFFSET + SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + MULTIPLY_B_Z0_REDUCE_OFFSET + REDUCE_X_OFFSET + i] + - local_values + [start_col + MULTIPLY_B_SUB_OFFSET + SUBTRACTION_DIFF_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + MULTIPLY_B_ADD_OFFSET + ADDITION_CHECK_OFFSET] + * (local_values[start_col + MULTIPLY_B_Z1_REDUCE_OFFSET + REDUCE_X_OFFSET + i] + - local_values[start_col + MULTIPLY_B_ADD_OFFSET + ADDITION_SUM_OFFSET + i]), + ); + } + add_reduce_constraints( + local_values, + next_values, + yield_constr, + start_col + MULTIPLY_B_Z0_REDUCE_OFFSET, + start_col + MULTIPLY_B_SELECTOR_OFFSET, + bit_selector, + ); + add_range_check_constraints( + local_values, + yield_constr, + start_col + MULTIPLY_B_Z0_RANGECHECK_OFFSET, + bit_selector, + ); + add_reduce_constraints( + local_values, + next_values, + yield_constr, + start_col + MULTIPLY_B_Z1_REDUCE_OFFSET, + start_col + MULTIPLY_B_SELECTOR_OFFSET, + bit_selector, + ); + add_range_check_constraints( + local_values, + yield_constr, + start_col + MULTIPLY_B_Z1_RANGECHECK_OFFSET, + bit_selector, + ); +} + +pub fn add_multiply_by_b_constraints_ext_circuit, const D: usize>( + builder: &mut CircuitBuilder, + yield_constr: &mut RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + next_values: &[ExtensionTarget], + start_col: usize, + bit_selector: Option>, +) { + let constant = builder.constant_extension(F::Extension::from_canonical_u32(4)); + let bit_selector_val = bit_selector.unwrap_or(builder.constant_extension(F::Extension::ONE)); + + for i in 0..24 { + let mul_tmp1 = builder.mul_extension( + 
bit_selector_val, + local_values[start_col + MULTIPLY_B_SELECTOR_OFFSET], + ); + let sub_tmp1 = builder.sub_extension( + local_values[start_col + MULTIPLY_B_X_OFFSET + i], + next_values[start_col + MULTIPLY_B_X_OFFSET + i], + ); + + let c = builder.mul_extension(mul_tmp1, sub_tmp1); + yield_constr.constraint_transition(builder, c); + } + + for i in 0..12 { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + MULTIPLY_B_SELECTOR_OFFSET], + ); + + let sub_tmp1 = builder.sub_extension( + local_values[start_col + MULTIPLY_B_X_OFFSET + i], + local_values[start_col + MULTIPLY_B_X0_B_MUL_OFFSET + X_INPUT_OFFSET + i], + ); + let c1 = builder.mul_extension(mul_tmp1, sub_tmp1); + yield_constr.constraint(builder, c1); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + MULTIPLY_B_X_OFFSET + 12 + i], + local_values[start_col + MULTIPLY_B_X1_B_MUL_OFFSET + X_INPUT_OFFSET + i], + ); + let c2 = builder.mul_extension(mul_tmp1, sub_tmp2); + yield_constr.constraint(builder, c2); + + if i == 0 { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + MULTIPLY_B_SELECTOR_OFFSET], + ); + + let sub_tmp1 = builder.sub_extension( + local_values[start_col + MULTIPLY_B_X0_B_MUL_OFFSET + Y_INPUT_OFFSET + i], + constant, + ); + let c1 = builder.mul_extension(mul_tmp1, sub_tmp1); + yield_constr.constraint(builder, c1); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + MULTIPLY_B_X1_B_MUL_OFFSET + Y_INPUT_OFFSET + i], + constant, + ); + let c2 = builder.mul_extension(mul_tmp1, sub_tmp2); + yield_constr.constraint(builder, c2); + } else { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + MULTIPLY_B_SELECTOR_OFFSET], + ); + + let c1 = builder.mul_extension( + mul_tmp1, + local_values[start_col + MULTIPLY_B_X0_B_MUL_OFFSET + Y_INPUT_OFFSET + i], + ); + yield_constr.constraint(builder, c1); + + let c2 = builder.mul_extension( + mul_tmp1, + local_values[start_col 
+ MULTIPLY_B_X1_B_MUL_OFFSET + Y_INPUT_OFFSET + i], + ); + yield_constr.constraint(builder, c2); + } + } + + add_multiplication_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + MULTIPLY_B_X0_B_MUL_OFFSET, + bit_selector, + ); + add_multiplication_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + MULTIPLY_B_X1_B_MUL_OFFSET, + bit_selector, + ); + let modulus = modulus(); + let modulus_sq_u32 = get_u32_vec_from_literal_24(modulus.clone() * modulus); + for i in 0..24 { + let lc = builder.constant_extension(F::Extension::from_canonical_u32(modulus_sq_u32[i])); + + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + MULTIPLY_B_ADD_MODSQ_OFFSET + ADDITION_CHECK_OFFSET], + ); + + let sub_tmp1 = builder.sub_extension( + local_values[start_col + MULTIPLY_B_ADD_MODSQ_OFFSET + ADDITION_X_OFFSET + i], + local_values[start_col + MULTIPLY_B_X0_B_MUL_OFFSET + SUM_OFFSET + i], + ); + let c1 = builder.mul_extension(mul_tmp1, sub_tmp1); + yield_constr.constraint(builder, c1); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + MULTIPLY_B_ADD_MODSQ_OFFSET + ADDITION_Y_OFFSET + i], + lc, + ); + let c2 = builder.mul_extension(mul_tmp1, sub_tmp2); + yield_constr.constraint(builder, c2); + + let mul_tmp2 = builder.mul_extension( + bit_selector_val, + local_values[start_col + MULTIPLY_B_SUB_OFFSET + SUBTRACTION_CHECK_OFFSET], + ); + + let sub_tmp3 = builder.sub_extension( + local_values[start_col + MULTIPLY_B_SUB_OFFSET + SUBTRACTION_X_OFFSET + i], + local_values[start_col + MULTIPLY_B_ADD_MODSQ_OFFSET + ADDITION_SUM_OFFSET + i], + ); + let c3 = builder.mul_extension(mul_tmp2, sub_tmp3); + yield_constr.constraint(builder, c3); + + let sub_tmp4 = builder.sub_extension( + local_values[start_col + MULTIPLY_B_SUB_OFFSET + SUBTRACTION_Y_OFFSET + i], + local_values[start_col + MULTIPLY_B_X1_B_MUL_OFFSET + SUM_OFFSET + i], + ); + let c4 = 
builder.mul_extension(mul_tmp2, sub_tmp4); + yield_constr.constraint(builder, c4); + + let mul_tmp3 = builder.mul_extension( + bit_selector_val, + local_values[start_col + MULTIPLY_B_ADD_OFFSET + ADDITION_CHECK_OFFSET], + ); + + let sub_tmp5 = builder.sub_extension( + local_values[start_col + MULTIPLY_B_ADD_OFFSET + ADDITION_X_OFFSET + i], + local_values[start_col + MULTIPLY_B_X0_B_MUL_OFFSET + SUM_OFFSET + i], + ); + let c5 = builder.mul_extension(mul_tmp3, sub_tmp5); + yield_constr.constraint(builder, c5); + + let sub_tmp6 = builder.sub_extension( + local_values[start_col + MULTIPLY_B_ADD_OFFSET + ADDITION_Y_OFFSET + i], + local_values[start_col + MULTIPLY_B_X1_B_MUL_OFFSET + SUM_OFFSET + i], + ); + let c6 = builder.mul_extension(mul_tmp3, sub_tmp6); + yield_constr.constraint(builder, c6); + } + + add_addition_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + MULTIPLY_B_ADD_MODSQ_OFFSET, + bit_selector, + ); + add_subtraction_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + MULTIPLY_B_SUB_OFFSET, + bit_selector, + ); + add_addition_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + MULTIPLY_B_ADD_OFFSET, + bit_selector, + ); + for i in 0..24 { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + MULTIPLY_B_SUB_OFFSET + SUBTRACTION_CHECK_OFFSET], + ); + let sub_tmp1 = builder.sub_extension( + local_values[start_col + MULTIPLY_B_Z0_REDUCE_OFFSET + REDUCE_X_OFFSET + i], + local_values[start_col + MULTIPLY_B_SUB_OFFSET + SUBTRACTION_DIFF_OFFSET + i], + ); + let c1 = builder.mul_extension(mul_tmp1, sub_tmp1); + yield_constr.constraint(builder, c1); + + let mul_tmp2 = builder.mul_extension( + bit_selector_val, + local_values[start_col + MULTIPLY_B_ADD_OFFSET + ADDITION_CHECK_OFFSET], + ); + let sub_tmp2 = builder.sub_extension( + local_values[start_col + MULTIPLY_B_Z1_REDUCE_OFFSET + REDUCE_X_OFFSET + i], + local_values[start_col + 
MULTIPLY_B_ADD_OFFSET + ADDITION_SUM_OFFSET + i], + ); + let c2 = builder.mul_extension(mul_tmp2, sub_tmp2); + yield_constr.constraint(builder, c2); + } + + add_reduce_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + MULTIPLY_B_Z0_REDUCE_OFFSET, + start_col + MULTIPLY_B_SELECTOR_OFFSET, + bit_selector, + ); + add_range_check_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + MULTIPLY_B_Z0_RANGECHECK_OFFSET, + bit_selector, + ); + add_reduce_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + MULTIPLY_B_Z1_REDUCE_OFFSET, + start_col + MULTIPLY_B_SELECTOR_OFFSET, + bit_selector, + ); + add_range_check_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + MULTIPLY_B_Z1_RANGECHECK_OFFSET, + bit_selector, + ) +} + +/// Constraints fp2 subtraction followed by reduction and range check constraints. First, constraints of adding field prime p to x to prevent overflow, because x > y assumption is not valid here. Then constraints the subtraction operation. Then reduce and range check constraints. +pub fn add_subtraction_with_reduction_constranints< + F: RichField + Extendable, + const D: usize, + FE, + P, + const D2: usize, +>( + local_values: &[P], + yield_constr: &mut ConstraintConsumer

, + start_col: usize, // Starting column of your multiplication trace + bit_selector: Option

, +) where + FE: FieldExtension, + P: PackedField, +{ + let bit_selector_val = bit_selector.unwrap_or(P::ONES); + + let modulus = get_u32_vec_from_literal(modulus()); + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP2_ADDITION_0_OFFSET + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + FP2_ADDITION_0_OFFSET + FP_ADDITION_Y_OFFSET + i] + - FE::from_canonical_u32(modulus[i])), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP2_ADDITION_1_OFFSET + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + FP2_ADDITION_1_OFFSET + FP_ADDITION_Y_OFFSET + i] + - FE::from_canonical_u32(modulus[i])), + ); + } + add_addition_fp2_constraints(local_values, yield_constr, start_col, bit_selector); + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_X_OFFSET + + i] + - local_values[start_col + FP2_ADDITION_0_OFFSET + FP_ADDITION_SUM_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_X_OFFSET + + i] + - local_values[start_col + FP2_ADDITION_1_OFFSET + FP_ADDITION_SUM_OFFSET + i]), + ); + } + add_subtraction_fp2_constraints( + local_values, + yield_constr, + start_col + FP2_ADDITION_TOTAL, + bit_selector, + ); + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_DIFF_OFFSET + + i] + - local_values[start_col + + FP2_ADDITION_TOTAL + + 
FP2_SUBTRACTION_TOTAL + + FP_SINGLE_REDUCE_X_OFFSET + + i]), + ); + } + add_fp_reduce_single_constraints( + local_values, + yield_constr, + start_col + FP2_ADDITION_TOTAL + FP2_SUBTRACTION_TOTAL, + bit_selector, + ); + add_range_check_constraints( + local_values, + yield_constr, + start_col + FP2_ADDITION_TOTAL + FP2_SUBTRACTION_TOTAL + FP_SINGLE_REDUCE_TOTAL, + bit_selector, + ); + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_DIFF_OFFSET + + i] + - local_values[start_col + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + FP_SINGLE_REDUCE_TOTAL + + RANGE_CHECK_TOTAL + + FP_SINGLE_REDUCE_X_OFFSET + + i]), + ); + } + add_fp_reduce_single_constraints( + local_values, + yield_constr, + start_col + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + FP_SINGLE_REDUCE_TOTAL + + RANGE_CHECK_TOTAL, + bit_selector, + ); + add_range_check_constraints( + local_values, + yield_constr, + start_col + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + FP_SINGLE_REDUCE_TOTAL * 2 + + RANGE_CHECK_TOTAL, + bit_selector, + ); +} + +pub fn add_subtraction_with_reduction_constraints_ext_circuit< + F: RichField + Extendable, + const D: usize, +>( + builder: &mut CircuitBuilder, + yield_constr: &mut RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + start_col: usize, + bit_selector: Option>, +) { + let modulus = get_u32_vec_from_literal(modulus()); + let bit_selector_val = bit_selector.unwrap_or(builder.constant_extension(F::Extension::ONE)); + + for i in 0..12 { + let lc = builder.constant_extension(F::Extension::from_canonical_u32(modulus[i])); + + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + FP2_ADDITION_0_OFFSET + FP_ADDITION_CHECK_OFFSET], + ); + let sub_tmp1 = builder.sub_extension( + 
local_values[start_col + FP2_ADDITION_0_OFFSET + FP_ADDITION_Y_OFFSET + i], + lc, + ); + let c1 = builder.mul_extension(mul_tmp1, sub_tmp1); + yield_constr.constraint(builder, c1); + + let mul_tmp2 = builder.mul_extension( + bit_selector_val, + local_values[start_col + FP2_ADDITION_1_OFFSET + FP_ADDITION_CHECK_OFFSET], + ); + let sub_tmp2 = builder.sub_extension( + local_values[start_col + FP2_ADDITION_1_OFFSET + FP_ADDITION_Y_OFFSET + i], + lc, + ); + let c2 = builder.mul_extension(mul_tmp2, sub_tmp2); + yield_constr.constraint(builder, c2); + } + add_addition_fp2_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col, + bit_selector, + ); + + for i in 0..12 { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET], + ); + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_X_OFFSET + + i], + local_values[start_col + FP2_ADDITION_0_OFFSET + FP_ADDITION_SUM_OFFSET + i], + ); + let c1 = builder.mul_extension(mul_tmp1, sub_tmp1); + yield_constr.constraint(builder, c1); + + let mul_tmp2 = builder.mul_extension( + bit_selector_val, + local_values[start_col + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET], + ); + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_X_OFFSET + + i], + local_values[start_col + FP2_ADDITION_1_OFFSET + FP_ADDITION_SUM_OFFSET + i], + ); + let c2 = builder.mul_extension(mul_tmp2, sub_tmp2); + yield_constr.constraint(builder, c2); + } + add_subtraction_fp2_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP2_ADDITION_TOTAL, + bit_selector, + ); + for i in 0..12 { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + + FP2_ADDITION_TOTAL + + 
FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET], + ); + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_DIFF_OFFSET + + i], + local_values[start_col + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + FP_SINGLE_REDUCE_X_OFFSET + + i], + ); + let c = builder.mul_extension(mul_tmp1, sub_tmp1); + yield_constr.constraint(builder, c); + } + add_fp_reduce_single_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP2_ADDITION_TOTAL + FP2_SUBTRACTION_TOTAL, + bit_selector, + ); + add_range_check_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP2_ADDITION_TOTAL + FP2_SUBTRACTION_TOTAL + FP_SINGLE_REDUCE_TOTAL, + bit_selector, + ); + for i in 0..12 { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET], + ); + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_DIFF_OFFSET + + i], + local_values[start_col + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + FP_SINGLE_REDUCE_TOTAL + + RANGE_CHECK_TOTAL + + FP_SINGLE_REDUCE_X_OFFSET + + i], + ); + let c = builder.mul_extension(mul_tmp1, sub_tmp1); + yield_constr.constraint(builder, c); + } + + add_fp_reduce_single_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + FP_SINGLE_REDUCE_TOTAL + + RANGE_CHECK_TOTAL, + bit_selector, + ); + add_range_check_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + FP_SINGLE_REDUCE_TOTAL * 2 + + RANGE_CHECK_TOTAL, + bit_selector, + ); +} + +/// Constraints fp2 addition followed by reduction and range check constraints. 
+pub fn add_addition_with_reduction_constranints< + F: RichField + Extendable, + const D: usize, + FE, + P, + const D2: usize, +>( + local_values: &[P], + yield_constr: &mut ConstraintConsumer

, + start_col: usize, // Starting column of your multiplication trace + bit_selector: Option

, +) where + FE: FieldExtension, + P: PackedField, +{ + let bit_selector_val = bit_selector.unwrap_or(P::ONES); + + add_addition_fp2_constraints(local_values, yield_constr, start_col, bit_selector); + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP2_ADDITION_0_OFFSET + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + FP2_ADDITION_0_OFFSET + FP_ADDITION_SUM_OFFSET + i] + - local_values[start_col + FP2_ADDITION_TOTAL + FP_SINGLE_REDUCE_X_OFFSET + i]), + ); + } + add_fp_reduce_single_constraints( + local_values, + yield_constr, + start_col + FP2_ADDITION_TOTAL, + bit_selector, + ); + add_range_check_constraints( + local_values, + yield_constr, + start_col + FP2_ADDITION_TOTAL + FP_SINGLE_REDUCE_TOTAL, + bit_selector, + ); + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP2_ADDITION_1_OFFSET + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + FP2_ADDITION_1_OFFSET + FP_ADDITION_SUM_OFFSET + i] + - local_values[start_col + + FP2_ADDITION_TOTAL + + FP_SINGLE_REDUCE_TOTAL + + RANGE_CHECK_TOTAL + + FP_SINGLE_REDUCE_X_OFFSET + + i]), + ); + } + add_fp_reduce_single_constraints( + local_values, + yield_constr, + start_col + FP2_ADDITION_TOTAL + FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL, + bit_selector, + ); + add_range_check_constraints( + local_values, + yield_constr, + start_col + FP2_ADDITION_TOTAL + FP_SINGLE_REDUCE_TOTAL * 2 + RANGE_CHECK_TOTAL, + bit_selector, + ); +} + +pub fn add_addition_with_reduction_constraints_ext_circuit< + F: RichField + Extendable, + const D: usize, +>( + builder: &mut CircuitBuilder, + yield_constr: &mut RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + start_col: usize, + bit_selector: Option>, +) { + let bit_selector_val = bit_selector.unwrap_or(builder.constant_extension(F::Extension::ONE)); + + add_addition_fp2_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col, + bit_selector, + ); + 
for i in 0..12 { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + FP2_ADDITION_0_OFFSET + FP_ADDITION_CHECK_OFFSET], + ); + let sub_tmp1 = builder.sub_extension( + local_values[start_col + FP2_ADDITION_0_OFFSET + FP_ADDITION_SUM_OFFSET + i], + local_values[start_col + FP2_ADDITION_TOTAL + FP_SINGLE_REDUCE_X_OFFSET + i], + ); + + let c = builder.mul_extension(mul_tmp1, sub_tmp1); + yield_constr.constraint(builder, c); + } + add_fp_reduce_single_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP2_ADDITION_TOTAL, + bit_selector, + ); + add_range_check_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP2_ADDITION_TOTAL + FP_SINGLE_REDUCE_TOTAL, + bit_selector, + ); + for i in 0..12 { + let mul_tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + FP2_ADDITION_1_OFFSET + FP_ADDITION_CHECK_OFFSET], + ); + let sub_tmp1 = builder.sub_extension( + local_values[start_col + FP2_ADDITION_1_OFFSET + FP_ADDITION_SUM_OFFSET + i], + local_values[start_col + + FP2_ADDITION_TOTAL + + FP_SINGLE_REDUCE_TOTAL + + RANGE_CHECK_TOTAL + + FP_SINGLE_REDUCE_X_OFFSET + + i], + ); + + let c = builder.mul_extension(mul_tmp1, sub_tmp1); + yield_constr.constraint(builder, c); + } + add_fp_reduce_single_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP2_ADDITION_TOTAL + FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL, + bit_selector, + ); + add_range_check_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP2_ADDITION_TOTAL + FP_SINGLE_REDUCE_TOTAL * 2 + RANGE_CHECK_TOTAL, + bit_selector, + ); +} + +/// Constraints [mul_by_nonresidue](super::native::Fp2::mul_by_nonresidue) function. +/// +/// For the real part, constraints addition with field prime first, and then constraints subtraction, followed by reduction and range check constraints. 
For imaginary part, constraints addition, followed by reduction and range check constraints. +pub fn add_non_residue_multiplication_constraints< + F: RichField + Extendable, + const D: usize, + FE, + P, + const D2: usize, +>( + local_values: &[P], + yield_constr: &mut ConstraintConsumer

, + start_col: usize, + bit_selector: Option

, +) where + FE: FieldExtension, + P: PackedField, +{ + let modulus = get_u32_vec_from_literal(modulus()); + let bit_selector_val = bit_selector.unwrap_or(P::ONES); + + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values + [start_col + FP2_NON_RESIDUE_MUL_C0_C1_SUB_OFFSET + FP_ADDITION_CHECK_OFFSET] + * (local_values + [start_col + FP2_NON_RESIDUE_MUL_C0_C1_SUB_OFFSET + FP_ADDITION_X_OFFSET + i] + - local_values[start_col + FP2_NON_RESIDUE_MUL_INPUT_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values + [start_col + FP2_NON_RESIDUE_MUL_C0_C1_SUB_OFFSET + FP_ADDITION_CHECK_OFFSET] + * (local_values + [start_col + FP2_NON_RESIDUE_MUL_C0_C1_SUB_OFFSET + FP_ADDITION_Y_OFFSET + i] + - FE::from_canonical_u32(modulus[i])), + ); + } + add_addition_fp_constraints( + local_values, + yield_constr, + start_col + FP2_NON_RESIDUE_MUL_C0_C1_SUB_OFFSET, + bit_selector, + ); + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP2_NON_RESIDUE_MUL_C0_C1_SUB_OFFSET + + FP_ADDITION_TOTAL + + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + + FP2_NON_RESIDUE_MUL_C0_C1_SUB_OFFSET + + FP_ADDITION_TOTAL + + FP_SUBTRACTION_X_OFFSET + + i] + - local_values[start_col + + FP2_NON_RESIDUE_MUL_C0_C1_SUB_OFFSET + + FP_ADDITION_SUM_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP2_NON_RESIDUE_MUL_C0_C1_SUB_OFFSET + + FP_ADDITION_TOTAL + + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + + FP2_NON_RESIDUE_MUL_C0_C1_SUB_OFFSET + + FP_ADDITION_TOTAL + + FP_SUBTRACTION_Y_OFFSET + + i] + - local_values[start_col + FP2_NON_RESIDUE_MUL_INPUT_OFFSET + i + 12]), + ); + } + add_subtraction_fp_constraints( + local_values, + yield_constr, + start_col + FP2_NON_RESIDUE_MUL_C0_C1_SUB_OFFSET + FP_ADDITION_TOTAL, + bit_selector, + ); + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + 
FP2_NON_RESIDUE_MUL_C0_C1_SUB_OFFSET + + FP_ADDITION_TOTAL + + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + + FP2_NON_RESIDUE_MUL_C0_C1_SUB_OFFSET + + FP_ADDITION_TOTAL + + FP_SUBTRACTION_DIFF_OFFSET + + i] + - local_values[start_col + + FP2_NON_RESIDUE_MUL_Z0_REDUCE_OFFSET + + FP_SINGLE_REDUCE_X_OFFSET + + i]), + ); + } + add_fp_reduce_single_constraints( + local_values, + yield_constr, + start_col + FP2_NON_RESIDUE_MUL_Z0_REDUCE_OFFSET, + bit_selector, + ); + add_range_check_constraints( + local_values, + yield_constr, + start_col + FP2_NON_RESIDUE_MUL_Z0_RANGECHECK_OFFSET, + bit_selector, + ); + + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values + [start_col + FP2_NON_RESIDUE_MUL_C0_C1_ADD_OFFSET + FP_ADDITION_CHECK_OFFSET] + * (local_values + [start_col + FP2_NON_RESIDUE_MUL_C0_C1_ADD_OFFSET + FP_ADDITION_X_OFFSET + i] + - local_values[start_col + FP2_NON_RESIDUE_MUL_INPUT_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values + [start_col + FP2_NON_RESIDUE_MUL_C0_C1_ADD_OFFSET + FP_ADDITION_CHECK_OFFSET] + * (local_values + [start_col + FP2_NON_RESIDUE_MUL_C0_C1_ADD_OFFSET + FP_ADDITION_Y_OFFSET + i] + - local_values[start_col + FP2_NON_RESIDUE_MUL_INPUT_OFFSET + i + 12]), + ); + } + add_addition_fp_constraints( + local_values, + yield_constr, + start_col + FP2_NON_RESIDUE_MUL_C0_C1_ADD_OFFSET, + bit_selector, + ); + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP2_NON_RESIDUE_MUL_C0_C1_ADD_OFFSET + + FP_ADDITION_TOTAL + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP2_NON_RESIDUE_MUL_C0_C1_ADD_OFFSET + + FP_ADDITION_SUM_OFFSET + + i] + - local_values[start_col + + FP2_NON_RESIDUE_MUL_Z1_REDUCE_OFFSET + + FP_SINGLE_REDUCE_X_OFFSET + + i]), + ); + } + add_fp_reduce_single_constraints( + local_values, + yield_constr, + start_col + FP2_NON_RESIDUE_MUL_Z1_REDUCE_OFFSET, + bit_selector, + ); + add_range_check_constraints( + 
local_values, + yield_constr, + start_col + FP2_NON_RESIDUE_MUL_Z1_RANGECHECK_OFFSET, + bit_selector, + ); +} + +pub fn add_non_residue_multiplication_constraints_ext_circuit< + F: RichField + Extendable, + const D: usize, +>( + builder: &mut CircuitBuilder, + yield_constr: &mut RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + start_col: usize, + bit_selector: Option>, +) { + let modulus = get_u32_vec_from_literal(modulus()); + let bit_selector_val = bit_selector.unwrap_or(builder.constant_extension(F::Extension::ONE)); + + for i in 0..12 { + let lc = builder.constant_extension(F::Extension::from_canonical_u32(modulus[i])); + + let mul_tmp = local_values + [start_col + FP2_NON_RESIDUE_MUL_C0_C1_SUB_OFFSET + FP_ADDITION_CHECK_OFFSET]; + + let sub_tmp1 = builder.sub_extension( + local_values + [start_col + FP2_NON_RESIDUE_MUL_C0_C1_SUB_OFFSET + FP_ADDITION_X_OFFSET + i], + local_values[start_col + FP2_NON_RESIDUE_MUL_INPUT_OFFSET + i], + ); + let c1 = builder.mul_extension(mul_tmp, sub_tmp1); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values + [start_col + FP2_NON_RESIDUE_MUL_C0_C1_SUB_OFFSET + FP_ADDITION_Y_OFFSET + i], + lc, + ); + let c2 = builder.mul_extension(mul_tmp, sub_tmp2); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + } + add_addition_fp_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP2_NON_RESIDUE_MUL_C0_C1_SUB_OFFSET, + bit_selector, + ); + for i in 0..12 { + let mul_tmp = local_values[start_col + + FP2_NON_RESIDUE_MUL_C0_C1_SUB_OFFSET + + FP_ADDITION_TOTAL + + FP_SUBTRACTION_CHECK_OFFSET]; + + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + FP2_NON_RESIDUE_MUL_C0_C1_SUB_OFFSET + + FP_ADDITION_TOTAL + + FP_SUBTRACTION_X_OFFSET + + i], + local_values + [start_col + FP2_NON_RESIDUE_MUL_C0_C1_SUB_OFFSET + FP_ADDITION_SUM_OFFSET + i], + 
); + let c1 = builder.mul_extension(mul_tmp, sub_tmp1); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + FP2_NON_RESIDUE_MUL_C0_C1_SUB_OFFSET + + FP_ADDITION_TOTAL + + FP_SUBTRACTION_Y_OFFSET + + i], + local_values[start_col + FP2_NON_RESIDUE_MUL_INPUT_OFFSET + i + 12], + ); + let c2 = builder.mul_extension(mul_tmp, sub_tmp2); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + } + add_subtraction_fp_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP2_NON_RESIDUE_MUL_C0_C1_SUB_OFFSET + FP_ADDITION_TOTAL, + bit_selector, + ); + for i in 0..12 { + let sub_tmp = builder.sub_extension( + local_values[start_col + + FP2_NON_RESIDUE_MUL_C0_C1_SUB_OFFSET + + FP_ADDITION_TOTAL + + FP_SUBTRACTION_DIFF_OFFSET + + i], + local_values + [start_col + FP2_NON_RESIDUE_MUL_Z0_REDUCE_OFFSET + FP_SINGLE_REDUCE_X_OFFSET + i], + ); + let c = builder.mul_extension( + local_values[start_col + + FP2_NON_RESIDUE_MUL_C0_C1_SUB_OFFSET + + FP_ADDITION_TOTAL + + FP_SUBTRACTION_CHECK_OFFSET], + sub_tmp, + ); + let c = builder.mul_extension(bit_selector_val, c); + yield_constr.constraint(builder, c); + } + add_fp_reduce_single_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP2_NON_RESIDUE_MUL_Z0_REDUCE_OFFSET, + bit_selector, + ); + add_range_check_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP2_NON_RESIDUE_MUL_Z0_RANGECHECK_OFFSET, + bit_selector, + ); + + for i in 0..12 { + let mul_tmp = local_values + [start_col + FP2_NON_RESIDUE_MUL_C0_C1_ADD_OFFSET + FP_ADDITION_CHECK_OFFSET]; + + let sub_tmp1 = builder.sub_extension( + local_values + [start_col + FP2_NON_RESIDUE_MUL_C0_C1_ADD_OFFSET + FP_ADDITION_X_OFFSET + i], + local_values[start_col + FP2_NON_RESIDUE_MUL_INPUT_OFFSET + i], + ); + let c1 = builder.mul_extension(mul_tmp, 
sub_tmp1); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values + [start_col + FP2_NON_RESIDUE_MUL_C0_C1_ADD_OFFSET + FP_ADDITION_Y_OFFSET + i], + local_values[start_col + FP2_NON_RESIDUE_MUL_INPUT_OFFSET + i + 12], + ); + let c2 = builder.mul_extension(mul_tmp, sub_tmp2); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + } + add_addition_fp_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP2_NON_RESIDUE_MUL_C0_C1_ADD_OFFSET, + bit_selector, + ); + for i in 0..12 { + let sub_tmp = builder.sub_extension( + local_values + [start_col + FP2_NON_RESIDUE_MUL_C0_C1_ADD_OFFSET + FP_ADDITION_SUM_OFFSET + i], + local_values + [start_col + FP2_NON_RESIDUE_MUL_Z1_REDUCE_OFFSET + FP_SINGLE_REDUCE_X_OFFSET + i], + ); + let c = builder.mul_extension( + local_values[start_col + + FP2_NON_RESIDUE_MUL_C0_C1_ADD_OFFSET + + FP_ADDITION_TOTAL + + FP_ADDITION_CHECK_OFFSET], + sub_tmp, + ); + let c = builder.mul_extension(bit_selector_val, c); + yield_constr.constraint(builder, c); + } + add_fp_reduce_single_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP2_NON_RESIDUE_MUL_Z1_REDUCE_OFFSET, + bit_selector, + ); + add_range_check_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP2_NON_RESIDUE_MUL_Z1_RANGECHECK_OFFSET, + bit_selector, + ); +} + +/// Constraints for [fp4_square](super::native::fp4_square) function. +/// +/// Constraints inputs across this and next row, wherever selector is set to on. Constraints the respective multiplication, addition and subtraction operations. +pub fn add_fp4_sq_constraints< + F: RichField + Extendable, + const D: usize, + FE, + P, + const D2: usize, +>( + local_values: &[P], + next_values: &[P], + yield_constr: &mut ConstraintConsumer

, + start_col: usize, + bit_selector: Option

, +) where + FE: FieldExtension, + P: PackedField, +{ + for i in 0..24 { + yield_constr.constraint_transition( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + FP4_SQ_SELECTOR_OFFSET] + * (local_values[start_col + FP4_SQ_INPUT_X_OFFSET + i] + - next_values[start_col + FP4_SQ_INPUT_X_OFFSET + i]), + ); + yield_constr.constraint_transition( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + FP4_SQ_SELECTOR_OFFSET] + * (local_values[start_col + FP4_SQ_INPUT_Y_OFFSET + i] + - next_values[start_col + FP4_SQ_INPUT_Y_OFFSET + i]), + ); + } + + for i in 0..24 { + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + FP4_SQ_T0_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + FP4_SQ_T0_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i] + - local_values[start_col + FP4_SQ_INPUT_X_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + FP4_SQ_T0_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + FP4_SQ_T0_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i] + - local_values[start_col + FP4_SQ_INPUT_X_OFFSET + i]), + ); + } + add_fp2_mul_constraints( + local_values, + next_values, + yield_constr, + start_col + FP4_SQ_T0_CALC_OFFSET, + bit_selector, + ); + + for i in 0..24 { + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + FP4_SQ_T1_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + FP4_SQ_T1_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i] + - local_values[start_col + FP4_SQ_INPUT_Y_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + FP4_SQ_T1_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + FP4_SQ_T1_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i] + - local_values[start_col + FP4_SQ_INPUT_Y_OFFSET + i]), + ); + } + add_fp2_mul_constraints( + local_values, + next_values, + yield_constr, + start_col + FP4_SQ_T1_CALC_OFFSET, + 
bit_selector, + ); + + for i in 0..12 { + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values + [start_col + FP4_SQ_T2_CALC_OFFSET + FP2_NON_RESIDUE_MUL_CHECK_OFFSET] + * (local_values + [start_col + FP4_SQ_T2_CALC_OFFSET + FP2_NON_RESIDUE_MUL_INPUT_OFFSET + i] + - local_values[start_col + + FP4_SQ_T1_CALC_OFFSET + + Z1_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values + [start_col + FP4_SQ_T2_CALC_OFFSET + FP2_NON_RESIDUE_MUL_CHECK_OFFSET] + * (local_values[start_col + + FP4_SQ_T2_CALC_OFFSET + + FP2_NON_RESIDUE_MUL_INPUT_OFFSET + + 12 + + i] + - local_values[start_col + + FP4_SQ_T1_CALC_OFFSET + + Z2_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + } + add_non_residue_multiplication_constraints( + local_values, + yield_constr, + start_col + FP4_SQ_T2_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + FP4_SQ_X_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP4_SQ_X_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + + FP4_SQ_T2_CALC_OFFSET + + FP2_NON_RESIDUE_MUL_Z0_REDUCE_OFFSET + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + FP4_SQ_X_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP4_SQ_X_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_Y_OFFSET + + i] + - local_values[start_col + + FP4_SQ_T0_CALC_OFFSET + + Z1_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + FP4_SQ_X_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP4_SQ_X_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + 
i] + - local_values[start_col + + FP4_SQ_T2_CALC_OFFSET + + FP2_NON_RESIDUE_MUL_Z1_REDUCE_OFFSET + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + FP4_SQ_X_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP4_SQ_X_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_Y_OFFSET + + i] + - local_values[start_col + + FP4_SQ_T0_CALC_OFFSET + + Z2_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + } + add_addition_with_reduction_constranints( + local_values, + yield_constr, + start_col + FP4_SQ_X_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + FP4_SQ_T3_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP4_SQ_T3_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + FP4_SQ_INPUT_X_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + FP4_SQ_T3_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP4_SQ_T3_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_Y_OFFSET + + i] + - local_values[start_col + FP4_SQ_INPUT_Y_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + FP4_SQ_T3_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP4_SQ_T3_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + FP4_SQ_INPUT_X_OFFSET + 12 + i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + FP4_SQ_T3_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP4_SQ_T3_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_Y_OFFSET 
+ + i] + - local_values[start_col + FP4_SQ_INPUT_Y_OFFSET + 12 + i]), + ); + } + add_addition_with_reduction_constranints( + local_values, + yield_constr, + start_col + FP4_SQ_T3_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + FP4_SQ_T4_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + FP4_SQ_T4_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i] + - local_values[start_col + + FP4_SQ_T3_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + FP4_SQ_T4_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values + [start_col + FP4_SQ_T4_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i + 12] + - local_values[start_col + + FP4_SQ_T3_CALC_OFFSET + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + FP4_SQ_T4_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + FP4_SQ_T4_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i] + - local_values[start_col + + FP4_SQ_T3_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + FP4_SQ_T4_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values + [start_col + FP4_SQ_T4_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i + 12] + - local_values[start_col + + FP4_SQ_T3_CALC_OFFSET + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + } + add_fp2_mul_constraints( + local_values, + next_values, + yield_constr, + start_col + FP4_SQ_T4_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + FP4_SQ_T5_CALC_OFFSET + + 
FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP4_SQ_T5_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + + FP4_SQ_T4_CALC_OFFSET + + Z1_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + FP4_SQ_T5_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + + FP4_SQ_T5_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_Y_OFFSET + + i] + - local_values[start_col + + FP4_SQ_T0_CALC_OFFSET + + Z1_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + FP4_SQ_T5_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP4_SQ_T5_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + + FP4_SQ_T4_CALC_OFFSET + + Z2_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + FP4_SQ_T5_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + + FP4_SQ_T5_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_Y_OFFSET + + i] + - local_values[start_col + + FP4_SQ_T0_CALC_OFFSET + + Z2_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + } + add_subtraction_with_reduction_constranints( + local_values, + yield_constr, + start_col + FP4_SQ_T5_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + FP4_SQ_Y_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP4_SQ_Y_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - 
local_values[start_col + + FP4_SQ_T5_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + FP4_SQ_Y_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + + FP4_SQ_Y_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_Y_OFFSET + + i] + - local_values[start_col + + FP4_SQ_T1_CALC_OFFSET + + Z1_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + FP4_SQ_Y_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP4_SQ_Y_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + + FP4_SQ_T5_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + FP4_SQ_Y_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + + FP4_SQ_Y_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_Y_OFFSET + + i] + - local_values[start_col + + FP4_SQ_T1_CALC_OFFSET + + Z2_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + } + add_subtraction_with_reduction_constranints( + local_values, + yield_constr, + start_col + FP4_SQ_Y_CALC_OFFSET, + bit_selector, + ); +} + +pub fn add_fp4_sq_constraints_ext_circuit, const D: usize>( + builder: &mut CircuitBuilder, + yield_constr: &mut RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + next_values: &[ExtensionTarget], + start_col: usize, + bit_selector: Option>, +) { + let bit_selector_val = 
bit_selector.unwrap_or(builder.constant_extension(F::Extension::ONE)); + + for i in 0..24 { + let tmp = builder.mul_extension( + bit_selector_val, + local_values[start_col + FP4_SQ_SELECTOR_OFFSET], + ); + + let c = builder.sub_extension( + local_values[start_col + FP4_SQ_INPUT_X_OFFSET + i], + next_values[start_col + FP4_SQ_INPUT_X_OFFSET + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint_transition(builder, c); + + let c = builder.sub_extension( + local_values[start_col + FP4_SQ_INPUT_Y_OFFSET + i], + next_values[start_col + FP4_SQ_INPUT_Y_OFFSET + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint_transition(builder, c); + } + + for i in 0..24 { + let tmp = builder.mul_extension( + bit_selector_val, + local_values[start_col + FP4_SQ_T0_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET], + ); + + let c = builder.sub_extension( + local_values[start_col + FP4_SQ_T0_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i], + local_values[start_col + FP4_SQ_INPUT_X_OFFSET + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values[start_col + FP4_SQ_T0_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i], + local_values[start_col + FP4_SQ_INPUT_X_OFFSET + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + } + add_fp2_mul_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + FP4_SQ_T0_CALC_OFFSET, + bit_selector, + ); + + for i in 0..24 { + let tmp = builder.mul_extension( + bit_selector_val, + local_values[start_col + FP4_SQ_T1_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET], + ); + + let c = builder.sub_extension( + local_values[start_col + FP4_SQ_T1_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i], + local_values[start_col + FP4_SQ_INPUT_Y_OFFSET + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values[start_col + 
FP4_SQ_T1_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i], + local_values[start_col + FP4_SQ_INPUT_Y_OFFSET + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + } + add_fp2_mul_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + FP4_SQ_T1_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + let tmp = builder.mul_extension( + bit_selector_val, + local_values[start_col + FP4_SQ_T2_CALC_OFFSET + FP2_NON_RESIDUE_MUL_CHECK_OFFSET], + ); + + let c = builder.sub_extension( + local_values[start_col + FP4_SQ_T2_CALC_OFFSET + FP2_NON_RESIDUE_MUL_INPUT_OFFSET + i], + local_values[start_col + FP4_SQ_T1_CALC_OFFSET + Z1_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values + [start_col + FP4_SQ_T2_CALC_OFFSET + FP2_NON_RESIDUE_MUL_INPUT_OFFSET + 12 + i], + local_values[start_col + FP4_SQ_T1_CALC_OFFSET + Z2_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + } + add_non_residue_multiplication_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP4_SQ_T2_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + let tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + + FP4_SQ_X_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let tmp2 = builder.mul_extension( + bit_selector_val, + local_values[start_col + + FP4_SQ_X_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + + let c = builder.sub_extension( + local_values[start_col + + FP4_SQ_X_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + + FP4_SQ_T2_CALC_OFFSET + + FP2_NON_RESIDUE_MUL_Z0_REDUCE_OFFSET + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c = builder.mul_extension(tmp1, c); + 
yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values[start_col + + FP4_SQ_X_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_Y_OFFSET + + i], + local_values[start_col + FP4_SQ_T0_CALC_OFFSET + Z1_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c = builder.mul_extension(tmp1, c); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values[start_col + + FP4_SQ_X_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + + FP4_SQ_T2_CALC_OFFSET + + FP2_NON_RESIDUE_MUL_Z1_REDUCE_OFFSET + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c = builder.mul_extension(tmp2, c); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values[start_col + + FP4_SQ_X_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_Y_OFFSET + + i], + local_values[start_col + FP4_SQ_T0_CALC_OFFSET + Z2_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c = builder.mul_extension(tmp2, c); + yield_constr.constraint(builder, c); + } + add_addition_with_reduction_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP4_SQ_X_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + let tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + + FP4_SQ_T3_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let tmp2 = builder.mul_extension( + bit_selector_val, + local_values[start_col + + FP4_SQ_T3_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + + let c = builder.sub_extension( + local_values[start_col + + FP4_SQ_T3_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + FP4_SQ_INPUT_X_OFFSET + i], + ); + let c = builder.mul_extension(tmp1, c); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values[start_col + + FP4_SQ_T3_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_Y_OFFSET + + i], + 
local_values[start_col + FP4_SQ_INPUT_Y_OFFSET + i], + ); + let c = builder.mul_extension(tmp1, c); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values[start_col + + FP4_SQ_T3_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + FP4_SQ_INPUT_X_OFFSET + 12 + i], + ); + let c = builder.mul_extension(tmp2, c); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values[start_col + + FP4_SQ_T3_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_Y_OFFSET + + i], + local_values[start_col + FP4_SQ_INPUT_Y_OFFSET + 12 + i], + ); + let c = builder.mul_extension(tmp2, c); + yield_constr.constraint(builder, c); + } + add_addition_with_reduction_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP4_SQ_T3_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + let tmp = builder.mul_extension( + bit_selector_val, + local_values[start_col + FP4_SQ_T4_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET], + ); + + let c = builder.sub_extension( + local_values[start_col + FP4_SQ_T4_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i], + local_values[start_col + + FP4_SQ_T3_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values[start_col + FP4_SQ_T4_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i + 12], + local_values[start_col + + FP4_SQ_T3_CALC_OFFSET + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values[start_col + FP4_SQ_T4_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i], + local_values[start_col + + FP4_SQ_T3_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c = builder.mul_extension(tmp, c); + 
yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values[start_col + FP4_SQ_T4_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i + 12], + local_values[start_col + + FP4_SQ_T3_CALC_OFFSET + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + } + add_fp2_mul_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + FP4_SQ_T4_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + let tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + + FP4_SQ_T5_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let tmp2 = builder.mul_extension( + bit_selector_val, + local_values[start_col + + FP4_SQ_T5_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET], + ); + let tmp3 = builder.mul_extension( + bit_selector_val, + local_values[start_col + + FP4_SQ_T5_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let tmp4 = builder.mul_extension( + bit_selector_val, + local_values[start_col + + FP4_SQ_T5_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET], + ); + + let c = builder.sub_extension( + local_values[start_col + + FP4_SQ_T5_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + FP4_SQ_T4_CALC_OFFSET + Z1_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c = builder.mul_extension(tmp1, c); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values[start_col + + FP4_SQ_T5_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_Y_OFFSET + + i], + local_values[start_col + FP4_SQ_T0_CALC_OFFSET + Z1_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c = builder.mul_extension(tmp2, c); + yield_constr.constraint(builder, c); + + let c 
= builder.sub_extension( + local_values[start_col + + FP4_SQ_T5_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + FP4_SQ_T4_CALC_OFFSET + Z2_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c = builder.mul_extension(tmp3, c); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values[start_col + + FP4_SQ_T5_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_Y_OFFSET + + i], + local_values[start_col + FP4_SQ_T0_CALC_OFFSET + Z2_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c = builder.mul_extension(tmp4, c); + yield_constr.constraint(builder, c); + } + add_subtraction_with_reduction_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP4_SQ_T5_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + let tmp1 = builder.mul_extension( + bit_selector_val, + local_values[start_col + + FP4_SQ_Y_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let tmp2 = builder.mul_extension( + bit_selector_val, + local_values[start_col + + FP4_SQ_Y_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET], + ); + let tmp3 = builder.mul_extension( + bit_selector_val, + local_values[start_col + + FP4_SQ_Y_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let tmp4 = builder.mul_extension( + bit_selector_val, + local_values[start_col + + FP4_SQ_Y_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET], + ); + + let c = builder.sub_extension( + local_values[start_col + + FP4_SQ_Y_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + + FP4_SQ_T5_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c = builder.mul_extension(tmp1, c); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + 
local_values[start_col + + FP4_SQ_Y_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_Y_OFFSET + + i], + local_values[start_col + FP4_SQ_T1_CALC_OFFSET + Z1_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c = builder.mul_extension(tmp2, c); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values[start_col + + FP4_SQ_Y_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + + FP4_SQ_T5_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c = builder.mul_extension(tmp3, c); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values[start_col + + FP4_SQ_Y_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_Y_OFFSET + + i], + local_values[start_col + FP4_SQ_T1_CALC_OFFSET + Z2_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c = builder.mul_extension(tmp4, c); + yield_constr.constraint(builder, c); + } + add_subtraction_with_reduction_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP4_SQ_Y_CALC_OFFSET, + bit_selector, + ); +} + +/// Constraints for [forbenius_map](super::native::Fp2::forbenius_map) function. +/// +/// Constraints both input and power across this and next row, wherever selector is set to on. Constraint the divisor and remainder with power for `power == divisor*2 + remainder`. Selects the forbenius constant using mupliplexer logic -> `y = (1-bit)*constant[0] + bit*constant[1]`. Then constraints multiplication, reduction and range check operations. +pub fn add_fp2_forbenius_map_constraints< + F: RichField + Extendable, + const D: usize, + FE, + P, + const D2: usize, +>( + local_values: &[P], + next_values: &[P], + yield_constr: &mut ConstraintConsumer

, + start_col: usize, + bit_selector: Option

, +) where + FE: FieldExtension, + P: PackedField, +{ + for i in 0..24 { + yield_constr.constraint_transition( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + FP2_FORBENIUS_MAP_SELECTOR_OFFSET] + * (local_values[start_col + FP2_FORBENIUS_MAP_INPUT_OFFSET + i] + - next_values[start_col + FP2_FORBENIUS_MAP_INPUT_OFFSET + i]), + ); + } + yield_constr.constraint_transition( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + FP2_FORBENIUS_MAP_SELECTOR_OFFSET] + * (local_values[start_col + FP2_FORBENIUS_MAP_POW_OFFSET] + - next_values[start_col + FP2_FORBENIUS_MAP_POW_OFFSET]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + FP2_FORBENIUS_MAP_SELECTOR_OFFSET] + * (local_values[start_col + FP2_FORBENIUS_MAP_DIV_OFFSET] * FE::TWO + + local_values[start_col + FP2_FORBENIUS_MAP_REM_OFFSET] + - local_values[start_col + FP2_FORBENIUS_MAP_POW_OFFSET]), + ); + let bit = local_values[start_col + FP2_FORBENIUS_MAP_REM_OFFSET]; + let forbenius_coefficients = Fp2::forbenius_coefficients() + .iter() + .map(|fp| fp.0) + .collect::>(); + let y = (0..12) + .map(|i| { + (P::ONES - bit) * FE::from_canonical_u32(forbenius_coefficients[0][i]) + + bit * FE::from_canonical_u32(forbenius_coefficients[1][i]) + }) + .collect::>(); + for i in 0..12 { + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values + [start_col + FP2_FORBENIUS_MAP_T0_CALC_OFFSET + MULTIPLICATION_SELECTOR_OFFSET] + * (local_values[start_col + FP2_FORBENIUS_MAP_T0_CALC_OFFSET + X_INPUT_OFFSET + i] + - local_values[start_col + FP2_FORBENIUS_MAP_INPUT_OFFSET + 12 + i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values + [start_col + FP2_FORBENIUS_MAP_T0_CALC_OFFSET + MULTIPLICATION_SELECTOR_OFFSET] + * (local_values[start_col + FP2_FORBENIUS_MAP_T0_CALC_OFFSET + Y_INPUT_OFFSET + i] + - y[i]), + ); + } + add_multiplication_constraints( + local_values, + next_values, + yield_constr, + start_col + 
FP2_FORBENIUS_MAP_T0_CALC_OFFSET, + bit_selector, + ); + for i in 0..24 { + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + FP2_FORBENIUS_MAP_MUL_RES_ROW] + * (local_values[start_col + FP2_FORBENIUS_MAP_T0_CALC_OFFSET + SUM_OFFSET + i] + - local_values[start_col + + FP2_FORBENIUS_MAP_T0_CALC_OFFSET + + FP_MULTIPLICATION_TOTAL_COLUMNS + + REDUCE_X_OFFSET + + i]), + ); + } + add_reduce_constraints( + local_values, + next_values, + yield_constr, + start_col + FP2_FORBENIUS_MAP_T0_CALC_OFFSET + FP_MULTIPLICATION_TOTAL_COLUMNS, + start_col + FP2_FORBENIUS_MAP_SELECTOR_OFFSET, + bit_selector, + ); + add_range_check_constraints( + local_values, + yield_constr, + start_col + + FP2_FORBENIUS_MAP_T0_CALC_OFFSET + + FP_MULTIPLICATION_TOTAL_COLUMNS + + REDUCTION_TOTAL, + bit_selector, + ); +} + +pub fn add_fp2_forbenius_map_constraints_ext_circuit< + F: RichField + Extendable, + const D: usize, +>( + builder: &mut CircuitBuilder, + yield_constr: &mut RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + next_values: &[ExtensionTarget], + start_col: usize, + bit_selector: Option>, +) { + let bit_selector_val = bit_selector.unwrap_or(builder.constant_extension(F::Extension::ONE)); + let tmp = builder.mul_extension( + bit_selector_val, + local_values[start_col + FP2_FORBENIUS_MAP_SELECTOR_OFFSET], + ); + + for i in 0..24 { + let c = builder.sub_extension( + local_values[start_col + FP2_FORBENIUS_MAP_INPUT_OFFSET + i], + next_values[start_col + FP2_FORBENIUS_MAP_INPUT_OFFSET + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint_transition(builder, c); + } + + let c = builder.sub_extension( + local_values[start_col + FP2_FORBENIUS_MAP_POW_OFFSET], + next_values[start_col + FP2_FORBENIUS_MAP_POW_OFFSET], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint_transition(builder, c); + + let two = builder.constant_extension(F::Extension::TWO); + let c = 
builder.mul_extension(local_values[start_col + FP2_FORBENIUS_MAP_DIV_OFFSET], two); + let c = builder.add_extension(c, local_values[start_col + FP2_FORBENIUS_MAP_REM_OFFSET]); + let c = builder.sub_extension(c, local_values[start_col + FP2_FORBENIUS_MAP_POW_OFFSET]); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + + let bit = local_values[start_col + FP2_FORBENIUS_MAP_REM_OFFSET]; + let one = builder.constant_extension(F::Extension::ONE); + let forbenius_coefficients = Fp2::forbenius_coefficients() + .iter() + .map(|fp| fp.0) + .collect::>(); + let y = (0..12) + .map(|i| { + let sub = builder.sub_extension(one, bit); + let const1 = builder.constant_extension(F::Extension::from_canonical_u32( + forbenius_coefficients[0][i], + )); + let mul1 = builder.mul_extension(sub, const1); + + let const2 = builder.constant_extension(F::Extension::from_canonical_u32( + forbenius_coefficients[1][i], + )); + let mul2 = builder.mul_extension(bit, const2); + + let c = builder.add_extension(mul1, mul2); + c + }) + .collect::>>(); + for i in 0..12 { + let tmp = builder.mul_extension( + bit_selector_val, + local_values + [start_col + FP2_FORBENIUS_MAP_T0_CALC_OFFSET + MULTIPLICATION_SELECTOR_OFFSET], + ); + + let c = builder.sub_extension( + local_values[start_col + FP2_FORBENIUS_MAP_T0_CALC_OFFSET + X_INPUT_OFFSET + i], + local_values[start_col + FP2_FORBENIUS_MAP_INPUT_OFFSET + 12 + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values[start_col + FP2_FORBENIUS_MAP_T0_CALC_OFFSET + Y_INPUT_OFFSET + i], + y[i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + } + add_multiplication_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + FP2_FORBENIUS_MAP_T0_CALC_OFFSET, + bit_selector, + ); + for i in 0..24 { + let tmp = builder.mul_extension( + bit_selector_val, + local_values[start_col + 
FP2_FORBENIUS_MAP_MUL_RES_ROW], + ); + + let c = builder.sub_extension( + local_values[start_col + FP2_FORBENIUS_MAP_T0_CALC_OFFSET + SUM_OFFSET + i], + local_values[start_col + + FP2_FORBENIUS_MAP_T0_CALC_OFFSET + + FP_MULTIPLICATION_TOTAL_COLUMNS + + REDUCE_X_OFFSET + + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + } + add_reduce_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + FP2_FORBENIUS_MAP_T0_CALC_OFFSET + FP_MULTIPLICATION_TOTAL_COLUMNS, + start_col + FP2_FORBENIUS_MAP_SELECTOR_OFFSET, + bit_selector, + ); + add_range_check_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + + FP2_FORBENIUS_MAP_T0_CALC_OFFSET + + FP_MULTIPLICATION_TOTAL_COLUMNS + + REDUCTION_TOTAL, + bit_selector, + ); +} diff --git a/casper-finality-proofs/src/verification/fields/starky/fp6.rs b/casper-finality-proofs/src/verification/fields/starky/fp6.rs new file mode 100644 index 000000000..e5d51fe04 --- /dev/null +++ b/casper-finality-proofs/src/verification/fields/starky/fp6.rs @@ -0,0 +1,7408 @@ +//! This module contains functions for filling the stark trace and adding constraints for the corresponding trace for some Fp6 operations (multiplication, addition, subtraction, etc). One Fp6 element is represented as \[u32; 72\] inside the trace. But most of the time, Fp6 elements are broken up into six Fp elements, hence represented as six \[u32; 12\]. 
+use crate::verification::{ + fields::starky::{fp::*, fp2::*}, + utils::{ + native_bls::{ + get_u32_vec_from_literal, modulus, Fp2, Fp6 + }, + starky_utils::*, + }, +}; +use num_bigint::BigUint; +use plonky2::{ + field::{ + extension::{Extendable, FieldExtension}, + packed::PackedField, + types::Field, + }, + hash::hash_types::RichField, + iop::ext_target::ExtensionTarget, + plonk::circuit_builder::CircuitBuilder, +}; +use starky::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer}; + +// FP6 multiplication offsets +/* + These trace offsets are for fp6 multiplication. It needs 12 rows. The Ti's are defined in (super::native::mul_Fp6). +*/ +pub const FP6_MUL_SELECTOR_OFFSET: usize = 0; +pub const FP6_MUL_X_INPUT_OFFSET: usize = FP6_MUL_SELECTOR_OFFSET + 1; +pub const FP6_MUL_Y_INPUT_OFFSET: usize = FP6_MUL_X_INPUT_OFFSET + 24 * 3; +pub const FP6_MUL_T0_CALC_OFFSET: usize = FP6_MUL_Y_INPUT_OFFSET + 24 * 3; +pub const FP6_MUL_T1_CALC_OFFSET: usize = FP6_MUL_T0_CALC_OFFSET + TOTAL_COLUMNS_FP2_MULTIPLICATION; +pub const FP6_MUL_T2_CALC_OFFSET: usize = FP6_MUL_T1_CALC_OFFSET + TOTAL_COLUMNS_FP2_MULTIPLICATION; +pub const FP6_MUL_T3_CALC_OFFSET: usize = FP6_MUL_T2_CALC_OFFSET + TOTAL_COLUMNS_FP2_MULTIPLICATION; +pub const FP6_MUL_T4_CALC_OFFSET: usize = + FP6_MUL_T3_CALC_OFFSET + FP2_ADDITION_TOTAL + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 2; +pub const FP6_MUL_T5_CALC_OFFSET: usize = + FP6_MUL_T4_CALC_OFFSET + FP2_ADDITION_TOTAL + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 2; +pub const FP6_MUL_T6_CALC_OFFSET: usize = FP6_MUL_T5_CALC_OFFSET + TOTAL_COLUMNS_FP2_MULTIPLICATION; +pub const FP6_MUL_T7_CALC_OFFSET: usize = FP6_MUL_T6_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 2; +pub const FP6_MUL_T8_CALC_OFFSET: usize = FP6_MUL_T7_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 2; +pub const FP6_MUL_X_CALC_OFFSET: 
usize = FP6_MUL_T8_CALC_OFFSET + FP2_NON_RESIDUE_MUL_TOTAL; +pub const FP6_MUL_T9_CALC_OFFSET: usize = + FP6_MUL_X_CALC_OFFSET + FP2_ADDITION_TOTAL + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 2; +pub const FP6_MUL_T10_CALC_OFFSET: usize = + FP6_MUL_T9_CALC_OFFSET + FP2_ADDITION_TOTAL + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 2; +pub const FP6_MUL_T11_CALC_OFFSET: usize = + FP6_MUL_T10_CALC_OFFSET + FP2_ADDITION_TOTAL + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 2; +pub const FP6_MUL_T12_CALC_OFFSET: usize = + FP6_MUL_T11_CALC_OFFSET + TOTAL_COLUMNS_FP2_MULTIPLICATION; +pub const FP6_MUL_T13_CALC_OFFSET: usize = FP6_MUL_T12_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 2; +pub const FP6_MUL_T14_CALC_OFFSET: usize = FP6_MUL_T13_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 2; +pub const FP6_MUL_Y_CALC_OFFSET: usize = FP6_MUL_T14_CALC_OFFSET + FP2_NON_RESIDUE_MUL_TOTAL; +pub const FP6_MUL_T15_CALC_OFFSET: usize = + FP6_MUL_Y_CALC_OFFSET + FP2_ADDITION_TOTAL + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 2; +pub const FP6_MUL_T16_CALC_OFFSET: usize = + FP6_MUL_T15_CALC_OFFSET + FP2_ADDITION_TOTAL + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 2; +pub const FP6_MUL_T17_CALC_OFFSET: usize = + FP6_MUL_T16_CALC_OFFSET + FP2_ADDITION_TOTAL + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 2; +pub const FP6_MUL_T18_CALC_OFFSET: usize = + FP6_MUL_T17_CALC_OFFSET + TOTAL_COLUMNS_FP2_MULTIPLICATION; +pub const FP6_MUL_T19_CALC_OFFSET: usize = FP6_MUL_T18_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 2; +pub const FP6_MUL_Z_CALC_OFFSET: usize = FP6_MUL_T19_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 2; +pub const FP6_MUL_TOTAL_COLUMNS: usize = + FP6_MUL_Z_CALC_OFFSET + FP2_ADDITION_TOTAL + 
(FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 2; + +// FP6 non residue multiplication +/* + These trace offsets are for fp6 non residue multiplication (super::native::mul_by_nonresidue). It needs 1 row. +*/ +pub const FP6_NON_RESIDUE_MUL_CHECK_OFFSET: usize = 0; +pub const FP6_NON_RESIDUE_MUL_INPUT_OFFSET: usize = FP6_NON_RESIDUE_MUL_CHECK_OFFSET + 1; +pub const FP6_NON_RESIDUE_MUL_C2: usize = FP6_NON_RESIDUE_MUL_INPUT_OFFSET + 24 * 3; +pub const FP6_NON_RESIDUE_MUL_TOTAL: usize = FP6_NON_RESIDUE_MUL_C2 + FP2_NON_RESIDUE_MUL_TOTAL; + +// FP6 add +/* + These trace offsets are for addition for two Fp6 elements. In essence it's three concatenated Fp2 additions. It needs 1 row. +*/ +pub const FP6_ADDITION_0_OFFSET: usize = 0; +pub const FP6_ADDITION_1_OFFSET: usize = FP6_ADDITION_0_OFFSET + FP2_ADDITION_TOTAL; +pub const FP6_ADDITION_2_OFFSET: usize = FP6_ADDITION_1_OFFSET + FP2_ADDITION_TOTAL; +pub const FP6_ADDITION_TOTAL: usize = FP6_ADDITION_2_OFFSET + FP2_ADDITION_TOTAL; + +// FP6 sub +/* + These trace offsets are for subtraction for two Fp6 elements. In essence it's three concatenated Fp2 subtractions. It needs 1 row. +*/ +pub const FP6_SUBTRACTION_0_OFFSET: usize = 0; +pub const FP6_SUBTRACTION_1_OFFSET: usize = FP6_SUBTRACTION_0_OFFSET + FP2_SUBTRACTION_TOTAL; +pub const FP6_SUBTRACTION_2_OFFSET: usize = FP6_SUBTRACTION_1_OFFSET + FP2_SUBTRACTION_TOTAL; +pub const FP6_SUBTRACTION_TOTAL: usize = FP6_SUBTRACTION_2_OFFSET + FP2_SUBTRACTION_TOTAL; + +// MultiplyBy01 +/* + These trace offsets are for multiplyBy01 (super::native::Fp6::multiplyBy01) function. The Ti's are defined in the native function definition. It needs 12 rows. 
+*/ +pub const MULTIPLY_BY_01_SELECTOR_OFFSET: usize = 0; +pub const MULTIPLY_BY_01_INPUT_OFFSET: usize = MULTIPLY_BY_01_SELECTOR_OFFSET + 1; +pub const MULTIPLY_BY_01_B0_OFFSET: usize = MULTIPLY_BY_01_INPUT_OFFSET + 24 * 3; +pub const MULTIPLY_BY_01_B1_OFFSET: usize = MULTIPLY_BY_01_B0_OFFSET + 24; +pub const MULTIPLY_BY_01_T0_CALC_OFFSET: usize = MULTIPLY_BY_01_B1_OFFSET + 24; +pub const MULTIPLY_BY_01_T1_CALC_OFFSET: usize = + MULTIPLY_BY_01_T0_CALC_OFFSET + TOTAL_COLUMNS_FP2_MULTIPLICATION; +pub const MULTIPLY_BY_01_T2_CALC_OFFSET: usize = + MULTIPLY_BY_01_T1_CALC_OFFSET + TOTAL_COLUMNS_FP2_MULTIPLICATION; +pub const MULTIPLY_BY_01_T3_CALC_OFFSET: usize = + MULTIPLY_BY_01_T2_CALC_OFFSET + TOTAL_COLUMNS_FP2_MULTIPLICATION; +pub const MULTIPLY_BY_01_X_CALC_OFFSET: usize = + MULTIPLY_BY_01_T3_CALC_OFFSET + FP2_NON_RESIDUE_MUL_TOTAL; +pub const MULTIPLY_BY_01_T4_CALC_OFFSET: usize = MULTIPLY_BY_01_X_CALC_OFFSET + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 2; +pub const MULTIPLY_BY_01_T5_CALC_OFFSET: usize = MULTIPLY_BY_01_T4_CALC_OFFSET + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 2; +pub const MULTIPLY_BY_01_T6_CALC_OFFSET: usize = MULTIPLY_BY_01_T5_CALC_OFFSET + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 2; +pub const MULTIPLY_BY_01_T7_CALC_OFFSET: usize = + MULTIPLY_BY_01_T6_CALC_OFFSET + TOTAL_COLUMNS_FP2_MULTIPLICATION; +pub const MULTIPLY_BY_01_Y_CALC_OFFSET: usize = MULTIPLY_BY_01_T7_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 2; +pub const MULTIPLY_BY_01_T8_CALC_OFFSET: usize = MULTIPLY_BY_01_Y_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 2; +pub const MULTIPLY_BY_01_Z_CALC_OFFSET: usize = + MULTIPLY_BY_01_T8_CALC_OFFSET + TOTAL_COLUMNS_FP2_MULTIPLICATION; +pub const MULTIPLY_BY_01_TOTAL: usize = MULTIPLY_BY_01_Z_CALC_OFFSET + + 
FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * 2; + +// MultiplyBy1 +/* + These trace offsets are for multiplyBy1 (super::native::Fp6::multiplyBy1) function. The Ti's are defined in the native function definition. It needs 12 rows. +*/ +pub const MULTIPLY_BY_1_SELECTOR_OFFSET: usize = 0; +pub const MULTIPLY_BY_1_INPUT_OFFSET: usize = MULTIPLY_BY_1_SELECTOR_OFFSET + 1; +pub const MULTIPLY_BY_1_B1_OFFSET: usize = MULTIPLY_BY_1_INPUT_OFFSET + 24 * 3; +pub const MULTIPLY_BY_1_T0_CALC_OFFSET: usize = MULTIPLY_BY_1_B1_OFFSET + 24; +pub const MULTIPLY_BY_1_X_CALC_OFFSET: usize = + MULTIPLY_BY_1_T0_CALC_OFFSET + TOTAL_COLUMNS_FP2_MULTIPLICATION; +pub const MULTIPLY_BY_1_Y_CALC_OFFSET: usize = + MULTIPLY_BY_1_X_CALC_OFFSET + FP2_NON_RESIDUE_MUL_TOTAL; +pub const MULTIPLY_BY_1_Z_CALC_OFFSET: usize = + MULTIPLY_BY_1_Y_CALC_OFFSET + TOTAL_COLUMNS_FP2_MULTIPLICATION; +pub const MULTIPLY_BY_1_TOTAL: usize = + MULTIPLY_BY_1_Z_CALC_OFFSET + TOTAL_COLUMNS_FP2_MULTIPLICATION; + +// Forbenius map Fp6 +/* + These trace offsets are for forbenius_map (super::native::Fp6::forbenius_map) function. It needs 12 rows. + FP6_FORBENIUS_MAP_DIV_OFFSET -> offset which stores integer division power/6. + FP6_FORBENIUS_MAP_REM_OFFSET -> offset which stores power%6. + FP6_FORBENIUS_MAP_BIT0_OFFSET, FP6_FORBENIUS_MAP_BIT1_OFFSET, FP6_FORBENIUS_MAP_BIT2_OFFSET -> offsets which store the bit decomposition of remainder (power%6). 
+*/ +pub const FP6_FORBENIUS_MAP_SELECTOR_OFFSET: usize = 0; +pub const FP6_FORBENIUS_MAP_INPUT_OFFSET: usize = FP6_FORBENIUS_MAP_SELECTOR_OFFSET + 1; +pub const FP6_FORBENIUS_MAP_POW_OFFSET: usize = FP6_FORBENIUS_MAP_INPUT_OFFSET + 24 * 3; +pub const FP6_FORBENIUS_MAP_DIV_OFFSET: usize = FP6_FORBENIUS_MAP_POW_OFFSET + 1; +pub const FP6_FORBENIUS_MAP_REM_OFFSET: usize = FP6_FORBENIUS_MAP_DIV_OFFSET + 1; +pub const FP6_FORBENIUS_MAP_BIT0_OFFSET: usize = FP6_FORBENIUS_MAP_REM_OFFSET + 1; +pub const FP6_FORBENIUS_MAP_BIT1_OFFSET: usize = FP6_FORBENIUS_MAP_BIT0_OFFSET + 1; +pub const FP6_FORBENIUS_MAP_BIT2_OFFSET: usize = FP6_FORBENIUS_MAP_BIT1_OFFSET + 1; +pub const FP6_FORBENIUS_MAP_X_CALC_OFFSET: usize = FP6_FORBENIUS_MAP_BIT2_OFFSET + 1; +pub const FP6_FORBENIUS_MAP_T0_CALC_OFFSET: usize = + FP6_FORBENIUS_MAP_X_CALC_OFFSET + FP2_FORBENIUS_MAP_TOTAL_COLUMNS; +pub const FP6_FORBENIUS_MAP_Y_CALC_OFFSET: usize = + FP6_FORBENIUS_MAP_T0_CALC_OFFSET + FP2_FORBENIUS_MAP_TOTAL_COLUMNS; +pub const FP6_FORBENIUS_MAP_T1_CALC_OFFSET: usize = + FP6_FORBENIUS_MAP_Y_CALC_OFFSET + TOTAL_COLUMNS_FP2_MULTIPLICATION; +pub const FP6_FORBENIUS_MAP_Z_CALC_OFFSET: usize = + FP6_FORBENIUS_MAP_T1_CALC_OFFSET + FP2_FORBENIUS_MAP_TOTAL_COLUMNS; +pub const FP6_FORBENIUS_MAP_TOTAL_COLUMNS: usize = + FP6_FORBENIUS_MAP_Z_CALC_OFFSET + TOTAL_COLUMNS_FP2_MULTIPLICATION; + +/// Fills the stark trace of fp6 addition. Inputs are 12*6 limbs each. Needs 1 row. 
+pub fn fill_trace_addition_fp6, const D: usize, const C: usize>( + trace: &mut Vec<[F; C]>, + x: &[[u32; 12]; 6], + y: &[[u32; 12]; 6], + row: usize, + start_col: usize, +) { + fill_trace_addition_fp2( + trace, + &[x[0], x[1]], + &[y[0], y[1]], + row, + start_col + FP6_ADDITION_0_OFFSET, + ); + fill_trace_addition_fp2( + trace, + &[x[2], x[3]], + &[y[2], y[3]], + row, + start_col + FP6_ADDITION_1_OFFSET, + ); + fill_trace_addition_fp2( + trace, + &[x[4], x[5]], + &[y[4], y[5]], + row, + start_col + FP6_ADDITION_2_OFFSET, + ); +} + +/// Fills trace of fp6 addition combined with reduction and range check. Inputs are 12*6 limbs each. Needs 1 row. +pub fn fill_trace_addition_with_reduction_fp6< + F: RichField + Extendable, + const D: usize, + const C: usize, +>( + trace: &mut Vec<[F; C]>, + x: &Fp6, + y: &Fp6, + row: usize, + start_col: usize, +) { + fill_trace_addition_fp6( + trace, + &x.get_u32_slice(), + &y.get_u32_slice(), + row, + start_col, + ); + for i in 0..6 { + let sum = get_u32_vec_from_literal( + BigUint::new(x.0[i].0.to_vec()) + BigUint::new(y.0[i].0.to_vec()), + ); + let rem = fill_trace_reduce_single( + trace, + &sum, + row, + start_col + FP6_ADDITION_TOTAL + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * i, + ); + fill_range_check_trace( + trace, + &rem, + row, + start_col + + FP6_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * i + + FP_SINGLE_REDUCE_TOTAL, + ); + } +} + +/// Fills trace of fp6 subtraction combined with reduction and range check. Inputs are 12*6 limbs each. Needs 1 row. Fills trace of adding field prime p to x first, and then the trace for subtraction with y. 
+pub fn fill_trace_subtraction_with_reduction_fp6< + F: RichField + Extendable, + const D: usize, + const C: usize, +>( + trace: &mut Vec<[F; C]>, + x: &Fp6, + y: &Fp6, + row: usize, + start_col: usize, +) { + let modulus = vec![get_u32_vec_from_literal(modulus()); 6] + .try_into() + .unwrap(); + fill_trace_addition_fp6(trace, &x.get_u32_slice(), &modulus, row, start_col); + let x_modulus = modulus + .iter() + .zip(x.get_u32_slice()) + .map(|(m, f)| get_u32_vec_from_literal(BigUint::new(m.to_vec()) + BigUint::new(f.to_vec()))) + .collect::>() + .try_into() + .unwrap(); + fill_trace_subtraction_fp6( + trace, + &x_modulus, + &y.get_u32_slice(), + row, + start_col + FP6_ADDITION_TOTAL, + ); + for i in 0..6 { + let diff = get_u32_vec_from_literal( + BigUint::new(x_modulus[i].to_vec()) - BigUint::new(y.0[i].0.to_vec()), + ); + let rem = fill_trace_reduce_single( + trace, + &diff, + row, + start_col + + FP6_ADDITION_TOTAL + + FP6_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * i, + ); + fill_range_check_trace( + trace, + &rem, + row, + start_col + + FP6_ADDITION_TOTAL + + FP6_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * i + + FP_SINGLE_REDUCE_TOTAL, + ); + } +} + +/// Fills the stark trace of fp6 subtraction. Inputs are 12*6 limbs each. Needs 1 row. Assume x > y. +pub fn fill_trace_subtraction_fp6, const D: usize, const C: usize>( + trace: &mut Vec<[F; C]>, + x: &[[u32; 12]; 6], + y: &[[u32; 12]; 6], + row: usize, + start_col: usize, +) { + fill_trace_subtraction_fp2( + trace, + &[x[0], x[1]], + &[y[0], y[1]], + row, + start_col + FP6_SUBTRACTION_0_OFFSET, + ); + fill_trace_subtraction_fp2( + trace, + &[x[2], x[3]], + &[y[2], y[3]], + row, + start_col + FP6_SUBTRACTION_1_OFFSET, + ); + fill_trace_subtraction_fp2( + trace, + &[x[4], x[5]], + &[y[4], y[5]], + row, + start_col + FP6_SUBTRACTION_2_OFFSET, + ); +} + +/// Fills the stark trace of negation. Input is 12*6 limbs. Needs 1 row. 
In essence, it fills an addition trace with inputs as `x` and `-x`. +pub fn fill_trace_negate_fp6, const D: usize, const C: usize>( + trace: &mut Vec<[F; C]>, + x: &Fp6, + row: usize, + start_col: usize, +) { + fill_trace_addition_fp6( + trace, + &x.get_u32_slice(), + &(-(*x)).get_u32_slice(), + row, + start_col, + ); +} + +/// Fills trace of [mul_by_nonresidue](super::native::mul_by_nonresidue) function. Input is 12*6 limbs. Needs 1 row. +pub fn fill_trace_non_residue_multiplication_fp6< + F: RichField + Extendable, + const D: usize, + const C: usize, +>( + trace: &mut Vec<[F; C]>, + x: &Fp6, + row: usize, + start_col: usize, +) { + trace[row][start_col + FP6_NON_RESIDUE_MUL_CHECK_OFFSET] = F::ONE; + for (i, e) in x.0.iter().enumerate() { + assign_u32_in_series( + trace, + row, + start_col + FP6_NON_RESIDUE_MUL_INPUT_OFFSET + i * 12, + &e.0, + ); + } + let c2 = Fp2([x.0[4], x.0[5]]); + fill_trace_non_residue_multiplication( + trace, + &c2.get_u32_slice(), + row, + start_col + FP6_NON_RESIDUE_MUL_C2, + ); +} + +/// Fills stark trace for fp6 multiplication. Inputs are 12*6 limbs each. Needs 12 rows. 
+pub fn fill_trace_fp6_multiplication< + F: RichField + Extendable, + const D: usize, + const C: usize, +>( + trace: &mut Vec<[F; C]>, + x: &Fp6, + y: &Fp6, + start_row: usize, + end_row: usize, + start_col: usize, +) { + for i in 0..6 { + for row in start_row..end_row + 1 { + assign_u32_in_series( + trace, + row, + start_col + FP6_MUL_X_INPUT_OFFSET + 12 * i, + &x.0[i].0, + ); + assign_u32_in_series( + trace, + row, + start_col + FP6_MUL_Y_INPUT_OFFSET + 12 * i, + &y.0[i].0, + ); + trace[row][start_col + FP6_MUL_SELECTOR_OFFSET] = F::ONE; + } + } + trace[end_row][start_col + FP6_MUL_SELECTOR_OFFSET] = F::ZERO; + let (c0, c1, c2) = ( + Fp2([x.0[0], x.0[1]]), + Fp2([x.0[2], x.0[3]]), + Fp2([x.0[4], x.0[5]]), + ); + let (r0, r1, r2) = ( + Fp2([y.0[0], y.0[1]]), + Fp2([y.0[2], y.0[3]]), + Fp2([y.0[4], y.0[5]]), + ); + + let t0 = c0 * r0; + generate_trace_fp2_mul( + trace, + c0.get_u32_slice(), + r0.get_u32_slice(), + start_row, + end_row, + start_col + FP6_MUL_T0_CALC_OFFSET, + ); + let t1 = c1 * r1; + generate_trace_fp2_mul( + trace, + c1.get_u32_slice(), + r1.get_u32_slice(), + start_row, + end_row, + start_col + FP6_MUL_T1_CALC_OFFSET, + ); + let t2 = c2 * r2; + generate_trace_fp2_mul( + trace, + c2.get_u32_slice(), + r2.get_u32_slice(), + start_row, + end_row, + start_col + FP6_MUL_T2_CALC_OFFSET, + ); + + let t3 = c1 + c2; + for row in start_row..end_row + 1 { + fill_trace_addition_with_reduction( + trace, + &c1.get_u32_slice(), + &c2.get_u32_slice(), + row, + start_col + FP6_MUL_T3_CALC_OFFSET, + ); + } + let t4 = r1 + r2; + for row in start_row..end_row + 1 { + fill_trace_addition_with_reduction( + trace, + &r1.get_u32_slice(), + &r2.get_u32_slice(), + row, + start_col + FP6_MUL_T4_CALC_OFFSET, + ); + } + let t5 = t3 * t4; + generate_trace_fp2_mul( + trace, + t3.get_u32_slice(), + t4.get_u32_slice(), + start_row, + end_row, + start_col + FP6_MUL_T5_CALC_OFFSET, + ); + let t6 = t5 - t1; + for row in start_row..end_row + 1 { + 
fill_trace_subtraction_with_reduction( + trace, + &t5.get_u32_slice(), + &t1.get_u32_slice(), + row, + start_col + FP6_MUL_T6_CALC_OFFSET, + ); + } + let t7 = t6 - t2; + for row in start_row..end_row + 1 { + fill_trace_subtraction_with_reduction( + trace, + &t6.get_u32_slice(), + &t2.get_u32_slice(), + row, + start_col + FP6_MUL_T7_CALC_OFFSET, + ); + } + let t8 = t7.mul_by_nonresidue(); + for row in start_row..end_row + 1 { + fill_trace_non_residue_multiplication( + trace, + &t7.get_u32_slice(), + row, + start_col + FP6_MUL_T8_CALC_OFFSET, + ); + } + let _x = t8 + t0; + for row in start_row..end_row + 1 { + fill_trace_addition_with_reduction( + trace, + &t8.get_u32_slice(), + &t0.get_u32_slice(), + row, + start_col + FP6_MUL_X_CALC_OFFSET, + ); + } + + let t9 = c0 + c1; + for row in start_row..end_row + 1 { + fill_trace_addition_with_reduction( + trace, + &c0.get_u32_slice(), + &c1.get_u32_slice(), + row, + start_col + FP6_MUL_T9_CALC_OFFSET, + ); + } + let t10 = r0 + r1; + for row in start_row..end_row + 1 { + fill_trace_addition_with_reduction( + trace, + &r0.get_u32_slice(), + &r1.get_u32_slice(), + row, + start_col + FP6_MUL_T10_CALC_OFFSET, + ); + } + let t11 = t9 * t10; + generate_trace_fp2_mul( + trace, + t9.get_u32_slice(), + t10.get_u32_slice(), + start_row, + end_row, + start_col + FP6_MUL_T11_CALC_OFFSET, + ); + let t12 = t11 - t0; + for row in start_row..end_row + 1 { + fill_trace_subtraction_with_reduction( + trace, + &t11.get_u32_slice(), + &t0.get_u32_slice(), + row, + start_col + FP6_MUL_T12_CALC_OFFSET, + ); + } + let t13 = t12 - t1; + for row in start_row..end_row + 1 { + fill_trace_subtraction_with_reduction( + trace, + &t12.get_u32_slice(), + &t1.get_u32_slice(), + row, + start_col + FP6_MUL_T13_CALC_OFFSET, + ); + } + let t14 = t2.mul_by_nonresidue(); + for row in start_row..end_row + 1 { + fill_trace_non_residue_multiplication( + trace, + &t2.get_u32_slice(), + row, + start_col + FP6_MUL_T14_CALC_OFFSET, + ); + } + let _y = t13 + t14; + for 
row in start_row..end_row + 1 { + fill_trace_addition_with_reduction( + trace, + &t13.get_u32_slice(), + &t14.get_u32_slice(), + row, + start_col + FP6_MUL_Y_CALC_OFFSET, + ); + } + + let t15 = c0 + c2; + for row in start_row..end_row + 1 { + fill_trace_addition_with_reduction( + trace, + &c0.get_u32_slice(), + &c2.get_u32_slice(), + row, + start_col + FP6_MUL_T15_CALC_OFFSET, + ); + } + let t16 = r0 + r2; + for row in start_row..end_row + 1 { + fill_trace_addition_with_reduction( + trace, + &r0.get_u32_slice(), + &r2.get_u32_slice(), + row, + start_col + FP6_MUL_T16_CALC_OFFSET, + ); + } + let t17 = t15 * t16; + generate_trace_fp2_mul( + trace, + t15.get_u32_slice(), + t16.get_u32_slice(), + start_row, + end_row, + start_col + FP6_MUL_T17_CALC_OFFSET, + ); + let t18 = t17 - t0; + for row in start_row..end_row + 1 { + fill_trace_subtraction_with_reduction( + trace, + &t17.get_u32_slice(), + &t0.get_u32_slice(), + row, + start_col + FP6_MUL_T18_CALC_OFFSET, + ); + } + let t19 = t18 - t2; + for row in start_row..end_row + 1 { + fill_trace_subtraction_with_reduction( + trace, + &t18.get_u32_slice(), + &t2.get_u32_slice(), + row, + start_col + FP6_MUL_T19_CALC_OFFSET, + ); + } + let _z = t19 + t1; + for row in start_row..end_row + 1 { + fill_trace_addition_with_reduction( + trace, + &t19.get_u32_slice(), + &t1.get_u32_slice(), + row, + start_col + FP6_MUL_Z_CALC_OFFSET, + ); + } +} + +/// Fills trace of [multiplyBy1](super::native::Fp6::multiplyBy1) function. Input is 12\*6 limbs and 12\*2 limbs. Needs 12 rows. 
+pub fn fill_trace_multiply_by_1, const D: usize, const C: usize>( + trace: &mut Vec<[F; C]>, + x: &Fp6, + b1: &Fp2, + start_row: usize, + end_row: usize, + start_col: usize, +) { + for row in start_row..end_row + 1 { + for i in 0..6 { + assign_u32_in_series( + trace, + row, + start_col + MULTIPLY_BY_1_INPUT_OFFSET + i * 12, + &x.0[i].0, + ); + } + for i in 0..2 { + assign_u32_in_series( + trace, + row, + start_col + MULTIPLY_BY_1_B1_OFFSET + i * 12, + &b1.0[i].0, + ); + } + trace[row][start_col + MULTIPLY_BY_1_SELECTOR_OFFSET] = F::ONE; + } + trace[end_row][start_col + MULTIPLY_BY_1_SELECTOR_OFFSET] = F::ZERO; + + let c0 = Fp2([x.0[0], x.0[1]]); + let c1 = Fp2([x.0[2], x.0[3]]); + let c2 = Fp2([x.0[4], x.0[5]]); + let t0 = c2 * (*b1); + generate_trace_fp2_mul( + trace, + c2.get_u32_slice(), + b1.get_u32_slice(), + start_row, + end_row, + start_col + MULTIPLY_BY_1_T0_CALC_OFFSET, + ); + let _x = t0.mul_by_nonresidue(); + for row in start_row..end_row + 1 { + fill_trace_non_residue_multiplication( + trace, + &t0.get_u32_slice(), + row, + start_col + MULTIPLY_BY_1_X_CALC_OFFSET, + ); + } + let _y = c0 * (*b1); + generate_trace_fp2_mul( + trace, + c0.get_u32_slice(), + b1.get_u32_slice(), + start_row, + end_row, + start_col + MULTIPLY_BY_1_Y_CALC_OFFSET, + ); + let _z = c1 * (*b1); + generate_trace_fp2_mul( + trace, + c1.get_u32_slice(), + b1.get_u32_slice(), + start_row, + end_row, + start_col + MULTIPLY_BY_1_Z_CALC_OFFSET, + ); +} + +/// Fills trace of [multiplyBy01](super::native::Fp6::multiplyBy01) function. Input is 12\*6 limbs and two 12\*2 limbs. Needs 12 rows. 
+pub fn fill_trace_multiply_by_01, const D: usize, const C: usize>( + trace: &mut Vec<[F; C]>, + x: &Fp6, + b0: &Fp2, + b1: &Fp2, + start_row: usize, + end_row: usize, + start_col: usize, +) { + for row in start_row..end_row + 1 { + for i in 0..6 { + assign_u32_in_series( + trace, + row, + start_col + MULTIPLY_BY_01_INPUT_OFFSET + i * 12, + &x.0[i].0, + ); + } + for i in 0..2 { + assign_u32_in_series( + trace, + row, + start_col + MULTIPLY_BY_01_B0_OFFSET + i * 12, + &b0.0[i].0, + ); + } + for i in 0..2 { + assign_u32_in_series( + trace, + row, + start_col + MULTIPLY_BY_01_B1_OFFSET + i * 12, + &b1.0[i].0, + ); + } + trace[row][start_col + MULTIPLY_BY_01_SELECTOR_OFFSET] = F::ONE; + } + trace[end_row][start_col + MULTIPLY_BY_01_SELECTOR_OFFSET] = F::ZERO; + + let c0 = Fp2([x.0[0], x.0[1]]); + let c1 = Fp2([x.0[2], x.0[3]]); + let c2 = Fp2([x.0[4], x.0[5]]); + + let t0 = c0 * (*b0); + generate_trace_fp2_mul( + trace, + c0.get_u32_slice(), + b0.get_u32_slice(), + start_row, + end_row, + start_col + MULTIPLY_BY_01_T0_CALC_OFFSET, + ); + let t1 = c1 * (*b1); + generate_trace_fp2_mul( + trace, + c1.get_u32_slice(), + b1.get_u32_slice(), + start_row, + end_row, + start_col + MULTIPLY_BY_01_T1_CALC_OFFSET, + ); + + let t2 = c2 * (*b1); + generate_trace_fp2_mul( + trace, + c2.get_u32_slice(), + b1.get_u32_slice(), + start_row, + end_row, + start_col + MULTIPLY_BY_01_T2_CALC_OFFSET, + ); + let t3 = t2.mul_by_nonresidue(); + for row in start_row..end_row + 1 { + fill_trace_non_residue_multiplication( + trace, + &t2.get_u32_slice(), + row, + start_col + MULTIPLY_BY_01_T3_CALC_OFFSET, + ); + } + let _x = t3 + t0; + for row in start_row..end_row + 1 { + fill_trace_addition_with_reduction( + trace, + &t3.get_u32_slice(), + &t0.get_u32_slice(), + row, + start_col + MULTIPLY_BY_01_X_CALC_OFFSET, + ); + } + + let t4 = (*b0) + (*b1); + for row in start_row..end_row + 1 { + fill_trace_addition_with_reduction( + trace, + &b0.get_u32_slice(), + &b1.get_u32_slice(), + row, + start_col + 
MULTIPLY_BY_01_T4_CALC_OFFSET, + ); + } + let t5 = c0 + c1; + for row in start_row..end_row + 1 { + fill_trace_addition_with_reduction( + trace, + &c0.get_u32_slice(), + &c1.get_u32_slice(), + row, + start_col + MULTIPLY_BY_01_T5_CALC_OFFSET, + ); + } + let t6 = t4 * t5; + generate_trace_fp2_mul( + trace, + t4.get_u32_slice(), + t5.get_u32_slice(), + start_row, + end_row, + start_col + MULTIPLY_BY_01_T6_CALC_OFFSET, + ); + let t7 = t6 - t0; + for row in start_row..end_row + 1 { + fill_trace_subtraction_with_reduction( + trace, + &t6.get_u32_slice(), + &t0.get_u32_slice(), + row, + start_col + MULTIPLY_BY_01_T7_CALC_OFFSET, + ); + } + let _y = t7 - t1; + for row in start_row..end_row + 1 { + fill_trace_subtraction_with_reduction( + trace, + &t7.get_u32_slice(), + &t1.get_u32_slice(), + row, + start_col + MULTIPLY_BY_01_Y_CALC_OFFSET, + ); + } + + let t8 = c2 * (*b0); + generate_trace_fp2_mul( + trace, + c2.get_u32_slice(), + b0.get_u32_slice(), + start_row, + end_row, + start_col + MULTIPLY_BY_01_T8_CALC_OFFSET, + ); + let _z = t8 + t1; + for row in start_row..end_row + 1 { + fill_trace_addition_with_reduction( + trace, + &t8.get_u32_slice(), + &t1.get_u32_slice(), + row, + start_col + MULTIPLY_BY_01_Z_CALC_OFFSET, + ); + } +} + +/// Fills trace of [forbenius_map](super::native::Fp6::forbenius_map) function. Input is 12*6 limbs and usize. Needs 12 rows. 
+pub fn fill_trace_fp6_forbenius_map< + F: RichField + Extendable, + const D: usize, + const C: usize, +>( + trace: &mut Vec<[F; C]>, + x: &Fp6, + pow: usize, + start_row: usize, + end_row: usize, + start_col: usize, +) { + let div = pow / 6; + let rem = pow % 6; + for row in start_row..end_row + 1 { + assign_u32_in_series( + trace, + row, + start_col + FP6_FORBENIUS_MAP_INPUT_OFFSET, + &x.get_u32_slice().concat(), + ); + trace[row][start_col + FP6_FORBENIUS_MAP_SELECTOR_OFFSET] = F::ONE; + trace[row][start_col + FP6_FORBENIUS_MAP_POW_OFFSET] = F::from_canonical_usize(pow); + trace[row][start_col + FP6_FORBENIUS_MAP_DIV_OFFSET] = F::from_canonical_usize(div); + trace[row][start_col + FP6_FORBENIUS_MAP_REM_OFFSET] = F::from_canonical_usize(rem); + trace[row][start_col + FP6_FORBENIUS_MAP_BIT0_OFFSET] = F::from_canonical_usize(rem & 1); + trace[row][start_col + FP6_FORBENIUS_MAP_BIT1_OFFSET] = + F::from_canonical_usize((rem >> 1) & 1); + trace[row][start_col + FP6_FORBENIUS_MAP_BIT2_OFFSET] = F::from_canonical_usize(rem >> 2); + } + trace[end_row][start_col + FP6_FORBENIUS_MAP_SELECTOR_OFFSET] = F::ZERO; + let c0 = Fp2(x.0[0..2].to_vec().try_into().unwrap()); + let c1 = Fp2(x.0[2..4].to_vec().try_into().unwrap()); + let c2 = Fp2(x.0[4..6].to_vec().try_into().unwrap()); + let forbenius_coefficients_1 = Fp6::forbenius_coefficients_1(); + let forbenius_coefficients_2 = Fp6::forbenius_coefficients_2(); + let _x = c0.forbenius_map(pow); + fill_trace_fp2_forbenius_map( + trace, + &c0, + pow, + start_row, + end_row, + start_col + FP6_FORBENIUS_MAP_X_CALC_OFFSET, + ); + let t0 = c1.forbenius_map(pow); + fill_trace_fp2_forbenius_map( + trace, + &c1, + pow, + start_row, + end_row, + start_col + FP6_FORBENIUS_MAP_T0_CALC_OFFSET, + ); + let _y = t0 * forbenius_coefficients_1[pow % 6]; + generate_trace_fp2_mul( + trace, + t0.get_u32_slice(), + forbenius_coefficients_1[pow % 6].get_u32_slice(), + start_row, + end_row, + start_col + FP6_FORBENIUS_MAP_Y_CALC_OFFSET, + ); + let t1 = 
c2.forbenius_map(pow); + fill_trace_fp2_forbenius_map( + trace, + &c2, + pow, + start_row, + end_row, + start_col + FP6_FORBENIUS_MAP_T1_CALC_OFFSET, + ); + let _z = t1 * forbenius_coefficients_2[pow % 6]; + generate_trace_fp2_mul( + trace, + t1.get_u32_slice(), + forbenius_coefficients_2[pow % 6].get_u32_slice(), + start_row, + end_row, + start_col + FP6_FORBENIUS_MAP_Z_CALC_OFFSET, + ); +} + +/// Constraints fp2 addition. In essence, constraints three Fp2 addititons. +pub fn add_addition_fp6_constraints< + F: RichField + Extendable, + const D: usize, + FE, + P, + const D2: usize, +>( + local_values: &[P], + yield_constr: &mut ConstraintConsumer

, + start_col: usize, // Starting column of your multiplication trace + bit_selector: Option

, +) where + FE: FieldExtension, + P: PackedField, +{ + add_addition_fp2_constraints( + local_values, + yield_constr, + start_col + FP6_ADDITION_0_OFFSET, + bit_selector, + ); + add_addition_fp2_constraints( + local_values, + yield_constr, + start_col + FP6_ADDITION_1_OFFSET, + bit_selector, + ); + add_addition_fp2_constraints( + local_values, + yield_constr, + start_col + FP6_ADDITION_2_OFFSET, + bit_selector, + ); +} + +pub fn add_addition_fp6_constraints_ext_circuit, const D: usize>( + builder: &mut CircuitBuilder, + yield_constr: &mut RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + start_col: usize, + bit_selector: Option>, +) { + add_addition_fp2_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP6_ADDITION_0_OFFSET, + bit_selector, + ); + add_addition_fp2_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP6_ADDITION_1_OFFSET, + bit_selector, + ); + add_addition_fp2_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP6_ADDITION_2_OFFSET, + bit_selector, + ); +} + +/// Constraints fp6 addition followed by reduction and range check constraints. +pub fn add_addition_with_reduction_constranints_fp6< + F: RichField + Extendable, + const D: usize, + FE, + P, + const D2: usize, +>( + local_values: &[P], + yield_constr: &mut ConstraintConsumer

, + start_col: usize, // Starting column of your multiplication trace + bit_selector: Option

, +) where + FE: FieldExtension, + P: PackedField, +{ + let bit_selector_val = bit_selector.unwrap_or(P::ONES); + + add_addition_fp6_constraints(local_values, yield_constr, start_col, bit_selector); + for j in 0..6 { + let fp2_offset = if j < 2 { + FP6_ADDITION_0_OFFSET + } else if j < 4 { + FP6_ADDITION_1_OFFSET + } else { + FP6_ADDITION_2_OFFSET + }; + let fp_offset = if j % 2 == 0 { + FP2_ADDITION_0_OFFSET + } else { + FP2_ADDITION_1_OFFSET + }; + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + fp2_offset + fp_offset + FP_ADDITION_CHECK_OFFSET] + * (local_values + [start_col + fp2_offset + fp_offset + FP_ADDITION_SUM_OFFSET + i] + - local_values[start_col + + FP6_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCE_X_OFFSET + + i]), + ); + } + add_fp_reduce_single_constraints( + local_values, + yield_constr, + start_col + FP6_ADDITION_TOTAL + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j, + bit_selector, + ); + add_range_check_constraints( + local_values, + yield_constr, + start_col + + FP6_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCE_TOTAL, + bit_selector, + ); + } +} + +pub fn add_addition_with_reduction_constraints_fp6_ext_circuit< + F: RichField + Extendable, + const D: usize, +>( + builder: &mut CircuitBuilder, + yield_constr: &mut RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + start_col: usize, + bit_selector: Option>, +) { + let bit_selector_val = bit_selector.unwrap_or(builder.constant_extension(F::Extension::ONE)); + + add_addition_fp6_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col, + bit_selector, + ); + for j in 0..6 { + let fp2_offset = if j < 2 { + FP6_ADDITION_0_OFFSET + } else if j < 4 { + FP6_ADDITION_1_OFFSET + } else { + FP6_ADDITION_2_OFFSET + }; + let fp_offset = if j % 2 == 0 { + FP2_ADDITION_0_OFFSET + } else { + FP2_ADDITION_1_OFFSET + }; + for i in 0..12 { + let 
sub_tmp = builder.sub_extension( + local_values[start_col + fp2_offset + fp_offset + FP_ADDITION_SUM_OFFSET + i], + local_values[start_col + + FP6_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCE_X_OFFSET + + i], + ); + let c = builder.mul_extension( + local_values[start_col + fp2_offset + fp_offset + FP_ADDITION_CHECK_OFFSET], + sub_tmp, + ); + let c = builder.mul_extension(bit_selector_val, c); + yield_constr.constraint(builder, c); + } + add_fp_reduce_single_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP6_ADDITION_TOTAL + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j, + bit_selector, + ); + add_range_check_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + + FP6_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCE_TOTAL, + bit_selector, + ); + } +} + +/// Constraints fp6 subtraction. In essence, constraints three Fp2 subtractions. +pub fn add_subtraction_fp6_constraints< + F: RichField + Extendable, + const D: usize, + FE, + P, + const D2: usize, +>( + local_values: &[P], + yield_constr: &mut ConstraintConsumer

, + start_col: usize, // Starting column of your multiplication trace + bit_selector: Option

, +) where + FE: FieldExtension, + P: PackedField, +{ + add_subtraction_fp2_constraints( + local_values, + yield_constr, + start_col + FP6_SUBTRACTION_0_OFFSET, + bit_selector, + ); + add_subtraction_fp2_constraints( + local_values, + yield_constr, + start_col + FP6_SUBTRACTION_1_OFFSET, + bit_selector, + ); + add_subtraction_fp2_constraints( + local_values, + yield_constr, + start_col + FP6_SUBTRACTION_2_OFFSET, + bit_selector, + ); +} + +pub fn add_subtraction_fp6_constraints_ext_circuit, const D: usize>( + builder: &mut CircuitBuilder, + yield_constr: &mut RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + start_col: usize, + bit_selector: Option>, +) { + add_subtraction_fp2_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP6_SUBTRACTION_0_OFFSET, + bit_selector, + ); + add_subtraction_fp2_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP6_SUBTRACTION_1_OFFSET, + bit_selector, + ); + add_subtraction_fp2_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP6_SUBTRACTION_2_OFFSET, + bit_selector, + ); +} + +/// Constraints fp6 negation. First add constraints for fp6 addition. Followed by constraining the result of the addition with bls12-381 field prime p. +pub fn add_negate_fp6_constraints< + F: RichField + Extendable, + const D: usize, + FE, + P, + const D2: usize, +>( + local_values: &[P], + yield_constr: &mut ConstraintConsumer

, + start_col: usize, // Starting column of your multiplication trace + bit_selector: Option

, +) where + FE: FieldExtension, + P: PackedField, +{ + let bit_selector_val = bit_selector.unwrap_or(P::ONES); + + add_addition_fp6_constraints(local_values, yield_constr, start_col, bit_selector); + let mod_u32 = get_u32_vec_from_literal(modulus()); + for i in 0..12 { + for j in 0..3 { + let fp2_offset = if j == 0 { + FP6_ADDITION_0_OFFSET + } else if j == 1 { + FP6_ADDITION_1_OFFSET + } else { + FP6_ADDITION_2_OFFSET + }; + for k in 0..2 { + let fp_offset = if k == 0 { + FP2_ADDITION_0_OFFSET + } else { + FP2_ADDITION_1_OFFSET + }; + yield_constr.constraint( + bit_selector_val + * local_values + [start_col + fp2_offset + fp_offset + FP_ADDITION_CHECK_OFFSET] + * (local_values + [start_col + fp2_offset + fp_offset + FP_ADDITION_SUM_OFFSET + i] + - FE::from_canonical_u32(mod_u32[i])), + ); + } + } + } +} + +pub fn add_negate_fp6_constraints_ext_circuit, const D: usize>( + builder: &mut CircuitBuilder, + yield_constr: &mut RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + start_col: usize, + bit_selector: Option>, +) { + let bit_selector_val = bit_selector.unwrap_or(builder.constant_extension(F::Extension::ONE)); + + add_addition_fp6_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col, + bit_selector, + ); + let mod_u32 = get_u32_vec_from_literal(modulus()); + for i in 0..12 { + for j in 0..3 { + let fp2_offset = if j == 0 { + FP6_ADDITION_0_OFFSET + } else if j == 1 { + FP6_ADDITION_1_OFFSET + } else { + FP6_ADDITION_2_OFFSET + }; + for k in 0..2 { + let lc = builder.constant_extension(F::Extension::from_canonical_u32(mod_u32[i])); + let fp_offset = if k == 0 { + FP2_ADDITION_0_OFFSET + } else { + FP2_ADDITION_1_OFFSET + }; + + let sub_tmp = builder.sub_extension( + local_values[start_col + fp2_offset + fp_offset + FP_ADDITION_SUM_OFFSET + i], + lc, + ); + let c = builder.mul_extension( + local_values[start_col + fp2_offset + fp_offset + FP_ADDITION_CHECK_OFFSET], + sub_tmp, + ); + let c = 
builder.mul_extension(bit_selector_val, c); + yield_constr.constraint(builder, c); + } + } + } +} + +/// Constraints fp6 subtraction followed by reduction and range check constraints. First, constraints of adding field prime p to x to prevent overflow, because x > y assumption is not valid here. Then constraints the subtraction operation. Then reduce and range check constraints. +pub fn add_subtraction_with_reduction_constranints_fp6< + F: RichField + Extendable, + const D: usize, + FE, + P, + const D2: usize, +>( + local_values: &[P], + yield_constr: &mut ConstraintConsumer

, + start_col: usize, // Starting column of your multiplication trace + bit_selector: Option

, +) where + FE: FieldExtension, + P: PackedField, +{ + let bit_selector_val = bit_selector.unwrap_or(P::ONES); + + add_addition_fp6_constraints(local_values, yield_constr, start_col, bit_selector); + add_subtraction_fp6_constraints( + local_values, + yield_constr, + start_col + FP6_ADDITION_TOTAL, + bit_selector, + ); + + let modulus = get_u32_vec_from_literal(modulus()); + for j in 0..6 { + let (fp2_add_offset, fp2_sub_offset) = if j < 2 { + (FP6_ADDITION_0_OFFSET, FP6_SUBTRACTION_0_OFFSET) + } else if j < 4 { + (FP6_ADDITION_1_OFFSET, FP6_SUBTRACTION_1_OFFSET) + } else { + (FP6_ADDITION_2_OFFSET, FP6_SUBTRACTION_2_OFFSET) + }; + let (_fp_add_offset, fp_sub_offset) = if j % 2 == 0 { + (FP2_ADDITION_0_OFFSET, FP2_SUBTRACTION_0_OFFSET) + } else { + (FP2_ADDITION_1_OFFSET, FP2_SUBTRACTION_1_OFFSET) + }; + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + fp2_add_offset + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + fp2_add_offset + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_Y_OFFSET + + i] + - FE::from_canonical_u32(modulus[i])), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + fp2_add_offset + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + fp2_add_offset + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_Y_OFFSET + + i] + - FE::from_canonical_u32(modulus[i])), + ); + } + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_ADDITION_TOTAL + + fp2_sub_offset + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + + FP6_ADDITION_TOTAL + + fp2_sub_offset + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_X_OFFSET + + i] + - local_values[start_col + + fp2_add_offset + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_SUM_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_ADDITION_TOTAL + + fp2_sub_offset + + 
FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + + FP6_ADDITION_TOTAL + + fp2_sub_offset + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_X_OFFSET + + i] + - local_values[start_col + + fp2_add_offset + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_SUM_OFFSET + + i]), + ); + } + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_ADDITION_TOTAL + + fp2_sub_offset + + fp_sub_offset + + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + + FP6_ADDITION_TOTAL + + fp2_sub_offset + + fp_sub_offset + + FP_SUBTRACTION_DIFF_OFFSET + + i] + - local_values[start_col + + FP6_ADDITION_TOTAL + + FP6_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCE_X_OFFSET + + i]), + ); + } + add_fp_reduce_single_constraints( + local_values, + yield_constr, + start_col + + FP6_ADDITION_TOTAL + + FP6_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j, + bit_selector, + ); + add_range_check_constraints( + local_values, + yield_constr, + start_col + + FP6_ADDITION_TOTAL + + FP6_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCE_TOTAL, + bit_selector, + ); + } +} + +pub fn add_subtraction_with_reduction_constraints_fp6_ext_circuit< + F: RichField + Extendable, + const D: usize, +>( + builder: &mut CircuitBuilder, + yield_constr: &mut RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + start_col: usize, + bit_selector: Option>, +) { + let bit_selector_val = bit_selector.unwrap_or(builder.constant_extension(F::Extension::ONE)); + + add_addition_fp6_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col, + bit_selector, + ); + add_subtraction_fp6_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP6_ADDITION_TOTAL, + bit_selector, + ); + let modulus = get_u32_vec_from_literal(modulus()); + for j in 0..6 { + let (fp2_add_offset, fp2_sub_offset) = 
if j < 2 { + (FP6_ADDITION_0_OFFSET, FP6_SUBTRACTION_0_OFFSET) + } else if j < 4 { + (FP6_ADDITION_1_OFFSET, FP6_SUBTRACTION_1_OFFSET) + } else { + (FP6_ADDITION_2_OFFSET, FP6_SUBTRACTION_2_OFFSET) + }; + let (_fp_add_offset, fp_sub_offset) = if j % 2 == 0 { + (FP2_ADDITION_0_OFFSET, FP2_SUBTRACTION_0_OFFSET) + } else { + (FP2_ADDITION_1_OFFSET, FP2_SUBTRACTION_1_OFFSET) + }; + for i in 0..12 { + let lc = builder.constant_extension(F::Extension::from_canonical_u32(modulus[i])); + + let sub_tmp1 = builder.sub_extension( + local_values + [start_col + fp2_add_offset + FP2_ADDITION_0_OFFSET + FP_ADDITION_Y_OFFSET + i], + lc, + ); + let c1 = builder.mul_extension( + local_values + [start_col + fp2_add_offset + FP2_ADDITION_0_OFFSET + FP_ADDITION_CHECK_OFFSET], + sub_tmp1, + ); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values + [start_col + fp2_add_offset + FP2_ADDITION_1_OFFSET + FP_ADDITION_Y_OFFSET + i], + lc, + ); + let c2 = builder.mul_extension( + local_values + [start_col + fp2_add_offset + FP2_ADDITION_1_OFFSET + FP_ADDITION_CHECK_OFFSET], + sub_tmp2, + ); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + } + + for i in 0..12 { + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + FP6_ADDITION_TOTAL + + fp2_sub_offset + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_X_OFFSET + + i], + local_values[start_col + + fp2_add_offset + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_SUM_OFFSET + + i], + ); + let c1 = builder.mul_extension( + sub_tmp1, + local_values[start_col + + FP6_ADDITION_TOTAL + + fp2_sub_offset + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + FP6_ADDITION_TOTAL + + fp2_sub_offset + + FP2_SUBTRACTION_1_OFFSET + + 
FP_SUBTRACTION_X_OFFSET + + i], + local_values[start_col + + fp2_add_offset + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_SUM_OFFSET + + i], + ); + let c2 = builder.mul_extension( + sub_tmp2, + local_values[start_col + + FP6_ADDITION_TOTAL + + fp2_sub_offset + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + } + for i in 0..12 { + let sub_tmp = builder.sub_extension( + local_values[start_col + + FP6_ADDITION_TOTAL + + fp2_sub_offset + + fp_sub_offset + + FP_SUBTRACTION_DIFF_OFFSET + + i], + local_values[start_col + + FP6_ADDITION_TOTAL + + FP6_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCE_X_OFFSET + + i], + ); + let c = builder.mul_extension( + sub_tmp, + local_values[start_col + + FP6_ADDITION_TOTAL + + fp2_sub_offset + + fp_sub_offset + + FP_SUBTRACTION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c); + yield_constr.constraint(builder, c); + } + add_fp_reduce_single_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + + FP6_ADDITION_TOTAL + + FP6_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j, + bit_selector, + ); + add_range_check_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + + FP6_ADDITION_TOTAL + + FP6_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCE_TOTAL, + bit_selector, + ); + } +} + +/// Constraints [mul_by_nonresidue](super::native::mul_by_nonresidue) function. +pub fn add_non_residue_multiplication_fp6_constraints< + F: RichField + Extendable, + const D: usize, + FE, + P, + const D2: usize, +>( + local_values: &[P], + yield_constr: &mut ConstraintConsumer

, + start_col: usize, + bit_selector: Option

, +) where + FE: FieldExtension, + P: PackedField, +{ + let bit_selector_val = bit_selector.unwrap_or(P::ONES); + + for i in 0..24 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP6_NON_RESIDUE_MUL_CHECK_OFFSET] + * (local_values[start_col + FP6_NON_RESIDUE_MUL_INPUT_OFFSET + i + 48] + - local_values[start_col + + FP6_NON_RESIDUE_MUL_C2 + + FP2_NON_RESIDUE_MUL_INPUT_OFFSET + + i]), + ); + } + add_non_residue_multiplication_constraints( + local_values, + yield_constr, + start_col + FP6_NON_RESIDUE_MUL_C2, + bit_selector, + ); +} + +pub fn add_non_residue_multiplication_fp6_constraints_ext_circuit< + F: RichField + Extendable, + const D: usize, +>( + builder: &mut CircuitBuilder, + yield_constr: &mut RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + start_col: usize, + bit_selector: Option>, +) { + let bit_selector_val = bit_selector.unwrap_or(builder.constant_extension(F::Extension::ONE)); + + for i in 0..24 { + let sub_tmp = builder.sub_extension( + local_values[start_col + FP6_NON_RESIDUE_MUL_INPUT_OFFSET + i + 48], + local_values[start_col + FP6_NON_RESIDUE_MUL_C2 + FP2_NON_RESIDUE_MUL_INPUT_OFFSET + i], + ); + let c = builder.mul_extension( + sub_tmp, + local_values[start_col + FP6_NON_RESIDUE_MUL_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c); + yield_constr.constraint(builder, c); + } + add_non_residue_multiplication_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP6_NON_RESIDUE_MUL_C2, + bit_selector, + ); +} + +/// Constraints fp6 multiplication. +/// +/// Constraints inputs across this and next row, wherever selector is set to on. Constraints all the Ti's (defined in the [function](super::native::mul_Fp6)) accordinng to their respective operations. +pub fn add_fp6_multiplication_constraints< + F: RichField + Extendable, + const D: usize, + FE, + P, + const D2: usize, +>( + local_values: &[P], + next_values: &[P], + yield_constr: &mut ConstraintConsumer

, + start_col: usize, + bit_selector: Option

, +) where + FE: FieldExtension, + P: PackedField, +{ + let bit_selector_val = bit_selector.unwrap_or(P::ONES); + + for i in 0..24 * 3 { + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + FP6_MUL_SELECTOR_OFFSET] + * (local_values[start_col + FP6_MUL_X_INPUT_OFFSET + i] + - next_values[start_col + FP6_MUL_X_INPUT_OFFSET + i]), + ); + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + FP6_MUL_SELECTOR_OFFSET] + * (local_values[start_col + FP6_MUL_Y_INPUT_OFFSET + i] + - next_values[start_col + FP6_MUL_Y_INPUT_OFFSET + i]), + ); + } + + // T0 + for i in 0..24 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP6_MUL_T0_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + FP6_MUL_X_INPUT_OFFSET + i] + - local_values + [start_col + FP6_MUL_T0_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP6_MUL_T0_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + FP6_MUL_Y_INPUT_OFFSET + i] + - local_values + [start_col + FP6_MUL_T0_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i]), + ); + } + add_fp2_mul_constraints( + local_values, + next_values, + yield_constr, + start_col + FP6_MUL_T0_CALC_OFFSET, + bit_selector, + ); + + // T1 + for i in 0..24 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP6_MUL_T1_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + FP6_MUL_X_INPUT_OFFSET + i + 24] + - local_values + [start_col + FP6_MUL_T1_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP6_MUL_T1_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + FP6_MUL_Y_INPUT_OFFSET + i + 24] + - local_values + [start_col + FP6_MUL_T1_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i]), + ); + } + add_fp2_mul_constraints( + local_values, + next_values, + yield_constr, + start_col 
+ FP6_MUL_T1_CALC_OFFSET, + bit_selector, + ); + + // T2 + for i in 0..24 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP6_MUL_T2_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + FP6_MUL_X_INPUT_OFFSET + i + 48] + - local_values + [start_col + FP6_MUL_T2_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP6_MUL_T2_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + FP6_MUL_Y_INPUT_OFFSET + i + 48] + - local_values + [start_col + FP6_MUL_T2_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i]), + ); + } + add_fp2_mul_constraints( + local_values, + next_values, + yield_constr, + start_col + FP6_MUL_T2_CALC_OFFSET, + bit_selector, + ); + + // T3 + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T3_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T3_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + FP6_MUL_X_INPUT_OFFSET + i + 24]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T3_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T3_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + FP6_MUL_X_INPUT_OFFSET + i + 24 + 12]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T3_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T3_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_Y_OFFSET + + i] + - local_values[start_col + FP6_MUL_X_INPUT_OFFSET + i + 48]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T3_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * 
(local_values[start_col + + FP6_MUL_T3_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_Y_OFFSET + + i] + - local_values[start_col + FP6_MUL_X_INPUT_OFFSET + i + 48 + 12]), + ); + } + + add_addition_with_reduction_constranints( + local_values, + yield_constr, + start_col + FP6_MUL_T3_CALC_OFFSET, + bit_selector, + ); + + // T4 + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T4_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T4_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + FP6_MUL_Y_INPUT_OFFSET + i + 24]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T4_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T4_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + FP6_MUL_Y_INPUT_OFFSET + i + 24 + 12]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T4_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T4_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_Y_OFFSET + + i] + - local_values[start_col + FP6_MUL_Y_INPUT_OFFSET + i + 48]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T4_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T4_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_Y_OFFSET + + i] + - local_values[start_col + FP6_MUL_Y_INPUT_OFFSET + i + 48 + 12]), + ); + } + + add_addition_with_reduction_constranints( + local_values, + yield_constr, + start_col + FP6_MUL_T4_CALC_OFFSET, + bit_selector, + ); + + // T5 + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP6_MUL_T5_CALC_OFFSET + 
FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + + FP6_MUL_T3_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i] + - local_values + [start_col + FP6_MUL_T5_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP6_MUL_T5_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + + FP6_MUL_T3_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP_SINGLE_REDUCE_TOTAL + + RANGE_CHECK_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i] + - local_values + [start_col + FP6_MUL_T5_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i + 12]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP6_MUL_T5_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + + FP6_MUL_T4_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i] + - local_values + [start_col + FP6_MUL_T5_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP6_MUL_T5_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + + FP6_MUL_T4_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP_SINGLE_REDUCE_TOTAL + + RANGE_CHECK_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i] + - local_values + [start_col + FP6_MUL_T5_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i + 12]), + ); + } + add_fp2_mul_constraints( + local_values, + next_values, + yield_constr, + start_col + FP6_MUL_T5_CALC_OFFSET, + bit_selector, + ); + + // T6 + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T6_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T6_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + + FP6_MUL_T5_CALC_OFFSET + + Z1_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T6_CALC_OFFSET + + 
FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T6_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + + FP6_MUL_T5_CALC_OFFSET + + Z2_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T6_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T6_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_Y_OFFSET + + i] + - local_values[start_col + + FP6_MUL_T1_CALC_OFFSET + + Z1_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T6_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T6_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_Y_OFFSET + + i] + - local_values[start_col + + FP6_MUL_T1_CALC_OFFSET + + Z2_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + } + + add_subtraction_with_reduction_constranints( + local_values, + yield_constr, + start_col + FP6_MUL_T6_CALC_OFFSET, + bit_selector, + ); + + // T7 + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T7_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T7_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + + FP6_MUL_T6_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T7_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T7_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i] 
+ - local_values[start_col + + FP6_MUL_T6_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + FP_SINGLE_REDUCE_TOTAL + + RANGE_CHECK_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T7_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T7_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_Y_OFFSET + + i] + - local_values[start_col + + FP6_MUL_T2_CALC_OFFSET + + Z1_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T7_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T7_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_Y_OFFSET + + i] + - local_values[start_col + + FP6_MUL_T2_CALC_OFFSET + + Z2_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + } + + add_subtraction_with_reduction_constranints( + local_values, + yield_constr, + start_col + FP6_MUL_T7_CALC_OFFSET, + bit_selector, + ); + + // T8 + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values + [start_col + FP6_MUL_T8_CALC_OFFSET + FP2_NON_RESIDUE_MUL_CHECK_OFFSET] + * (local_values + [start_col + FP6_MUL_T8_CALC_OFFSET + FP2_NON_RESIDUE_MUL_INPUT_OFFSET + i] + - local_values[start_col + + FP6_MUL_T7_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values + [start_col + FP6_MUL_T8_CALC_OFFSET + FP2_NON_RESIDUE_MUL_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T8_CALC_OFFSET + + FP2_NON_RESIDUE_MUL_INPUT_OFFSET + + i + + 12] + - local_values[start_col + + FP6_MUL_T7_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + FP_SINGLE_REDUCE_TOTAL + + 
RANGE_CHECK_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + } + add_non_residue_multiplication_constraints( + local_values, + yield_constr, + start_col + FP6_MUL_T8_CALC_OFFSET, + bit_selector, + ); + + // X calc offset + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_X_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_X_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + + FP6_MUL_T8_CALC_OFFSET + + FP2_NON_RESIDUE_MUL_Z0_REDUCE_OFFSET + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_X_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_X_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + + FP6_MUL_T8_CALC_OFFSET + + FP2_NON_RESIDUE_MUL_Z1_REDUCE_OFFSET + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_X_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_X_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_Y_OFFSET + + i] + - local_values[start_col + + FP6_MUL_T0_CALC_OFFSET + + Z1_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_X_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_X_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_Y_OFFSET + + i] + - local_values[start_col + + FP6_MUL_T0_CALC_OFFSET + + Z2_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + } + + add_addition_with_reduction_constranints( + local_values, + yield_constr, + start_col + FP6_MUL_X_CALC_OFFSET, + bit_selector, + ); + + // T9 + for i in 0..12 { + yield_constr.constraint( + 
bit_selector_val + * local_values[start_col + + FP6_MUL_T9_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T9_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + FP6_MUL_X_INPUT_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T9_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T9_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + FP6_MUL_X_INPUT_OFFSET + i + 12]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T9_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T9_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_Y_OFFSET + + i] + - local_values[start_col + FP6_MUL_X_INPUT_OFFSET + i + 24]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T9_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T9_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_Y_OFFSET + + i] + - local_values[start_col + FP6_MUL_X_INPUT_OFFSET + i + 24 + 12]), + ); + } + + add_addition_with_reduction_constranints( + local_values, + yield_constr, + start_col + FP6_MUL_T9_CALC_OFFSET, + bit_selector, + ); + + // T10 + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T10_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T10_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + FP6_MUL_Y_INPUT_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T10_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * 
(local_values[start_col + + FP6_MUL_T10_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + FP6_MUL_Y_INPUT_OFFSET + i + 12]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T10_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T10_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_Y_OFFSET + + i] + - local_values[start_col + FP6_MUL_Y_INPUT_OFFSET + i + 24]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T10_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T10_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_Y_OFFSET + + i] + - local_values[start_col + FP6_MUL_Y_INPUT_OFFSET + i + 24 + 12]), + ); + } + + add_addition_with_reduction_constranints( + local_values, + yield_constr, + start_col + FP6_MUL_T10_CALC_OFFSET, + bit_selector, + ); + + // T11 + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP6_MUL_T11_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + + FP6_MUL_T9_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i] + - local_values + [start_col + FP6_MUL_T11_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP6_MUL_T11_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + + FP6_MUL_T9_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP_SINGLE_REDUCE_TOTAL + + RANGE_CHECK_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i] + - local_values + [start_col + FP6_MUL_T11_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i + 12]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP6_MUL_T11_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + + FP6_MUL_T10_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i] + 
- local_values + [start_col + FP6_MUL_T11_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP6_MUL_T11_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + + FP6_MUL_T10_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP_SINGLE_REDUCE_TOTAL + + RANGE_CHECK_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i] + - local_values + [start_col + FP6_MUL_T11_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i + 12]), + ); + } + add_fp2_mul_constraints( + local_values, + next_values, + yield_constr, + start_col + FP6_MUL_T11_CALC_OFFSET, + bit_selector, + ); + + // T12 + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T12_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T12_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + + FP6_MUL_T11_CALC_OFFSET + + Z1_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T12_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T12_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + + FP6_MUL_T11_CALC_OFFSET + + Z2_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T12_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T12_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_Y_OFFSET + + i] + - local_values[start_col + + FP6_MUL_T0_CALC_OFFSET + + Z1_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T12_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + 
+ FP_SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T12_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_Y_OFFSET + + i] + - local_values[start_col + + FP6_MUL_T0_CALC_OFFSET + + Z2_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + } + + add_subtraction_with_reduction_constranints( + local_values, + yield_constr, + start_col + FP6_MUL_T12_CALC_OFFSET, + bit_selector, + ); + + // T13 + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T13_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T13_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + + FP6_MUL_T12_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T13_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T13_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + + FP6_MUL_T12_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + FP_SINGLE_REDUCE_TOTAL + + RANGE_CHECK_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T13_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T13_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_Y_OFFSET + + i] + - local_values[start_col + + FP6_MUL_T1_CALC_OFFSET + + Z1_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T13_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + + 
FP6_MUL_T13_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_Y_OFFSET + + i] + - local_values[start_col + + FP6_MUL_T1_CALC_OFFSET + + Z2_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + } + + add_subtraction_with_reduction_constranints( + local_values, + yield_constr, + start_col + FP6_MUL_T13_CALC_OFFSET, + bit_selector, + ); + + // T14 + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values + [start_col + FP6_MUL_T14_CALC_OFFSET + FP2_NON_RESIDUE_MUL_CHECK_OFFSET] + * (local_values + [start_col + FP6_MUL_T14_CALC_OFFSET + FP2_NON_RESIDUE_MUL_INPUT_OFFSET + i] + - local_values[start_col + + FP6_MUL_T2_CALC_OFFSET + + Z1_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values + [start_col + FP6_MUL_T14_CALC_OFFSET + FP2_NON_RESIDUE_MUL_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T14_CALC_OFFSET + + FP2_NON_RESIDUE_MUL_INPUT_OFFSET + + i + + 12] + - local_values[start_col + + FP6_MUL_T2_CALC_OFFSET + + Z2_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + } + add_non_residue_multiplication_constraints( + local_values, + yield_constr, + start_col + FP6_MUL_T14_CALC_OFFSET, + bit_selector, + ); + + // Y calc offset + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_Y_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_Y_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + + FP6_MUL_T13_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_Y_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_Y_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + + 
FP6_MUL_T13_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + FP_SINGLE_REDUCE_TOTAL + + RANGE_CHECK_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_Y_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_Y_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_Y_OFFSET + + i] + - local_values[start_col + + FP6_MUL_T14_CALC_OFFSET + + FP2_NON_RESIDUE_MUL_Z0_REDUCE_OFFSET + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_Y_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_Y_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_Y_OFFSET + + i] + - local_values[start_col + + FP6_MUL_T14_CALC_OFFSET + + FP2_NON_RESIDUE_MUL_Z1_REDUCE_OFFSET + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + } + + add_addition_with_reduction_constranints( + local_values, + yield_constr, + start_col + FP6_MUL_Y_CALC_OFFSET, + bit_selector, + ); + + // T15 + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T15_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T15_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + FP6_MUL_X_INPUT_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T15_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T15_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + FP6_MUL_X_INPUT_OFFSET + i + 12]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T15_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * 
(local_values[start_col + + FP6_MUL_T15_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_Y_OFFSET + + i] + - local_values[start_col + FP6_MUL_X_INPUT_OFFSET + i + 48]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T15_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T15_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_Y_OFFSET + + i] + - local_values[start_col + FP6_MUL_X_INPUT_OFFSET + i + 48 + 12]), + ); + } + + add_addition_with_reduction_constranints( + local_values, + yield_constr, + start_col + FP6_MUL_T15_CALC_OFFSET, + bit_selector, + ); + + // T16 + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T16_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T16_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + FP6_MUL_Y_INPUT_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T16_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T16_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + FP6_MUL_Y_INPUT_OFFSET + i + 12]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T16_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T16_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_Y_OFFSET + + i] + - local_values[start_col + FP6_MUL_Y_INPUT_OFFSET + i + 48]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T16_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T16_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_Y_OFFSET + + i] + - 
local_values[start_col + FP6_MUL_Y_INPUT_OFFSET + i + 48 + 12]), + ); + } + + add_addition_with_reduction_constranints( + local_values, + yield_constr, + start_col + FP6_MUL_T16_CALC_OFFSET, + bit_selector, + ); + + // T17 + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP6_MUL_T17_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + + FP6_MUL_T15_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i] + - local_values + [start_col + FP6_MUL_T17_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP6_MUL_T17_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + + FP6_MUL_T15_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP_SINGLE_REDUCE_TOTAL + + RANGE_CHECK_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i] + - local_values + [start_col + FP6_MUL_T17_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i + 12]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP6_MUL_T17_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + + FP6_MUL_T16_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i] + - local_values + [start_col + FP6_MUL_T17_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + FP6_MUL_T17_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + + FP6_MUL_T16_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP_SINGLE_REDUCE_TOTAL + + RANGE_CHECK_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i] + - local_values + [start_col + FP6_MUL_T17_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i + 12]), + ); + } + add_fp2_mul_constraints( + local_values, + next_values, + yield_constr, + start_col + FP6_MUL_T17_CALC_OFFSET, + bit_selector, + ); + + // T18 + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T18_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + 
FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T18_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + + FP6_MUL_T17_CALC_OFFSET + + Z1_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T18_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T18_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + + FP6_MUL_T17_CALC_OFFSET + + Z2_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T18_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T18_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_Y_OFFSET + + i] + - local_values[start_col + + FP6_MUL_T0_CALC_OFFSET + + Z1_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T18_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T18_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_Y_OFFSET + + i] + - local_values[start_col + + FP6_MUL_T0_CALC_OFFSET + + Z2_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + } + + add_subtraction_with_reduction_constranints( + local_values, + yield_constr, + start_col + FP6_MUL_T18_CALC_OFFSET, + bit_selector, + ); + + // T19 + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T19_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T19_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + + 
FP6_MUL_T18_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T19_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T19_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + + FP6_MUL_T18_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + FP_SINGLE_REDUCE_TOTAL + + RANGE_CHECK_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T19_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T19_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_Y_OFFSET + + i] + - local_values[start_col + + FP6_MUL_T2_CALC_OFFSET + + Z1_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_T19_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_T19_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_Y_OFFSET + + i] + - local_values[start_col + + FP6_MUL_T2_CALC_OFFSET + + Z2_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + } + + add_subtraction_with_reduction_constranints( + local_values, + yield_constr, + start_col + FP6_MUL_T19_CALC_OFFSET, + bit_selector, + ); + + // Z calc offset + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_Z_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_Z_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + + FP6_MUL_T19_CALC_OFFSET + + FP2_ADDITION_TOTAL + + 
FP2_SUBTRACTION_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_Z_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_Z_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + + FP6_MUL_T19_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + FP_SINGLE_REDUCE_TOTAL + + RANGE_CHECK_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_Z_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_Z_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_Y_OFFSET + + i] + - local_values[start_col + + FP6_MUL_T1_CALC_OFFSET + + Z1_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + FP6_MUL_Z_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + FP6_MUL_Z_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_Y_OFFSET + + i] + - local_values[start_col + + FP6_MUL_T1_CALC_OFFSET + + Z2_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + } + + add_addition_with_reduction_constranints( + local_values, + yield_constr, + start_col + FP6_MUL_Z_CALC_OFFSET, + bit_selector, + ); +} + +pub fn add_fp6_multiplication_constraints_ext_circuit< + F: RichField + Extendable, + const D: usize, +>( + builder: &mut CircuitBuilder, + yield_constr: &mut RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + next_values: &[ExtensionTarget], + start_col: usize, + bit_selector: Option>, +) { + let bit_selector_val = bit_selector.unwrap_or(builder.constant_extension(F::Extension::ONE)); + + for i in 0..24 * 3 { + let sub_tmp1 = builder.sub_extension( + local_values[start_col + FP6_MUL_X_INPUT_OFFSET + i], + next_values[start_col + FP6_MUL_X_INPUT_OFFSET 
+ i], + ); + let c1 = builder.mul_extension(sub_tmp1, local_values[start_col + FP6_MUL_SELECTOR_OFFSET]); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint_transition(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + FP6_MUL_Y_INPUT_OFFSET + i], + next_values[start_col + FP6_MUL_Y_INPUT_OFFSET + i], + ); + let c2 = builder.mul_extension(sub_tmp2, local_values[start_col + FP6_MUL_SELECTOR_OFFSET]); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint_transition(builder, c); + } + + // T0 + for i in 0..24 { + let sub_tmp1 = builder.sub_extension( + local_values[start_col + FP6_MUL_X_INPUT_OFFSET + i], + local_values[start_col + FP6_MUL_T0_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i], + ); + let c1 = builder.mul_extension( + sub_tmp1, + local_values[start_col + FP6_MUL_T0_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + FP6_MUL_Y_INPUT_OFFSET + i], + local_values[start_col + FP6_MUL_T0_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i], + ); + let c2 = builder.mul_extension( + sub_tmp2, + local_values[start_col + FP6_MUL_T0_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + } + add_fp2_mul_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + FP6_MUL_T0_CALC_OFFSET, + bit_selector, + ); + + // T1 + for i in 0..24 { + let sub_tmp1 = builder.sub_extension( + local_values[start_col + FP6_MUL_X_INPUT_OFFSET + i + 24], + local_values[start_col + FP6_MUL_T1_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i], + ); + let c1 = builder.mul_extension( + sub_tmp1, + local_values[start_col + FP6_MUL_T1_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c1); + 
yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + FP6_MUL_Y_INPUT_OFFSET + i + 24], + local_values[start_col + FP6_MUL_T1_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i], + ); + let c2 = builder.mul_extension( + sub_tmp2, + local_values[start_col + FP6_MUL_T1_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + } + add_fp2_mul_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + FP6_MUL_T1_CALC_OFFSET, + bit_selector, + ); + + // T2 + for i in 0..24 { + let sub_tmp1 = builder.sub_extension( + local_values[start_col + FP6_MUL_X_INPUT_OFFSET + i + 48], + local_values[start_col + FP6_MUL_T2_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i], + ); + let c1 = builder.mul_extension( + sub_tmp1, + local_values[start_col + FP6_MUL_T2_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + FP6_MUL_Y_INPUT_OFFSET + i + 48], + local_values[start_col + FP6_MUL_T2_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i], + ); + let c2 = builder.mul_extension( + sub_tmp2, + local_values[start_col + FP6_MUL_T2_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + } + add_fp2_mul_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + FP6_MUL_T2_CALC_OFFSET, + bit_selector, + ); + + // T3 + for i in 0..12 { + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T3_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + FP6_MUL_X_INPUT_OFFSET + i + 24], + ); + let c1 = builder.mul_extension( + sub_tmp1, + local_values[start_col + + FP6_MUL_T3_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + 
FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T3_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + FP6_MUL_X_INPUT_OFFSET + i + 24 + 12], + ); + let c2 = builder.mul_extension( + sub_tmp2, + local_values[start_col + + FP6_MUL_T3_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + + let sub_tmp3 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T3_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_Y_OFFSET + + i], + local_values[start_col + FP6_MUL_X_INPUT_OFFSET + i + 48], + ); + let c3 = builder.mul_extension( + sub_tmp3, + local_values[start_col + + FP6_MUL_T3_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c3); + yield_constr.constraint(builder, c); + + let sub_tmp4 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T3_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_Y_OFFSET + + i], + local_values[start_col + FP6_MUL_X_INPUT_OFFSET + i + 48 + 12], + ); + let c4 = builder.mul_extension( + sub_tmp4, + local_values[start_col + + FP6_MUL_T3_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c4); + yield_constr.constraint(builder, c); + } + add_addition_with_reduction_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP6_MUL_T3_CALC_OFFSET, + bit_selector, + ); + + // T4 + for i in 0..12 { + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T4_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + FP6_MUL_Y_INPUT_OFFSET + i + 24], + ); + let c1 = builder.mul_extension( + 
sub_tmp1, + local_values[start_col + + FP6_MUL_T4_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T4_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + FP6_MUL_Y_INPUT_OFFSET + i + 24 + 12], + ); + let c2 = builder.mul_extension( + sub_tmp2, + local_values[start_col + + FP6_MUL_T4_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + + let sub_tmp3 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T4_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_Y_OFFSET + + i], + local_values[start_col + FP6_MUL_Y_INPUT_OFFSET + i + 48], + ); + let c3 = builder.mul_extension( + sub_tmp3, + local_values[start_col + + FP6_MUL_T4_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c3); + yield_constr.constraint(builder, c); + + let sub_tmp4 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T4_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_Y_OFFSET + + i], + local_values[start_col + FP6_MUL_Y_INPUT_OFFSET + i + 48 + 12], + ); + let c4 = builder.mul_extension( + sub_tmp4, + local_values[start_col + + FP6_MUL_T4_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c4); + yield_constr.constraint(builder, c); + } + add_addition_with_reduction_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP6_MUL_T4_CALC_OFFSET, + bit_selector, + ); + + // T5 + for i in 0..12 { + let mul_tmp = local_values[start_col + FP6_MUL_T5_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET]; + + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + 
FP6_MUL_T3_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i], + local_values[start_col + FP6_MUL_T5_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i], + ); + let c1 = builder.mul_extension(sub_tmp1, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T3_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP_SINGLE_REDUCE_TOTAL + + RANGE_CHECK_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i], + local_values[start_col + FP6_MUL_T5_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i + 12], + ); + let c2 = builder.mul_extension(sub_tmp2, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + + let sub_tmp3 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T4_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i], + local_values[start_col + FP6_MUL_T5_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i], + ); + let c3 = builder.mul_extension(sub_tmp3, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c3); + yield_constr.constraint(builder, c); + + let sub_tmp4 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T4_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP_SINGLE_REDUCE_TOTAL + + RANGE_CHECK_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i], + local_values[start_col + FP6_MUL_T5_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i + 12], + ); + let c4 = builder.mul_extension(sub_tmp4, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c4); + yield_constr.constraint(builder, c); + } + add_fp2_mul_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + FP6_MUL_T5_CALC_OFFSET, + bit_selector, + ); + + // T6 + for i in 0..12 { + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T6_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values + [start_col + FP6_MUL_T5_CALC_OFFSET + Z1_REDUCE_OFFSET + 
REDUCED_OFFSET + i], + ); + let c1 = builder.mul_extension( + sub_tmp1, + local_values[start_col + + FP6_MUL_T6_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T6_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values + [start_col + FP6_MUL_T5_CALC_OFFSET + Z2_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c2 = builder.mul_extension( + sub_tmp2, + local_values[start_col + + FP6_MUL_T6_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + + let sub_tmp3 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T6_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_Y_OFFSET + + i], + local_values + [start_col + FP6_MUL_T1_CALC_OFFSET + Z1_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c3 = builder.mul_extension( + sub_tmp3, + local_values[start_col + + FP6_MUL_T6_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c3); + yield_constr.constraint(builder, c); + + let sub_tmp4 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T6_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_Y_OFFSET + + i], + local_values + [start_col + FP6_MUL_T1_CALC_OFFSET + Z2_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c4 = builder.mul_extension( + sub_tmp4, + local_values[start_col + + FP6_MUL_T6_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c4); + yield_constr.constraint(builder, c); + } + add_subtraction_with_reduction_constraints_ext_circuit( + builder, + 
yield_constr, + local_values, + start_col + FP6_MUL_T6_CALC_OFFSET, + bit_selector, + ); + + // T7 + for i in 0..12 { + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T7_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + + FP6_MUL_T6_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c1 = builder.mul_extension( + sub_tmp1, + local_values[start_col + + FP6_MUL_T7_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T7_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + + FP6_MUL_T6_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + FP_SINGLE_REDUCE_TOTAL + + RANGE_CHECK_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c2 = builder.mul_extension( + sub_tmp2, + local_values[start_col + + FP6_MUL_T7_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + + let sub_tmp3 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T7_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_Y_OFFSET + + i], + local_values + [start_col + FP6_MUL_T2_CALC_OFFSET + Z1_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c3 = builder.mul_extension( + sub_tmp3, + local_values[start_col + + FP6_MUL_T7_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c3); + yield_constr.constraint(builder, c); + + let sub_tmp4 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T7_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + 
FP_SUBTRACTION_Y_OFFSET + + i], + local_values + [start_col + FP6_MUL_T2_CALC_OFFSET + Z2_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c4 = builder.mul_extension( + sub_tmp4, + local_values[start_col + + FP6_MUL_T7_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c4); + yield_constr.constraint(builder, c); + } + add_subtraction_with_reduction_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP6_MUL_T7_CALC_OFFSET, + bit_selector, + ); + + // T8 + for i in 0..12 { + let mul_tmp = + local_values[start_col + FP6_MUL_T8_CALC_OFFSET + FP2_NON_RESIDUE_MUL_CHECK_OFFSET]; + + let sub_tmp1 = builder.sub_extension( + local_values[start_col + FP6_MUL_T8_CALC_OFFSET + FP2_NON_RESIDUE_MUL_INPUT_OFFSET + i], + local_values[start_col + + FP6_MUL_T7_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c1 = builder.mul_extension(sub_tmp1, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values + [start_col + FP6_MUL_T8_CALC_OFFSET + FP2_NON_RESIDUE_MUL_INPUT_OFFSET + i + 12], + local_values[start_col + + FP6_MUL_T7_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + FP_SINGLE_REDUCE_TOTAL + + RANGE_CHECK_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c2 = builder.mul_extension(sub_tmp2, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + } + add_non_residue_multiplication_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP6_MUL_T8_CALC_OFFSET, + bit_selector, + ); + + // X calc offset + for i in 0..12 { + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + FP6_MUL_X_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + + 
FP6_MUL_T8_CALC_OFFSET + + FP2_NON_RESIDUE_MUL_Z0_REDUCE_OFFSET + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c1 = builder.mul_extension( + sub_tmp1, + local_values[start_col + + FP6_MUL_X_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + FP6_MUL_X_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + + FP6_MUL_T8_CALC_OFFSET + + FP2_NON_RESIDUE_MUL_Z1_REDUCE_OFFSET + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c2 = builder.mul_extension( + sub_tmp2, + local_values[start_col + + FP6_MUL_X_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + + let sub_tmp3 = builder.sub_extension( + local_values[start_col + + FP6_MUL_X_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_Y_OFFSET + + i], + local_values + [start_col + FP6_MUL_T0_CALC_OFFSET + Z1_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c3 = builder.mul_extension( + sub_tmp3, + local_values[start_col + + FP6_MUL_X_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c3); + yield_constr.constraint(builder, c); + + let sub_tmp4 = builder.sub_extension( + local_values[start_col + + FP6_MUL_X_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_Y_OFFSET + + i], + local_values + [start_col + FP6_MUL_T0_CALC_OFFSET + Z2_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c4 = builder.mul_extension( + sub_tmp4, + local_values[start_col + + FP6_MUL_X_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c4); + yield_constr.constraint(builder, c); + } + add_addition_with_reduction_constraints_ext_circuit( + builder, + yield_constr, + 
local_values, + start_col + FP6_MUL_X_CALC_OFFSET, + bit_selector, + ); + + // T9 + for i in 0..12 { + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T9_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + FP6_MUL_X_INPUT_OFFSET + i], + ); + let c1 = builder.mul_extension( + sub_tmp1, + local_values[start_col + + FP6_MUL_T9_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T9_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + FP6_MUL_X_INPUT_OFFSET + i + 12], + ); + let c2 = builder.mul_extension( + sub_tmp2, + local_values[start_col + + FP6_MUL_T9_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + + let sub_tmp3 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T9_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_Y_OFFSET + + i], + local_values[start_col + FP6_MUL_X_INPUT_OFFSET + i + 24], + ); + let c3 = builder.mul_extension( + sub_tmp3, + local_values[start_col + + FP6_MUL_T9_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c3); + yield_constr.constraint(builder, c); + + let sub_tmp4 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T9_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_Y_OFFSET + + i], + local_values[start_col + FP6_MUL_X_INPUT_OFFSET + i + 24 + 12], + ); + let c4 = builder.mul_extension( + sub_tmp4, + local_values[start_col + + FP6_MUL_T9_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c4); + yield_constr.constraint(builder, c); + } + 
add_addition_with_reduction_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP6_MUL_T9_CALC_OFFSET, + bit_selector, + ); + + // T10 + for i in 0..12 { + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T10_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + FP6_MUL_Y_INPUT_OFFSET + i], + ); + let c1 = builder.mul_extension( + sub_tmp1, + local_values[start_col + + FP6_MUL_T10_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T10_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + FP6_MUL_Y_INPUT_OFFSET + i + 12], + ); + let c2 = builder.mul_extension( + sub_tmp2, + local_values[start_col + + FP6_MUL_T10_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + + let sub_tmp3 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T10_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_Y_OFFSET + + i], + local_values[start_col + FP6_MUL_Y_INPUT_OFFSET + i + 24], + ); + let c3 = builder.mul_extension( + sub_tmp3, + local_values[start_col + + FP6_MUL_T10_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c3); + yield_constr.constraint(builder, c); + + let sub_tmp4 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T10_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_Y_OFFSET + + i], + local_values[start_col + FP6_MUL_Y_INPUT_OFFSET + i + 24 + 12], + ); + let c4 = builder.mul_extension( + sub_tmp4, + local_values[start_col + + FP6_MUL_T10_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = 
builder.mul_extension(bit_selector_val, c4); + yield_constr.constraint(builder, c); + } + add_addition_with_reduction_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP6_MUL_T10_CALC_OFFSET, + bit_selector, + ); + + // T11 + for i in 0..12 { + let mul_tmp = local_values[start_col + FP6_MUL_T11_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET]; + + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T9_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i], + local_values[start_col + FP6_MUL_T11_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i], + ); + let c1 = builder.mul_extension(sub_tmp1, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T9_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP_SINGLE_REDUCE_TOTAL + + RANGE_CHECK_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i], + local_values[start_col + FP6_MUL_T11_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i + 12], + ); + let c2 = builder.mul_extension(sub_tmp2, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + + let sub_tmp3 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T10_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i], + local_values[start_col + FP6_MUL_T11_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i], + ); + let c3 = builder.mul_extension(sub_tmp3, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c3); + yield_constr.constraint(builder, c); + + let sub_tmp4 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T10_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP_SINGLE_REDUCE_TOTAL + + RANGE_CHECK_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i], + local_values[start_col + FP6_MUL_T11_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i + 12], + ); + let c4 = builder.mul_extension(sub_tmp4, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c4); + 
yield_constr.constraint(builder, c); + } + add_fp2_mul_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + FP6_MUL_T11_CALC_OFFSET, + bit_selector, + ); + + // T12 + for i in 0..12 { + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T12_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values + [start_col + FP6_MUL_T11_CALC_OFFSET + Z1_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c1 = builder.mul_extension( + sub_tmp1, + local_values[start_col + + FP6_MUL_T12_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T12_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values + [start_col + FP6_MUL_T11_CALC_OFFSET + Z2_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c2 = builder.mul_extension( + sub_tmp2, + local_values[start_col + + FP6_MUL_T12_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + + let sub_tmp3 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T12_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_Y_OFFSET + + i], + local_values + [start_col + FP6_MUL_T0_CALC_OFFSET + Z1_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c3 = builder.mul_extension( + sub_tmp3, + local_values[start_col + + FP6_MUL_T12_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c3); + yield_constr.constraint(builder, c); + + let sub_tmp4 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T12_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_Y_OFFSET + + i], + 
local_values + [start_col + FP6_MUL_T0_CALC_OFFSET + Z2_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c4 = builder.mul_extension( + sub_tmp4, + local_values[start_col + + FP6_MUL_T12_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c4); + yield_constr.constraint(builder, c); + } + add_subtraction_with_reduction_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP6_MUL_T12_CALC_OFFSET, + bit_selector, + ); + + // T13 + for i in 0..12 { + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T13_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + + FP6_MUL_T12_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c1 = builder.mul_extension( + sub_tmp1, + local_values[start_col + + FP6_MUL_T13_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T13_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + + FP6_MUL_T12_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + FP_SINGLE_REDUCE_TOTAL + + RANGE_CHECK_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c2 = builder.mul_extension( + sub_tmp2, + local_values[start_col + + FP6_MUL_T13_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + + let sub_tmp3 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T13_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_Y_OFFSET + + i], + local_values + [start_col + FP6_MUL_T1_CALC_OFFSET + Z1_REDUCE_OFFSET + 
REDUCED_OFFSET + i], + ); + let c3 = builder.mul_extension( + sub_tmp3, + local_values[start_col + + FP6_MUL_T13_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c3); + yield_constr.constraint(builder, c); + + let sub_tmp4 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T13_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_Y_OFFSET + + i], + local_values + [start_col + FP6_MUL_T1_CALC_OFFSET + Z2_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c4 = builder.mul_extension( + sub_tmp4, + local_values[start_col + + FP6_MUL_T13_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c4); + yield_constr.constraint(builder, c); + } + add_subtraction_with_reduction_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP6_MUL_T13_CALC_OFFSET, + bit_selector, + ); + + // T14 + for i in 0..12 { + let mul_tmp = + local_values[start_col + FP6_MUL_T14_CALC_OFFSET + FP2_NON_RESIDUE_MUL_CHECK_OFFSET]; + + let sub_tmp1 = builder.sub_extension( + local_values + [start_col + FP6_MUL_T14_CALC_OFFSET + FP2_NON_RESIDUE_MUL_INPUT_OFFSET + i], + local_values + [start_col + FP6_MUL_T2_CALC_OFFSET + Z1_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c1 = builder.mul_extension(sub_tmp1, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values + [start_col + FP6_MUL_T14_CALC_OFFSET + FP2_NON_RESIDUE_MUL_INPUT_OFFSET + i + 12], + local_values + [start_col + FP6_MUL_T2_CALC_OFFSET + Z2_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c2 = builder.mul_extension(sub_tmp2, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + } + 
add_non_residue_multiplication_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP6_MUL_T14_CALC_OFFSET, + bit_selector, + ); + + // Y calc offset + for i in 0..12 { + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + FP6_MUL_Y_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + + FP6_MUL_T13_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c1 = builder.mul_extension( + sub_tmp1, + local_values[start_col + + FP6_MUL_Y_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + FP6_MUL_Y_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + + FP6_MUL_T13_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + FP_SINGLE_REDUCE_TOTAL + + RANGE_CHECK_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c2 = builder.mul_extension( + sub_tmp2, + local_values[start_col + + FP6_MUL_Y_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + + let sub_tmp3 = builder.sub_extension( + local_values[start_col + + FP6_MUL_Y_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_Y_OFFSET + + i], + local_values[start_col + + FP6_MUL_T14_CALC_OFFSET + + FP2_NON_RESIDUE_MUL_Z0_REDUCE_OFFSET + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c3 = builder.mul_extension( + sub_tmp3, + local_values[start_col + + FP6_MUL_Y_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c3); + yield_constr.constraint(builder, c); + + let sub_tmp4 = builder.sub_extension( + local_values[start_col + + FP6_MUL_Y_CALC_OFFSET + + 
FP2_ADDITION_1_OFFSET + + FP_ADDITION_Y_OFFSET + + i], + local_values[start_col + + FP6_MUL_T14_CALC_OFFSET + + FP2_NON_RESIDUE_MUL_Z1_REDUCE_OFFSET + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c4 = builder.mul_extension( + sub_tmp4, + local_values[start_col + + FP6_MUL_Y_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c4); + yield_constr.constraint(builder, c); + } + add_addition_with_reduction_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP6_MUL_Y_CALC_OFFSET, + bit_selector, + ); + + // T15 + for i in 0..12 { + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T15_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + FP6_MUL_X_INPUT_OFFSET + i], + ); + let c1 = builder.mul_extension( + sub_tmp1, + local_values[start_col + + FP6_MUL_T15_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T15_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + FP6_MUL_X_INPUT_OFFSET + i + 12], + ); + let c2 = builder.mul_extension( + sub_tmp2, + local_values[start_col + + FP6_MUL_T15_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + + let sub_tmp3 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T15_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_Y_OFFSET + + i], + local_values[start_col + FP6_MUL_X_INPUT_OFFSET + i + 48], + ); + let c3 = builder.mul_extension( + sub_tmp3, + local_values[start_col + + FP6_MUL_T15_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = 
builder.mul_extension(bit_selector_val, c3); + yield_constr.constraint(builder, c); + + let sub_tmp4 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T15_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_Y_OFFSET + + i], + local_values[start_col + FP6_MUL_X_INPUT_OFFSET + i + 48 + 12], + ); + let c4 = builder.mul_extension( + sub_tmp4, + local_values[start_col + + FP6_MUL_T15_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c4); + yield_constr.constraint(builder, c); + } + add_addition_with_reduction_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP6_MUL_T15_CALC_OFFSET, + bit_selector, + ); + + // T16 + for i in 0..12 { + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T16_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + FP6_MUL_Y_INPUT_OFFSET + i], + ); + let c1 = builder.mul_extension( + sub_tmp1, + local_values[start_col + + FP6_MUL_T16_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T16_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + FP6_MUL_Y_INPUT_OFFSET + i + 12], + ); + let c2 = builder.mul_extension( + sub_tmp2, + local_values[start_col + + FP6_MUL_T16_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + + let sub_tmp3 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T16_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_Y_OFFSET + + i], + local_values[start_col + FP6_MUL_Y_INPUT_OFFSET + i + 48], + ); + let c3 = builder.mul_extension( + sub_tmp3, + local_values[start_col + + 
FP6_MUL_T16_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c3); + yield_constr.constraint(builder, c); + + let sub_tmp4 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T16_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_Y_OFFSET + + i], + local_values[start_col + FP6_MUL_Y_INPUT_OFFSET + i + 48 + 12], + ); + let c4 = builder.mul_extension( + sub_tmp4, + local_values[start_col + + FP6_MUL_T16_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c4); + yield_constr.constraint(builder, c); + } + add_addition_with_reduction_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP6_MUL_T16_CALC_OFFSET, + bit_selector, + ); + + // T17 + for i in 0..12 { + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T15_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i], + local_values[start_col + FP6_MUL_T17_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i], + ); + let c1 = builder.mul_extension( + sub_tmp1, + local_values[start_col + FP6_MUL_T17_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T15_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP_SINGLE_REDUCE_TOTAL + + RANGE_CHECK_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i], + local_values[start_col + FP6_MUL_T17_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i + 12], + ); + let c2 = builder.mul_extension( + sub_tmp2, + local_values[start_col + FP6_MUL_T17_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + + let sub_tmp3 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T16_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i], + 
local_values[start_col + FP6_MUL_T17_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i], + ); + let c3 = builder.mul_extension( + sub_tmp3, + local_values[start_col + FP6_MUL_T17_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c3); + yield_constr.constraint(builder, c); + + let sub_tmp4 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T16_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP_SINGLE_REDUCE_TOTAL + + RANGE_CHECK_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i], + local_values[start_col + FP6_MUL_T17_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i + 12], + ); + let c4 = builder.mul_extension( + sub_tmp4, + local_values[start_col + FP6_MUL_T17_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c4); + yield_constr.constraint(builder, c); + } + add_fp2_mul_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + FP6_MUL_T17_CALC_OFFSET, + bit_selector, + ); + + // T18 + for i in 0..12 { + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T18_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values + [start_col + FP6_MUL_T17_CALC_OFFSET + Z1_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c1 = builder.mul_extension( + sub_tmp1, + local_values[start_col + + FP6_MUL_T18_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T18_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values + [start_col + FP6_MUL_T17_CALC_OFFSET + Z2_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c2 = builder.mul_extension( + sub_tmp2, + local_values[start_col + + FP6_MUL_T18_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c2); 
+ yield_constr.constraint(builder, c); + + let sub_tmp3 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T18_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_Y_OFFSET + + i], + local_values + [start_col + FP6_MUL_T0_CALC_OFFSET + Z1_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c3 = builder.mul_extension( + sub_tmp3, + local_values[start_col + + FP6_MUL_T18_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c3); + yield_constr.constraint(builder, c); + + let sub_tmp4 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T18_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_Y_OFFSET + + i], + local_values + [start_col + FP6_MUL_T0_CALC_OFFSET + Z2_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c4 = builder.mul_extension( + sub_tmp4, + local_values[start_col + + FP6_MUL_T18_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c4); + yield_constr.constraint(builder, c); + } + add_subtraction_with_reduction_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP6_MUL_T18_CALC_OFFSET, + bit_selector, + ); + + // T19 + for i in 0..12 { + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T19_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + + FP6_MUL_T18_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c1 = builder.mul_extension( + sub_tmp1, + local_values[start_col + + FP6_MUL_T19_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + 
FP6_MUL_T19_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + + FP6_MUL_T18_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + FP_SINGLE_REDUCE_TOTAL + + RANGE_CHECK_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c2 = builder.mul_extension( + sub_tmp2, + local_values[start_col + + FP6_MUL_T19_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + + let sub_tmp3 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T19_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_Y_OFFSET + + i], + local_values + [start_col + FP6_MUL_T2_CALC_OFFSET + Z1_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c3 = builder.mul_extension( + sub_tmp3, + local_values[start_col + + FP6_MUL_T19_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c3); + yield_constr.constraint(builder, c); + + let sub_tmp4 = builder.sub_extension( + local_values[start_col + + FP6_MUL_T19_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_Y_OFFSET + + i], + local_values + [start_col + FP6_MUL_T2_CALC_OFFSET + Z2_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c4 = builder.mul_extension( + sub_tmp4, + local_values[start_col + + FP6_MUL_T19_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c4); + yield_constr.constraint(builder, c); + } + add_subtraction_with_reduction_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP6_MUL_T19_CALC_OFFSET, + bit_selector, + ); + + // Z calc offset + for i in 0..12 { + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + FP6_MUL_Z_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + 
FP_ADDITION_X_OFFSET + + i], + local_values[start_col + + FP6_MUL_T19_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c1 = builder.mul_extension( + sub_tmp1, + local_values[start_col + + FP6_MUL_Z_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + FP6_MUL_Z_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + + FP6_MUL_T19_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + FP_SINGLE_REDUCE_TOTAL + + RANGE_CHECK_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c2 = builder.mul_extension( + sub_tmp2, + local_values[start_col + + FP6_MUL_Z_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + + let sub_tmp3 = builder.sub_extension( + local_values[start_col + + FP6_MUL_Z_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_Y_OFFSET + + i], + local_values + [start_col + FP6_MUL_T1_CALC_OFFSET + Z1_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c3 = builder.mul_extension( + sub_tmp3, + local_values[start_col + + FP6_MUL_Z_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c3); + yield_constr.constraint(builder, c); + + let sub_tmp4 = builder.sub_extension( + local_values[start_col + + FP6_MUL_Z_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_Y_OFFSET + + i], + local_values + [start_col + FP6_MUL_T1_CALC_OFFSET + Z2_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c4 = builder.mul_extension( + sub_tmp4, + local_values[start_col + + FP6_MUL_Z_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c4); + 
yield_constr.constraint(builder, c); + } + add_addition_with_reduction_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + FP6_MUL_Z_CALC_OFFSET, + bit_selector, + ); +} + +/// Constraints [multiplyBy1](super::native::Fp6::multiplyBy1) function. +/// +/// Constraints inputs across this and next row, wherever selector is set to on. Constraints all the Ti's (defined in the native function) accordinng to their respective operations. +pub fn add_multiply_by_1_constraints< + F: RichField + Extendable, + const D: usize, + FE, + P, + const D2: usize, +>( + local_values: &[P], + next_values: &[P], + yield_constr: &mut ConstraintConsumer

, + start_col: usize, + bit_selector: Option

, +) where + FE: FieldExtension, + P: PackedField, +{ + let bit_selector_val = bit_selector.unwrap_or(P::ONES); + + for i in 0..24 { + for j in 0..3 { + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + MULTIPLY_BY_1_SELECTOR_OFFSET] + * (local_values[start_col + MULTIPLY_BY_1_INPUT_OFFSET + j * 24 + i] + - next_values[start_col + MULTIPLY_BY_1_INPUT_OFFSET + j * 24 + i]), + ); + } + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + MULTIPLY_BY_1_SELECTOR_OFFSET] + * (local_values[start_col + MULTIPLY_BY_1_B1_OFFSET + i] + - next_values[start_col + MULTIPLY_BY_1_B1_OFFSET + i]), + ); + } + + // T0 + for i in 0..24 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + MULTIPLY_BY_1_T0_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values + [start_col + MULTIPLY_BY_1_T0_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i] + - local_values[start_col + MULTIPLY_BY_1_INPUT_OFFSET + i + 48]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + MULTIPLY_BY_1_T0_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values + [start_col + MULTIPLY_BY_1_T0_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i] + - local_values[start_col + MULTIPLY_BY_1_B1_OFFSET + i]), + ) + } + add_fp2_mul_constraints( + local_values, + next_values, + yield_constr, + start_col + MULTIPLY_BY_1_T0_CALC_OFFSET, + bit_selector, + ); + + // X + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values + [start_col + MULTIPLY_BY_1_X_CALC_OFFSET + FP2_NON_RESIDUE_MUL_CHECK_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_1_X_CALC_OFFSET + + FP2_NON_RESIDUE_MUL_INPUT_OFFSET + + i] + - local_values[start_col + + MULTIPLY_BY_1_T0_CALC_OFFSET + + Z1_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values + [start_col + MULTIPLY_BY_1_X_CALC_OFFSET + FP2_NON_RESIDUE_MUL_CHECK_OFFSET] + * (local_values[start_col + + 
MULTIPLY_BY_1_X_CALC_OFFSET + + FP2_NON_RESIDUE_MUL_INPUT_OFFSET + + i + + 12] + - local_values[start_col + + MULTIPLY_BY_1_T0_CALC_OFFSET + + Z2_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + } + add_non_residue_multiplication_constraints( + local_values, + yield_constr, + start_col + MULTIPLY_BY_1_X_CALC_OFFSET, + bit_selector, + ); + + // Y + for i in 0..24 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + MULTIPLY_BY_1_Y_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values + [start_col + MULTIPLY_BY_1_Y_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i] + - local_values[start_col + MULTIPLY_BY_1_INPUT_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + MULTIPLY_BY_1_Y_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values + [start_col + MULTIPLY_BY_1_Y_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i] + - local_values[start_col + MULTIPLY_BY_1_B1_OFFSET + i]), + ) + } + add_fp2_mul_constraints( + local_values, + next_values, + yield_constr, + start_col + MULTIPLY_BY_1_Y_CALC_OFFSET, + bit_selector, + ); + + // Z + for i in 0..24 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + MULTIPLY_BY_1_Z_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values + [start_col + MULTIPLY_BY_1_Z_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i] + - local_values[start_col + MULTIPLY_BY_1_INPUT_OFFSET + i + 24]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + MULTIPLY_BY_1_Z_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values + [start_col + MULTIPLY_BY_1_Z_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i] + - local_values[start_col + MULTIPLY_BY_1_B1_OFFSET + i]), + ) + } + add_fp2_mul_constraints( + local_values, + next_values, + yield_constr, + start_col + MULTIPLY_BY_1_Z_CALC_OFFSET, + bit_selector, + ); +} + +pub fn add_multiply_by_1_constraints_ext_circuit, const D: usize>( + builder: &mut CircuitBuilder, + yield_constr: &mut RecursiveConstraintConsumer, + 
local_values: &[ExtensionTarget], + next_values: &[ExtensionTarget], + start_col: usize, + bit_selector: Option>, +) { + let bit_selector_val = bit_selector.unwrap_or(builder.constant_extension(F::Extension::ONE)); + + for i in 0..24 { + let mul_tmp = local_values[start_col + MULTIPLY_BY_1_SELECTOR_OFFSET]; + for j in 0..3 { + let sub_tmp1 = builder.sub_extension( + local_values[start_col + MULTIPLY_BY_1_INPUT_OFFSET + j * 24 + i], + next_values[start_col + MULTIPLY_BY_1_INPUT_OFFSET + j * 24 + i], + ); + let c1 = builder.mul_extension(sub_tmp1, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint_transition(builder, c); + } + let sub_tmp2 = builder.sub_extension( + local_values[start_col + MULTIPLY_BY_1_B1_OFFSET + i], + next_values[start_col + MULTIPLY_BY_1_B1_OFFSET + i], + ); + let c2 = builder.mul_extension(sub_tmp2, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint_transition(builder, c); + } + + // T0 + for i in 0..24 { + let mul_tmp = + local_values[start_col + MULTIPLY_BY_1_T0_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET]; + + let sub_tmp1 = builder.sub_extension( + local_values[start_col + MULTIPLY_BY_1_T0_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i], + local_values[start_col + MULTIPLY_BY_1_INPUT_OFFSET + i + 48], + ); + let c1 = builder.mul_extension(sub_tmp1, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + MULTIPLY_BY_1_T0_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i], + local_values[start_col + MULTIPLY_BY_1_B1_OFFSET + i], + ); + let c2 = builder.mul_extension(sub_tmp2, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + } + add_fp2_mul_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + MULTIPLY_BY_1_T0_CALC_OFFSET, + bit_selector, + ); + + // X + for i in 0..12 { + 
let mul_tmp = local_values + [start_col + MULTIPLY_BY_1_X_CALC_OFFSET + FP2_NON_RESIDUE_MUL_CHECK_OFFSET]; + + let sub_tmp1 = builder.sub_extension( + local_values + [start_col + MULTIPLY_BY_1_X_CALC_OFFSET + FP2_NON_RESIDUE_MUL_INPUT_OFFSET + i], + local_values + [start_col + MULTIPLY_BY_1_T0_CALC_OFFSET + Z1_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c1 = builder.mul_extension(sub_tmp1, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_1_X_CALC_OFFSET + + FP2_NON_RESIDUE_MUL_INPUT_OFFSET + + i + + 12], + local_values + [start_col + MULTIPLY_BY_1_T0_CALC_OFFSET + Z2_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c2 = builder.mul_extension(sub_tmp2, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + } + add_non_residue_multiplication_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + MULTIPLY_BY_1_X_CALC_OFFSET, + bit_selector, + ); + + // Y + for i in 0..24 { + let mul_tmp = + local_values[start_col + MULTIPLY_BY_1_Y_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET]; + + let sub_tmp1 = builder.sub_extension( + local_values[start_col + MULTIPLY_BY_1_Y_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i], + local_values[start_col + MULTIPLY_BY_1_INPUT_OFFSET + i], + ); + let c1 = builder.mul_extension(sub_tmp1, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + MULTIPLY_BY_1_Y_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i], + local_values[start_col + MULTIPLY_BY_1_B1_OFFSET + i], + ); + let c2 = builder.mul_extension(sub_tmp2, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + } + add_fp2_mul_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + 
MULTIPLY_BY_1_Y_CALC_OFFSET, + bit_selector, + ); + + // Z + for i in 0..24 { + let mul_tmp = + local_values[start_col + MULTIPLY_BY_1_Z_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET]; + + let sub_tmp1 = builder.sub_extension( + local_values[start_col + MULTIPLY_BY_1_Z_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i], + local_values[start_col + MULTIPLY_BY_1_INPUT_OFFSET + i + 24], + ); + let c1 = builder.mul_extension(sub_tmp1, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + MULTIPLY_BY_1_Z_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i], + local_values[start_col + MULTIPLY_BY_1_B1_OFFSET + i], + ); + let c2 = builder.mul_extension(sub_tmp2, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + } + add_fp2_mul_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + MULTIPLY_BY_1_Z_CALC_OFFSET, + bit_selector, + ); +} + +/// Constraints [multiplyBy01](super::native::Fp6::multiplyBy01) function. +/// +/// Constraints inputs across this and next row, wherever selector is set to on. Constraints all the Ti's (defined in the native function) accordinng to their respective operations. +pub fn add_multiply_by_01_constraints< + F: RichField + Extendable, + const D: usize, + FE, + P, + const D2: usize, +>( + local_values: &[P], + next_values: &[P], + yield_constr: &mut ConstraintConsumer

, + start_col: usize, + bit_selector: Option

, +) where + FE: FieldExtension, + P: PackedField, +{ + let bit_selector_val = bit_selector.unwrap_or(P::ONES); + + for i in 0..24 { + for j in 0..3 { + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + MULTIPLY_BY_01_SELECTOR_OFFSET] + * (local_values[start_col + MULTIPLY_BY_01_INPUT_OFFSET + j * 24 + i] + - next_values[start_col + MULTIPLY_BY_01_INPUT_OFFSET + j * 24 + i]), + ); + } + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + MULTIPLY_BY_01_SELECTOR_OFFSET] + * (local_values[start_col + MULTIPLY_BY_01_B0_OFFSET + i] + - next_values[start_col + MULTIPLY_BY_01_B0_OFFSET + i]), + ); + yield_constr.constraint_transition( + bit_selector_val + * local_values[start_col + MULTIPLY_BY_01_SELECTOR_OFFSET] + * (local_values[start_col + MULTIPLY_BY_01_B1_OFFSET + i] + - next_values[start_col + MULTIPLY_BY_01_B1_OFFSET + i]), + ); + } + + // T0 + for i in 0..24 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + MULTIPLY_BY_01_T0_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values + [start_col + MULTIPLY_BY_01_T0_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i] + - local_values[start_col + MULTIPLY_BY_01_INPUT_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + MULTIPLY_BY_01_T0_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values + [start_col + MULTIPLY_BY_01_T0_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i] + - local_values[start_col + MULTIPLY_BY_01_B0_OFFSET + i]), + ) + } + add_fp2_mul_constraints( + local_values, + next_values, + yield_constr, + start_col + MULTIPLY_BY_01_T0_CALC_OFFSET, + bit_selector, + ); + + // T1 + for i in 0..24 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + MULTIPLY_BY_01_T1_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values + [start_col + MULTIPLY_BY_01_T1_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i] + - local_values[start_col + MULTIPLY_BY_01_INPUT_OFFSET + i + 24]), + ); + 
yield_constr.constraint( + bit_selector_val + * local_values[start_col + MULTIPLY_BY_01_T1_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values + [start_col + MULTIPLY_BY_01_T1_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i] + - local_values[start_col + MULTIPLY_BY_01_B1_OFFSET + i]), + ) + } + add_fp2_mul_constraints( + local_values, + next_values, + yield_constr, + start_col + MULTIPLY_BY_01_T1_CALC_OFFSET, + bit_selector, + ); + + // T2 + for i in 0..24 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + MULTIPLY_BY_01_T2_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values + [start_col + MULTIPLY_BY_01_T2_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i] + - local_values[start_col + MULTIPLY_BY_01_INPUT_OFFSET + i + 48]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + MULTIPLY_BY_01_T2_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values + [start_col + MULTIPLY_BY_01_T2_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i] + - local_values[start_col + MULTIPLY_BY_01_B1_OFFSET + i]), + ) + } + add_fp2_mul_constraints( + local_values, + next_values, + yield_constr, + start_col + MULTIPLY_BY_01_T2_CALC_OFFSET, + bit_selector, + ); + + // T3 + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values + [start_col + MULTIPLY_BY_01_T3_CALC_OFFSET + FP2_NON_RESIDUE_MUL_CHECK_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_01_T3_CALC_OFFSET + + FP2_NON_RESIDUE_MUL_INPUT_OFFSET + + i] + - local_values[start_col + + MULTIPLY_BY_01_T2_CALC_OFFSET + + Z1_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values + [start_col + MULTIPLY_BY_01_T3_CALC_OFFSET + FP2_NON_RESIDUE_MUL_CHECK_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_01_T3_CALC_OFFSET + + FP2_NON_RESIDUE_MUL_INPUT_OFFSET + + i + + 12] + - local_values[start_col + + MULTIPLY_BY_01_T2_CALC_OFFSET + + Z2_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + } + 
add_non_residue_multiplication_constraints( + local_values, + yield_constr, + start_col + MULTIPLY_BY_01_T3_CALC_OFFSET, + bit_selector, + ); + + // X + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + MULTIPLY_BY_01_X_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_01_X_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + + MULTIPLY_BY_01_T3_CALC_OFFSET + + FP2_NON_RESIDUE_MUL_Z0_REDUCE_OFFSET + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + MULTIPLY_BY_01_X_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_01_X_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + + MULTIPLY_BY_01_T3_CALC_OFFSET + + FP2_NON_RESIDUE_MUL_Z1_REDUCE_OFFSET + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + MULTIPLY_BY_01_X_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_01_X_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_Y_OFFSET + + i] + - local_values[start_col + + MULTIPLY_BY_01_T0_CALC_OFFSET + + Z1_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + MULTIPLY_BY_01_X_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_01_X_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_Y_OFFSET + + i] + - local_values[start_col + + MULTIPLY_BY_01_T0_CALC_OFFSET + + Z2_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + } + add_addition_with_reduction_constranints( + local_values, + yield_constr, + start_col + MULTIPLY_BY_01_X_CALC_OFFSET, + bit_selector, + ); + + // T4 + for i in 0..12 { + 
yield_constr.constraint( + bit_selector_val + * local_values[start_col + + MULTIPLY_BY_01_T4_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_01_T4_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + MULTIPLY_BY_01_B0_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + MULTIPLY_BY_01_T4_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_01_T4_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + MULTIPLY_BY_01_B0_OFFSET + i + 12]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + MULTIPLY_BY_01_T4_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_01_T4_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_Y_OFFSET + + i] + - local_values[start_col + MULTIPLY_BY_01_B1_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + MULTIPLY_BY_01_T4_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_01_T4_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_Y_OFFSET + + i] + - local_values[start_col + MULTIPLY_BY_01_B1_OFFSET + i + 12]), + ); + } + add_addition_with_reduction_constranints( + local_values, + yield_constr, + start_col + MULTIPLY_BY_01_T4_CALC_OFFSET, + bit_selector, + ); + + // T5 + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + MULTIPLY_BY_01_T5_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_01_T5_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + MULTIPLY_BY_01_INPUT_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector_val + * 
local_values[start_col + + MULTIPLY_BY_01_T5_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_01_T5_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + MULTIPLY_BY_01_INPUT_OFFSET + i + 12]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + MULTIPLY_BY_01_T5_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_01_T5_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_Y_OFFSET + + i] + - local_values[start_col + MULTIPLY_BY_01_INPUT_OFFSET + i + 24]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + MULTIPLY_BY_01_T5_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_01_T5_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_Y_OFFSET + + i] + - local_values[start_col + MULTIPLY_BY_01_INPUT_OFFSET + i + 36]), + ); + } + add_addition_with_reduction_constranints( + local_values, + yield_constr, + start_col + MULTIPLY_BY_01_T5_CALC_OFFSET, + bit_selector, + ); + + // T6 + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + MULTIPLY_BY_01_T6_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_01_T4_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i] + - local_values + [start_col + MULTIPLY_BY_01_T6_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + MULTIPLY_BY_01_T6_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_01_T4_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP_SINGLE_REDUCE_TOTAL + + RANGE_CHECK_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i] + - local_values[start_col + + MULTIPLY_BY_01_T6_CALC_OFFSET + + FP2_FP2_X_INPUT_OFFSET + + i + + 12]), + ); + yield_constr.constraint( + 
bit_selector_val + * local_values[start_col + MULTIPLY_BY_01_T6_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_01_T5_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i] + - local_values + [start_col + MULTIPLY_BY_01_T6_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + MULTIPLY_BY_01_T6_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_01_T5_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP_SINGLE_REDUCE_TOTAL + + RANGE_CHECK_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i] + - local_values[start_col + + MULTIPLY_BY_01_T6_CALC_OFFSET + + FP2_FP2_Y_INPUT_OFFSET + + i + + 12]), + ); + } + add_fp2_mul_constraints( + local_values, + next_values, + yield_constr, + start_col + MULTIPLY_BY_01_T6_CALC_OFFSET, + bit_selector, + ); + + // T7 + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + MULTIPLY_BY_01_T7_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_01_T7_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + + MULTIPLY_BY_01_T6_CALC_OFFSET + + Z1_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + MULTIPLY_BY_01_T7_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_01_T7_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + + MULTIPLY_BY_01_T6_CALC_OFFSET + + Z2_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + MULTIPLY_BY_01_T7_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_01_T7_CALC_OFFSET + + FP2_ADDITION_TOTAL + + 
FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_Y_OFFSET + + i] + - local_values[start_col + + MULTIPLY_BY_01_T0_CALC_OFFSET + + Z1_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + MULTIPLY_BY_01_T7_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_01_T7_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_Y_OFFSET + + i] + - local_values[start_col + + MULTIPLY_BY_01_T0_CALC_OFFSET + + Z2_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + } + add_subtraction_with_reduction_constranints( + local_values, + yield_constr, + start_col + MULTIPLY_BY_01_T7_CALC_OFFSET, + bit_selector, + ); + + // Y + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + MULTIPLY_BY_01_Y_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_01_Y_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + + MULTIPLY_BY_01_T7_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + MULTIPLY_BY_01_Y_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_01_Y_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + + MULTIPLY_BY_01_T7_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + FP_SINGLE_REDUCE_TOTAL + + RANGE_CHECK_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + MULTIPLY_BY_01_Y_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_01_Y_CALC_OFFSET + + FP2_ADDITION_TOTAL 
+ + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_Y_OFFSET + + i] + - local_values[start_col + + MULTIPLY_BY_01_T1_CALC_OFFSET + + Z1_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + MULTIPLY_BY_01_Y_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_01_Y_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_Y_OFFSET + + i] + - local_values[start_col + + MULTIPLY_BY_01_T1_CALC_OFFSET + + Z2_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + } + add_subtraction_with_reduction_constranints( + local_values, + yield_constr, + start_col + MULTIPLY_BY_01_Y_CALC_OFFSET, + bit_selector, + ); + + // T8 + for i in 0..24 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + MULTIPLY_BY_01_T8_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values + [start_col + MULTIPLY_BY_01_T8_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i] + - local_values[start_col + MULTIPLY_BY_01_INPUT_OFFSET + i + 48]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + MULTIPLY_BY_01_T8_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values + [start_col + MULTIPLY_BY_01_T8_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i] + - local_values[start_col + MULTIPLY_BY_01_B0_OFFSET + i]), + ) + } + add_fp2_mul_constraints( + local_values, + next_values, + yield_constr, + start_col + MULTIPLY_BY_01_T8_CALC_OFFSET, + bit_selector, + ); + + // Z + for i in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + MULTIPLY_BY_01_Z_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_01_Z_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + + MULTIPLY_BY_01_T8_CALC_OFFSET + + Z1_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + 
bit_selector_val + * local_values[start_col + + MULTIPLY_BY_01_Z_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_01_Z_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i] + - local_values[start_col + + MULTIPLY_BY_01_T8_CALC_OFFSET + + Z2_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + MULTIPLY_BY_01_Z_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_01_Z_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_Y_OFFSET + + i] + - local_values[start_col + + MULTIPLY_BY_01_T1_CALC_OFFSET + + Z1_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + MULTIPLY_BY_01_Z_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + MULTIPLY_BY_01_Z_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_Y_OFFSET + + i] + - local_values[start_col + + MULTIPLY_BY_01_T1_CALC_OFFSET + + Z2_REDUCE_OFFSET + + REDUCED_OFFSET + + i]), + ); + } + add_addition_with_reduction_constranints( + local_values, + yield_constr, + start_col + MULTIPLY_BY_01_Z_CALC_OFFSET, + bit_selector, + ); +} + +pub fn add_multiply_by_01_constraints_ext_circuit, const D: usize>( + builder: &mut CircuitBuilder, + yield_constr: &mut RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + next_values: &[ExtensionTarget], + start_col: usize, + bit_selector: Option>, +) { + let bit_selector_val = bit_selector.unwrap_or(builder.constant_extension(F::Extension::ONE)); + + for i in 0..24 { + for j in 0..3 { + let sub_tmp = builder.sub_extension( + local_values[start_col + MULTIPLY_BY_01_INPUT_OFFSET + j * 24 + i], + next_values[start_col + MULTIPLY_BY_01_INPUT_OFFSET + j * 24 + i], + ); + let c = builder.mul_extension( + sub_tmp, + local_values[start_col + 
MULTIPLY_BY_01_SELECTOR_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c); + yield_constr.constraint_transition(builder, c); + } + + let mul_tmp = local_values[start_col + MULTIPLY_BY_01_SELECTOR_OFFSET]; + + let sub_tmp1 = builder.sub_extension( + local_values[start_col + MULTIPLY_BY_01_B0_OFFSET + i], + next_values[start_col + MULTIPLY_BY_01_B0_OFFSET + i], + ); + let c1 = builder.mul_extension(sub_tmp1, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint_transition(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + MULTIPLY_BY_01_B1_OFFSET + i], + next_values[start_col + MULTIPLY_BY_01_B1_OFFSET + i], + ); + let c2 = builder.mul_extension(sub_tmp2, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint_transition(builder, c); + } + + // T0 + for i in 0..24 { + let mul_tmp = + local_values[start_col + MULTIPLY_BY_01_T0_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET]; + + let sub_tmp1 = builder.sub_extension( + local_values[start_col + MULTIPLY_BY_01_T0_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i], + local_values[start_col + MULTIPLY_BY_01_INPUT_OFFSET + i], + ); + let c1 = builder.mul_extension(sub_tmp1, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + MULTIPLY_BY_01_T0_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i], + local_values[start_col + MULTIPLY_BY_01_B0_OFFSET + i], + ); + let c2 = builder.mul_extension(sub_tmp2, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + } + add_fp2_mul_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + MULTIPLY_BY_01_T0_CALC_OFFSET, + bit_selector, + ); + + // T1 + for i in 0..24 { + let mul_tmp = + local_values[start_col + MULTIPLY_BY_01_T1_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET]; + + let sub_tmp1 = 
builder.sub_extension( + local_values[start_col + MULTIPLY_BY_01_T1_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i], + local_values[start_col + MULTIPLY_BY_01_INPUT_OFFSET + i + 24], + ); + let c1 = builder.mul_extension(sub_tmp1, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + MULTIPLY_BY_01_T1_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i], + local_values[start_col + MULTIPLY_BY_01_B1_OFFSET + i], + ); + let c2 = builder.mul_extension(sub_tmp2, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + } + add_fp2_mul_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + MULTIPLY_BY_01_T1_CALC_OFFSET, + bit_selector, + ); + + // T2 + for i in 0..24 { + let mul_tmp = + local_values[start_col + MULTIPLY_BY_01_T2_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET]; + + let sub_tmp1 = builder.sub_extension( + local_values[start_col + MULTIPLY_BY_01_T2_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i], + local_values[start_col + MULTIPLY_BY_01_INPUT_OFFSET + i + 48], + ); + let c1 = builder.mul_extension(sub_tmp1, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + MULTIPLY_BY_01_T2_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i], + local_values[start_col + MULTIPLY_BY_01_B1_OFFSET + i], + ); + let c2 = builder.mul_extension(sub_tmp2, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + } + add_fp2_mul_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + MULTIPLY_BY_01_T2_CALC_OFFSET, + bit_selector, + ); + + // T3 + for i in 0..12 { + let mul_tmp = local_values + [start_col + MULTIPLY_BY_01_T3_CALC_OFFSET + FP2_NON_RESIDUE_MUL_CHECK_OFFSET]; + + let sub_tmp1 = 
builder.sub_extension( + local_values + [start_col + MULTIPLY_BY_01_T3_CALC_OFFSET + FP2_NON_RESIDUE_MUL_INPUT_OFFSET + i], + local_values + [start_col + MULTIPLY_BY_01_T2_CALC_OFFSET + Z1_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c1 = builder.mul_extension(sub_tmp1, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_01_T3_CALC_OFFSET + + FP2_NON_RESIDUE_MUL_INPUT_OFFSET + + i + + 12], + local_values + [start_col + MULTIPLY_BY_01_T2_CALC_OFFSET + Z2_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c2 = builder.mul_extension(sub_tmp2, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + } + add_non_residue_multiplication_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + MULTIPLY_BY_01_T3_CALC_OFFSET, + bit_selector, + ); + + // X + for i in 0..12 { + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_01_X_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + + MULTIPLY_BY_01_T3_CALC_OFFSET + + FP2_NON_RESIDUE_MUL_Z0_REDUCE_OFFSET + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c1 = builder.mul_extension( + sub_tmp1, + local_values[start_col + + MULTIPLY_BY_01_X_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_01_X_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + + MULTIPLY_BY_01_T3_CALC_OFFSET + + FP2_NON_RESIDUE_MUL_Z1_REDUCE_OFFSET + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c2 = builder.mul_extension( + sub_tmp2, + local_values[start_col + + MULTIPLY_BY_01_X_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + 
FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + + let sub_tmp3 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_01_X_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_Y_OFFSET + + i], + local_values + [start_col + MULTIPLY_BY_01_T0_CALC_OFFSET + Z1_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c3 = builder.mul_extension( + sub_tmp3, + local_values[start_col + + MULTIPLY_BY_01_X_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c3); + yield_constr.constraint(builder, c); + + let sub_tmp4 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_01_X_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_Y_OFFSET + + i], + local_values + [start_col + MULTIPLY_BY_01_T0_CALC_OFFSET + Z2_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c4 = builder.mul_extension( + sub_tmp4, + local_values[start_col + + MULTIPLY_BY_01_X_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c4); + yield_constr.constraint(builder, c); + } + add_addition_with_reduction_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + MULTIPLY_BY_01_X_CALC_OFFSET, + bit_selector, + ); + + // T4 + for i in 0..12 { + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_01_T4_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + MULTIPLY_BY_01_B0_OFFSET + i], + ); + let c1 = builder.mul_extension( + sub_tmp1, + local_values[start_col + + MULTIPLY_BY_01_T4_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_01_T4_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + 
FP_ADDITION_X_OFFSET + + i], + local_values[start_col + MULTIPLY_BY_01_B0_OFFSET + i + 12], + ); + let c2 = builder.mul_extension( + sub_tmp2, + local_values[start_col + + MULTIPLY_BY_01_T4_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + + let sub_tmp3 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_01_T4_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_Y_OFFSET + + i], + local_values[start_col + MULTIPLY_BY_01_B1_OFFSET + i], + ); + let c3 = builder.mul_extension( + sub_tmp3, + local_values[start_col + + MULTIPLY_BY_01_T4_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c3); + yield_constr.constraint(builder, c); + + let sub_tmp4 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_01_T4_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_Y_OFFSET + + i], + local_values[start_col + MULTIPLY_BY_01_B1_OFFSET + i + 12], + ); + let c4 = builder.mul_extension( + sub_tmp4, + local_values[start_col + + MULTIPLY_BY_01_T4_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c4); + yield_constr.constraint(builder, c); + } + add_addition_with_reduction_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + MULTIPLY_BY_01_T4_CALC_OFFSET, + bit_selector, + ); + + // T5 + for i in 0..12 { + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_01_T5_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + MULTIPLY_BY_01_INPUT_OFFSET + i], + ); + let c1 = builder.mul_extension( + sub_tmp1, + local_values[start_col + + MULTIPLY_BY_01_T5_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c1); + 
yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_01_T5_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + MULTIPLY_BY_01_INPUT_OFFSET + i + 12], + ); + let c2 = builder.mul_extension( + sub_tmp2, + local_values[start_col + + MULTIPLY_BY_01_T5_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + + let sub_tmp3 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_01_T5_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_Y_OFFSET + + i], + local_values[start_col + MULTIPLY_BY_01_INPUT_OFFSET + i + 24], + ); + let c3 = builder.mul_extension( + sub_tmp3, + local_values[start_col + + MULTIPLY_BY_01_T5_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c3); + yield_constr.constraint(builder, c); + + let sub_tmp4 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_01_T5_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_Y_OFFSET + + i], + local_values[start_col + MULTIPLY_BY_01_INPUT_OFFSET + i + 36], + ); + let c4 = builder.mul_extension( + sub_tmp4, + local_values[start_col + + MULTIPLY_BY_01_T5_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c4); + yield_constr.constraint(builder, c); + } + add_addition_with_reduction_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + MULTIPLY_BY_01_T5_CALC_OFFSET, + bit_selector, + ); + + // T6 + for i in 0..12 { + let mul_tmp = + local_values[start_col + MULTIPLY_BY_01_T6_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET]; + + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_01_T4_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i], + local_values[start_col + 
MULTIPLY_BY_01_T6_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i], + ); + let c1 = builder.mul_extension(sub_tmp1, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_01_T4_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP_SINGLE_REDUCE_TOTAL + + RANGE_CHECK_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i], + local_values + [start_col + MULTIPLY_BY_01_T6_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i + 12], + ); + let c2 = builder.mul_extension(sub_tmp2, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + + let sub_tmp3 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_01_T5_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i], + local_values[start_col + MULTIPLY_BY_01_T6_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i], + ); + let c3 = builder.mul_extension(sub_tmp3, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c3); + yield_constr.constraint(builder, c); + + let sub_tmp4 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_01_T5_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP_SINGLE_REDUCE_TOTAL + + RANGE_CHECK_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i], + local_values + [start_col + MULTIPLY_BY_01_T6_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i + 12], + ); + let c4 = builder.mul_extension(sub_tmp4, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c4); + yield_constr.constraint(builder, c); + } + add_fp2_mul_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + MULTIPLY_BY_01_T6_CALC_OFFSET, + bit_selector, + ); + + // T7 + for i in 0..12 { + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_01_T7_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values + [start_col + MULTIPLY_BY_01_T6_CALC_OFFSET + Z1_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c1 
= builder.mul_extension( + sub_tmp1, + local_values[start_col + + MULTIPLY_BY_01_T7_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_01_T7_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values + [start_col + MULTIPLY_BY_01_T6_CALC_OFFSET + Z2_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c2 = builder.mul_extension( + sub_tmp2, + local_values[start_col + + MULTIPLY_BY_01_T7_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + + let sub_tmp3 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_01_T7_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_Y_OFFSET + + i], + local_values + [start_col + MULTIPLY_BY_01_T0_CALC_OFFSET + Z1_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c3 = builder.mul_extension( + sub_tmp3, + local_values[start_col + + MULTIPLY_BY_01_T7_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c3); + yield_constr.constraint(builder, c); + + let sub_tmp4 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_01_T7_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_Y_OFFSET + + i], + local_values + [start_col + MULTIPLY_BY_01_T0_CALC_OFFSET + Z2_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c4 = builder.mul_extension( + sub_tmp4, + local_values[start_col + + MULTIPLY_BY_01_T7_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c4); + yield_constr.constraint(builder, c); + } + 
add_subtraction_with_reduction_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + MULTIPLY_BY_01_T7_CALC_OFFSET, + bit_selector, + ); + + // Y + for i in 0..12 { + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_01_Y_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + + MULTIPLY_BY_01_T7_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c1 = builder.mul_extension( + sub_tmp1, + local_values[start_col + + MULTIPLY_BY_01_Y_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_01_Y_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + + MULTIPLY_BY_01_T7_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_TOTAL + + FP_SINGLE_REDUCE_TOTAL + + RANGE_CHECK_TOTAL + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c2 = builder.mul_extension( + sub_tmp2, + local_values[start_col + + MULTIPLY_BY_01_Y_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + + let sub_tmp3 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_01_Y_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_Y_OFFSET + + i], + local_values + [start_col + MULTIPLY_BY_01_T1_CALC_OFFSET + Z1_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c3 = builder.mul_extension( + sub_tmp3, + local_values[start_col + + MULTIPLY_BY_01_Y_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_0_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c3); + yield_constr.constraint(builder, c); + + let sub_tmp4 = 
builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_01_Y_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_Y_OFFSET + + i], + local_values + [start_col + MULTIPLY_BY_01_T1_CALC_OFFSET + Z2_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c4 = builder.mul_extension( + sub_tmp4, + local_values[start_col + + MULTIPLY_BY_01_Y_CALC_OFFSET + + FP2_ADDITION_TOTAL + + FP2_SUBTRACTION_1_OFFSET + + FP_SUBTRACTION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c4); + yield_constr.constraint(builder, c); + } + add_subtraction_with_reduction_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + MULTIPLY_BY_01_Y_CALC_OFFSET, + bit_selector, + ); + + // T8 + for i in 0..24 { + let mul_tmp = + local_values[start_col + MULTIPLY_BY_01_T8_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET]; + + let sub_tmp1 = builder.sub_extension( + local_values[start_col + MULTIPLY_BY_01_T8_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i], + local_values[start_col + MULTIPLY_BY_01_INPUT_OFFSET + i + 48], + ); + let c1 = builder.mul_extension(sub_tmp1, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + MULTIPLY_BY_01_T8_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i], + local_values[start_col + MULTIPLY_BY_01_B0_OFFSET + i], + ); + let c2 = builder.mul_extension(sub_tmp2, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + } + add_fp2_mul_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + MULTIPLY_BY_01_T8_CALC_OFFSET, + bit_selector, + ); + + // Z + for i in 0..12 { + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_01_Z_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values + [start_col + MULTIPLY_BY_01_T8_CALC_OFFSET + Z1_REDUCE_OFFSET + 
REDUCED_OFFSET + i], + ); + let c1 = builder.mul_extension( + sub_tmp1, + local_values[start_col + + MULTIPLY_BY_01_Z_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_01_Z_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_X_OFFSET + + i], + local_values + [start_col + MULTIPLY_BY_01_T8_CALC_OFFSET + Z2_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c2 = builder.mul_extension( + sub_tmp2, + local_values[start_col + + MULTIPLY_BY_01_Z_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + + let sub_tmp3 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_01_Z_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_Y_OFFSET + + i], + local_values + [start_col + MULTIPLY_BY_01_T1_CALC_OFFSET + Z1_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c3 = builder.mul_extension( + sub_tmp3, + local_values[start_col + + MULTIPLY_BY_01_Z_CALC_OFFSET + + FP2_ADDITION_0_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c3); + yield_constr.constraint(builder, c); + + let sub_tmp4 = builder.sub_extension( + local_values[start_col + + MULTIPLY_BY_01_Z_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_Y_OFFSET + + i], + local_values + [start_col + MULTIPLY_BY_01_T1_CALC_OFFSET + Z2_REDUCE_OFFSET + REDUCED_OFFSET + i], + ); + let c4 = builder.mul_extension( + sub_tmp4, + local_values[start_col + + MULTIPLY_BY_01_Z_CALC_OFFSET + + FP2_ADDITION_1_OFFSET + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c4); + yield_constr.constraint(builder, c); + } + add_addition_with_reduction_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + 
MULTIPLY_BY_01_Z_CALC_OFFSET, + bit_selector, + ); +} + +/// Constraints for [forbenius_map](super::native::Fp6::forbenius_map) function. +/// +/// Constraints both input and power across this and next row, wherever selector is set to on. Constraint the divisor and remainder with power for `power == divisor*6 + remainder`. Constraints the bit decomposition as `remainder == bit0 + bit1*2 + bit2*4`. Selects the forbenius constant using mupliplexer logic. Then constraints fp2 forbenius map, multiplication, reduction and range check operations. +pub fn add_fp6_forbenius_map_constraints< + F: RichField + Extendable, + const D: usize, + FE, + P, + const D2: usize, +>( + local_values: &[P], + next_values: &[P], + yield_constr: &mut ConstraintConsumer
<P>, + start_col: usize, + bit_selector: Option<P>
, +) where + FE: FieldExtension, + P: PackedField, +{ + for i in 0..24 * 3 { + yield_constr.constraint_transition( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + FP6_FORBENIUS_MAP_SELECTOR_OFFSET] + * (local_values[start_col + FP6_FORBENIUS_MAP_INPUT_OFFSET + i] + - next_values[start_col + FP6_FORBENIUS_MAP_INPUT_OFFSET + i]), + ); + } + yield_constr.constraint_transition( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + FP6_FORBENIUS_MAP_SELECTOR_OFFSET] + * (local_values[start_col + FP6_FORBENIUS_MAP_POW_OFFSET] + - next_values[start_col + FP6_FORBENIUS_MAP_POW_OFFSET]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + FP6_FORBENIUS_MAP_SELECTOR_OFFSET] + * (local_values[start_col + FP6_FORBENIUS_MAP_DIV_OFFSET] + * FE::from_canonical_usize(6) + + local_values[start_col + FP6_FORBENIUS_MAP_REM_OFFSET] + - local_values[start_col + FP6_FORBENIUS_MAP_POW_OFFSET]), + ); + let bit0 = local_values[start_col + FP6_FORBENIUS_MAP_BIT0_OFFSET]; + let bit1 = local_values[start_col + FP6_FORBENIUS_MAP_BIT1_OFFSET]; + let bit2 = local_values[start_col + FP6_FORBENIUS_MAP_BIT2_OFFSET]; + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + FP6_FORBENIUS_MAP_SELECTOR_OFFSET] + * (bit0 + bit1 * FE::TWO + bit2 * FE::from_canonical_usize(4) + - local_values[start_col + FP6_FORBENIUS_MAP_REM_OFFSET]), + ); + let forbenius_coefficients_1 = Fp6::forbenius_coefficients_1() + .iter() + .map(|fp2| fp2.get_u32_slice().concat().try_into().unwrap()) + .collect::>(); + let forbenius_coefficients_2 = Fp6::forbenius_coefficients_2() + .iter() + .map(|fp2| fp2.get_u32_slice().concat().try_into().unwrap()) + .collect::>(); + let y1 = (0..24) + .map(|i| { + (P::ONES - bit0) + * (P::ONES - bit1) + * FE::from_canonical_u32(forbenius_coefficients_1[0][i]) + + (bit0) * (P::ONES - bit1) * FE::from_canonical_u32(forbenius_coefficients_1[1][i]) + + (P::ONES - bit0) * (bit1) * 
FE::from_canonical_u32(forbenius_coefficients_1[2][i]) + + (bit0) * (bit1) * FE::from_canonical_u32(forbenius_coefficients_1[3][i]) + }) + .collect::>(); + let y2 = (0..24) + .map(|i| { + (P::ONES - bit0) + * (P::ONES - bit1) + * FE::from_canonical_u32(forbenius_coefficients_2[0][i]) + + (bit0) * (P::ONES - bit1) * FE::from_canonical_u32(forbenius_coefficients_2[1][i]) + + (P::ONES - bit0) * (bit1) * FE::from_canonical_u32(forbenius_coefficients_2[2][i]) + + (bit0) * (bit1) * FE::from_canonical_u32(forbenius_coefficients_2[3][i]) + }) + .collect::>(); + + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values + [start_col + FP6_FORBENIUS_MAP_X_CALC_OFFSET + FP2_FORBENIUS_MAP_SELECTOR_OFFSET] + * (local_values + [start_col + FP6_FORBENIUS_MAP_X_CALC_OFFSET + FP2_FORBENIUS_MAP_POW_OFFSET] + - local_values[start_col + FP6_FORBENIUS_MAP_POW_OFFSET]), + ); + for i in 0..24 { + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + FP6_FORBENIUS_MAP_X_CALC_OFFSET + + FP2_FORBENIUS_MAP_SELECTOR_OFFSET] + * (local_values[start_col + + FP6_FORBENIUS_MAP_X_CALC_OFFSET + + FP2_FORBENIUS_MAP_INPUT_OFFSET + + i] + - local_values[start_col + FP6_FORBENIUS_MAP_INPUT_OFFSET + i]), + ); + } + add_fp2_forbenius_map_constraints( + local_values, + next_values, + yield_constr, + start_col + FP6_FORBENIUS_MAP_X_CALC_OFFSET, + bit_selector, + ); + + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values + [start_col + FP6_FORBENIUS_MAP_T0_CALC_OFFSET + FP2_FORBENIUS_MAP_SELECTOR_OFFSET] + * (local_values + [start_col + FP6_FORBENIUS_MAP_T0_CALC_OFFSET + FP2_FORBENIUS_MAP_POW_OFFSET] + - local_values[start_col + FP6_FORBENIUS_MAP_POW_OFFSET]), + ); + for i in 0..24 { + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + FP6_FORBENIUS_MAP_T0_CALC_OFFSET + + FP2_FORBENIUS_MAP_SELECTOR_OFFSET] + * (local_values[start_col + + FP6_FORBENIUS_MAP_T0_CALC_OFFSET + + 
FP2_FORBENIUS_MAP_INPUT_OFFSET + + i] + - local_values[start_col + FP6_FORBENIUS_MAP_INPUT_OFFSET + i + 24]), + ); + } + add_fp2_forbenius_map_constraints( + local_values, + next_values, + yield_constr, + start_col + FP6_FORBENIUS_MAP_T0_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values + [start_col + FP6_FORBENIUS_MAP_Y_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values + [start_col + FP6_FORBENIUS_MAP_Y_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i] + - local_values[start_col + + FP6_FORBENIUS_MAP_T0_CALC_OFFSET + + FP2_FORBENIUS_MAP_INPUT_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values + [start_col + FP6_FORBENIUS_MAP_Y_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + + FP6_FORBENIUS_MAP_Y_CALC_OFFSET + + FP2_FP2_X_INPUT_OFFSET + + i + + 12] + - local_values[start_col + + FP6_FORBENIUS_MAP_T0_CALC_OFFSET + + FP2_FORBENIUS_MAP_T0_CALC_OFFSET + + FP_MULTIPLICATION_TOTAL_COLUMNS + + REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values + [start_col + FP6_FORBENIUS_MAP_Y_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values + [start_col + FP6_FORBENIUS_MAP_Y_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i] + - y1[i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values + [start_col + FP6_FORBENIUS_MAP_Y_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + + FP6_FORBENIUS_MAP_Y_CALC_OFFSET + + FP2_FP2_Y_INPUT_OFFSET + + i + + 12] + - y1[i + 12]), + ); + } + add_fp2_mul_constraints( + local_values, + next_values, + yield_constr, + start_col + FP6_FORBENIUS_MAP_Y_CALC_OFFSET, + bit_selector, + ); + + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values + [start_col + FP6_FORBENIUS_MAP_T1_CALC_OFFSET + FP2_FORBENIUS_MAP_SELECTOR_OFFSET] + * (local_values + [start_col + FP6_FORBENIUS_MAP_T1_CALC_OFFSET 
+ FP2_FORBENIUS_MAP_POW_OFFSET] + - local_values[start_col + FP6_FORBENIUS_MAP_POW_OFFSET]), + ); + for i in 0..24 { + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values[start_col + + FP6_FORBENIUS_MAP_T1_CALC_OFFSET + + FP2_FORBENIUS_MAP_SELECTOR_OFFSET] + * (local_values[start_col + + FP6_FORBENIUS_MAP_T1_CALC_OFFSET + + FP2_FORBENIUS_MAP_INPUT_OFFSET + + i] + - local_values[start_col + FP6_FORBENIUS_MAP_INPUT_OFFSET + i + 48]), + ); + } + add_fp2_forbenius_map_constraints( + local_values, + next_values, + yield_constr, + start_col + FP6_FORBENIUS_MAP_T1_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values + [start_col + FP6_FORBENIUS_MAP_Z_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values + [start_col + FP6_FORBENIUS_MAP_Z_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i] + - local_values[start_col + + FP6_FORBENIUS_MAP_T1_CALC_OFFSET + + FP2_FORBENIUS_MAP_INPUT_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values + [start_col + FP6_FORBENIUS_MAP_Z_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + + FP6_FORBENIUS_MAP_Z_CALC_OFFSET + + FP2_FP2_X_INPUT_OFFSET + + i + + 12] + - local_values[start_col + + FP6_FORBENIUS_MAP_T1_CALC_OFFSET + + FP2_FORBENIUS_MAP_T0_CALC_OFFSET + + FP_MULTIPLICATION_TOTAL_COLUMNS + + REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values + [start_col + FP6_FORBENIUS_MAP_Z_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values + [start_col + FP6_FORBENIUS_MAP_Z_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i] + - y2[i]), + ); + yield_constr.constraint( + bit_selector.unwrap_or(P::ONES) + * local_values + [start_col + FP6_FORBENIUS_MAP_Z_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET] + * (local_values[start_col + + FP6_FORBENIUS_MAP_Z_CALC_OFFSET + + FP2_FP2_Y_INPUT_OFFSET + + i + + 12] + - y2[i + 12]), + ); + } + 
add_fp2_mul_constraints( + local_values, + next_values, + yield_constr, + start_col + FP6_FORBENIUS_MAP_Z_CALC_OFFSET, + bit_selector, + ); +} + +pub fn add_fp6_forbenius_map_constraints_ext_circuit< + F: RichField + Extendable, + const D: usize, +>( + builder: &mut CircuitBuilder, + yield_constr: &mut RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + next_values: &[ExtensionTarget], + start_col: usize, + bit_selector: Option>, +) { + let bit_selector_val = bit_selector.unwrap_or(builder.constant_extension(F::Extension::ONE)); + let tmp = builder.mul_extension( + bit_selector_val, + local_values[start_col + FP6_FORBENIUS_MAP_SELECTOR_OFFSET], + ); + + for i in 0..24 * 3 { + let c = builder.sub_extension( + local_values[start_col + FP6_FORBENIUS_MAP_INPUT_OFFSET + i], + next_values[start_col + FP6_FORBENIUS_MAP_INPUT_OFFSET + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint_transition(builder, c); + } + let c = builder.sub_extension( + local_values[start_col + FP6_FORBENIUS_MAP_POW_OFFSET], + next_values[start_col + FP6_FORBENIUS_MAP_POW_OFFSET], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint_transition(builder, c); + + let six = builder.constant_extension(F::Extension::from_canonical_u32(6)); + let c = builder.mul_extension(local_values[start_col + FP6_FORBENIUS_MAP_DIV_OFFSET], six); + let c = builder.add_extension(c, local_values[start_col + FP6_FORBENIUS_MAP_REM_OFFSET]); + let c = builder.sub_extension(c, local_values[start_col + FP6_FORBENIUS_MAP_POW_OFFSET]); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + + let bit0 = local_values[start_col + FP6_FORBENIUS_MAP_BIT0_OFFSET]; + let bit1 = local_values[start_col + FP6_FORBENIUS_MAP_BIT1_OFFSET]; + let bit2 = local_values[start_col + FP6_FORBENIUS_MAP_BIT2_OFFSET]; + + let one = builder.constant_extension(F::Extension::ONE); + let one_bit0 = builder.sub_extension(one, bit0); + let one_bit1 = 
builder.sub_extension(one, bit1); + + let two = builder.constant_extension(F::Extension::TWO); + let four = builder.constant_extension(F::Extension::from_canonical_u32(4)); + let mul1 = builder.mul_extension(bit1, two); + let mul2 = builder.mul_extension(bit2, four); + let c = builder.add_extension(bit0, mul1); + let c = builder.add_extension(c, mul2); + let c = builder.sub_extension(c, local_values[start_col + FP6_FORBENIUS_MAP_REM_OFFSET]); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + + let forbenius_coefficients_1 = Fp6::forbenius_coefficients_1() + .iter() + .map(|fp2| fp2.get_u32_slice().concat().try_into().unwrap()) + .collect::>(); + let forbenius_coefficients_2 = Fp6::forbenius_coefficients_2() + .iter() + .map(|fp2| fp2.get_u32_slice().concat().try_into().unwrap()) + .collect::>(); + let y1 = (0..24) + .map(|i| { + let const1 = builder.constant_extension(F::Extension::from_canonical_u32( + forbenius_coefficients_1[0][i], + )); + let const2 = builder.constant_extension(F::Extension::from_canonical_u32( + forbenius_coefficients_1[1][i], + )); + let const3 = builder.constant_extension(F::Extension::from_canonical_u32( + forbenius_coefficients_1[2][i], + )); + let const4 = builder.constant_extension(F::Extension::from_canonical_u32( + forbenius_coefficients_1[3][i], + )); + + let bit = builder.mul_extension(one_bit0, one_bit1); + let mul1 = builder.mul_extension(bit, const1); + + let bit = builder.mul_extension(bit0, one_bit1); + let mul2 = builder.mul_extension(bit, const2); + + let bit = builder.mul_extension(one_bit0, bit1); + let mul3 = builder.mul_extension(bit, const3); + + let bit = builder.mul_extension(bit0, bit1); + let mul4 = builder.mul_extension(bit, const4); + + let c = builder.add_extension(mul1, mul2); + let c = builder.add_extension(c, mul3); + let c = builder.add_extension(c, mul4); + c + }) + .collect::>>(); + let y2 = (0..24) + .map(|i| { + let const1 = 
builder.constant_extension(F::Extension::from_canonical_u32( + forbenius_coefficients_2[0][i], + )); + let const2 = builder.constant_extension(F::Extension::from_canonical_u32( + forbenius_coefficients_2[1][i], + )); + let const3 = builder.constant_extension(F::Extension::from_canonical_u32( + forbenius_coefficients_2[2][i], + )); + let const4 = builder.constant_extension(F::Extension::from_canonical_u32( + forbenius_coefficients_2[3][i], + )); + + let bit = builder.mul_extension(one_bit0, one_bit1); + let mul1 = builder.mul_extension(bit, const1); + + let bit = builder.mul_extension(bit0, one_bit1); + let mul2 = builder.mul_extension(bit, const2); + + let bit = builder.mul_extension(one_bit0, bit1); + let mul3 = builder.mul_extension(bit, const3); + + let bit = builder.mul_extension(bit0, bit1); + let mul4 = builder.mul_extension(bit, const4); + + let c = builder.add_extension(mul1, mul2); + let c = builder.add_extension(c, mul3); + let c = builder.add_extension(c, mul4); + c + }) + .collect::>>(); + + let tmp = builder.mul_extension( + bit_selector_val, + local_values + [start_col + FP6_FORBENIUS_MAP_X_CALC_OFFSET + FP2_FORBENIUS_MAP_SELECTOR_OFFSET], + ); + let c = builder.sub_extension( + local_values[start_col + FP6_FORBENIUS_MAP_X_CALC_OFFSET + FP2_FORBENIUS_MAP_POW_OFFSET], + local_values[start_col + FP6_FORBENIUS_MAP_POW_OFFSET], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + for i in 0..24 { + let c = builder.sub_extension( + local_values + [start_col + FP6_FORBENIUS_MAP_X_CALC_OFFSET + FP2_FORBENIUS_MAP_INPUT_OFFSET + i], + local_values[start_col + FP6_FORBENIUS_MAP_INPUT_OFFSET + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + } + add_fp2_forbenius_map_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + FP6_FORBENIUS_MAP_X_CALC_OFFSET, + bit_selector, + ); + + let tmp = builder.mul_extension( + bit_selector_val, + local_values + 
[start_col + FP6_FORBENIUS_MAP_T0_CALC_OFFSET + FP2_FORBENIUS_MAP_SELECTOR_OFFSET], + ); + let c = builder.sub_extension( + local_values[start_col + FP6_FORBENIUS_MAP_T0_CALC_OFFSET + FP2_FORBENIUS_MAP_POW_OFFSET], + local_values[start_col + FP6_FORBENIUS_MAP_POW_OFFSET], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + for i in 0..24 { + let c = builder.sub_extension( + local_values + [start_col + FP6_FORBENIUS_MAP_T0_CALC_OFFSET + FP2_FORBENIUS_MAP_INPUT_OFFSET + i], + local_values[start_col + FP6_FORBENIUS_MAP_INPUT_OFFSET + i + 24], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + } + add_fp2_forbenius_map_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + FP6_FORBENIUS_MAP_T0_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + let tmp = builder.mul_extension( + bit_selector_val, + local_values[start_col + FP6_FORBENIUS_MAP_Y_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET], + ); + + let c = builder.sub_extension( + local_values[start_col + FP6_FORBENIUS_MAP_Y_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i], + local_values + [start_col + FP6_FORBENIUS_MAP_T0_CALC_OFFSET + FP2_FORBENIUS_MAP_INPUT_OFFSET + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values + [start_col + FP6_FORBENIUS_MAP_Y_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i + 12], + local_values[start_col + + FP6_FORBENIUS_MAP_T0_CALC_OFFSET + + FP2_FORBENIUS_MAP_T0_CALC_OFFSET + + FP_MULTIPLICATION_TOTAL_COLUMNS + + REDUCED_OFFSET + + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values[start_col + FP6_FORBENIUS_MAP_Y_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i], + y1[i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values + [start_col + 
FP6_FORBENIUS_MAP_Y_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i + 12], + y1[i + 12], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + } + add_fp2_mul_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + FP6_FORBENIUS_MAP_Y_CALC_OFFSET, + bit_selector, + ); + + let tmp = builder.mul_extension( + bit_selector_val, + local_values + [start_col + FP6_FORBENIUS_MAP_T1_CALC_OFFSET + FP2_FORBENIUS_MAP_SELECTOR_OFFSET], + ); + let c = builder.sub_extension( + local_values[start_col + FP6_FORBENIUS_MAP_T1_CALC_OFFSET + FP2_FORBENIUS_MAP_POW_OFFSET], + local_values[start_col + FP6_FORBENIUS_MAP_POW_OFFSET], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + for i in 0..24 { + let c = builder.sub_extension( + local_values + [start_col + FP6_FORBENIUS_MAP_T1_CALC_OFFSET + FP2_FORBENIUS_MAP_INPUT_OFFSET + i], + local_values[start_col + FP6_FORBENIUS_MAP_INPUT_OFFSET + i + 48], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + } + add_fp2_forbenius_map_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + FP6_FORBENIUS_MAP_T1_CALC_OFFSET, + bit_selector, + ); + + for i in 0..12 { + let tmp = builder.mul_extension( + bit_selector_val, + local_values[start_col + FP6_FORBENIUS_MAP_Z_CALC_OFFSET + FP2_FP2_SELECTOR_OFFSET], + ); + + let c = builder.sub_extension( + local_values[start_col + FP6_FORBENIUS_MAP_Z_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i], + local_values + [start_col + FP6_FORBENIUS_MAP_T1_CALC_OFFSET + FP2_FORBENIUS_MAP_INPUT_OFFSET + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values + [start_col + FP6_FORBENIUS_MAP_Z_CALC_OFFSET + FP2_FP2_X_INPUT_OFFSET + i + 12], + local_values[start_col + + FP6_FORBENIUS_MAP_T1_CALC_OFFSET + + FP2_FORBENIUS_MAP_T0_CALC_OFFSET + + 
FP_MULTIPLICATION_TOTAL_COLUMNS + + REDUCED_OFFSET + + i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values[start_col + FP6_FORBENIUS_MAP_Z_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i], + y2[i], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values + [start_col + FP6_FORBENIUS_MAP_Z_CALC_OFFSET + FP2_FP2_Y_INPUT_OFFSET + i + 12], + y2[i + 12], + ); + let c = builder.mul_extension(tmp, c); + yield_constr.constraint(builder, c); + } + add_fp2_mul_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + FP6_FORBENIUS_MAP_Z_CALC_OFFSET, + bit_selector, + ); +} diff --git a/casper-finality-proofs/src/verification/fields/starky/mod.rs b/casper-finality-proofs/src/verification/fields/starky/mod.rs new file mode 100644 index 000000000..70210b925 --- /dev/null +++ b/casper-finality-proofs/src/verification/fields/starky/mod.rs @@ -0,0 +1,4 @@ +pub mod fp; +pub mod fp12; +pub mod fp2; +pub mod fp6; diff --git a/casper-finality-proofs/src/verification/mod.rs b/casper-finality-proofs/src/verification/mod.rs new file mode 100644 index 000000000..ef4fe65bb --- /dev/null +++ b/casper-finality-proofs/src/verification/mod.rs @@ -0,0 +1,7 @@ +pub mod fields; +pub mod curves; +pub mod proofs; +pub mod pubkey_to_g1; +pub mod aggregation; +pub mod utils; +pub mod verify; diff --git a/casper-finality-proofs/src/verification/proofs/ecc_aggregate.rs b/casper-finality-proofs/src/verification/proofs/ecc_aggregate.rs new file mode 100644 index 000000000..bbb1c8edf --- /dev/null +++ b/casper-finality-proofs/src/verification/proofs/ecc_aggregate.rs @@ -0,0 +1,679 @@ +use itertools::Itertools; +use plonky2::{ + field::{ + extension::{Extendable, FieldExtension}, + packed::PackedField, + }, + hash::hash_types::RichField, + iop::ext_target::ExtensionTarget, +}; +use starky::{ + 
constraint_consumer::ConstraintConsumer, + evaluation_frame::{StarkEvaluationFrame, StarkFrame}, + stark::Stark, +}; + +use crate::verification::{ + curves::starky::g1::{ + add_g1_addition_constraints, add_g1_addition_constraints_ext_circuit, + fill_trace_g1_addition, G1_POINT_ADDITION_X1, G1_POINT_ADDITION_X2, G1_POINT_ADDITION_X3, + G1_POINT_ADDITION_Y1, G1_POINT_ADDITION_Y2, G1_POINT_ADDITION_Y3, TOT_COL, + }, + utils::native_bls::Fp, +}; + +pub const NUM_POINTS: usize = 1; + +pub const ROW_NUM: usize = 0; +pub const PIS_IDX: usize = ROW_NUM + 12; +pub const A_IS_INF: usize = PIS_IDX + NUM_POINTS; +pub const B_IS_INF: usize = A_IS_INF + 1; +pub const OP: usize = B_IS_INF + 1; +pub const TOTAL_COLUMNS: usize = OP + TOT_COL; +pub const COLUMNS: usize = TOTAL_COLUMNS; + +pub const POINTS: usize = 0; +pub const BITS: usize = POINTS + 24 * NUM_POINTS; +pub const RES: usize = BITS + NUM_POINTS; +pub const PUBLIC_INPUTS: usize = RES + 24; + +#[derive(Clone, Copy)] +pub struct ECCAggStark, const D: usize> { + num_rows: usize, + _f: std::marker::PhantomData, +} + +impl, const D: usize> ECCAggStark { + pub fn new(num_rows: usize) -> Self { + Self { + num_rows, + _f: std::marker::PhantomData, + } + } + + /// Ensure that both of the initial points, i.e. points\[0\] and points\[1\], at least one of them is not a point at infinty. 
+ pub fn generate_trace(&self, points: &[[Fp; 2]], bits: &[bool]) -> Vec<[F; TOTAL_COLUMNS]> { + assert_eq!(NUM_POINTS, points.len()); + assert_eq!(points.len(), bits.len()); + let num_additions = points.len() - 1; + let num_rows_req = num_additions * 12; + assert!( + num_rows_req < self.num_rows, + "stark doesn't have enough rows" + ); + + let mut trace = vec![[F::ZERO; TOTAL_COLUMNS]; self.num_rows]; + (0..self.num_rows).chunks(12).into_iter().for_each(|c| { + c.into_iter().for_each(|i| { + trace[i][ROW_NUM + i % 12] = F::ONE; + }); + }); + let mut row = 0; + for i in 0..NUM_POINTS { + if i < 2 { + println!("enters"); + let dali_shte_se_printira = (row..row + 12).into_iter().for_each(|rw| { + trace[rw][PIS_IDX + i] = F::ONE; + println!("trace[rw][PIS_IDX + i] is: {:?}", trace[rw][PIS_IDX + i]); + println!("trace[rw][PIS_IDX + i] is: {:?}", rw); + }); + println!("dali_shte_se_printira is: {:?}", dali_shte_se_printira); + } else { + row += 12; + (row..row + 12) + .into_iter() + .for_each(|rw| trace[rw][PIS_IDX + i] = F::ONE); + } + } + row = 0; + let mut res = fill_trace_g1_addition(&mut trace, &points[0], &points[1], row, OP); + for r in row..row + 12 { + trace[r][A_IS_INF] = F::from_bool(!bits[0]); + trace[r][B_IS_INF] = F::from_bool(!bits[1]); + } + if !bits[0] { + res = points[1]; + } else if !bits[1] { + res = points[0]; + } + for i in 2..NUM_POINTS { + row += 12; + let res_tmp = fill_trace_g1_addition(&mut trace, &res, &points[i], row, OP); + for r in row..row + 12 { + trace[r][A_IS_INF] = F::from_bool(false); + trace[r][B_IS_INF] = F::from_bool(!bits[i]); + } + if bits[i] { + res = res_tmp; + } + } + trace + } +} + +// Implement constraint generator +impl, const D: usize> Stark for ECCAggStark { + type EvaluationFrame = StarkFrame + where + FE: FieldExtension, + P: PackedField; + + fn eval_packed_generic( + &self, + vars: &Self::EvaluationFrame, + yield_constr: &mut ConstraintConsumer
<P>
, + ) where + FE: FieldExtension, + P: PackedField, + { + let local_values = vars.get_local_values(); + let next_values = vars.get_next_values(); + let public_inputs = vars.get_public_inputs(); + + for i in 0..12 { + if i == 0 { + yield_constr.constraint_first_row(local_values[ROW_NUM] - FE::ONE); + } else { + yield_constr.constraint_first_row(local_values[ROW_NUM + i]) + } + } + for i in 0..12 { + if i < 11 { + yield_constr.constraint_transition( + local_values[ROW_NUM + i] - next_values[ROW_NUM + i + 1], + ); + } else { + yield_constr + .constraint_transition(local_values[ROW_NUM + i] - next_values[ROW_NUM]); + } + } + + for i in 0..NUM_POINTS { + if i < 2 { + yield_constr.constraint_first_row(local_values[PIS_IDX + i] - FE::ONE); + } else { + yield_constr.constraint_first_row(local_values[PIS_IDX + i]); + } + } + + for i in 1..NUM_POINTS - 1 { + yield_constr.constraint_transition( + (P::ONES - local_values[PIS_IDX + NUM_POINTS - 1]) + * next_values[ROW_NUM] + * (local_values[PIS_IDX + i] - next_values[PIS_IDX + i + 1]), + ); + } + + for i in 0..NUM_POINTS { + yield_constr.constraint_transition( + local_values[PIS_IDX + NUM_POINTS - 1] + * next_values[ROW_NUM] + * next_values[PIS_IDX + i], + ); + } + + for i in 0..12 { + yield_constr.constraint_first_row( + local_values[OP + G1_POINT_ADDITION_X1 + i] - public_inputs[POINTS + i], + ); + yield_constr.constraint_first_row( + local_values[OP + G1_POINT_ADDITION_Y1 + i] - public_inputs[POINTS + i + 12], + ); + yield_constr.constraint_first_row( + local_values[OP + G1_POINT_ADDITION_X2 + i] - public_inputs[POINTS + 24 + i], + ); + yield_constr.constraint_first_row( + local_values[OP + G1_POINT_ADDITION_Y2 + i] - public_inputs[POINTS + 24 + i + 12], + ); + } + + yield_constr.constraint_first_row(P::ONES - local_values[A_IS_INF] - public_inputs[BITS]); + yield_constr + .constraint_first_row(P::ONES - local_values[B_IS_INF] - public_inputs[BITS + 1]); + + for idx in 2..NUM_POINTS { + for i in 0..12 { + 
yield_constr.constraint_transition( + next_values[ROW_NUM] + * next_values[PIS_IDX + idx] + * (next_values[OP + G1_POINT_ADDITION_X2 + i] + - public_inputs[POINTS + 24 * idx + i]), + ); + yield_constr.constraint_transition( + next_values[ROW_NUM] + * next_values[PIS_IDX + idx] + * (next_values[OP + G1_POINT_ADDITION_Y2 + i] + - public_inputs[POINTS + 24 * idx + i + 12]), + ); + } + yield_constr.constraint_transition( + next_values[ROW_NUM] + * next_values[PIS_IDX + idx] + * (P::ONES - next_values[B_IS_INF] - public_inputs[BITS + idx]), + ); + } + + for i in 0..12 { + yield_constr.constraint_transition( + (P::ONES - next_values[ROW_NUM]) + * (local_values[OP + G1_POINT_ADDITION_X1 + i] + - next_values[OP + G1_POINT_ADDITION_X1 + i]), + ); + yield_constr.constraint_transition( + (P::ONES - next_values[ROW_NUM]) + * (local_values[OP + G1_POINT_ADDITION_Y1 + i] + - next_values[OP + G1_POINT_ADDITION_Y1 + i]), + ); + yield_constr.constraint_transition( + (P::ONES - next_values[ROW_NUM]) + * (local_values[OP + G1_POINT_ADDITION_X2 + i] + - next_values[OP + G1_POINT_ADDITION_X2 + i]), + ); + yield_constr.constraint_transition( + (P::ONES - next_values[ROW_NUM]) + * (local_values[OP + G1_POINT_ADDITION_Y2 + i] + - next_values[OP + G1_POINT_ADDITION_Y2 + i]), + ); + yield_constr.constraint_transition( + (P::ONES - next_values[ROW_NUM]) + * (local_values[OP + G1_POINT_ADDITION_X3 + i] + - next_values[OP + G1_POINT_ADDITION_X3 + i]), + ); + yield_constr.constraint_transition( + (P::ONES - next_values[ROW_NUM]) + * (local_values[OP + G1_POINT_ADDITION_Y3 + i] + - next_values[OP + G1_POINT_ADDITION_Y3 + i]), + ); + } + + yield_constr.constraint(local_values[A_IS_INF] * (P::ONES - local_values[A_IS_INF])); + yield_constr.constraint(local_values[B_IS_INF] * (P::ONES - local_values[B_IS_INF])); + yield_constr.constraint(local_values[A_IS_INF] * local_values[B_IS_INF]); + + yield_constr.constraint_transition( + (P::ONES - next_values[ROW_NUM]) * (local_values[A_IS_INF] - 
next_values[A_IS_INF]), + ); + yield_constr.constraint_transition( + (P::ONES - next_values[ROW_NUM]) * (local_values[B_IS_INF] - next_values[B_IS_INF]), + ); + + for i in 0..12 { + yield_constr.constraint_transition( + next_values[ROW_NUM] + * (P::ONES - local_values[PIS_IDX + NUM_POINTS - 1]) + * (local_values[A_IS_INF] * local_values[OP + G1_POINT_ADDITION_X2 + i] + + local_values[B_IS_INF] * local_values[OP + G1_POINT_ADDITION_X1 + i] + + (P::ONES - local_values[A_IS_INF] - local_values[B_IS_INF]) + * local_values[OP + G1_POINT_ADDITION_X3 + i] + - next_values[OP + G1_POINT_ADDITION_X1 + i]), + ); + yield_constr.constraint_transition( + next_values[ROW_NUM] + * (P::ONES - local_values[PIS_IDX + NUM_POINTS - 1]) + * (local_values[A_IS_INF] * local_values[OP + G1_POINT_ADDITION_Y2 + i] + + local_values[B_IS_INF] * local_values[OP + G1_POINT_ADDITION_Y1 + i] + + (P::ONES - local_values[A_IS_INF] - local_values[B_IS_INF]) + * local_values[OP + G1_POINT_ADDITION_Y3 + i] + - next_values[OP + G1_POINT_ADDITION_Y1 + i]), + ); + } + + add_g1_addition_constraints(local_values, next_values, yield_constr, OP, None); + + for i in 0..12 { + yield_constr.constraint_transition( + next_values[ROW_NUM] + * local_values[PIS_IDX + NUM_POINTS - 1] + * (local_values[A_IS_INF] * local_values[OP + G1_POINT_ADDITION_X2 + i] + + local_values[B_IS_INF] * local_values[OP + G1_POINT_ADDITION_X1 + i] + + (P::ONES - local_values[A_IS_INF] - local_values[B_IS_INF]) + * local_values[OP + G1_POINT_ADDITION_X3 + i] + - public_inputs[RES + i]), + ); + yield_constr.constraint_transition( + next_values[ROW_NUM] + * local_values[PIS_IDX + NUM_POINTS - 1] + * (local_values[A_IS_INF] * local_values[OP + G1_POINT_ADDITION_Y2 + i] + + local_values[B_IS_INF] * local_values[OP + G1_POINT_ADDITION_Y1 + i] + + (P::ONES - local_values[A_IS_INF] - local_values[B_IS_INF]) + * local_values[OP + G1_POINT_ADDITION_Y3 + i] + - public_inputs[RES + i + 12]), + ); + } + } + + type EvaluationFrameTarget = + 
StarkFrame, ExtensionTarget, COLUMNS, PUBLIC_INPUTS>; + + fn eval_ext_circuit( + &self, + builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder, + vars: &Self::EvaluationFrameTarget, + yield_constr: &mut starky::constraint_consumer::RecursiveConstraintConsumer, + ) { + let local_values = vars.get_local_values(); + let next_values = vars.get_next_values(); + let public_inputs = vars.get_public_inputs(); + + for i in 0..12 { + if i == 0 { + let one = builder.one_extension(); + let c = builder.sub_extension(local_values[ROW_NUM], one); + yield_constr.constraint_first_row(builder, c); + } else { + yield_constr.constraint_first_row(builder, local_values[ROW_NUM + i]); + } + } + for i in 0..12 { + if i < 11 { + let c = + builder.sub_extension(local_values[ROW_NUM + i], next_values[ROW_NUM + i + 1]); + yield_constr.constraint_transition(builder, c); + } else { + let c = builder.sub_extension(local_values[ROW_NUM + i], next_values[ROW_NUM]); + yield_constr.constraint_transition(builder, c); + } + } + + for i in 0..NUM_POINTS { + if i < 2 { + let one = builder.one_extension(); + let c = builder.sub_extension(local_values[PIS_IDX + i], one); + yield_constr.constraint_first_row(builder, c); + } else { + yield_constr.constraint_first_row(builder, local_values[PIS_IDX + i]); + } + } + + for i in 1..NUM_POINTS - 1 { + let one = builder.one_extension(); + let sub1 = builder.sub_extension(one, local_values[PIS_IDX + NUM_POINTS - 1]); + let mul = builder.mul_extension(sub1, next_values[ROW_NUM]); + let sub2 = + builder.sub_extension(local_values[PIS_IDX + i], next_values[PIS_IDX + i + 1]); + let c = builder.mul_extension(mul, sub2); + yield_constr.constraint_transition(builder, c); + } + + for i in 0..NUM_POINTS { + let mul = + builder.mul_extension(local_values[PIS_IDX + NUM_POINTS - 1], next_values[ROW_NUM]); + let c = builder.mul_extension(mul, next_values[PIS_IDX + i]); + yield_constr.constraint_transition(builder, c); + } + + for i in 0..12 { + let c = 
builder.sub_extension( + local_values[OP + G1_POINT_ADDITION_X1 + i], + public_inputs[POINTS + i], + ); + yield_constr.constraint_first_row(builder, c); + let c = builder.sub_extension( + local_values[OP + G1_POINT_ADDITION_Y1 + i], + public_inputs[POINTS + i + 12], + ); + yield_constr.constraint_first_row(builder, c); + let c = builder.sub_extension( + local_values[OP + G1_POINT_ADDITION_X2 + i], + public_inputs[POINTS + 24 + i], + ); + yield_constr.constraint_first_row(builder, c); + let c = builder.sub_extension( + local_values[OP + G1_POINT_ADDITION_Y2 + i], + public_inputs[POINTS + 24 + i + 12], + ); + yield_constr.constraint_first_row(builder, c); + } + + let one = builder.one_extension(); + let c = builder.sub_extension(one, local_values[A_IS_INF]); + let c = builder.sub_extension(c, public_inputs[BITS]); + yield_constr.constraint_first_row(builder, c); + let c = builder.sub_extension(one, local_values[B_IS_INF]); + let c = builder.sub_extension(c, public_inputs[BITS + 1]); + yield_constr.constraint_first_row(builder, c); + + for idx in 2..NUM_POINTS { + for i in 0..12 { + let mul = builder.mul_extension(next_values[ROW_NUM], next_values[PIS_IDX + idx]); + let c = builder.sub_extension( + next_values[OP + G1_POINT_ADDITION_X2 + i], + public_inputs[POINTS + 24 * idx + i], + ); + let c = builder.mul_extension(mul, c); + yield_constr.constraint_transition(builder, c); + + let c = builder.sub_extension( + next_values[OP + G1_POINT_ADDITION_Y2 + i], + public_inputs[POINTS + 24 * idx + i + 12], + ); + let c = builder.mul_extension(mul, c); + yield_constr.constraint_transition(builder, c); + } + let one = builder.one_extension(); + let mul = builder.mul_extension(next_values[ROW_NUM], next_values[PIS_IDX + idx]); + let c = builder.sub_extension(one, next_values[B_IS_INF]); + let c = builder.sub_extension(c, public_inputs[BITS + idx]); + let c = builder.mul_extension(mul, c); + yield_constr.constraint_transition(builder, c); + } + + for i in 0..12 { + let one = 
builder.one_extension(); + let sub1 = builder.sub_extension(one, next_values[ROW_NUM]); + + let c = builder.sub_extension( + local_values[OP + G1_POINT_ADDITION_X1 + i], + next_values[OP + G1_POINT_ADDITION_X1 + i], + ); + let c = builder.mul_extension(sub1, c); + yield_constr.constraint_transition(builder, c); + + let c = builder.sub_extension( + local_values[OP + G1_POINT_ADDITION_Y1 + i], + next_values[OP + G1_POINT_ADDITION_Y1 + i], + ); + let c = builder.mul_extension(sub1, c); + yield_constr.constraint_transition(builder, c); + + let c = builder.sub_extension( + local_values[OP + G1_POINT_ADDITION_X2 + i], + next_values[OP + G1_POINT_ADDITION_X2 + i], + ); + let c = builder.mul_extension(sub1, c); + yield_constr.constraint_transition(builder, c); + + let c = builder.sub_extension( + local_values[OP + G1_POINT_ADDITION_Y2 + i], + next_values[OP + G1_POINT_ADDITION_Y2 + i], + ); + let c = builder.mul_extension(sub1, c); + yield_constr.constraint_transition(builder, c); + + let c = builder.sub_extension( + local_values[OP + G1_POINT_ADDITION_X3 + i], + next_values[OP + G1_POINT_ADDITION_X3 + i], + ); + let c = builder.mul_extension(sub1, c); + yield_constr.constraint_transition(builder, c); + + let c = builder.sub_extension( + local_values[OP + G1_POINT_ADDITION_Y3 + i], + next_values[OP + G1_POINT_ADDITION_Y3 + i], + ); + let c = builder.mul_extension(sub1, c); + yield_constr.constraint_transition(builder, c); + } + + let one = builder.one_extension(); + let c = builder.sub_extension(one, local_values[A_IS_INF]); + let c = builder.mul_extension(local_values[A_IS_INF], c); + yield_constr.constraint(builder, c); + let c = builder.sub_extension(one, local_values[B_IS_INF]); + let c = builder.mul_extension(local_values[B_IS_INF], c); + yield_constr.constraint(builder, c); + let c = builder.mul_extension(local_values[A_IS_INF], local_values[B_IS_INF]); + yield_constr.constraint(builder, c); + + let sub1 = builder.sub_extension(one, next_values[ROW_NUM]); + let c = 
builder.sub_extension(local_values[A_IS_INF], next_values[A_IS_INF]); + let c = builder.mul_extension(sub1, c); + yield_constr.constraint_transition(builder, c); + + let c = builder.sub_extension(local_values[B_IS_INF], next_values[B_IS_INF]); + let c = builder.mul_extension(sub1, c); + yield_constr.constraint_transition(builder, c); + + for i in 0..12 { + let one = builder.one_extension(); + let sub1 = builder.sub_extension(one, local_values[PIS_IDX + NUM_POINTS - 1]); + let mul = builder.mul_extension(sub1, next_values[ROW_NUM]); + + let mul1 = builder.mul_extension( + local_values[A_IS_INF], + local_values[OP + G1_POINT_ADDITION_X2 + i], + ); + let mul2 = builder.mul_extension( + local_values[B_IS_INF], + local_values[OP + G1_POINT_ADDITION_X1 + i], + ); + let a_b_inf_or = builder.add_extension(local_values[A_IS_INF], local_values[B_IS_INF]); + let res_selector = builder.sub_extension(one, a_b_inf_or); + let mul3 = + builder.mul_extension(res_selector, local_values[OP + G1_POINT_ADDITION_X3 + i]); + let sel = builder.add_many_extension([mul1, mul2, mul3].iter()); + let sub2 = builder.sub_extension(sel, next_values[OP + G1_POINT_ADDITION_X1 + i]); + let c = builder.mul_extension(mul, sub2); + yield_constr.constraint_transition(builder, c); + + let mul1 = builder.mul_extension( + local_values[A_IS_INF], + local_values[OP + G1_POINT_ADDITION_Y2 + i], + ); + let mul2 = builder.mul_extension( + local_values[B_IS_INF], + local_values[OP + G1_POINT_ADDITION_Y1 + i], + ); + let a_b_inf_or = builder.add_extension(local_values[A_IS_INF], local_values[B_IS_INF]); + let res_selector = builder.sub_extension(one, a_b_inf_or); + let mul3 = + builder.mul_extension(res_selector, local_values[OP + G1_POINT_ADDITION_Y3 + i]); + let sel = builder.add_many_extension([mul1, mul2, mul3].iter()); + let sub2 = builder.sub_extension(sel, next_values[OP + G1_POINT_ADDITION_Y1 + i]); + let c = builder.mul_extension(mul, sub2); + yield_constr.constraint_transition(builder, c); + } + + 
add_g1_addition_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + OP, + None, + ); + + for i in 0..12 { + let mul = + builder.mul_extension(local_values[PIS_IDX + NUM_POINTS - 1], next_values[ROW_NUM]); + + let mul1 = builder.mul_extension( + local_values[A_IS_INF], + local_values[OP + G1_POINT_ADDITION_X2 + i], + ); + let mul2 = builder.mul_extension( + local_values[B_IS_INF], + local_values[OP + G1_POINT_ADDITION_X1 + i], + ); + let a_b_inf_or = builder.add_extension(local_values[A_IS_INF], local_values[B_IS_INF]); + let res_selector = builder.sub_extension(one, a_b_inf_or); + let mul3 = + builder.mul_extension(res_selector, local_values[OP + G1_POINT_ADDITION_X3 + i]); + let sel = builder.add_many_extension([mul1, mul2, mul3].iter()); + let sub2 = builder.sub_extension(sel, public_inputs[RES + i]); + let c = builder.mul_extension(mul, sub2); + yield_constr.constraint_transition(builder, c); + + let mul1 = builder.mul_extension( + local_values[A_IS_INF], + local_values[OP + G1_POINT_ADDITION_Y2 + i], + ); + let mul2 = builder.mul_extension( + local_values[B_IS_INF], + local_values[OP + G1_POINT_ADDITION_Y1 + i], + ); + let a_b_inf_or = builder.add_extension(local_values[A_IS_INF], local_values[B_IS_INF]); + let res_selector = builder.sub_extension(one, a_b_inf_or); + let mul3 = + builder.mul_extension(res_selector, local_values[OP + G1_POINT_ADDITION_Y3 + i]); + let sel = builder.add_many_extension([mul1, mul2, mul3].iter()); + let sub2 = builder.sub_extension(sel, public_inputs[RES + i + 12]); + let c = builder.mul_extension(mul, sub2); + yield_constr.constraint_transition(builder, c); + } + } + + fn constraint_degree(&self) -> usize { + 4 + } +} + +#[cfg(test)] +mod tests { + use std::{str::FromStr, time::Instant}; + + use num_bigint::BigUint; + use plonky2::{ + field::types::Field, + plonk::config::{GenericConfig, PoseidonGoldilocksConfig}, + util::timing::TimingTree, + }; + use starky::{ + config::StarkConfig, prover::prove, 
stark_testing::test_stark_circuit_constraints, + util::trace_rows_to_poly_values, verifier::verify_stark_proof, + }; + + use crate::verification::{ + proofs::ecc_aggregate::{ECCAggStark, PUBLIC_INPUTS}, + utils::native_bls::Fp, + }; + + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + #[test] + fn test_stark() { + let points = vec![ + [ + Fp::get_fp_from_biguint(BigUint::from_str("1126623738681067087257746233621637126057761795105632825039721241530561605789561587401946101488319534304696021688867").unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str("1234387340521756581420450482431370540586970509879406431297819985144306829815967947105769314182001941730344316001350").unwrap()), + ], + [ + Fp::get_fp_from_biguint(BigUint::from_str("2227077755005763044330380583051825752563137755581948302467438657174056912044402195092391651898529973204169901068783").unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str("3938630597268339120028117703099158472921664122822994005207596611335679241381714852577868690525915355086230780305947").unwrap()), + ], + [ + Fp::get_fp_from_biguint(BigUint::from_str("2053421366648413666933823320372384475868365657546365151314366566305943228812274803081977171134139973672272644986467").unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str("2207186006724644389273078744925282614132324652989136375251464172117985207634291886391161234997127125369095093999580").unwrap()), + ], + [ + Fp::get_fp_from_biguint(BigUint::from_str("1169033241627732070028418158513086714573774583755322258758869360993605333207425506760926925188242096359787524711048").unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str("1939098195156802692901565413605879295070375396037882592352367327738278109451906629210913080936054556354569965872525").unwrap()), + ], + [ + Fp::get_fp_from_biguint(BigUint::from_str("757685478162556714953738341385841404889192281968043194927346142717614691781995093597748984673977386749617478360670").unwrap()), + 
Fp::get_fp_from_biguint(BigUint::from_str("1622292944328971247507221252705479796178069451942507208036486950244675416773645537482644705283188691681973682881721").unwrap()), + ], + ]; + + let res = [ + Fp::get_fp_from_biguint(BigUint::from_str("234946323920378256253926848256725419729562691368307398516954172389919862735998320714103660807049810312016222352882").unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str("2296889008503245272184437522688584268823483443227037325600916018718669246920962157509760406568054757825100071836929").unwrap()), + ]; + + let bits = vec![true, true, true, true, false]; + + let mut config = StarkConfig::standard_fast_config(); + config.fri_config.rate_bits = 2; + let stark = ECCAggStark::::new(64); + let s = Instant::now(); + let mut public_inputs = Vec::::new(); + for pt in &points { + for x in &pt[0].0 { + public_inputs.push(F::from_canonical_u32(*x)); + } + for y in &pt[1].0 { + public_inputs.push(F::from_canonical_u32(*y)); + } + } + for b in bits.iter() { + public_inputs.push(F::from_bool(*b)); + } + for x in res[0].0 { + public_inputs.push(F::from_canonical_u32(x)); + } + for y in res[1].0 { + public_inputs.push(F::from_canonical_u32(y)); + } + assert_eq!(public_inputs.len(), PUBLIC_INPUTS); + let trace = stark.generate_trace(&points, &bits); + let trace_poly_values = trace_rows_to_poly_values(trace); + let proof = prove::, D>( + stark, + &config, + trace_poly_values, + &public_inputs, + &mut TimingTree::default(), + ) + .unwrap(); + println!("Time taken for acc_agg stark proof {:?}", s.elapsed()); + verify_stark_proof(stark, proof.clone(), &config).unwrap(); + } + + #[test] + fn test_stark_circuit() { + let stark = ECCAggStark::::new(64); + test_stark_circuit_constraints::, D>(stark).unwrap(); + } +} diff --git a/casper-finality-proofs/src/verification/proofs/final_exponentiate.rs b/casper-finality-proofs/src/verification/proofs/final_exponentiate.rs new file mode 100644 index 000000000..a1de105d4 --- /dev/null +++ 
b/casper-finality-proofs/src/verification/proofs/final_exponentiate.rs @@ -0,0 +1,2112 @@ +use plonky2::{ + field::{ + extension::{Extendable, FieldExtension}, + packed::PackedField, + types::Field, + }, + hash::hash_types::RichField, + iop::ext_target::ExtensionTarget, +}; +use starky::{ + constraint_consumer::ConstraintConsumer, + evaluation_frame::{StarkEvaluationFrame, StarkFrame}, + stark::Stark, +}; +use crate::verification::{ + utils::{starky_utils::*, native_bls::Fp12}, + fields::starky::{ + fp::*, + fp2::*, + fp6::*, + fp12::* + }, +}; + +/* + These offsets are for final_exponentiation function (super::native::Fp12::final_exponentiate). This stark needs 8192 rows. The Ti's are defined in the function definition. + FINAL_EXP_ROW_SELECTORS -> 8192 selectors set 1 for the corresponding row number. + FINAL_EXP_FORBENIUS_MAP_SELECTOR -> selector set 1 when the operation is Fp12 forbenius map. + FINAL_EXP_CYCLOTOMIC_EXP_SELECTOR -> selector set 1 when the operation is cyclotomicExponent. + FINAL_EXP_MUL_SELECTOR -> selector set 1 when the operation is fp12 multiplication. + FINAL_EXP_CYCLOTOMIC_SQ_SELECTOR -> selector set 1 when the operation is Fp12 forbenius map. + FINAL_EXP_CONJUGATE_SELECTOR -> selector set 1 when the operation is fp12 conjugate. + FINAL_EXP_INPUT_OFFSET -> offset where input of the function is set. + FINAL_EXP_T${i}_OFFSET -> offset where the Ti's are set. + FINAL_EXP_OP_OFFSET -> offset where the stark trace of an operation is filled. 
+*/ + +pub const FINAL_EXP_ROW_SELECTORS: usize = 0; +pub const FINAL_EXP_FORBENIUS_MAP_SELECTOR: usize = FINAL_EXP_ROW_SELECTORS + 8192; +pub const FINAL_EXP_CYCLOTOMIC_EXP_SELECTOR: usize = FINAL_EXP_FORBENIUS_MAP_SELECTOR + 1; +pub const FINAL_EXP_MUL_SELECTOR: usize = FINAL_EXP_CYCLOTOMIC_EXP_SELECTOR + 1; +pub const FINAL_EXP_CYCLOTOMIC_SQ_SELECTOR: usize = FINAL_EXP_MUL_SELECTOR + 1; +pub const FINAL_EXP_CONJUGATE_SELECTOR: usize = FINAL_EXP_CYCLOTOMIC_SQ_SELECTOR + 1; +pub const FINAL_EXP_INPUT_OFFSET: usize = FINAL_EXP_CONJUGATE_SELECTOR + 1; +pub const FINAL_EXP_T0_OFFSET: usize = FINAL_EXP_INPUT_OFFSET + 12 * 12; +pub const FINAL_EXP_T1_OFFSET: usize = FINAL_EXP_T0_OFFSET + 12 * 12; +pub const FINAL_EXP_T2_OFFSET: usize = FINAL_EXP_T1_OFFSET + 12 * 12; +pub const FINAL_EXP_T3_OFFSET: usize = FINAL_EXP_T2_OFFSET + 12 * 12; +pub const FINAL_EXP_T4_OFFSET: usize = FINAL_EXP_T3_OFFSET + 12 * 12; +pub const FINAL_EXP_T5_OFFSET: usize = FINAL_EXP_T4_OFFSET + 12 * 12; +pub const FINAL_EXP_T6_OFFSET: usize = FINAL_EXP_T5_OFFSET + 12 * 12; +pub const FINAL_EXP_T7_OFFSET: usize = FINAL_EXP_T6_OFFSET + 12 * 12; +pub const FINAL_EXP_T8_OFFSET: usize = FINAL_EXP_T7_OFFSET + 12 * 12; +pub const FINAL_EXP_T9_OFFSET: usize = FINAL_EXP_T8_OFFSET + 12 * 12; +pub const FINAL_EXP_T10_OFFSET: usize = FINAL_EXP_T9_OFFSET + 12 * 12; +pub const FINAL_EXP_T11_OFFSET: usize = FINAL_EXP_T10_OFFSET + 12 * 12; +pub const FINAL_EXP_T12_OFFSET: usize = FINAL_EXP_T11_OFFSET + 12 * 12; +pub const FINAL_EXP_T13_OFFSET: usize = FINAL_EXP_T12_OFFSET + 12 * 12; +pub const FINAL_EXP_T14_OFFSET: usize = FINAL_EXP_T13_OFFSET + 12 * 12; +pub const FINAL_EXP_T15_OFFSET: usize = FINAL_EXP_T14_OFFSET + 12 * 12; +pub const FINAL_EXP_T16_OFFSET: usize = FINAL_EXP_T15_OFFSET + 12 * 12; +pub const FINAL_EXP_T17_OFFSET: usize = FINAL_EXP_T16_OFFSET + 12 * 12; +pub const FINAL_EXP_T18_OFFSET: usize = FINAL_EXP_T17_OFFSET + 12 * 12; +pub const FINAL_EXP_T19_OFFSET: usize = FINAL_EXP_T18_OFFSET + 12 * 12; 
+pub const FINAL_EXP_T20_OFFSET: usize = FINAL_EXP_T19_OFFSET + 12 * 12; +pub const FINAL_EXP_T21_OFFSET: usize = FINAL_EXP_T20_OFFSET + 12 * 12; +pub const FINAL_EXP_T22_OFFSET: usize = FINAL_EXP_T21_OFFSET + 12 * 12; +pub const FINAL_EXP_T23_OFFSET: usize = FINAL_EXP_T22_OFFSET + 12 * 12; +pub const FINAL_EXP_T24_OFFSET: usize = FINAL_EXP_T23_OFFSET + 12 * 12; +pub const FINAL_EXP_T25_OFFSET: usize = FINAL_EXP_T24_OFFSET + 12 * 12; +pub const FINAL_EXP_T26_OFFSET: usize = FINAL_EXP_T25_OFFSET + 12 * 12; +pub const FINAL_EXP_T27_OFFSET: usize = FINAL_EXP_T26_OFFSET + 12 * 12; +pub const FINAL_EXP_T28_OFFSET: usize = FINAL_EXP_T27_OFFSET + 12 * 12; +pub const FINAL_EXP_T29_OFFSET: usize = FINAL_EXP_T28_OFFSET + 12 * 12; +pub const FINAL_EXP_T30_OFFSET: usize = FINAL_EXP_T29_OFFSET + 12 * 12; +pub const FINAL_EXP_T31_OFFSET: usize = FINAL_EXP_T30_OFFSET + 12 * 12; +pub const FINAL_EXP_OP_OFFSET: usize = FINAL_EXP_T31_OFFSET + 12 * 12; +pub const FINAL_EXP_TOTAL_COLUMNS: usize = FINAL_EXP_OP_OFFSET + CYCLOTOMIC_EXP_TOTAL_COLUMNS; + +// Number of rows required for each operation +pub const FP12_MUL_ROWS: usize = 12; +pub const FP12_FORBENIUS_MAP_ROWS: usize = 12; +pub const CYCLOTOMIC_SQ_ROWS: usize = 12; +pub const CONJUGATE_ROWS: usize = 1; +pub const CYCLOTOMIC_EXP_ROWS: usize = 70 * 12 + 1; + +// Row number where the operation for computing Ti starts. 
+pub const T0_ROW: usize = 0; +pub const T1_ROW: usize = T0_ROW + FP12_FORBENIUS_MAP_ROWS; +pub const T2_ROW: usize = T1_ROW + FP12_MUL_ROWS; +pub const T3_ROW: usize = T2_ROW + FP12_FORBENIUS_MAP_ROWS; +pub const T4_ROW: usize = T3_ROW + FP12_MUL_ROWS; +pub const T5_ROW: usize = T4_ROW + CYCLOTOMIC_EXP_ROWS; +pub const T6_ROW: usize = T5_ROW + CONJUGATE_ROWS; +pub const T7_ROW: usize = T6_ROW + CYCLOTOMIC_SQ_ROWS; +pub const T8_ROW: usize = T7_ROW + CONJUGATE_ROWS; +pub const T9_ROW: usize = T8_ROW + FP12_MUL_ROWS; +pub const T10_ROW: usize = T9_ROW + CYCLOTOMIC_EXP_ROWS; +pub const T11_ROW: usize = T10_ROW + CONJUGATE_ROWS; +pub const T12_ROW: usize = T11_ROW + CYCLOTOMIC_EXP_ROWS; +pub const T13_ROW: usize = T12_ROW + CONJUGATE_ROWS; +pub const T14_ROW: usize = T13_ROW + CYCLOTOMIC_EXP_ROWS; +pub const T15_ROW: usize = T14_ROW + CONJUGATE_ROWS; +pub const T16_ROW: usize = T15_ROW + CYCLOTOMIC_SQ_ROWS; +pub const T17_ROW: usize = T16_ROW + FP12_MUL_ROWS; +pub const T18_ROW: usize = T17_ROW + CYCLOTOMIC_EXP_ROWS; +pub const T19_ROW: usize = T18_ROW + CONJUGATE_ROWS; +pub const T20_ROW: usize = T19_ROW + FP12_MUL_ROWS; +pub const T21_ROW: usize = T20_ROW + FP12_FORBENIUS_MAP_ROWS; +pub const T22_ROW: usize = T21_ROW + FP12_MUL_ROWS; +pub const T23_ROW: usize = T22_ROW + FP12_FORBENIUS_MAP_ROWS; +pub const T24_ROW: usize = T23_ROW + CONJUGATE_ROWS; +pub const T25_ROW: usize = T24_ROW + FP12_MUL_ROWS; +pub const T26_ROW: usize = T25_ROW + FP12_FORBENIUS_MAP_ROWS; +pub const T27_ROW: usize = T26_ROW + CONJUGATE_ROWS; +pub const T28_ROW: usize = T27_ROW + FP12_MUL_ROWS; +pub const T29_ROW: usize = T28_ROW + FP12_MUL_ROWS; +pub const T30_ROW: usize = T29_ROW + FP12_MUL_ROWS; +pub const T31_ROW: usize = T30_ROW + FP12_MUL_ROWS; +pub const TOTAL_ROW: usize = T31_ROW + FP12_MUL_ROWS; + +pub const TOTAL_COLUMNS: usize = FINAL_EXP_TOTAL_COLUMNS; +pub const COLUMNS: usize = TOTAL_COLUMNS; + +// Public inputs to this stark are the input and output of the function 
final_exponentiate. +pub const PIS_INPUT_OFFSET: usize = 0; +pub const PIS_OUTPUT_OFFSET: usize = PIS_INPUT_OFFSET + 24 * 3 * 2; +pub const PUBLIC_INPUTS: usize = PIS_OUTPUT_OFFSET + 24 * 3 * 2; + +// A (Fp) * B (Fp) => C (Fp) +#[derive(Clone, Copy)] +pub struct FinalExponentiateStark, const D: usize> { + num_rows: usize, + _f: std::marker::PhantomData, +} + +/// Fills trace for forbenius map operation. First sets `FINAL_EXP_FORBENIUS_MAP_SELECTOR` to 1 in the rows of the operation. Sets the result of the operaion in all rows of the trace. Then fills the trace for the forbenius map operation. +pub fn fill_trace_forbenius, const D: usize, const C: usize>( + trace: &mut Vec<[F; C]>, + x: &Fp12, + pow: usize, + start_row: usize, + end_row: usize, + output_col: usize, +) -> Fp12 { + let res = x.forbenius_map(pow); + for row in start_row..end_row + 1 { + trace[row][FINAL_EXP_FORBENIUS_MAP_SELECTOR] = F::ONE; + } + for row in 0..trace.len() { + assign_u32_in_series(trace, row, output_col, &res.get_u32_slice().concat()); + } + fill_trace_fp12_forbenius_map(trace, x, pow, start_row, end_row, FINAL_EXP_OP_OFFSET); + res +} + +/// Fills trace for fp12 multiplication operation. First sets `FINAL_EXP_MUL_SELECTOR` to 1 in the rows of the operation. Sets the result of the operaion in all rows of the trace. Then fills the trace for the fp12 multiplication operation. +pub fn fill_trace_mul, const D: usize, const C: usize>( + trace: &mut Vec<[F; C]>, + x: &Fp12, + y: &Fp12, + start_row: usize, + end_row: usize, + output_col: usize, +) -> Fp12 { + let res = (*x) * (*y); + for row in start_row..end_row + 1 { + trace[row][FINAL_EXP_MUL_SELECTOR] = F::ONE; + } + for row in 0..trace.len() { + assign_u32_in_series(trace, row, output_col, &res.get_u32_slice().concat()); + } + fill_trace_fp12_multiplication(trace, &x, &y, start_row, end_row, FINAL_EXP_OP_OFFSET); + res +} + +/// Fills trace for fp12 division (which is basically fp12 multiplication) operation. 
First sets `FINAL_EXP_MUL_SELECTOR` to 1 in the rows of the operation. Sets the result of the operaion in all rows of the trace. Then fills the trace for the fp12 multiplication operation. +pub fn fill_trace_div, const D: usize, const C: usize>( + trace: &mut Vec<[F; C]>, + x: &Fp12, + y: &Fp12, + start_row: usize, + end_row: usize, + output_col: usize, +) -> Fp12 { + let res = *x / *y; + for row in start_row..end_row + 1 { + trace[row][FINAL_EXP_MUL_SELECTOR] = F::ONE; + } + for row in 0..trace.len() { + assign_u32_in_series(trace, row, output_col, &res.get_u32_slice().concat()); + } + fill_trace_fp12_multiplication(trace, &res, &y, start_row, end_row, FINAL_EXP_OP_OFFSET); + res +} + +/// Fills trace for cyclotomic exponent operation. First sets `FINAL_EXP_CYCLOTOMIC_EXP_SELECTOR` to 1 in the rows of the operation. Sets the result of the operaion in all rows of the trace. Then fills the trace for the cyclotomic exponent operation. +pub fn fill_trace_cyc_exp, const D: usize, const C: usize>( + trace: &mut Vec<[F; C]>, + x: &Fp12, + start_row: usize, + end_row: usize, + output_col: usize, +) -> Fp12 { + let res = x.cyclotocmic_exponent(); + for row in start_row..end_row + 1 { + trace[row][FINAL_EXP_CYCLOTOMIC_EXP_SELECTOR] = F::ONE; + } + for row in 0..trace.len() { + assign_u32_in_series(trace, row, output_col, &res.get_u32_slice().concat()); + } + fill_trace_cyclotomic_exp(trace, x, start_row, end_row, FINAL_EXP_OP_OFFSET); + res +} + +/// Fills trace for fp12 conjugate operation. First sets `FINAL_EXP_CONJUGATE_SELECTOR` to 1 in the rows of the operation. Sets the result of the operaion in all rows of the trace. Then fills the trace for the fp12 conjugate operation. 
+pub fn fill_trace_conjugate, const D: usize, const C: usize>( + trace: &mut Vec<[F; C]>, + x: &Fp12, + row: usize, + output_col: usize, +) -> Fp12 { + let res = x.conjugate(); + trace[row][FINAL_EXP_CONJUGATE_SELECTOR] = F::ONE; + for i in 0..trace.len() { + assign_u32_in_series(trace, i, output_col, &res.get_u32_slice().concat()); + } + fill_trace_fp12_conjugate(trace, x, row, FINAL_EXP_OP_OFFSET); + res +} + +/// Fills trace for cyclotomic square operation. First sets `FINAL_EXP_CYCLOTOMIC_SQ_SELECTOR` to 1 in the rows of the operation. Sets the result of the operaion in all rows of the trace. Then fills the trace for the cyclotomic square operation. +pub fn fill_trace_cyc_sq, const D: usize, const C: usize>( + trace: &mut Vec<[F; C]>, + x: &Fp12, + start_row: usize, + end_row: usize, + output_col: usize, +) -> Fp12 { + let res = x.cyclotomic_square(); + for row in start_row..end_row + 1 { + trace[row][FINAL_EXP_CYCLOTOMIC_SQ_SELECTOR] = F::ONE; + } + for row in 0..trace.len() { + assign_u32_in_series(trace, row, output_col, &res.get_u32_slice().concat()); + } + fill_trace_cyclotomic_sq(trace, x, start_row, end_row, FINAL_EXP_OP_OFFSET); + res +} + +// Implement trace generator +impl, const D: usize> FinalExponentiateStark { + pub fn new(num_rows: usize) -> Self { + Self { + num_rows, + _f: std::marker::PhantomData, + } + } + + /// Fills the trace for [final_exponentiate](super::native::Fp12::final_exponentiate) function. First fill the `FINAL_EXP_ROW_SELECTORS` according to the row number. Assigns the input to all rows in `FINAL_EXP_INPUT_OFFSET`, then fills trace for each Ti term as defined in the native function definition. 
+ pub fn generate_trace(&self, x: Fp12) -> Vec<[F; TOTAL_COLUMNS]> { + let mut trace = vec![[F::ZERO; TOTAL_COLUMNS]; self.num_rows]; + for row in 0..trace.len() { + trace[row][FINAL_EXP_ROW_SELECTORS + row] = F::ONE; + assign_u32_in_series( + &mut trace, + row, + FINAL_EXP_INPUT_OFFSET, + &x.get_u32_slice().concat(), + ); + } + let t0 = fill_trace_forbenius(&mut trace, &x, 6, T0_ROW, T1_ROW - 1, FINAL_EXP_T0_OFFSET); + let t1 = fill_trace_div(&mut trace, &t0, &x, T1_ROW, T2_ROW - 1, FINAL_EXP_T1_OFFSET); + let t2 = fill_trace_forbenius(&mut trace, &t1, 2, T2_ROW, T3_ROW - 1, FINAL_EXP_T2_OFFSET); + let t3 = fill_trace_mul( + &mut trace, + &t2, + &t1, + T3_ROW, + T4_ROW - 1, + FINAL_EXP_T3_OFFSET, + ); + let t4 = fill_trace_cyc_exp(&mut trace, &t3, T4_ROW, T5_ROW - 1, FINAL_EXP_T4_OFFSET); + let t5 = fill_trace_conjugate(&mut trace, &t4, T5_ROW, FINAL_EXP_T5_OFFSET); + let t6 = fill_trace_cyc_sq(&mut trace, &t3, T6_ROW, T7_ROW - 1, FINAL_EXP_T6_OFFSET); + let t7 = fill_trace_conjugate(&mut trace, &t6, T7_ROW, FINAL_EXP_T7_OFFSET); + let t8 = fill_trace_mul( + &mut trace, + &t7, + &t5, + T8_ROW, + T9_ROW - 1, + FINAL_EXP_T8_OFFSET, + ); + let t9 = fill_trace_cyc_exp(&mut trace, &t8, T9_ROW, T10_ROW - 1, FINAL_EXP_T9_OFFSET); + let t10 = fill_trace_conjugate(&mut trace, &t9, T10_ROW, FINAL_EXP_T10_OFFSET); + let t11 = fill_trace_cyc_exp(&mut trace, &t10, T11_ROW, T12_ROW - 1, FINAL_EXP_T11_OFFSET); + let t12 = fill_trace_conjugate(&mut trace, &t11, T12_ROW, FINAL_EXP_T12_OFFSET); + let t13 = fill_trace_cyc_exp(&mut trace, &t12, T13_ROW, T14_ROW - 1, FINAL_EXP_T13_OFFSET); + let t14 = fill_trace_conjugate(&mut trace, &t13, T14_ROW, FINAL_EXP_T14_OFFSET); + let t15 = fill_trace_cyc_sq(&mut trace, &t5, T15_ROW, T16_ROW - 1, FINAL_EXP_T15_OFFSET); + let t16 = fill_trace_mul( + &mut trace, + &t14, + &t15, + T16_ROW, + T17_ROW - 1, + FINAL_EXP_T16_OFFSET, + ); + let t17 = fill_trace_cyc_exp(&mut trace, &t16, T17_ROW, T18_ROW - 1, FINAL_EXP_T17_OFFSET); + let t18 = 
fill_trace_conjugate(&mut trace, &t17, T18_ROW, FINAL_EXP_T18_OFFSET); + let t19 = fill_trace_mul( + &mut trace, + &t5, + &t12, + T19_ROW, + T20_ROW - 1, + FINAL_EXP_T19_OFFSET, + ); + let t20 = fill_trace_forbenius( + &mut trace, + &t19, + 2, + T20_ROW, + T21_ROW - 1, + FINAL_EXP_T20_OFFSET, + ); + let t21 = fill_trace_mul( + &mut trace, + &t10, + &t3, + T21_ROW, + T22_ROW - 1, + FINAL_EXP_T21_OFFSET, + ); + let t22 = fill_trace_forbenius( + &mut trace, + &t21, + 3, + T22_ROW, + T23_ROW - 1, + FINAL_EXP_T22_OFFSET, + ); + let t23 = fill_trace_conjugate(&mut trace, &t3, T23_ROW, FINAL_EXP_T23_OFFSET); + let t24 = fill_trace_mul( + &mut trace, + &t16, + &t23, + T24_ROW, + T25_ROW - 1, + FINAL_EXP_T24_OFFSET, + ); + let t25 = fill_trace_forbenius( + &mut trace, + &t24, + 1, + T25_ROW, + T26_ROW - 1, + FINAL_EXP_T25_OFFSET, + ); + let t26 = fill_trace_conjugate(&mut trace, &t8, T26_ROW, FINAL_EXP_T26_OFFSET); + let t27 = fill_trace_mul( + &mut trace, + &t18, + &t26, + T27_ROW, + T28_ROW - 1, + FINAL_EXP_T27_OFFSET, + ); + let t28 = fill_trace_mul( + &mut trace, + &t27, + &t3, + T28_ROW, + T29_ROW - 1, + FINAL_EXP_T28_OFFSET, + ); + let t29 = fill_trace_mul( + &mut trace, + &t20, + &t22, + T29_ROW, + T30_ROW - 1, + FINAL_EXP_T29_OFFSET, + ); + let t30 = fill_trace_mul( + &mut trace, + &t29, + &t25, + T30_ROW, + T31_ROW - 1, + FINAL_EXP_T30_OFFSET, + ); + let _t31 = fill_trace_mul( + &mut trace, + &t30, + &t28, + T31_ROW, + TOTAL_ROW - 1, + FINAL_EXP_T31_OFFSET, + ); + trace + } +} + +/// Constraints `FINAL_EXP_FORBENIUS_MAP_SELECTOR` to be 1 and other op selectors to be 0 in the `FP12_FORBENIUS_MAP_ROWS` starting from `row`. Constraints the values in input columns to input of forbenius_map operation trace. Constraints the output of forbenius_map trace to the values set in output columns. +fn add_constraints_forbenius, const D: usize, FE, P, const D2: usize>( + local_values: &[P], + yield_constr: &mut ConstraintConsumer

, + row: usize, + input_col: usize, + output_col: usize, + pow: usize, +) where + FE: FieldExtension, + P: PackedField, +{ + for i in row..row + FP12_FORBENIUS_MAP_ROWS { + yield_constr.constraint( + local_values[FINAL_EXP_ROW_SELECTORS + i] + * (local_values[FINAL_EXP_FORBENIUS_MAP_SELECTOR] - P::ONES), + ); + yield_constr.constraint( + local_values[FINAL_EXP_ROW_SELECTORS + i] + * local_values[FINAL_EXP_CYCLOTOMIC_EXP_SELECTOR], + ); + yield_constr.constraint( + local_values[FINAL_EXP_ROW_SELECTORS + i] * local_values[FINAL_EXP_MUL_SELECTOR], + ); + yield_constr.constraint( + local_values[FINAL_EXP_ROW_SELECTORS + i] + * local_values[FINAL_EXP_CYCLOTOMIC_SQ_SELECTOR], + ); + yield_constr.constraint( + local_values[FINAL_EXP_ROW_SELECTORS + i] * local_values[FINAL_EXP_CONJUGATE_SELECTOR], + ); + } + for i in 0..24 * 3 * 2 { + yield_constr.constraint( + local_values[FINAL_EXP_ROW_SELECTORS + row] + * (local_values[input_col + i] + - local_values[FINAL_EXP_OP_OFFSET + FP12_FORBENIUS_MAP_INPUT_OFFSET + i]), + ); + } + yield_constr.constraint( + local_values[FINAL_EXP_ROW_SELECTORS + row] + * (local_values[FINAL_EXP_OP_OFFSET + FP12_FORBENIUS_MAP_POW_OFFSET] + - FE::from_canonical_usize(pow)), + ); + for i in 0..12 { + for j in 0..12 { + let offset = if j == 0 { + FP12_FORBENIUS_MAP_R0_CALC_OFFSET + + FP6_FORBENIUS_MAP_X_CALC_OFFSET + + FP2_FORBENIUS_MAP_INPUT_OFFSET + } else if j == 1 { + FP12_FORBENIUS_MAP_R0_CALC_OFFSET + + FP6_FORBENIUS_MAP_X_CALC_OFFSET + + FP2_FORBENIUS_MAP_T0_CALC_OFFSET + + FP_MULTIPLICATION_TOTAL_COLUMNS + + REDUCED_OFFSET + } else if j == 2 { + FP12_FORBENIUS_MAP_R0_CALC_OFFSET + + FP6_FORBENIUS_MAP_Y_CALC_OFFSET + + Z1_REDUCE_OFFSET + + REDUCED_OFFSET + } else if j == 3 { + FP12_FORBENIUS_MAP_R0_CALC_OFFSET + + FP6_FORBENIUS_MAP_Y_CALC_OFFSET + + Z2_REDUCE_OFFSET + + REDUCED_OFFSET + } else if j == 4 { + FP12_FORBENIUS_MAP_R0_CALC_OFFSET + + FP6_FORBENIUS_MAP_Z_CALC_OFFSET + + Z1_REDUCE_OFFSET + + REDUCED_OFFSET + } else if j == 5 { + 
FP12_FORBENIUS_MAP_R0_CALC_OFFSET + + FP6_FORBENIUS_MAP_Z_CALC_OFFSET + + Z2_REDUCE_OFFSET + + REDUCED_OFFSET + } else if j == 6 { + FP12_FORBENIUS_MAP_C0_CALC_OFFSET + Z1_REDUCE_OFFSET + REDUCED_OFFSET + } else if j == 7 { + FP12_FORBENIUS_MAP_C0_CALC_OFFSET + Z2_REDUCE_OFFSET + REDUCED_OFFSET + } else if j == 8 { + FP12_FORBENIUS_MAP_C1_CALC_OFFSET + Z1_REDUCE_OFFSET + REDUCED_OFFSET + } else if j == 9 { + FP12_FORBENIUS_MAP_C1_CALC_OFFSET + Z2_REDUCE_OFFSET + REDUCED_OFFSET + } else if j == 10 { + FP12_FORBENIUS_MAP_C2_CALC_OFFSET + Z1_REDUCE_OFFSET + REDUCED_OFFSET + } else { + FP12_FORBENIUS_MAP_C2_CALC_OFFSET + Z2_REDUCE_OFFSET + REDUCED_OFFSET + }; + yield_constr.constraint( + local_values[FINAL_EXP_ROW_SELECTORS + row] + * (local_values[FINAL_EXP_OP_OFFSET + offset + i] + - local_values[output_col + j * 12 + i]), + ); + } + } +} + +pub fn add_constraints_forbenius_ext_circuit, const D: usize>( + builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder, + yield_constr: &mut starky::constraint_consumer::RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + row: usize, + input_col: usize, + output_col: usize, + pow: usize, +) { + for i in row..row + FP12_FORBENIUS_MAP_ROWS { + let one = builder.constant_extension(F::Extension::ONE); + + let c = builder.sub_extension(local_values[FINAL_EXP_FORBENIUS_MAP_SELECTOR], one); + let c = builder.mul_extension(local_values[FINAL_EXP_ROW_SELECTORS + i], c); + yield_constr.constraint(builder, c); + + let c = builder.mul_extension( + local_values[FINAL_EXP_ROW_SELECTORS + i], + local_values[FINAL_EXP_CYCLOTOMIC_EXP_SELECTOR], + ); + yield_constr.constraint(builder, c); + + let c = builder.mul_extension( + local_values[FINAL_EXP_ROW_SELECTORS + i], + local_values[FINAL_EXP_MUL_SELECTOR], + ); + yield_constr.constraint(builder, c); + + let c = builder.mul_extension( + local_values[FINAL_EXP_ROW_SELECTORS + i], + local_values[FINAL_EXP_CYCLOTOMIC_SQ_SELECTOR], + ); + yield_constr.constraint(builder, c); + + 
let c = builder.mul_extension( + local_values[FINAL_EXP_ROW_SELECTORS + i], + local_values[FINAL_EXP_CONJUGATE_SELECTOR], + ); + yield_constr.constraint(builder, c); + } + for i in 0..24 * 3 * 2 { + let c = builder.sub_extension( + local_values[input_col + i], + local_values[FINAL_EXP_OP_OFFSET + FP12_FORBENIUS_MAP_INPUT_OFFSET + i], + ); + let c = builder.mul_extension(local_values[FINAL_EXP_ROW_SELECTORS + row], c); + yield_constr.constraint(builder, c); + } + let pow = builder.constant_extension(F::Extension::from_canonical_usize(pow)); + let c = builder.sub_extension( + local_values[FINAL_EXP_OP_OFFSET + FP12_FORBENIUS_MAP_POW_OFFSET], + pow, + ); + let c = builder.mul_extension(local_values[FINAL_EXP_ROW_SELECTORS + row], c); + yield_constr.constraint(builder, c); + + for i in 0..12 { + for j in 0..12 { + let offset = if j == 0 { + FP12_FORBENIUS_MAP_R0_CALC_OFFSET + + FP6_FORBENIUS_MAP_X_CALC_OFFSET + + FP2_FORBENIUS_MAP_INPUT_OFFSET + } else if j == 1 { + FP12_FORBENIUS_MAP_R0_CALC_OFFSET + + FP6_FORBENIUS_MAP_X_CALC_OFFSET + + FP2_FORBENIUS_MAP_T0_CALC_OFFSET + + FP_MULTIPLICATION_TOTAL_COLUMNS + + REDUCED_OFFSET + } else if j == 2 { + FP12_FORBENIUS_MAP_R0_CALC_OFFSET + + FP6_FORBENIUS_MAP_Y_CALC_OFFSET + + Z1_REDUCE_OFFSET + + REDUCED_OFFSET + } else if j == 3 { + FP12_FORBENIUS_MAP_R0_CALC_OFFSET + + FP6_FORBENIUS_MAP_Y_CALC_OFFSET + + Z2_REDUCE_OFFSET + + REDUCED_OFFSET + } else if j == 4 { + FP12_FORBENIUS_MAP_R0_CALC_OFFSET + + FP6_FORBENIUS_MAP_Z_CALC_OFFSET + + Z1_REDUCE_OFFSET + + REDUCED_OFFSET + } else if j == 5 { + FP12_FORBENIUS_MAP_R0_CALC_OFFSET + + FP6_FORBENIUS_MAP_Z_CALC_OFFSET + + Z2_REDUCE_OFFSET + + REDUCED_OFFSET + } else if j == 6 { + FP12_FORBENIUS_MAP_C0_CALC_OFFSET + Z1_REDUCE_OFFSET + REDUCED_OFFSET + } else if j == 7 { + FP12_FORBENIUS_MAP_C0_CALC_OFFSET + Z2_REDUCE_OFFSET + REDUCED_OFFSET + } else if j == 8 { + FP12_FORBENIUS_MAP_C1_CALC_OFFSET + Z1_REDUCE_OFFSET + REDUCED_OFFSET + } else if j == 9 { + 
FP12_FORBENIUS_MAP_C1_CALC_OFFSET + Z2_REDUCE_OFFSET + REDUCED_OFFSET + } else if j == 10 { + FP12_FORBENIUS_MAP_C2_CALC_OFFSET + Z1_REDUCE_OFFSET + REDUCED_OFFSET + } else { + FP12_FORBENIUS_MAP_C2_CALC_OFFSET + Z2_REDUCE_OFFSET + REDUCED_OFFSET + }; + let c = builder.sub_extension( + local_values[FINAL_EXP_OP_OFFSET + offset + i], + local_values[output_col + j * 12 + i], + ); + let c = builder.mul_extension(local_values[FINAL_EXP_ROW_SELECTORS + row], c); + yield_constr.constraint(builder, c); + } + } +} + +/// Constraints `FINAL_EXP_MUL_SELECTOR` to be 1, and other op selectors to be 0 in the `FP12_MUL_ROWS` starting from `row`. Constraints the values in input columns to input of fp12_multiplication operation trace. Constraints the output of fp12_multiplication trace to the values set in output columns. +fn add_constraints_mul, const D: usize, FE, P, const D2: usize>( + local_values: &[P], + yield_constr: &mut ConstraintConsumer

, + row: usize, + x_col: usize, + y_col: usize, + res_col: usize, +) where + FE: FieldExtension, + P: PackedField, +{ + for i in row..row + FP12_MUL_ROWS { + yield_constr.constraint( + local_values[FINAL_EXP_ROW_SELECTORS + i] + * local_values[FINAL_EXP_FORBENIUS_MAP_SELECTOR], + ); + yield_constr.constraint( + local_values[FINAL_EXP_ROW_SELECTORS + i] + * local_values[FINAL_EXP_CYCLOTOMIC_EXP_SELECTOR], + ); + yield_constr.constraint( + local_values[FINAL_EXP_ROW_SELECTORS + i] + * (local_values[FINAL_EXP_MUL_SELECTOR] - P::ONES), + ); + yield_constr.constraint( + local_values[FINAL_EXP_ROW_SELECTORS + i] + * local_values[FINAL_EXP_CYCLOTOMIC_SQ_SELECTOR], + ); + yield_constr.constraint( + local_values[FINAL_EXP_ROW_SELECTORS + i] * local_values[FINAL_EXP_CONJUGATE_SELECTOR], + ); + } + for i in 0..24 * 3 * 2 { + yield_constr.constraint( + local_values[FINAL_EXP_ROW_SELECTORS + row] + * (local_values[x_col + i] + - local_values[FINAL_EXP_OP_OFFSET + FP12_MUL_X_INPUT_OFFSET + i]), + ); + yield_constr.constraint( + local_values[FINAL_EXP_ROW_SELECTORS + row] + * (local_values[y_col + i] + - local_values[FINAL_EXP_OP_OFFSET + FP12_MUL_Y_INPUT_OFFSET + i]), + ); + } + for i in 0..12 { + for j in 0..6 { + for k in 0..2 { + let x_y = if k == 0 { + FP12_MUL_X_CALC_OFFSET + FP6_ADDITION_TOTAL + } else { + FP12_MUL_Y_CALC_OFFSET + FP6_ADDITION_TOTAL + FP6_SUBTRACTION_TOTAL + }; + let offset = x_y + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i; + yield_constr.constraint( + local_values[FINAL_EXP_ROW_SELECTORS + row] + * (local_values[res_col + k * 24 * 3 + j * 12 + i] + - local_values[FINAL_EXP_OP_OFFSET + offset]), + ); + } + } + } +} + +pub fn add_constraints_mul_ext_circuit, const D: usize>( + builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder, + yield_constr: &mut starky::constraint_consumer::RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + row: usize, + x_col: usize, + y_col: usize, + res_col: usize, 
+) { + for i in row..row + FP12_MUL_ROWS { + let one = builder.constant_extension(F::Extension::ONE); + + let c = builder.mul_extension( + local_values[FINAL_EXP_ROW_SELECTORS + i], + local_values[FINAL_EXP_FORBENIUS_MAP_SELECTOR], + ); + yield_constr.constraint(builder, c); + + let c = builder.mul_extension( + local_values[FINAL_EXP_ROW_SELECTORS + i], + local_values[FINAL_EXP_CYCLOTOMIC_EXP_SELECTOR], + ); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension(local_values[FINAL_EXP_MUL_SELECTOR], one); + let c = builder.mul_extension(local_values[FINAL_EXP_ROW_SELECTORS + i], c); + yield_constr.constraint(builder, c); + + let c = builder.mul_extension( + local_values[FINAL_EXP_ROW_SELECTORS + i], + local_values[FINAL_EXP_CYCLOTOMIC_SQ_SELECTOR], + ); + yield_constr.constraint(builder, c); + + let c = builder.mul_extension( + local_values[FINAL_EXP_ROW_SELECTORS + i], + local_values[FINAL_EXP_CONJUGATE_SELECTOR], + ); + yield_constr.constraint(builder, c); + } + for i in 0..24 * 3 * 2 { + let c = builder.sub_extension( + local_values[x_col + i], + local_values[FINAL_EXP_OP_OFFSET + FP12_MUL_X_INPUT_OFFSET + i], + ); + let c = builder.mul_extension(local_values[FINAL_EXP_ROW_SELECTORS + row], c); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values[y_col + i], + local_values[FINAL_EXP_OP_OFFSET + FP12_MUL_Y_INPUT_OFFSET + i], + ); + let c = builder.mul_extension(local_values[FINAL_EXP_ROW_SELECTORS + row], c); + yield_constr.constraint(builder, c); + } + for i in 0..12 { + for j in 0..6 { + for k in 0..2 { + let x_y = if k == 0 { + FP12_MUL_X_CALC_OFFSET + FP6_ADDITION_TOTAL + } else { + FP12_MUL_Y_CALC_OFFSET + FP6_ADDITION_TOTAL + FP6_SUBTRACTION_TOTAL + }; + let offset = x_y + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i; + let c = builder.sub_extension( + local_values[res_col + k * 24 * 3 + j * 12 + i], + local_values[FINAL_EXP_OP_OFFSET + offset], + ); + let c = 
builder.mul_extension(local_values[FINAL_EXP_ROW_SELECTORS + row], c); + yield_constr.constraint(builder, c); + } + } + } +} + +/// Constraints `FINAL_EXP_CYCLOTOMIC_EXP_SELECTOR` to be 1 and other op selectors to be 0 in the `CYCLOTOMIC_EXP_ROWS` starting from `row`. Constraints the values in input columns to input of cyclotomic_exponent operation trace. Constraints the output of cyclotomic_exponent trace to the values set in output columns. +fn add_constraints_cyc_exp, const D: usize, FE, P, const D2: usize>( + local_values: &[P], + yield_constr: &mut ConstraintConsumer

, + row: usize, + input_col: usize, + output_col: usize, +) where + FE: FieldExtension, + P: PackedField, +{ + for i in row..row + CYCLOTOMIC_EXP_ROWS { + yield_constr.constraint( + local_values[FINAL_EXP_ROW_SELECTORS + i] + * local_values[FINAL_EXP_FORBENIUS_MAP_SELECTOR], + ); + yield_constr.constraint( + local_values[FINAL_EXP_ROW_SELECTORS + i] + * (local_values[FINAL_EXP_CYCLOTOMIC_EXP_SELECTOR] - P::ONES), + ); + yield_constr.constraint( + local_values[FINAL_EXP_ROW_SELECTORS + i] * local_values[FINAL_EXP_MUL_SELECTOR], + ); + yield_constr.constraint( + local_values[FINAL_EXP_ROW_SELECTORS + i] + * local_values[FINAL_EXP_CYCLOTOMIC_SQ_SELECTOR], + ); + yield_constr.constraint( + local_values[FINAL_EXP_ROW_SELECTORS + i] * local_values[FINAL_EXP_CONJUGATE_SELECTOR], + ); + } + for i in 0..24 * 3 * 2 { + yield_constr.constraint( + local_values[FINAL_EXP_ROW_SELECTORS + row] + * (local_values[input_col + i] + - local_values[FINAL_EXP_OP_OFFSET + INPUT_OFFSET + i]), + ); + } + for i in 0..24 * 3 * 2 { + yield_constr.constraint( + local_values[FINAL_EXP_ROW_SELECTORS + row + CYCLOTOMIC_EXP_ROWS - 1] + * local_values[FINAL_EXP_OP_OFFSET + RES_ROW_SELECTOR_OFFSET] + * (local_values[output_col + i] - local_values[FINAL_EXP_OP_OFFSET + Z_OFFSET + i]), + ); + } +} + +pub fn add_constraints_cyc_exp_ext_circuit, const D: usize>( + builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder, + yield_constr: &mut starky::constraint_consumer::RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + row: usize, + input_col: usize, + output_col: usize, +) { + for i in row..row + CYCLOTOMIC_EXP_ROWS { + let one = builder.constant_extension(F::Extension::ONE); + + let c = builder.mul_extension( + local_values[FINAL_EXP_ROW_SELECTORS + i], + local_values[FINAL_EXP_FORBENIUS_MAP_SELECTOR], + ); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension(local_values[FINAL_EXP_CYCLOTOMIC_EXP_SELECTOR], one); + let c = 
builder.mul_extension(local_values[FINAL_EXP_ROW_SELECTORS + i], c); + yield_constr.constraint(builder, c); + + let c = builder.mul_extension( + local_values[FINAL_EXP_ROW_SELECTORS + i], + local_values[FINAL_EXP_MUL_SELECTOR], + ); + yield_constr.constraint(builder, c); + + let c = builder.mul_extension( + local_values[FINAL_EXP_ROW_SELECTORS + i], + local_values[FINAL_EXP_CYCLOTOMIC_SQ_SELECTOR], + ); + yield_constr.constraint(builder, c); + + let c = builder.mul_extension( + local_values[FINAL_EXP_ROW_SELECTORS + i], + local_values[FINAL_EXP_CONJUGATE_SELECTOR], + ); + yield_constr.constraint(builder, c); + } + for i in 0..24 * 3 * 2 { + let c = builder.sub_extension( + local_values[input_col + i], + local_values[FINAL_EXP_OP_OFFSET + INPUT_OFFSET + i], + ); + let c = builder.mul_extension(local_values[FINAL_EXP_ROW_SELECTORS + row], c); + yield_constr.constraint(builder, c); + } + for i in 0..24 * 3 * 2 { + let c = builder.sub_extension( + local_values[output_col + i], + local_values[FINAL_EXP_OP_OFFSET + Z_OFFSET + i], + ); + let c = builder.mul_extension( + local_values[FINAL_EXP_OP_OFFSET + RES_ROW_SELECTOR_OFFSET], + c, + ); + let c = builder.mul_extension( + local_values[FINAL_EXP_ROW_SELECTORS + row + CYCLOTOMIC_EXP_ROWS - 1], + c, + ); + yield_constr.constraint(builder, c); + } +} + +/// Constraints `FINAL_EXP_CONJUGATE_SELECTOR` to be 1 and other op selectors to be 0 in the `CONJUGATE_ROWS` starting from `row`. Constraints the values in input columns to input of fp12_conjugate operation trace. Constraints the output of fp12_conjugate trace to the values set in output columns. +pub fn add_constraints_conjugate< + F: RichField + Extendable, + const D: usize, + FE, + P, + const D2: usize, +>( + local_values: &[P], + yield_constr: &mut ConstraintConsumer

, + row: usize, + input_col: usize, + output_col: usize, +) where + FE: FieldExtension, + P: PackedField, +{ + yield_constr.constraint( + local_values[FINAL_EXP_ROW_SELECTORS + row] + * local_values[FINAL_EXP_FORBENIUS_MAP_SELECTOR], + ); + yield_constr.constraint( + local_values[FINAL_EXP_ROW_SELECTORS + row] + * local_values[FINAL_EXP_CYCLOTOMIC_EXP_SELECTOR], + ); + yield_constr.constraint( + local_values[FINAL_EXP_ROW_SELECTORS + row] * local_values[FINAL_EXP_MUL_SELECTOR], + ); + yield_constr.constraint( + local_values[FINAL_EXP_ROW_SELECTORS + row] + * local_values[FINAL_EXP_CYCLOTOMIC_SQ_SELECTOR], + ); + yield_constr.constraint( + local_values[FINAL_EXP_ROW_SELECTORS + row] + * (local_values[FINAL_EXP_CONJUGATE_SELECTOR] - P::ONES), + ); + for i in 0..24 * 3 * 2 { + yield_constr.constraint( + local_values[FINAL_EXP_ROW_SELECTORS + row] + * (local_values[input_col + i] + - local_values[FINAL_EXP_OP_OFFSET + FP12_CONJUGATE_INPUT_OFFSET + i]), + ); + } + for i in 0..24 * 3 * 2 { + yield_constr.constraint( + local_values[FINAL_EXP_ROW_SELECTORS + row] + * (local_values[output_col + i] + - local_values[FINAL_EXP_OP_OFFSET + FP12_CONJUGATE_OUTPUT_OFFSET + i]), + ); + } +} + +pub fn add_constraints_conjugate_ext_circuit, const D: usize>( + builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder, + yield_constr: &mut starky::constraint_consumer::RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + row: usize, + input_col: usize, + output_col: usize, +) { + let one = builder.constant_extension(F::Extension::ONE); + + let c = builder.mul_extension( + local_values[FINAL_EXP_ROW_SELECTORS + row], + local_values[FINAL_EXP_FORBENIUS_MAP_SELECTOR], + ); + yield_constr.constraint(builder, c); + + let c = builder.mul_extension( + local_values[FINAL_EXP_ROW_SELECTORS + row], + local_values[FINAL_EXP_CYCLOTOMIC_EXP_SELECTOR], + ); + yield_constr.constraint(builder, c); + + let c = builder.mul_extension( + local_values[FINAL_EXP_ROW_SELECTORS + row], + 
local_values[FINAL_EXP_MUL_SELECTOR], + ); + yield_constr.constraint(builder, c); + + let c = builder.mul_extension( + local_values[FINAL_EXP_ROW_SELECTORS + row], + local_values[FINAL_EXP_CYCLOTOMIC_SQ_SELECTOR], + ); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension(local_values[FINAL_EXP_CONJUGATE_SELECTOR], one); + let c = builder.mul_extension(local_values[FINAL_EXP_ROW_SELECTORS + row], c); + yield_constr.constraint(builder, c); + + for i in 0..24 * 3 * 2 { + let c = builder.sub_extension( + local_values[input_col + i], + local_values[FINAL_EXP_OP_OFFSET + FP12_CONJUGATE_INPUT_OFFSET + i], + ); + let c = builder.mul_extension(local_values[FINAL_EXP_ROW_SELECTORS + row], c); + yield_constr.constraint(builder, c); + } + for i in 0..24 * 3 * 2 { + let c = builder.sub_extension( + local_values[output_col + i], + local_values[FINAL_EXP_OP_OFFSET + FP12_CONJUGATE_OUTPUT_OFFSET + i], + ); + let c = builder.mul_extension(local_values[FINAL_EXP_ROW_SELECTORS + row], c); + yield_constr.constraint(builder, c); + } +} + +/// Constraints `FINAL_EXP_CYCLOTOMIC_SQ_SELECTOR` to be 1 and other op selectors to be 0 in the `CYCLOTOMIC_SQ_ROWS` starting from `row`. Constraints the values in input columns to input of cyclotomic_square operation trace. Constraints the output of cyclotomic_square trace to the values set in output columns. +pub fn add_constraints_cyc_sq< + F: RichField + Extendable, + const D: usize, + FE, + P, + const D2: usize, +>( + local_values: &[P], + yield_constr: &mut ConstraintConsumer

, + row: usize, + input_col: usize, + output_col: usize, +) where + FE: FieldExtension, + P: PackedField, +{ + for i in row..row + CYCLOTOMIC_SQ_ROWS { + yield_constr.constraint( + local_values[FINAL_EXP_ROW_SELECTORS + i] + * local_values[FINAL_EXP_FORBENIUS_MAP_SELECTOR], + ); + yield_constr.constraint( + local_values[FINAL_EXP_ROW_SELECTORS + i] + * local_values[FINAL_EXP_CYCLOTOMIC_EXP_SELECTOR], + ); + yield_constr.constraint( + local_values[FINAL_EXP_ROW_SELECTORS + i] * local_values[FINAL_EXP_MUL_SELECTOR], + ); + yield_constr.constraint( + local_values[FINAL_EXP_ROW_SELECTORS + i] + * (local_values[FINAL_EXP_CYCLOTOMIC_SQ_SELECTOR] - P::ONES), + ); + yield_constr.constraint( + local_values[FINAL_EXP_ROW_SELECTORS + i] * local_values[FINAL_EXP_CONJUGATE_SELECTOR], + ); + } + for i in 0..24 * 3 * 2 { + yield_constr.constraint( + local_values[FINAL_EXP_ROW_SELECTORS + row] + * (local_values[input_col + i] + - local_values[FINAL_EXP_OP_OFFSET + CYCLOTOMIC_SQ_INPUT_OFFSET + i]), + ); + } + for i in 0..12 { + for j in 0..6 { + let c_offset = if j == 0 { + CYCLOTOMIC_SQ_C0_CALC_OFFSET + } else if j == 1 { + CYCLOTOMIC_SQ_C1_CALC_OFFSET + } else if j == 2 { + CYCLOTOMIC_SQ_C2_CALC_OFFSET + } else if j == 3 { + CYCLOTOMIC_SQ_C3_CALC_OFFSET + } else if j == 4 { + CYCLOTOMIC_SQ_C4_CALC_OFFSET + } else { + CYCLOTOMIC_SQ_C5_CALC_OFFSET + }; + for k in 0..2 { + let offset = c_offset + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * k + + FP_SINGLE_REDUCED_OFFSET; + yield_constr.constraint( + local_values[FINAL_EXP_ROW_SELECTORS + row] + * (local_values[FINAL_EXP_OP_OFFSET + offset + i] + - local_values[output_col + j * 24 + k * 12 + i]), + ); + } + } + } +} + +pub fn add_constraints_cyc_sq_ext_circuit, const D: usize>( + builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder, + yield_constr: &mut starky::constraint_consumer::RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + row: usize, + input_col: usize, + output_col: 
usize, +) { + for i in row..row + CYCLOTOMIC_SQ_ROWS { + let one = builder.constant_extension(F::Extension::ONE); + + let c = builder.mul_extension( + local_values[FINAL_EXP_ROW_SELECTORS + i], + local_values[FINAL_EXP_FORBENIUS_MAP_SELECTOR], + ); + yield_constr.constraint(builder, c); + + let c = builder.mul_extension( + local_values[FINAL_EXP_ROW_SELECTORS + i], + local_values[FINAL_EXP_CYCLOTOMIC_EXP_SELECTOR], + ); + yield_constr.constraint(builder, c); + + let c = builder.mul_extension( + local_values[FINAL_EXP_ROW_SELECTORS + i], + local_values[FINAL_EXP_MUL_SELECTOR], + ); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension(local_values[FINAL_EXP_CYCLOTOMIC_SQ_SELECTOR], one); + let c = builder.mul_extension(local_values[FINAL_EXP_ROW_SELECTORS + i], c); + yield_constr.constraint(builder, c); + + let c = builder.mul_extension( + local_values[FINAL_EXP_ROW_SELECTORS + i], + local_values[FINAL_EXP_CONJUGATE_SELECTOR], + ); + yield_constr.constraint(builder, c); + } + for i in 0..24 * 3 * 2 { + let c = builder.sub_extension( + local_values[input_col + i], + local_values[FINAL_EXP_OP_OFFSET + CYCLOTOMIC_SQ_INPUT_OFFSET + i], + ); + let c = builder.mul_extension(local_values[FINAL_EXP_ROW_SELECTORS + row], c); + yield_constr.constraint(builder, c); + } + for i in 0..12 { + for j in 0..6 { + let c_offset = if j == 0 { + CYCLOTOMIC_SQ_C0_CALC_OFFSET + } else if j == 1 { + CYCLOTOMIC_SQ_C1_CALC_OFFSET + } else if j == 2 { + CYCLOTOMIC_SQ_C2_CALC_OFFSET + } else if j == 3 { + CYCLOTOMIC_SQ_C3_CALC_OFFSET + } else if j == 4 { + CYCLOTOMIC_SQ_C4_CALC_OFFSET + } else { + CYCLOTOMIC_SQ_C5_CALC_OFFSET + }; + for k in 0..2 { + let offset = c_offset + + FP2_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * k + + FP_SINGLE_REDUCED_OFFSET; + let c = builder.sub_extension( + local_values[FINAL_EXP_OP_OFFSET + offset + i], + local_values[output_col + j * 24 + k * 12 + i], + ); + let c = 
builder.mul_extension(local_values[FINAL_EXP_ROW_SELECTORS + row], c); + yield_constr.constraint(builder, c); + } + } + } +} + +/* + Constraints for final_exponentiate trace (super::native::Fp12::final_exponentiate) + * Constraints input of trace to public inputs + * Constraints T31 of trace (result of final exponentiate) to public inputs + * Constraints `FINAL_EXP_ROW_SELECTORS` for row=0, to 1 in the first row. + * Constraints the `FINAL_EXP_ROW_SELECTORS` to rotate right by 1 in each next row + * Constraints `FINAL_EXP_ROW_SELECTORS` for row=8192, to 1 in the last row. + * Constraints the inputs of the trace and the Ti's to be same across all rows. Ti's defined in the native function. + * Constraints the operation selectors, inputs and outputs for the operation for each Ti. + * Constraints for all the operations with operation selector, i.e. those constraints will only be active if the operation selector is set 1. +*/ + +// Implement constraint generator +impl, const D: usize> Stark for FinalExponentiateStark { + type EvaluationFrame = StarkFrame + where + FE: FieldExtension, + P: PackedField; + + fn eval_packed_generic( + &self, + vars: &Self::EvaluationFrame, + yield_constr: &mut ConstraintConsumer

, + ) where + FE: FieldExtension, + P: PackedField, + { + let local_values = vars.get_local_values(); + let next_values = vars.get_next_values(); + let public_inputs = vars.get_public_inputs(); + + // ---- + for i in 0..24 * 3 * 2 { + yield_constr.constraint( + local_values[FINAL_EXP_INPUT_OFFSET + i] - public_inputs[PIS_INPUT_OFFSET + i], + ); + yield_constr.constraint( + local_values[FINAL_EXP_T31_OFFSET + i] - public_inputs[PIS_OUTPUT_OFFSET + i], + ); + } + + for i in 0..self.num_rows { + let val = if i == 0 { P::ONES } else { P::ZEROS }; + yield_constr.constraint_first_row(local_values[FINAL_EXP_ROW_SELECTORS + i] - val); + } + for i in 0..self.num_rows - 1 { + yield_constr.constraint_transition( + local_values[FINAL_EXP_ROW_SELECTORS + i] + - next_values[FINAL_EXP_ROW_SELECTORS + i + 1], + ); + } + for i in 0..self.num_rows { + let val = if i == self.num_rows - 1 { + P::ONES + } else { + P::ZEROS + }; + yield_constr.constraint_last_row(local_values[FINAL_EXP_ROW_SELECTORS + i] - val); + } + + for i in 0..24 * 3 * 2 { + yield_constr.constraint_transition( + local_values[FINAL_EXP_INPUT_OFFSET + i] - next_values[FINAL_EXP_INPUT_OFFSET + i], + ); + for j in 0..32 { + let t = if j == 0 { + FINAL_EXP_T0_OFFSET + } else if j == 1 { + FINAL_EXP_T1_OFFSET + } else if j == 2 { + FINAL_EXP_T2_OFFSET + } else if j == 3 { + FINAL_EXP_T3_OFFSET + } else if j == 4 { + FINAL_EXP_T4_OFFSET + } else if j == 5 { + FINAL_EXP_T5_OFFSET + } else if j == 6 { + FINAL_EXP_T6_OFFSET + } else if j == 7 { + FINAL_EXP_T7_OFFSET + } else if j == 8 { + FINAL_EXP_T8_OFFSET + } else if j == 9 { + FINAL_EXP_T9_OFFSET + } else if j == 10 { + FINAL_EXP_T10_OFFSET + } else if j == 11 { + FINAL_EXP_T11_OFFSET + } else if j == 12 { + FINAL_EXP_T12_OFFSET + } else if j == 13 { + FINAL_EXP_T13_OFFSET + } else if j == 14 { + FINAL_EXP_T14_OFFSET + } else if j == 15 { + FINAL_EXP_T15_OFFSET + } else if j == 16 { + FINAL_EXP_T16_OFFSET + } else if j == 17 { + FINAL_EXP_T17_OFFSET + } else if j == 18 { 
+ FINAL_EXP_T18_OFFSET + } else if j == 19 { + FINAL_EXP_T19_OFFSET + } else if j == 20 { + FINAL_EXP_T20_OFFSET + } else if j == 21 { + FINAL_EXP_T21_OFFSET + } else if j == 22 { + FINAL_EXP_T22_OFFSET + } else if j == 23 { + FINAL_EXP_T23_OFFSET + } else if j == 24 { + FINAL_EXP_T24_OFFSET + } else if j == 25 { + FINAL_EXP_T25_OFFSET + } else if j == 26 { + FINAL_EXP_T26_OFFSET + } else if j == 27 { + FINAL_EXP_T27_OFFSET + } else if j == 28 { + FINAL_EXP_T28_OFFSET + } else if j == 29 { + FINAL_EXP_T29_OFFSET + } else if j == 30 { + FINAL_EXP_T30_OFFSET + } else { + FINAL_EXP_T31_OFFSET + }; + yield_constr.constraint_transition(local_values[t + i] - next_values[t + i]); + } + } + + // T0 + add_constraints_forbenius( + local_values, + yield_constr, + T0_ROW, + FINAL_EXP_INPUT_OFFSET, + FINAL_EXP_T0_OFFSET, + 6, + ); + + // T1 + add_constraints_mul( + local_values, + yield_constr, + T1_ROW, + FINAL_EXP_T1_OFFSET, + FINAL_EXP_INPUT_OFFSET, + FINAL_EXP_T0_OFFSET, + ); + + // T2 + add_constraints_forbenius( + local_values, + yield_constr, + T2_ROW, + FINAL_EXP_T1_OFFSET, + FINAL_EXP_T2_OFFSET, + 2, + ); + + // T3 + add_constraints_mul( + local_values, + yield_constr, + T3_ROW, + FINAL_EXP_T2_OFFSET, + FINAL_EXP_T1_OFFSET, + FINAL_EXP_T3_OFFSET, + ); + + // T4 + add_constraints_cyc_exp( + local_values, + yield_constr, + T4_ROW, + FINAL_EXP_T3_OFFSET, + FINAL_EXP_T4_OFFSET, + ); + + // T5 + add_constraints_conjugate( + local_values, + yield_constr, + T5_ROW, + FINAL_EXP_T4_OFFSET, + FINAL_EXP_T5_OFFSET, + ); + + // T6 + add_constraints_cyc_sq( + local_values, + yield_constr, + T6_ROW, + FINAL_EXP_T3_OFFSET, + FINAL_EXP_T6_OFFSET, + ); + + // T7 + add_constraints_conjugate( + local_values, + yield_constr, + T7_ROW, + FINAL_EXP_T6_OFFSET, + FINAL_EXP_T7_OFFSET, + ); + + // T8 + add_constraints_mul( + local_values, + yield_constr, + T8_ROW, + FINAL_EXP_T7_OFFSET, + FINAL_EXP_T5_OFFSET, + FINAL_EXP_T8_OFFSET, + ); + + // T9 + add_constraints_cyc_exp( + local_values, + 
yield_constr, + T9_ROW, + FINAL_EXP_T8_OFFSET, + FINAL_EXP_T9_OFFSET, + ); + + // T10 + add_constraints_conjugate( + local_values, + yield_constr, + T10_ROW, + FINAL_EXP_T9_OFFSET, + FINAL_EXP_T10_OFFSET, + ); + + // T11 + add_constraints_cyc_exp( + local_values, + yield_constr, + T11_ROW, + FINAL_EXP_T10_OFFSET, + FINAL_EXP_T11_OFFSET, + ); + + // T12 + add_constraints_conjugate( + local_values, + yield_constr, + T12_ROW, + FINAL_EXP_T11_OFFSET, + FINAL_EXP_T12_OFFSET, + ); + + // T13 + add_constraints_cyc_exp( + local_values, + yield_constr, + T13_ROW, + FINAL_EXP_T12_OFFSET, + FINAL_EXP_T13_OFFSET, + ); + + // T14 + add_constraints_conjugate( + local_values, + yield_constr, + T14_ROW, + FINAL_EXP_T13_OFFSET, + FINAL_EXP_T14_OFFSET, + ); + + // T15 + add_constraints_cyc_sq( + local_values, + yield_constr, + T15_ROW, + FINAL_EXP_T5_OFFSET, + FINAL_EXP_T15_OFFSET, + ); + + // T16 + add_constraints_mul( + local_values, + yield_constr, + T16_ROW, + FINAL_EXP_T14_OFFSET, + FINAL_EXP_T15_OFFSET, + FINAL_EXP_T16_OFFSET, + ); + + // T17 + add_constraints_cyc_exp( + local_values, + yield_constr, + T17_ROW, + FINAL_EXP_T16_OFFSET, + FINAL_EXP_T17_OFFSET, + ); + + // T18 + add_constraints_conjugate( + local_values, + yield_constr, + T18_ROW, + FINAL_EXP_T17_OFFSET, + FINAL_EXP_T18_OFFSET, + ); + + // T19 + add_constraints_mul( + local_values, + yield_constr, + T19_ROW, + FINAL_EXP_T5_OFFSET, + FINAL_EXP_T12_OFFSET, + FINAL_EXP_T19_OFFSET, + ); + + // T20 + add_constraints_forbenius( + local_values, + yield_constr, + T20_ROW, + FINAL_EXP_T19_OFFSET, + FINAL_EXP_T20_OFFSET, + 2, + ); + + // T21 + add_constraints_mul( + local_values, + yield_constr, + T21_ROW, + FINAL_EXP_T10_OFFSET, + FINAL_EXP_T3_OFFSET, + FINAL_EXP_T21_OFFSET, + ); + + // T22 + add_constraints_forbenius( + local_values, + yield_constr, + T22_ROW, + FINAL_EXP_T21_OFFSET, + FINAL_EXP_T22_OFFSET, + 3, + ); + + // T23 + add_constraints_conjugate( + local_values, + yield_constr, + T23_ROW, + FINAL_EXP_T3_OFFSET, 
+ FINAL_EXP_T23_OFFSET, + ); + + // T24 + add_constraints_mul( + local_values, + yield_constr, + T24_ROW, + FINAL_EXP_T16_OFFSET, + FINAL_EXP_T23_OFFSET, + FINAL_EXP_T24_OFFSET, + ); + + // T25 + add_constraints_forbenius( + local_values, + yield_constr, + T25_ROW, + FINAL_EXP_T24_OFFSET, + FINAL_EXP_T25_OFFSET, + 1, + ); + + // T26 + add_constraints_conjugate( + local_values, + yield_constr, + T26_ROW, + FINAL_EXP_T8_OFFSET, + FINAL_EXP_T26_OFFSET, + ); + + // T27 + add_constraints_mul( + local_values, + yield_constr, + T27_ROW, + FINAL_EXP_T18_OFFSET, + FINAL_EXP_T26_OFFSET, + FINAL_EXP_T27_OFFSET, + ); + + // T28 + add_constraints_mul( + local_values, + yield_constr, + T28_ROW, + FINAL_EXP_T27_OFFSET, + FINAL_EXP_T3_OFFSET, + FINAL_EXP_T28_OFFSET, + ); + + // T29 + add_constraints_mul( + local_values, + yield_constr, + T29_ROW, + FINAL_EXP_T20_OFFSET, + FINAL_EXP_T22_OFFSET, + FINAL_EXP_T29_OFFSET, + ); + + // T30 + add_constraints_mul( + local_values, + yield_constr, + T30_ROW, + FINAL_EXP_T29_OFFSET, + FINAL_EXP_T25_OFFSET, + FINAL_EXP_T30_OFFSET, + ); + + // T31 + add_constraints_mul( + local_values, + yield_constr, + T31_ROW, + FINAL_EXP_T30_OFFSET, + FINAL_EXP_T28_OFFSET, + FINAL_EXP_T31_OFFSET, + ); + + add_fp12_forbenius_map_constraints( + local_values, + next_values, + yield_constr, + FINAL_EXP_OP_OFFSET, + Some(local_values[FINAL_EXP_FORBENIUS_MAP_SELECTOR]), + ); + add_fp12_multiplication_constraints( + local_values, + next_values, + yield_constr, + FINAL_EXP_OP_OFFSET, + Some(local_values[FINAL_EXP_MUL_SELECTOR]), + ); + add_cyclotomic_exp_constraints( + local_values, + next_values, + yield_constr, + FINAL_EXP_OP_OFFSET, + Some(local_values[FINAL_EXP_CYCLOTOMIC_EXP_SELECTOR]), + ); + add_fp12_conjugate_constraints( + local_values, + yield_constr, + FINAL_EXP_OP_OFFSET, + Some(local_values[FINAL_EXP_CONJUGATE_SELECTOR]), + ); + add_cyclotomic_sq_constraints( + local_values, + next_values, + yield_constr, + FINAL_EXP_OP_OFFSET, + 
Some(local_values[FINAL_EXP_CYCLOTOMIC_SQ_SELECTOR]), + ); + } + + type EvaluationFrameTarget = + StarkFrame, ExtensionTarget, COLUMNS, PUBLIC_INPUTS>; + + fn eval_ext_circuit( + &self, + builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder, + vars: &Self::EvaluationFrameTarget, + yield_constr: &mut starky::constraint_consumer::RecursiveConstraintConsumer, + ) { + let local_values = vars.get_local_values(); + let next_values = vars.get_next_values(); + let public_inputs = vars.get_public_inputs(); + + // --- + for i in 0..24 * 3 * 2 { + let c = builder.sub_extension( + local_values[FINAL_EXP_INPUT_OFFSET + i], + public_inputs[PIS_INPUT_OFFSET + i], + ); + yield_constr.constraint(builder, c); + + let c = builder.sub_extension( + local_values[FINAL_EXP_T31_OFFSET + i], + public_inputs[PIS_OUTPUT_OFFSET + i], + ); + yield_constr.constraint(builder, c); + } + + let one = builder.constant_extension(F::Extension::ONE); + let zero = builder.constant_extension(F::Extension::ZERO); + + for i in 0..self.num_rows { + let val = if i == 0 { one } else { zero }; + let c = builder.sub_extension(local_values[FINAL_EXP_ROW_SELECTORS + i], val); + yield_constr.constraint_first_row(builder, c); + } + for i in 0..self.num_rows - 1 { + let c = builder.sub_extension( + local_values[FINAL_EXP_ROW_SELECTORS + i], + next_values[FINAL_EXP_ROW_SELECTORS + i + 1], + ); + yield_constr.constraint_transition(builder, c); + } + for i in 0..self.num_rows { + let val = if i == self.num_rows - 1 { one } else { zero }; + let c = builder.sub_extension(local_values[FINAL_EXP_ROW_SELECTORS + i], val); + yield_constr.constraint_last_row(builder, c); + } + for i in 0..24 * 3 * 2 { + let c = builder.sub_extension( + local_values[FINAL_EXP_INPUT_OFFSET + i], + next_values[FINAL_EXP_INPUT_OFFSET + i], + ); + yield_constr.constraint_transition(builder, c); + for j in 0..32 { + let t = if j == 0 { + FINAL_EXP_T0_OFFSET + } else if j == 1 { + FINAL_EXP_T1_OFFSET + } else if j == 2 { + 
FINAL_EXP_T2_OFFSET + } else if j == 3 { + FINAL_EXP_T3_OFFSET + } else if j == 4 { + FINAL_EXP_T4_OFFSET + } else if j == 5 { + FINAL_EXP_T5_OFFSET + } else if j == 6 { + FINAL_EXP_T6_OFFSET + } else if j == 7 { + FINAL_EXP_T7_OFFSET + } else if j == 8 { + FINAL_EXP_T8_OFFSET + } else if j == 9 { + FINAL_EXP_T9_OFFSET + } else if j == 10 { + FINAL_EXP_T10_OFFSET + } else if j == 11 { + FINAL_EXP_T11_OFFSET + } else if j == 12 { + FINAL_EXP_T12_OFFSET + } else if j == 13 { + FINAL_EXP_T13_OFFSET + } else if j == 14 { + FINAL_EXP_T14_OFFSET + } else if j == 15 { + FINAL_EXP_T15_OFFSET + } else if j == 16 { + FINAL_EXP_T16_OFFSET + } else if j == 17 { + FINAL_EXP_T17_OFFSET + } else if j == 18 { + FINAL_EXP_T18_OFFSET + } else if j == 19 { + FINAL_EXP_T19_OFFSET + } else if j == 20 { + FINAL_EXP_T20_OFFSET + } else if j == 21 { + FINAL_EXP_T21_OFFSET + } else if j == 22 { + FINAL_EXP_T22_OFFSET + } else if j == 23 { + FINAL_EXP_T23_OFFSET + } else if j == 24 { + FINAL_EXP_T24_OFFSET + } else if j == 25 { + FINAL_EXP_T25_OFFSET + } else if j == 26 { + FINAL_EXP_T26_OFFSET + } else if j == 27 { + FINAL_EXP_T27_OFFSET + } else if j == 28 { + FINAL_EXP_T28_OFFSET + } else if j == 29 { + FINAL_EXP_T29_OFFSET + } else if j == 30 { + FINAL_EXP_T30_OFFSET + } else { + FINAL_EXP_T31_OFFSET + }; + let c = builder.sub_extension(local_values[t + i], next_values[t + i]); + yield_constr.constraint_transition(builder, c); + } + } + + // T0 + add_constraints_forbenius_ext_circuit( + builder, + yield_constr, + local_values, + T0_ROW, + FINAL_EXP_INPUT_OFFSET, + FINAL_EXP_T0_OFFSET, + 6, + ); + + // T1 + add_constraints_mul_ext_circuit( + builder, + yield_constr, + local_values, + T1_ROW, + FINAL_EXP_T1_OFFSET, + FINAL_EXP_INPUT_OFFSET, + FINAL_EXP_T0_OFFSET, + ); + + // T2 + add_constraints_forbenius_ext_circuit( + builder, + yield_constr, + local_values, + T2_ROW, + FINAL_EXP_T1_OFFSET, + FINAL_EXP_T2_OFFSET, + 2, + ); + + // T3 + add_constraints_mul_ext_circuit( + builder, + 
yield_constr, + local_values, + T3_ROW, + FINAL_EXP_T2_OFFSET, + FINAL_EXP_T1_OFFSET, + FINAL_EXP_T3_OFFSET, + ); + + // T4 + add_constraints_cyc_exp_ext_circuit( + builder, + yield_constr, + local_values, + T4_ROW, + FINAL_EXP_T3_OFFSET, + FINAL_EXP_T4_OFFSET, + ); + + // T5 + add_constraints_conjugate_ext_circuit( + builder, + yield_constr, + local_values, + T5_ROW, + FINAL_EXP_T4_OFFSET, + FINAL_EXP_T5_OFFSET, + ); + + // T6 + add_constraints_cyc_sq_ext_circuit( + builder, + yield_constr, + local_values, + T6_ROW, + FINAL_EXP_T3_OFFSET, + FINAL_EXP_T6_OFFSET, + ); + + // T7 + add_constraints_conjugate_ext_circuit( + builder, + yield_constr, + local_values, + T7_ROW, + FINAL_EXP_T6_OFFSET, + FINAL_EXP_T7_OFFSET, + ); + + // T8 + add_constraints_mul_ext_circuit( + builder, + yield_constr, + local_values, + T8_ROW, + FINAL_EXP_T7_OFFSET, + FINAL_EXP_T5_OFFSET, + FINAL_EXP_T8_OFFSET, + ); + + // T9 + add_constraints_cyc_exp_ext_circuit( + builder, + yield_constr, + local_values, + T9_ROW, + FINAL_EXP_T8_OFFSET, + FINAL_EXP_T9_OFFSET, + ); + + // T10 + add_constraints_conjugate_ext_circuit( + builder, + yield_constr, + local_values, + T10_ROW, + FINAL_EXP_T9_OFFSET, + FINAL_EXP_T10_OFFSET, + ); + + // T11 + add_constraints_cyc_exp_ext_circuit( + builder, + yield_constr, + local_values, + T11_ROW, + FINAL_EXP_T10_OFFSET, + FINAL_EXP_T11_OFFSET, + ); + + // T12 + add_constraints_conjugate_ext_circuit( + builder, + yield_constr, + local_values, + T12_ROW, + FINAL_EXP_T11_OFFSET, + FINAL_EXP_T12_OFFSET, + ); + + // T13 + add_constraints_cyc_exp_ext_circuit( + builder, + yield_constr, + local_values, + T13_ROW, + FINAL_EXP_T12_OFFSET, + FINAL_EXP_T13_OFFSET, + ); + + // T14 + add_constraints_conjugate_ext_circuit( + builder, + yield_constr, + local_values, + T14_ROW, + FINAL_EXP_T13_OFFSET, + FINAL_EXP_T14_OFFSET, + ); + + // T15 + add_constraints_cyc_sq_ext_circuit( + builder, + yield_constr, + local_values, + T15_ROW, + FINAL_EXP_T5_OFFSET, + FINAL_EXP_T15_OFFSET, + ); 
+ + // T16 + add_constraints_mul_ext_circuit( + builder, + yield_constr, + local_values, + T16_ROW, + FINAL_EXP_T14_OFFSET, + FINAL_EXP_T15_OFFSET, + FINAL_EXP_T16_OFFSET, + ); + + // T17 + add_constraints_cyc_exp_ext_circuit( + builder, + yield_constr, + local_values, + T17_ROW, + FINAL_EXP_T16_OFFSET, + FINAL_EXP_T17_OFFSET, + ); + + // T18 + add_constraints_conjugate_ext_circuit( + builder, + yield_constr, + local_values, + T18_ROW, + FINAL_EXP_T17_OFFSET, + FINAL_EXP_T18_OFFSET, + ); + + // T19 + add_constraints_mul_ext_circuit( + builder, + yield_constr, + local_values, + T19_ROW, + FINAL_EXP_T5_OFFSET, + FINAL_EXP_T12_OFFSET, + FINAL_EXP_T19_OFFSET, + ); + + // T20 + add_constraints_forbenius_ext_circuit( + builder, + yield_constr, + local_values, + T20_ROW, + FINAL_EXP_T19_OFFSET, + FINAL_EXP_T20_OFFSET, + 2, + ); + + // T21 + add_constraints_mul_ext_circuit( + builder, + yield_constr, + local_values, + T21_ROW, + FINAL_EXP_T10_OFFSET, + FINAL_EXP_T3_OFFSET, + FINAL_EXP_T21_OFFSET, + ); + + // T22 + add_constraints_forbenius_ext_circuit( + builder, + yield_constr, + local_values, + T22_ROW, + FINAL_EXP_T21_OFFSET, + FINAL_EXP_T22_OFFSET, + 3, + ); + + // T23 + add_constraints_conjugate_ext_circuit( + builder, + yield_constr, + local_values, + T23_ROW, + FINAL_EXP_T3_OFFSET, + FINAL_EXP_T23_OFFSET, + ); + + // T24 + add_constraints_mul_ext_circuit( + builder, + yield_constr, + local_values, + T24_ROW, + FINAL_EXP_T16_OFFSET, + FINAL_EXP_T23_OFFSET, + FINAL_EXP_T24_OFFSET, + ); + + // T25 + add_constraints_forbenius_ext_circuit( + builder, + yield_constr, + local_values, + T25_ROW, + FINAL_EXP_T24_OFFSET, + FINAL_EXP_T25_OFFSET, + 1, + ); + + // T26 + add_constraints_conjugate_ext_circuit( + builder, + yield_constr, + local_values, + T26_ROW, + FINAL_EXP_T8_OFFSET, + FINAL_EXP_T26_OFFSET, + ); + + // T27 + add_constraints_mul_ext_circuit( + builder, + yield_constr, + local_values, + T27_ROW, + FINAL_EXP_T18_OFFSET, + FINAL_EXP_T26_OFFSET, + 
FINAL_EXP_T27_OFFSET, + ); + + // T28 + add_constraints_mul_ext_circuit( + builder, + yield_constr, + local_values, + T28_ROW, + FINAL_EXP_T27_OFFSET, + FINAL_EXP_T3_OFFSET, + FINAL_EXP_T28_OFFSET, + ); + + // T29 + add_constraints_mul_ext_circuit( + builder, + yield_constr, + local_values, + T29_ROW, + FINAL_EXP_T20_OFFSET, + FINAL_EXP_T22_OFFSET, + FINAL_EXP_T29_OFFSET, + ); + + // T30 + add_constraints_mul_ext_circuit( + builder, + yield_constr, + local_values, + T30_ROW, + FINAL_EXP_T29_OFFSET, + FINAL_EXP_T25_OFFSET, + FINAL_EXP_T30_OFFSET, + ); + + // T31 + add_constraints_mul_ext_circuit( + builder, + yield_constr, + local_values, + T31_ROW, + FINAL_EXP_T30_OFFSET, + FINAL_EXP_T28_OFFSET, + FINAL_EXP_T31_OFFSET, + ); + + add_fp12_forbenius_map_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + FINAL_EXP_OP_OFFSET, + Some(local_values[FINAL_EXP_FORBENIUS_MAP_SELECTOR]), + ); + add_fp12_multiplication_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + FINAL_EXP_OP_OFFSET, + Some(local_values[FINAL_EXP_MUL_SELECTOR]), + ); + add_cyclotomic_exp_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + FINAL_EXP_OP_OFFSET, + Some(local_values[FINAL_EXP_CYCLOTOMIC_EXP_SELECTOR]), + ); + add_fp12_conjugate_constraints_ext_circuit( + builder, + yield_constr, + local_values, + FINAL_EXP_OP_OFFSET, + Some(local_values[FINAL_EXP_CONJUGATE_SELECTOR]), + ); + add_cyclotomic_sq_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + FINAL_EXP_OP_OFFSET, + Some(local_values[FINAL_EXP_CYCLOTOMIC_SQ_SELECTOR]), + ); + } + + fn constraint_degree(&self) -> usize { + 5 + } +} diff --git a/casper-finality-proofs/src/verification/proofs/miller_loop.rs b/casper-finality-proofs/src/verification/proofs/miller_loop.rs new file mode 100644 index 000000000..25a1af2ad --- /dev/null +++ b/casper-finality-proofs/src/verification/proofs/miller_loop.rs @@ -0,0 +1,1182 
@@
+use std::cmp::min;
+
+use crate::verification::{
+    fields::starky::{
+        fp::*,
+        fp12::*,
+        fp2::*,
+        fp6::*,
+    },
+    utils::{
+        native_bls::{get_bls_12_381_parameter, Fp, Fp12, Fp2, Fp6},
+        starky_utils::*,
+    },
+};
+
+use plonky2::plonk::circuit_builder::CircuitBuilder;
+use plonky2::{
+    field::{
+        extension::{Extendable, FieldExtension},
+        packed::PackedField,
+        types::Field,
+    },
+    hash::hash_types::RichField,
+    iop::ext_target::ExtensionTarget,
+};
+use starky::{
+    constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer},
+    evaluation_frame::{StarkEvaluationFrame, StarkFrame},
+    stark::Stark,
+};
+
+// Miller loop offsets
+/*
+    These trace offsets are for the miller_loop function (super::native::miller_loop). It takes 12*68 rows. The MSB of bls12-381 parameter is not used.
+    FIRST_BIT_SELECTOR_OFFSET -> selector which is set 1 when the trace is for the first bit inside the loop.
+    LAST_BIT_SELECTOR_OFFSET -> selector which is set 1 when the trace is for the last bit inside the loop.
+    FIRST_ROW_SELECTOR_OFFSET -> selector which is 1 for the starting row for each operation. Hence, every 12th row, it is set 1.
+    BIT1_SELECTOR_OFFSET -> selector which is 1 for each 1 bit of bls12-381 parameter. It is set 1 for 12 continuous rows.
+    PX_OFFSET -> offset where Px is set (defined in native function definition).
+    PY_OFFSET -> offset where Py is set (defined in native function definition).
+    ELL_COEFFS_INDEX_OFFSET -> offset which stores which index of the `ell_coeffs` array the trace is currently on. Total 68 selectors, one for each possible index of ell_coeffs.
+    ELL_COEFFS_OFFSET -> offset which stores the `ell_coeffs` used in the current row computation.
+    F12_OFFSET -> offset which stores the result of the current miller loop computation.
+    O1_CALC_OFFSET -> offset which calculates `ell_coeffs[1]`*Px.
+    O4_CALC_OFFSET -> offset which calculates `ell_coeffs[2]`*Py.
+    F12_MUL_BY_014_OFFSET -> offset for multiplyBy014 function computation.
+ F12_SQ_CALC_OFFSET -> offset for f12*f12 computation. + MILLER_LOOP_RES_OFFSET -> offset which stores the result of miller_loop function. + RES_CONJUGATE_OFFSET -> offset which stores the computation of conjugate of miller loop result. (used to match f12 value after the last loop of computation). +*/ +pub const FIRST_BIT_SELECTOR_OFFSET: usize = 0; +pub const LAST_BIT_SELECTOR_OFFSET: usize = FIRST_BIT_SELECTOR_OFFSET + 1; +pub const FIRST_ROW_SELECTOR_OFFSET: usize = LAST_BIT_SELECTOR_OFFSET + 1; +pub const BIT1_SELECTOR_OFFSET: usize = FIRST_ROW_SELECTOR_OFFSET + 1; +pub const PX_OFFSET: usize = BIT1_SELECTOR_OFFSET + 1; +pub const PY_OFFSET: usize = PX_OFFSET + 12; +pub const ELL_COEFFS_INDEX_OFFEST: usize = PY_OFFSET + 12; +pub const ELL_COEFFS_OFFSET: usize = ELL_COEFFS_INDEX_OFFEST + 68; +pub const F12_OFFSET: usize = ELL_COEFFS_OFFSET + 24 * 3; +pub const O1_CALC_OFFSET: usize = F12_OFFSET + 24 * 3 * 2; +pub const O4_CALC_OFFSET: usize = O1_CALC_OFFSET + FP2_FP_TOTAL_COLUMNS; +pub const F12_MUL_BY_014_OFFSET: usize = O4_CALC_OFFSET + FP2_FP_TOTAL_COLUMNS; +pub const F12_SQ_CALC_OFFSET: usize = F12_MUL_BY_014_OFFSET + MULTIPLY_BY_014_TOTAL; +pub const MILLER_LOOP_RES_OFFSET: usize = F12_SQ_CALC_OFFSET + FP12_MUL_TOTAL_COLUMNS; +pub const RES_CONJUGATE_OFFSET: usize = MILLER_LOOP_RES_OFFSET + 24 * 3 * 2; +pub const MILLER_LOOP_TOTAL: usize = RES_CONJUGATE_OFFSET + FP6_ADDITION_TOTAL; + +pub const TOTAL_COLUMNS: usize = MILLER_LOOP_TOTAL; +pub const COLUMNS: usize = TOTAL_COLUMNS; + +/* + The public inputs for this stark are the x, y inputs to the miller_loop function followed by the array of `ell_coeffs` resulted from calc_pairing_precomp, then the final result of miller_loop. 
+*/
+
+pub const PIS_PX_OFFSET: usize = 0;
+pub const PIS_PY_OFFSET: usize = PIS_PX_OFFSET + 12;
+pub const PIS_ELL_COEFFS_OFFSET: usize = PIS_PY_OFFSET + 12;
+pub const PIS_RES_OFFSET: usize = PIS_ELL_COEFFS_OFFSET + 68 * 24 * 3;
+pub const PUBLIC_INPUTS: usize = PIS_RES_OFFSET + 24 * 3 * 2;
+
+// Stark for the miller_loop computation: inputs are Px, Py and the precomputed `ell_coeffs`.
+#[derive(Clone, Copy)]
+pub struct MillerLoopStark<F: RichField + Extendable<D>, const D: usize> {
+    num_rows: usize,
+    _f: std::marker::PhantomData<F>,
+}
+
+/// Fills the trace of [miller_loop](super::native::miller_loop) function. Inputs are two 12 limbs and `ell_coeffs` array computed from `calc_pairing_precomp`. The values of Px and Py are filled across all rows in the trace. `FIRST_BIT_SELECTOR_OFFSET` is set 1 for the first loop computation. Sets the `ELL_COEFFS_INDEX` for the corresponding index. Sets the corresponding `ell_coeff` for the current row of computation. Fills the F12 trace, starting with 1 and then updates after each loop of computation. Fills trace for O1 and O4 calculations. Sets `FIRST_ROW_SELECTOR` to 1 for starting row of the operation. Fills the trace for multiplyBy014 calculations and Fp12 multiplication calculations. Then fills the trace with miller_loop result, followed by conjugate computation for miller loop result.
+pub fn fill_trace_miller_loop, const D: usize, const C: usize>( + trace: &mut Vec<[F; C]>, + x: &Fp, + y: &Fp, + ell_coeffs: &[[Fp2; 3]], + start_row: usize, + end_row: usize, + start_col: usize, +) { + for row in start_row..end_row + 1 { + assign_u32_in_series(trace, row, start_col + PX_OFFSET, &x.0); + assign_u32_in_series(trace, row, start_col + PY_OFFSET, &y.0); + } + let mut f12 = Fp12::one(); + let mut i = get_bls_12_381_parameter().bits() - 2; + let mut bitone = false; + // for j in 0..ell_coeffs.len() { + for j in 0..min((end_row + 1 - start_row) / 12, ell_coeffs.len()) { + let s_row = start_row + j * 12; + let e_row = start_row + (j + 1) * 12 - 1; + for row in s_row..e_row + 1 { + if j == 0 { + trace[row][start_col + FIRST_BIT_SELECTOR_OFFSET] = F::ONE; + } + if i == 0 { + trace[row][start_col + LAST_BIT_SELECTOR_OFFSET] = F::ONE; + } + if bitone { + trace[row][start_col + BIT1_SELECTOR_OFFSET] = F::ONE; + } + trace[row][start_col + ELL_COEFFS_INDEX_OFFEST + j] = F::ONE; + for k in 0..3 { + assign_u32_in_series( + trace, + row, + start_col + ELL_COEFFS_OFFSET + k * 24, + &ell_coeffs[j][k].get_u32_slice().concat(), + ); + } + assign_u32_in_series( + trace, + row, + start_col + F12_OFFSET, + &f12.get_u32_slice().concat(), + ); + } + if j != 0 { + trace[s_row][start_col + FIRST_ROW_SELECTOR_OFFSET] = F::ONE; + } + let e = ell_coeffs[j]; + fill_trace_fp2_fp_mul( + trace, + &e[1].get_u32_slice(), + &x.0, + s_row, + e_row, + start_col + O1_CALC_OFFSET, + ); + let o1 = e[1] * (*x); + fill_trace_fp2_fp_mul( + trace, + &e[2].get_u32_slice(), + &y.0, + s_row, + e_row, + start_col + O4_CALC_OFFSET, + ); + let o4 = e[2] * (*y); + fill_trace_multiply_by_014( + trace, + &f12, + &e[0], + &o1, + &o4, + s_row, + e_row, + start_col + F12_MUL_BY_014_OFFSET, + ); + f12 = f12.multiply_by_014(e[0], o1, o4); + fill_trace_fp12_multiplication( + trace, + &f12, + &f12, + s_row, + e_row, + start_col + F12_SQ_CALC_OFFSET, + ); + let f12_sq = f12 * f12; + if 
get_bls_12_381_parameter().bit(i) && !bitone { + bitone = true; + } else if j < ell_coeffs.len() - 1 { + f12 = f12_sq; + i -= 1; + bitone = false; + } + } + f12 = f12.conjugate(); + for row in start_row..end_row + 1 { + assign_u32_in_series( + trace, + row, + start_col + MILLER_LOOP_RES_OFFSET, + &f12.get_u32_slice().concat(), + ); + } + for row in start_row..end_row + 1 { + fill_trace_negate_fp6( + trace, + &Fp6(f12.0[6..].try_into().unwrap()), + row, + start_col + RES_CONJUGATE_OFFSET, + ); + } + // assert_eq!(i, 0); +} + +// Implement trace generator +impl, const D: usize> MillerLoopStark { + pub fn new(num_rows: usize) -> Self { + Self { + num_rows, + _f: std::marker::PhantomData, + } + } + + pub fn generate_trace( + &self, + x: Fp, + y: Fp, + ell_coeffs: Vec<[Fp2; 3]>, + ) -> Vec<[F; TOTAL_COLUMNS]> { + let mut trace = vec![[F::ZERO; TOTAL_COLUMNS]; self.num_rows]; + fill_trace_miller_loop(&mut trace, &x, &y, &ell_coeffs, 0, self.num_rows - 1, 0); + trace + // let start_col = 0; + // for row in 0..self.num_rows-1 { + // let local_values = self.trace[row]; + // let next_values = self.trace[row+1]; + // println!( + // "{} * (1 - {}) * ({} - {}) = {}", + // next_values[start_col + FIRST_ROW_SELECTOR_OFFSET], + // next_values[start_col + BIT1_SELECTOR_OFFSET], + // next_values[start_col + F12_OFFSET], + // local_values[start_col + F12_SQ_CALC_OFFSET + FP12_MUL_X_CALC_OFFSET + FP6_ADDITION_TOTAL + FP_SINGLE_REDUCED_OFFSET], + // next_values[start_col + FIRST_ROW_SELECTOR_OFFSET] * + // (F::ONE - next_values[start_col + BIT1_SELECTOR_OFFSET]) * + // (next_values[start_col + F12_OFFSET] - + // local_values[start_col + F12_SQ_CALC_OFFSET + FP12_MUL_X_CALC_OFFSET + FP6_ADDITION_TOTAL + FP_SINGLE_REDUCED_OFFSET]) + // ); + // } + } +} + +/// The constraints of this stark are as follows: +/// * Constraint Px and Py to be same across all rows. +/// * Constraints F12 to 1 when `FIRST_BIT_SELECTOR` is set 1. 
+/// * Constraints next row F12 to result of current row multiplyBy014 when next row `FIRST_ROW_SELECTOR` is set 1 and next row `BIT1_SELECTOR` is set 1. +/// * Constraints next row F12 to result of current row fp12 multiplication when next row `FIRST_ROW_SELECTOR` is set 1 and next row `BIT1_SELECTOR` is set 0. +/// * Constraints O1 computation with Px and current `ell_coeffs[1]`. +/// * Constraints O4 computation with Py and current `ell_coeffs[2]`. +/// * Constraints multiplyBy014 computation with F12, `ell_coeffs[0]`, O1 and O4. +/// * Constrants fp12 multiplication for F12*F12. +/// * Constraints result conjugate computation with miller loop res. +/// * Constraints the result of conjugate computation with F12 when `LAST_BIT_SELECTOR` is set 1. +pub fn add_miller_loop_constraints< + F: RichField + Extendable, + const D: usize, + FE, + P, + const D2: usize, +>( + local_values: &[P], + next_values: &[P], + yield_constr: &mut ConstraintConsumer

, + start_col: usize, + bit_selector: Option

, +) where + FE: FieldExtension, + P: PackedField, +{ + let bit_selector_val = bit_selector.unwrap_or(P::ONES); + + for i in 0..12 { + yield_constr.constraint_transition( + local_values[start_col + PX_OFFSET + i] - next_values[start_col + PX_OFFSET + i], + ); + yield_constr.constraint_transition( + local_values[start_col + PY_OFFSET + i] - next_values[start_col + PY_OFFSET + i], + ); + } + for i in 0..24 * 3 * 2 { + if i == 0 { + yield_constr.constraint( + local_values[start_col + FIRST_BIT_SELECTOR_OFFSET] + * (local_values[start_col + F12_OFFSET + i] - P::ONES), + ); + } else { + yield_constr.constraint( + local_values[start_col + FIRST_BIT_SELECTOR_OFFSET] + * local_values[start_col + F12_OFFSET + i], + ); + } + } + + for i in 0..12 { + for j in 0..6 { + yield_constr.constraint( + bit_selector_val + * next_values[start_col + FIRST_ROW_SELECTOR_OFFSET] + * next_values[start_col + BIT1_SELECTOR_OFFSET] + * (next_values[start_col + F12_OFFSET + j * 12 + i] + - local_values[start_col + + F12_MUL_BY_014_OFFSET + + MULTIPLY_BY_014_X_CALC_OFFSET + + FP6_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * next_values[start_col + FIRST_ROW_SELECTOR_OFFSET] + * next_values[start_col + BIT1_SELECTOR_OFFSET] + * (next_values[start_col + F12_OFFSET + j * 12 + i + 24 * 3] + - local_values[start_col + + F12_MUL_BY_014_OFFSET + + MULTIPLY_BY_014_Y_CALC_OFFSET + + FP6_ADDITION_TOTAL + + FP6_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * next_values[start_col + FIRST_ROW_SELECTOR_OFFSET] + * (P::ONES - next_values[start_col + BIT1_SELECTOR_OFFSET]) + * (next_values[start_col + F12_OFFSET + j * 12 + i] + - local_values[start_col + + F12_SQ_CALC_OFFSET + + FP12_MUL_X_CALC_OFFSET + + FP6_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + 
FP_SINGLE_REDUCED_OFFSET + + i]), + ); + yield_constr.constraint( + bit_selector_val + * next_values[start_col + FIRST_ROW_SELECTOR_OFFSET] + * (P::ONES - next_values[start_col + BIT1_SELECTOR_OFFSET]) + * (next_values[start_col + F12_OFFSET + j * 12 + i + 24 * 3] + - local_values[start_col + + F12_SQ_CALC_OFFSET + + FP12_MUL_Y_CALC_OFFSET + + FP6_ADDITION_TOTAL + + FP6_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i]), + ); + } + } + + // O1 + for i in 0..24 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + O1_CALC_OFFSET + FP2_FP_MUL_SELECTOR_OFFSET] + * (local_values[start_col + O1_CALC_OFFSET + FP2_FP_X_INPUT_OFFSET + i] + - local_values[start_col + ELL_COEFFS_OFFSET + 24 + i]), + ); + if i < 12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + O1_CALC_OFFSET + FP2_FP_MUL_SELECTOR_OFFSET] + * (local_values[start_col + O1_CALC_OFFSET + FP2_FP_Y_INPUT_OFFSET + i] + - local_values[start_col + PX_OFFSET + i]), + ); + } + } + add_fp2_fp_mul_constraints( + local_values, + next_values, + yield_constr, + start_col + O1_CALC_OFFSET, + bit_selector, + ); + + // O4 + for i in 0..24 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + O4_CALC_OFFSET + FP2_FP_MUL_SELECTOR_OFFSET] + * (local_values[start_col + O4_CALC_OFFSET + FP2_FP_X_INPUT_OFFSET + i] + - local_values[start_col + ELL_COEFFS_OFFSET + 48 + i]), + ); + if i < 12 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + O4_CALC_OFFSET + FP2_FP_MUL_SELECTOR_OFFSET] + * (local_values[start_col + O4_CALC_OFFSET + FP2_FP_Y_INPUT_OFFSET + i] + - local_values[start_col + PY_OFFSET + i]), + ); + } + } + add_fp2_fp_mul_constraints( + local_values, + next_values, + yield_constr, + start_col + O4_CALC_OFFSET, + bit_selector, + ); + + // f12 multiply by 014 + for i in 0..12 { + for j in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values + [start_col 
+ F12_MUL_BY_014_OFFSET + MULTIPLY_BY_014_SELECTOR_OFFSET] + * (local_values[start_col + + F12_MUL_BY_014_OFFSET + + MULTIPLY_BY_014_INPUT_OFFSET + + j * 12 + + i] + - local_values[start_col + F12_OFFSET + j * 12 + i]), + ); + } + for j in 0..2 { + let z_offset = if j == 0 { + X0_Y_REDUCE_OFFSET + } else { + X1_Y_REDUCE_OFFSET + }; + yield_constr.constraint( + bit_selector_val + * local_values + [start_col + F12_MUL_BY_014_OFFSET + MULTIPLY_BY_014_SELECTOR_OFFSET] + * (local_values[start_col + + F12_MUL_BY_014_OFFSET + + MULTIPLY_BY_014_O0_OFFSET + + j * 12 + + i] + - local_values[start_col + ELL_COEFFS_OFFSET + j * 12 + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values + [start_col + F12_MUL_BY_014_OFFSET + MULTIPLY_BY_014_SELECTOR_OFFSET] + * (local_values[start_col + + F12_MUL_BY_014_OFFSET + + MULTIPLY_BY_014_O1_OFFSET + + j * 12 + + i] + - local_values[start_col + O1_CALC_OFFSET + z_offset + REDUCED_OFFSET + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values + [start_col + F12_MUL_BY_014_OFFSET + MULTIPLY_BY_014_SELECTOR_OFFSET] + * (local_values[start_col + + F12_MUL_BY_014_OFFSET + + MULTIPLY_BY_014_O4_OFFSET + + j * 12 + + i] + - local_values[start_col + O4_CALC_OFFSET + z_offset + REDUCED_OFFSET + i]), + ); + } + } + add_multiply_by_014_constraints( + local_values, + next_values, + yield_constr, + start_col + F12_MUL_BY_014_OFFSET, + bit_selector, + ); + + // f12 * f12 + for i in 0..12 { + for j in 0..6 { + yield_constr.constraint( + bit_selector_val + * local_values[start_col + F12_SQ_CALC_OFFSET + FP12_MUL_SELECTOR_OFFSET] + * (local_values[start_col + + F12_MUL_BY_014_OFFSET + + MULTIPLY_BY_014_X_CALC_OFFSET + + FP6_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i] + - local_values[start_col + + F12_SQ_CALC_OFFSET + + FP12_MUL_X_INPUT_OFFSET + + j * 12 + + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + 
F12_SQ_CALC_OFFSET + FP12_MUL_SELECTOR_OFFSET] + * (local_values[start_col + + F12_MUL_BY_014_OFFSET + + MULTIPLY_BY_014_Y_CALC_OFFSET + + FP6_ADDITION_TOTAL + + FP6_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i] + - local_values[start_col + + F12_SQ_CALC_OFFSET + + FP12_MUL_X_INPUT_OFFSET + + j * 12 + + i + + 24 * 3]), + ); + } + for j in 0..12 { + yield_constr.constraint( + bit_selector_val + * local_values + [start_col + F12_SQ_CALC_OFFSET + FP12_MUL_X_INPUT_OFFSET + j * 12 + i] + - local_values + [start_col + F12_SQ_CALC_OFFSET + FP12_MUL_Y_INPUT_OFFSET + j * 12 + i], + ); + } + } + add_fp12_multiplication_constraints( + local_values, + next_values, + yield_constr, + start_col + F12_SQ_CALC_OFFSET, + bit_selector, + ); + + // RES conjugate + for i in 0..12 { + for j in 0..3 { + let fp2_offset = if j == 0 { + FP6_ADDITION_0_OFFSET + } else if j == 1 { + FP6_ADDITION_1_OFFSET + } else { + FP6_ADDITION_2_OFFSET + }; + for k in 0..2 { + let fp_offset = if k == 0 { + FP2_ADDITION_0_OFFSET + } else { + FP2_ADDITION_1_OFFSET + }; + yield_constr.constraint( + bit_selector_val + * local_values[start_col + + RES_CONJUGATE_OFFSET + + fp2_offset + + fp_offset + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + RES_CONJUGATE_OFFSET + + fp2_offset + + fp_offset + + FP_ADDITION_X_OFFSET + + i] + - local_values + [start_col + MILLER_LOOP_RES_OFFSET + (j + 3) * 24 + k * 12 + i]), + ); + } + } + } + add_negate_fp6_constraints( + local_values, + yield_constr, + start_col + RES_CONJUGATE_OFFSET, + bit_selector, + ); + + // RES with last bit result + for i in 0..12 { + for j in 0..3 { + let (fp2_add_offset, fp2_sub_offset) = if j == 0 { + (FP6_ADDITION_0_OFFSET, FP6_SUBTRACTION_0_OFFSET) + } else if j == 1 { + (FP6_ADDITION_1_OFFSET, FP6_SUBTRACTION_1_OFFSET) + } else { + (FP6_ADDITION_2_OFFSET, FP6_SUBTRACTION_2_OFFSET) + }; + for k in 0..2 { + let (fp_add_offset, fp_sub_offset) = if k == 0 { + 
(FP2_ADDITION_0_OFFSET, FP2_SUBTRACTION_0_OFFSET) + } else { + (FP2_ADDITION_1_OFFSET, FP2_SUBTRACTION_1_OFFSET) + }; + yield_constr.constraint( + bit_selector_val + * local_values[start_col + LAST_BIT_SELECTOR_OFFSET] + * local_values[start_col + + F12_MUL_BY_014_OFFSET + + MULTIPLY_BY_014_X_CALC_OFFSET + + fp2_add_offset + + fp_add_offset + + FP_ADDITION_CHECK_OFFSET] + * (local_values[start_col + + F12_MUL_BY_014_OFFSET + + MULTIPLY_BY_014_X_CALC_OFFSET + + FP6_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * (j * 2 + k) + + FP_SINGLE_REDUCED_OFFSET + + i] + - local_values + [start_col + MILLER_LOOP_RES_OFFSET + j * 24 + k * 12 + i]), + ); + yield_constr.constraint( + bit_selector_val + * local_values[start_col + LAST_BIT_SELECTOR_OFFSET] + * local_values[start_col + + F12_MUL_BY_014_OFFSET + + MULTIPLY_BY_014_Y_CALC_OFFSET + + FP6_ADDITION_TOTAL + + fp2_sub_offset + + fp_sub_offset + + FP_SUBTRACTION_CHECK_OFFSET] + * (local_values[start_col + + F12_MUL_BY_014_OFFSET + + MULTIPLY_BY_014_Y_CALC_OFFSET + + FP6_ADDITION_TOTAL + + FP6_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * (j * 2 + k) + + FP_SINGLE_REDUCED_OFFSET + + i] + - local_values[start_col + + RES_CONJUGATE_OFFSET + + fp2_add_offset + + fp_add_offset + + FP_ADDITION_Y_OFFSET + + i]), + ); + } + } + } +} +pub fn add_miller_loop_constraints_ext_circuit, const D: usize>( + builder: &mut CircuitBuilder, + yield_constr: &mut RecursiveConstraintConsumer, + local_values: &[ExtensionTarget], + next_values: &[ExtensionTarget], + start_col: usize, + bit_selector: Option>, +) { + let bit_selector_val = bit_selector.unwrap_or(builder.constant_extension(F::Extension::ONE)); + + for i in 0..12 { + let c1 = builder.sub_extension( + local_values[start_col + PX_OFFSET + i], + next_values[start_col + PX_OFFSET + i], + ); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint_transition(builder, c); + + let c2 = builder.sub_extension( + 
local_values[start_col + PY_OFFSET + i], + next_values[start_col + PY_OFFSET + i], + ); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint_transition(builder, c); + } + for i in 0..24 * 3 * 2 { + let one = builder.constant_extension(F::Extension::ONE); + let mul_tmp = local_values[start_col + FIRST_BIT_SELECTOR_OFFSET]; + if i == 0 { + let sub_tmp = builder.sub_extension(local_values[start_col + F12_OFFSET + i], one); + let c = builder.mul_extension(sub_tmp, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c); + yield_constr.constraint(builder, c); + } else { + let c = builder.mul_extension(mul_tmp, local_values[start_col + F12_OFFSET + i]); + let c = builder.mul_extension(bit_selector_val, c); + yield_constr.constraint(builder, c); + } + } + + for i in 0..12 { + for j in 0..6 { + let one = builder.constant_extension(F::Extension::ONE); + + let mul_tmp1 = builder.mul_extension( + next_values[start_col + FIRST_ROW_SELECTOR_OFFSET], + next_values[start_col + BIT1_SELECTOR_OFFSET], + ); + + let sub_tmp1 = builder.sub_extension( + next_values[start_col + F12_OFFSET + j * 12 + i], + local_values[start_col + + F12_MUL_BY_014_OFFSET + + MULTIPLY_BY_014_X_CALC_OFFSET + + FP6_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c1 = builder.mul_extension(sub_tmp1, mul_tmp1); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + next_values[start_col + F12_OFFSET + j * 12 + i + 24 * 3], + local_values[start_col + + F12_MUL_BY_014_OFFSET + + MULTIPLY_BY_014_Y_CALC_OFFSET + + FP6_ADDITION_TOTAL + + FP6_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c2 = builder.mul_extension(sub_tmp2, mul_tmp1); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + + let sub_tmp = 
builder.sub_extension(one, next_values[start_col + BIT1_SELECTOR_OFFSET]); + let mul_tmp2 = + builder.mul_extension(next_values[start_col + FIRST_ROW_SELECTOR_OFFSET], sub_tmp); + + let sub_tmp3 = builder.sub_extension( + next_values[start_col + F12_OFFSET + j * 12 + i], + local_values[start_col + + F12_SQ_CALC_OFFSET + + FP12_MUL_X_CALC_OFFSET + + FP6_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c3 = builder.mul_extension(sub_tmp3, mul_tmp2); + let c = builder.mul_extension(bit_selector_val, c3); + yield_constr.constraint(builder, c); + + let sub_tmp4 = builder.sub_extension( + next_values[start_col + F12_OFFSET + j * 12 + i + 24 * 3], + local_values[start_col + + F12_SQ_CALC_OFFSET + + FP12_MUL_Y_CALC_OFFSET + + FP6_ADDITION_TOTAL + + FP6_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i], + ); + let c4 = builder.mul_extension(sub_tmp4, mul_tmp2); + let c = builder.mul_extension(bit_selector_val, c4); + yield_constr.constraint(builder, c); + } + } + + // O1 + for i in 0..24 { + let mul_tmp = local_values[start_col + O1_CALC_OFFSET + FP2_FP_MUL_SELECTOR_OFFSET]; + let sub_tmp = builder.sub_extension( + local_values[start_col + O1_CALC_OFFSET + FP2_FP_X_INPUT_OFFSET + i], + local_values[start_col + ELL_COEFFS_OFFSET + 24 + i], + ); + let c = builder.mul_extension(sub_tmp, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c); + yield_constr.constraint(builder, c); + if i < 12 { + let sub_tmp = builder.sub_extension( + local_values[start_col + O1_CALC_OFFSET + FP2_FP_Y_INPUT_OFFSET + i], + local_values[start_col + PX_OFFSET + i], + ); + let c = builder.mul_extension(sub_tmp, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c); + yield_constr.constraint(builder, c); + } + } + add_fp2_fp_mul_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + O1_CALC_OFFSET, + bit_selector, + ); 
+ + // O4 + for i in 0..24 { + let mul_tmp = local_values[start_col + O4_CALC_OFFSET + FP2_FP_MUL_SELECTOR_OFFSET]; + let sub_tmp = builder.sub_extension( + local_values[start_col + O4_CALC_OFFSET + FP2_FP_X_INPUT_OFFSET + i], + local_values[start_col + ELL_COEFFS_OFFSET + 48 + i], + ); + let c = builder.mul_extension(sub_tmp, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c); + yield_constr.constraint(builder, c); + if i < 12 { + let sub_tmp = builder.sub_extension( + local_values[start_col + O4_CALC_OFFSET + FP2_FP_Y_INPUT_OFFSET + i], + local_values[start_col + PY_OFFSET + i], + ); + let c = builder.mul_extension(sub_tmp, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c); + yield_constr.constraint(builder, c); + } + } + add_fp2_fp_mul_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + O4_CALC_OFFSET, + bit_selector, + ); + + // f12 multiply by 014 + for i in 0..12 { + let mul_tmp = + local_values[start_col + F12_MUL_BY_014_OFFSET + MULTIPLY_BY_014_SELECTOR_OFFSET]; + for j in 0..12 { + let sub_tmp = builder.sub_extension( + local_values + [start_col + F12_MUL_BY_014_OFFSET + MULTIPLY_BY_014_INPUT_OFFSET + j * 12 + i], + local_values[start_col + F12_OFFSET + j * 12 + i], + ); + let c = builder.mul_extension(sub_tmp, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c); + yield_constr.constraint(builder, c); + } + for j in 0..2 { + let z_offset = if j == 0 { + X0_Y_REDUCE_OFFSET + } else { + X1_Y_REDUCE_OFFSET + }; + + let sub_tmp1 = builder.sub_extension( + local_values + [start_col + F12_MUL_BY_014_OFFSET + MULTIPLY_BY_014_O0_OFFSET + j * 12 + i], + local_values[start_col + ELL_COEFFS_OFFSET + j * 12 + i], + ); + let c1 = builder.mul_extension(sub_tmp1, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values + [start_col + F12_MUL_BY_014_OFFSET + MULTIPLY_BY_014_O1_OFFSET + j * 
12 + i], + local_values[start_col + O1_CALC_OFFSET + z_offset + REDUCED_OFFSET + i], + ); + let c2 = builder.mul_extension(sub_tmp2, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + + let sub_tmp3 = builder.sub_extension( + local_values + [start_col + F12_MUL_BY_014_OFFSET + MULTIPLY_BY_014_O4_OFFSET + j * 12 + i], + local_values[start_col + O4_CALC_OFFSET + z_offset + REDUCED_OFFSET + i], + ); + let c3 = builder.mul_extension(sub_tmp3, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c3); + yield_constr.constraint(builder, c); + } + } + add_multiply_by_014_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + F12_MUL_BY_014_OFFSET, + bit_selector, + ); + + // f12 * f12 + for i in 0..12 { + for j in 0..6 { + let mul_tmp = local_values[start_col + F12_SQ_CALC_OFFSET + FP12_MUL_SELECTOR_OFFSET]; + + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + F12_MUL_BY_014_OFFSET + + MULTIPLY_BY_014_X_CALC_OFFSET + + FP6_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i], + local_values[start_col + F12_SQ_CALC_OFFSET + FP12_MUL_X_INPUT_OFFSET + j * 12 + i], + ); + let c1 = builder.mul_extension(sub_tmp1, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + F12_MUL_BY_014_OFFSET + + MULTIPLY_BY_014_Y_CALC_OFFSET + + FP6_ADDITION_TOTAL + + FP6_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * j + + FP_SINGLE_REDUCED_OFFSET + + i], + local_values[start_col + + F12_SQ_CALC_OFFSET + + FP12_MUL_X_INPUT_OFFSET + + j * 12 + + i + + 24 * 3], + ); + let c2 = builder.mul_extension(sub_tmp2, mul_tmp); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + } + for j in 0..12 { + let c = builder.sub_extension( + local_values[start_col 
+ F12_SQ_CALC_OFFSET + FP12_MUL_X_INPUT_OFFSET + j * 12 + i], + local_values[start_col + F12_SQ_CALC_OFFSET + FP12_MUL_Y_INPUT_OFFSET + j * 12 + i], + ); + let c = builder.mul_extension(bit_selector_val, c); + yield_constr.constraint(builder, c); + } + } + add_fp12_multiplication_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + start_col + F12_SQ_CALC_OFFSET, + bit_selector, + ); + + // RES conjugate + for i in 0..12 { + for j in 0..3 { + let fp2_offset = if j == 0 { + FP6_ADDITION_0_OFFSET + } else if j == 1 { + FP6_ADDITION_1_OFFSET + } else { + FP6_ADDITION_2_OFFSET + }; + for k in 0..2 { + let fp_offset = if k == 0 { + FP2_ADDITION_0_OFFSET + } else { + FP2_ADDITION_1_OFFSET + }; + let sub_tmp = builder.sub_extension( + local_values[start_col + + RES_CONJUGATE_OFFSET + + fp2_offset + + fp_offset + + FP_ADDITION_X_OFFSET + + i], + local_values[start_col + MILLER_LOOP_RES_OFFSET + (j + 3) * 24 + k * 12 + i], + ); + let c = builder.mul_extension( + sub_tmp, + local_values[start_col + + RES_CONJUGATE_OFFSET + + fp2_offset + + fp_offset + + FP_ADDITION_CHECK_OFFSET], + ); + let c = builder.mul_extension(bit_selector_val, c); + yield_constr.constraint(builder, c); + } + } + } + add_negate_fp6_constraints_ext_circuit( + builder, + yield_constr, + local_values, + start_col + RES_CONJUGATE_OFFSET, + bit_selector, + ); + + // RES with last bit result + for i in 0..12 { + for j in 0..3 { + let (fp2_add_offset, fp2_sub_offset) = if j == 0 { + (FP6_ADDITION_0_OFFSET, FP6_SUBTRACTION_0_OFFSET) + } else if j == 1 { + (FP6_ADDITION_1_OFFSET, FP6_SUBTRACTION_1_OFFSET) + } else { + (FP6_ADDITION_2_OFFSET, FP6_SUBTRACTION_2_OFFSET) + }; + for k in 0..2 { + let (fp_add_offset, fp_sub_offset) = if k == 0 { + (FP2_ADDITION_0_OFFSET, FP2_SUBTRACTION_0_OFFSET) + } else { + (FP2_ADDITION_1_OFFSET, FP2_SUBTRACTION_1_OFFSET) + }; + + let mul_tmp1 = builder.mul_extension( + local_values[start_col + LAST_BIT_SELECTOR_OFFSET], + local_values[start_col + + 
F12_MUL_BY_014_OFFSET + + MULTIPLY_BY_014_X_CALC_OFFSET + + fp2_add_offset + + fp_add_offset + + FP_ADDITION_CHECK_OFFSET], + ); + let sub_tmp1 = builder.sub_extension( + local_values[start_col + + F12_MUL_BY_014_OFFSET + + MULTIPLY_BY_014_X_CALC_OFFSET + + FP6_ADDITION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * (j * 2 + k) + + FP_SINGLE_REDUCED_OFFSET + + i], + local_values[start_col + MILLER_LOOP_RES_OFFSET + j * 24 + k * 12 + i], + ); + let c1 = builder.mul_extension(sub_tmp1, mul_tmp1); + let c = builder.mul_extension(bit_selector_val, c1); + yield_constr.constraint(builder, c); + + let mul_tmp2 = builder.mul_extension( + local_values[start_col + LAST_BIT_SELECTOR_OFFSET], + local_values[start_col + + F12_MUL_BY_014_OFFSET + + MULTIPLY_BY_014_Y_CALC_OFFSET + + FP6_ADDITION_TOTAL + + fp2_sub_offset + + fp_sub_offset + + FP_SUBTRACTION_CHECK_OFFSET], + ); + let sub_tmp2 = builder.sub_extension( + local_values[start_col + + F12_MUL_BY_014_OFFSET + + MULTIPLY_BY_014_Y_CALC_OFFSET + + FP6_ADDITION_TOTAL + + FP6_SUBTRACTION_TOTAL + + (FP_SINGLE_REDUCE_TOTAL + RANGE_CHECK_TOTAL) * (j * 2 + k) + + FP_SINGLE_REDUCED_OFFSET + + i], + local_values[start_col + + RES_CONJUGATE_OFFSET + + fp2_add_offset + + fp_add_offset + + FP_ADDITION_Y_OFFSET + + i], + ); + let c2 = builder.mul_extension(sub_tmp2, mul_tmp2); + let c = builder.mul_extension(bit_selector_val, c2); + yield_constr.constraint(builder, c); + } + } + } +} + +/* + Constraints for miller loop stark: + * Constraint Px with public input x + * Constraint Py with public input y + * Constraint current row `ell_coeff` along with `ELL_COEFFS_INDEX` with public inputs `ell_coeffs`. + * Constrain `MILLER_LOOP_RES` with public inputs result + * Constraints for miller loop computation. 
+*/ + +// Implement constraint generator without Stark trait + +pub fn traitless_eval_packed_generic< + F: RichField + Extendable, + const D: usize, + FE, + P, + const D2: usize, + EvalFrame: StarkEvaluationFrame, +>( + vars: StarkFrame, + yield_constr: &mut ConstraintConsumer

, +) where + FE: FieldExtension, + P: PackedField, +{ + let local_values = vars.get_local_values(); + let next_values = vars.get_next_values(); + let public_inputs = vars.get_public_inputs(); + + // ---- + for i in 0..12 { + yield_constr.constraint(local_values[PX_OFFSET + i] - public_inputs[PIS_PX_OFFSET + i]); + yield_constr.constraint(local_values[PY_OFFSET + i] - public_inputs[PIS_PY_OFFSET + i]); + } + for i in 0..68 { + for j in 0..24 * 3 { + yield_constr.constraint( + local_values[ELL_COEFFS_INDEX_OFFEST + i] + * (local_values[ELL_COEFFS_OFFSET + j] + - public_inputs[PIS_ELL_COEFFS_OFFSET + i * 24 * 3 + j]), + ); + } + } + for i in 0..24 * 3 * 2 { + yield_constr.constraint( + local_values[MILLER_LOOP_RES_OFFSET + i] - public_inputs[PIS_RES_OFFSET + i], + ); + } + add_miller_loop_constraints(local_values, next_values, yield_constr, 0, None); +} + +// Implement constraint generator +impl, const D: usize> Stark for MillerLoopStark { + type EvaluationFrame = StarkFrame + where + FE: FieldExtension, + P: PackedField; + + fn eval_packed_generic( + &self, + vars: &Self::EvaluationFrame, + yield_constr: &mut ConstraintConsumer

, + ) where + FE: FieldExtension, + P: PackedField, + { + let local_values = vars.get_local_values(); + let next_values = vars.get_next_values(); + let public_inputs = vars.get_public_inputs(); + + // ---- + for i in 0..12 { + yield_constr.constraint(local_values[PX_OFFSET + i] - public_inputs[PIS_PX_OFFSET + i]); + yield_constr.constraint(local_values[PY_OFFSET + i] - public_inputs[PIS_PY_OFFSET + i]); + } + for i in 0..68 { + for j in 0..24 * 3 { + yield_constr.constraint( + local_values[ELL_COEFFS_INDEX_OFFEST + i] + * (local_values[ELL_COEFFS_OFFSET + j] + - public_inputs[PIS_ELL_COEFFS_OFFSET + i * 24 * 3 + j]), + ); + } + } + for i in 0..24 * 3 * 2 { + yield_constr.constraint( + local_values[MILLER_LOOP_RES_OFFSET + i] - public_inputs[PIS_RES_OFFSET + i], + ); + } + add_miller_loop_constraints(local_values, next_values, yield_constr, 0, None); + } + + type EvaluationFrameTarget = + StarkFrame, ExtensionTarget, COLUMNS, PUBLIC_INPUTS>; + + fn eval_ext_circuit( + &self, + builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder, + vars: &Self::EvaluationFrameTarget, + yield_constr: &mut starky::constraint_consumer::RecursiveConstraintConsumer, + ) { + let local_values = vars.get_local_values(); + let next_values = vars.get_next_values(); + let public_inputs = vars.get_public_inputs(); + + for i in 0..12 { + let c1 = builder.sub_extension( + local_values[PX_OFFSET + i], + public_inputs[PIS_PX_OFFSET + i], + ); + yield_constr.constraint(builder, c1); + + let c2 = builder.sub_extension( + local_values[PY_OFFSET + i], + public_inputs[PIS_PY_OFFSET + i], + ); + yield_constr.constraint(builder, c2); + } + for i in 0..68 { + for j in 0..24 * 3 { + let sub_tmp = builder.sub_extension( + local_values[ELL_COEFFS_OFFSET + j], + public_inputs[PIS_ELL_COEFFS_OFFSET + i * 24 * 3 + j], + ); + let c = builder.mul_extension(local_values[ELL_COEFFS_INDEX_OFFEST + i], sub_tmp); + yield_constr.constraint(builder, c); + } + } + for i in 0..24 * 3 * 2 { + let c = 
builder.sub_extension( + local_values[MILLER_LOOP_RES_OFFSET + i], + public_inputs[PIS_RES_OFFSET + i], + ); + yield_constr.constraint(builder, c); + } + add_miller_loop_constraints_ext_circuit( + builder, + yield_constr, + local_values, + next_values, + 0, + None, + ); + } + + fn constraint_degree(&self) -> usize { + 3 + } +} diff --git a/casper-finality-proofs/src/verification/proofs/mod.rs b/casper-finality-proofs/src/verification/proofs/mod.rs new file mode 100644 index 000000000..2ad3f4789 --- /dev/null +++ b/casper-finality-proofs/src/verification/proofs/mod.rs @@ -0,0 +1,4 @@ +pub mod ecc_aggregate; +pub mod final_exponentiate; +pub mod miller_loop; +pub mod proofs; diff --git a/casper-finality-proofs/src/verification/proofs/proofs.rs b/casper-finality-proofs/src/verification/proofs/proofs.rs new file mode 100644 index 000000000..4cd6b407f --- /dev/null +++ b/casper-finality-proofs/src/verification/proofs/proofs.rs @@ -0,0 +1,238 @@ +use std::time::Instant; + +use plonky2::{ + field::extension::Extendable, + hash::hash_types::RichField, + plonk::{ + circuit_data::{CommonCircuitData, VerifierOnlyCircuitData}, + config::GenericConfig, + proof::ProofWithPublicInputs, + }, + util::{log2_ceil, timing::TimingTree}, +}; +use plonky2x::prelude::Field; +use plonky2x::{ + backend::circuit::{DefaultParameters, PlonkParameters}, + utils::proof::ProofWithPublicInputsTargetUtils, +}; +use starky::{ + config::StarkConfig, prover::prove, util::trace_rows_to_poly_values, + verifier::verify_stark_proof, +}; + +use crate::verification::{ + proofs::{ + ecc_aggregate, + final_exponentiate::{self, FinalExponentiateStark}, + miller_loop::{self, MillerLoopStark}, + }, + utils::native_bls::{self, Fp, Fp12, Fp2}, +}; + +use super::ecc_aggregate::ECCAggStark; + +pub fn miller_loop_main< + F: RichField + Extendable, + C: GenericConfig, + const D: usize, +>( + x: Fp, + y: Fp, + q_x: Fp2, + q_y: Fp2, + q_z: Fp2, +) -> ( + MillerLoopStark, + starky::proof::StarkProofWithPublicInputs, + 
StarkConfig, +) { + let config = StarkConfig::standard_fast_config(); + let stark = MillerLoopStark::::new(1024); + let ell_coeffs = native_bls::calc_pairing_precomp(q_x, q_y, q_z); + let res = native_bls::miller_loop(x, y, q_x, q_y, q_z); + let mut public_inputs = Vec::::new(); + for e in x.0.iter() { + public_inputs.push(F::from_canonical_u32(*e)); + } + for e in y.0.iter() { + public_inputs.push(F::from_canonical_u32(*e)); + } + for coeff in ell_coeffs.iter() { + for f2 in coeff.iter() { + for f in f2.0.iter() { + for e in f.0.iter() { + public_inputs.push(F::from_canonical_u32(*e)); + } + } + } + } + for f in res.0.iter() { + for e in f.0.iter() { + public_inputs.push(F::from_canonical_u32(*e)); + } + } + assert_eq!(public_inputs.len(), miller_loop::PUBLIC_INPUTS); + let s = Instant::now(); + let trace = stark.generate_trace(x, y, ell_coeffs); + let trace_poly_values = trace_rows_to_poly_values(trace); + let proof = prove::, D>( + stark, + &config, + trace_poly_values, + &public_inputs, + &mut TimingTree::default(), + ) + .unwrap(); + println!("Time taken for miller_loop stark proof {:?}", s.elapsed()); + verify_stark_proof(stark, proof.clone(), &config).unwrap(); + (stark, proof, config) +} + +pub fn final_exponentiate_main< + F: RichField + Extendable, + C: GenericConfig, + const D: usize, +>( + x: Fp12, +) -> ( + FinalExponentiateStark, + starky::proof::StarkProofWithPublicInputs, + StarkConfig, +) { + let mut config = StarkConfig::standard_fast_config(); + config.fri_config.rate_bits = 2; + let stark = FinalExponentiateStark::::new(8192); + let s = Instant::now(); + let mut public_inputs = Vec::::new(); + for e in x.get_u32_slice().concat().iter() { + public_inputs.push(F::from_canonical_u32(*e)); + } + for e in x.final_exponentiate().get_u32_slice().concat().iter() { + public_inputs.push(F::from_canonical_u32(*e)); + } + assert_eq!(public_inputs.len(), final_exponentiate::PUBLIC_INPUTS); + let trace = stark.generate_trace(x); + let trace_poly_values = 
trace_rows_to_poly_values(trace); + let proof = prove::, D>( + stark, + &config, + trace_poly_values, + &public_inputs, + &mut TimingTree::default(), + ) + .unwrap(); + println!( + "Time taken for final_exponentiate stark proof {:?}", + s.elapsed() + ); + verify_stark_proof(stark, proof.clone(), &config).unwrap(); + (stark, proof, config) +} + +pub fn ec_aggregate_main< + F: RichField + Extendable, + C: GenericConfig, + const D: usize, +>( + points: Vec<[Fp; 2]>, + res: [Fp; 2], + bits: Vec, +) -> ( + ECCAggStark, + starky::proof::StarkProofWithPublicInputs, + StarkConfig, +) { + let mut config = StarkConfig::standard_fast_config(); + config.fri_config.rate_bits = 2; + let num_rows = 1 << log2_ceil((points.len() - 1) * 12); + let stark = ECCAggStark::::new(num_rows); + let s = Instant::now(); + let mut public_inputs = Vec::::new(); + for pt in &points { + for x in &pt[0].0 { + public_inputs.push(F::from_canonical_u32(*x)); + } + for y in &pt[1].0 { + public_inputs.push(F::from_canonical_u32(*y)); + } + } + for b in bits.iter() { + public_inputs.push(F::from_bool(*b)); + } + for x in res[0].0 { + public_inputs.push(F::from_canonical_u32(x)); + } + for y in res[1].0 { + public_inputs.push(F::from_canonical_u32(y)); + } + assert_eq!(public_inputs.len(), ecc_aggregate::PUBLIC_INPUTS); + let trace = stark.generate_trace(&points, &bits); + let trace_poly_values = trace_rows_to_poly_values(trace); + let proof = prove::, D>( + stark, + &config, + trace_poly_values, + &public_inputs, + &mut TimingTree::default(), + ) + .unwrap(); + println!("Time taken for acc_agg stark proof {:?}", s.elapsed()); + verify_stark_proof(stark, proof.clone(), &config).unwrap(); + (stark, proof, config) +} + +pub fn recursive_proof< + F: plonky2::hash::hash_types::RichField + plonky2::field::extension::Extendable, + C: GenericConfig, + S: starky::stark::Stark + Copy, + InnerC: GenericConfig, + const D: usize, +>( + stark: S, + inner_proof: starky::proof::StarkProofWithPublicInputs, + 
inner_config: &StarkConfig, + print_gate_counts: bool, +) -> ProofTuple +where + InnerC::Hasher: plonky2::plonk::config::AlgebraicHasher, +{ + let circuit_config = plonky2::plonk::circuit_data::CircuitConfig::standard_recursion_config(); + let mut builder = plonky2::plonk::circuit_builder::CircuitBuilder::::new(circuit_config); + let mut pw = plonky2::iop::witness::PartialWitness::new(); + let degree_bits = inner_proof.proof.recover_degree_bits(inner_config); + let pt = starky::recursive_verifier::add_virtual_stark_proof_with_pis( + &mut builder, + &stark, + inner_config, + degree_bits, + 0, + 0, + ); + builder.register_public_inputs(&pt.public_inputs); + let zero = builder.zero(); + starky::recursive_verifier::set_stark_proof_with_pis_target(&mut pw, &pt, &inner_proof, zero); + starky::recursive_verifier::verify_stark_proof_circuit::( + &mut builder, + stark, + pt, + inner_config, + ); + + if print_gate_counts { + builder.print_gate_counts(0); + } + + let data = builder.build::(); + let s = Instant::now(); + let proof = data.prove(pw).unwrap(); + println!("time taken for plonky2 recursive proof {:?}", s.elapsed()); + data.verify(proof.clone()).unwrap(); + (proof, data.verifier_only, data.common) +} + +// >::Field +pub type ProofTuple = ( + ProofWithPublicInputs, + VerifierOnlyCircuitData, + CommonCircuitData, +); diff --git a/casper-finality-proofs/src/verification/pubkey_to_g1.rs b/casper-finality-proofs/src/verification/pubkey_to_g1.rs new file mode 100644 index 000000000..5679df9e4 --- /dev/null +++ b/casper-finality-proofs/src/verification/pubkey_to_g1.rs @@ -0,0 +1,91 @@ +use num_bigint::BigUint; +use plonky2::{ + field::{extension::Extendable, types::Field}, + hash::hash_types::RichField, + iop::target::Target, + plonk::{ + circuit_builder::CircuitBuilder, + config::{GenericConfig, PoseidonGoldilocksConfig}, + }, +}; +use plonky2x::{ + backend::circuit::PlonkParameters, + frontend::{ + uint::num::{ + biguint::{BigUintTarget, CircuitBuilderBiguint}, + 
u32::gadgets::arithmetic_u32::{CircuitBuilderU32, U32Target}, + }, + vars::Variable, + }, +}; + +use crate::utils::plonky2x_extensions::{assert_zero, variable_to_le_bits}; + +use super::{curves::g1::PointG1Target, fields::fp::LIMBS, utils::native_bls::modulus}; + +pub const PUB_KEY_LEN: usize = 48; + +const D: usize = 2; +type C = PoseidonGoldilocksConfig; +type F = >::F; + +pub fn pubkey_to_g1_check, const D: usize>( + builder: &mut CircuitBuilder, + point: &PointG1Target, + pk: &[Target; PUB_KEY_LEN], +) { + let msbs = builder.split_le(pk[0], 8); + let bflag = msbs[6]; + builder.assert_zero(bflag.target); + + let aflag = msbs[5]; + + let (x, y) = (&point[0], &point[1]); + let two = builder.constant_biguint(&2u32.into()); + let y_2 = builder.mul_biguint(y, &two); + let p = builder.constant_biguint(&modulus()); + let y_2_p = builder.div_biguint(&y_2, &p); + let zero = builder.zero_u32(); + for i in 0..y_2_p.limbs.len() { + if i == 0 { + builder.connect(aflag.target, y_2_p.limbs[i].target); + } else { + builder.connect_u32(y_2_p.limbs[i], zero); + } + } + + let z_limbs: Vec = pk + .chunks(4) + .into_iter() + .map(|chunk| { + let zero = builder.zero(); + let factor = builder.constant(F::from_canonical_u32(256)); + U32Target::from_target_unsafe( + chunk + .iter() + .fold(zero, |acc, c| builder.mul_add(acc, factor, *c)), + ) + }) + .rev() + .collect(); + let z = BigUintTarget { limbs: z_limbs }; + + let pow_2_383 = builder.constant_biguint(&(BigUint::from(1u32) << 383u32)); + let pow_2_381 = builder.constant_biguint(&(BigUint::from(1u32) << 381u32)); + let pow_2_381_or_zero = BigUintTarget { + limbs: (0..LIMBS) + .into_iter() + .map(|i| { + U32Target::from_target_unsafe(builder.select( + aflag, + pow_2_381.limbs[i].target, + zero.target, + )) + }) + .collect(), + }; + let flags = builder.add_biguint(&pow_2_383, &pow_2_381_or_zero); + let z_reconstructed = builder.add_biguint(x, &flags); + + builder.connect_biguint(&z, &z_reconstructed); +} diff --git 
a/casper-finality-proofs/src/verification/utils/big_arithmetic.rs b/casper-finality-proofs/src/verification/utils/big_arithmetic.rs new file mode 100644 index 000000000..003694918 --- /dev/null +++ b/casper-finality-proofs/src/verification/utils/big_arithmetic.rs @@ -0,0 +1,97 @@ +pub fn big_less_than(a: &[u32], b: &[u32]) -> bool { + assert_eq!(a.len(), b.len()); + for i in (0..a.len()).rev() { + if a[i] < b[i] { + return true; + } else if b[i] < a[i] { + return false; + } + } + false +} + +pub fn big_add(a: &[u32], b: &[u32]) -> Vec { + assert_eq!(a.len(), b.len()); + let mut c: Vec = Vec::with_capacity(a.len() + 1); + let mut carry: u32 = 0; + for (a_i, b_i) in a.iter().zip(b.iter()) { + let c_i = (*a_i as u64) + (*b_i as u64) + (carry as u64); + c.push(c_i as u32); + carry = (c_i >> 32) as u32; + } + c.push(carry as u32); + c +} + +pub fn big_sub(a: &[u32], b: &[u32]) -> (Vec, u32) { + // assume a>b + assert_eq!(a.len(), b.len()); + let mut c: Vec = Vec::with_capacity(a.len()); + let mut carry: u32 = 0; + for (a_i, b_i) in a.iter().zip(b.iter()) { + let b_plus_carry: u64 = (*b_i as u64) + (carry as u64); + if *a_i as u64 >= b_plus_carry { + c.push(a_i - (b_plus_carry as u32)); + carry = 0; + } else { + c.push(((1u64 << 32) + (*a_i as u64) - b_plus_carry) as u32); + carry = 1; + } + } + (c, carry) +} + +// a * b +pub fn big_multiply(a: &[u32], b: &[u32]) -> Vec { + assert_eq!(a.len(), b.len()); + let mut c: Vec = Vec::with_capacity(a.len() + 1); + let mut carry = 0; + for (a_i, b_i) in a.iter().zip(b.iter()) { + let c_i = (*a_i as u64) * (*b_i as u64) + (carry as u64); + c.push(c_i as u32); + carry = (c_i >> 32) as u32; + } + c.push(carry as u32); + c +} + +#[cfg(test)] +mod tests { + + use crate::verification::utils::big_arithmetic::{big_less_than, big_sub}; + + use super::big_add; + + #[test] + fn test_big_add() { + let a = vec![1 << 31, ((1u64 << 32) - 1) as u32, 1]; + let b = vec![1 << 31, 1, 4]; + let ans = vec![0, 1, 6, 0]; + let big_add_ans = big_add(&a, 
&b); + assert_eq!(ans, big_add_ans); + } + + #[test] + fn test_less_than() { + let a = vec![0, 1, 2]; + let b = vec![2, 3, 1]; + assert_eq!(big_less_than(&a, &b), false); + assert_eq!(big_less_than(&b, &a), true); + assert_eq!(big_less_than(&b, &b), false); + } + + #[test] + fn test_big_sub() { + let a = vec![1 << 31, 3, 1]; + let b = vec![1 << 31, 1, 1]; + let ans = vec![0, ((1u64 << 32) - 2) as u32, ((1u64 << 32) - 1) as u32]; + let (sub_ans, carry) = big_sub(&b, &a); + assert_eq!(ans, sub_ans); + assert_eq!(carry, 1); + + let ans = vec![0, 2, 0]; + let (sub_ans, carry) = big_sub(&a, &b); + assert_eq!(ans, sub_ans); + assert_eq!(carry, 0); + } +} diff --git a/casper-finality-proofs/src/verification/utils/mod.rs b/casper-finality-proofs/src/verification/utils/mod.rs new file mode 100644 index 000000000..7b1ee37d6 --- /dev/null +++ b/casper-finality-proofs/src/verification/utils/mod.rs @@ -0,0 +1,3 @@ +pub mod native_bls; +pub mod starky_utils; +pub mod big_arithmetic; \ No newline at end of file diff --git a/casper-finality-proofs/src/verification/utils/native_bls.rs b/casper-finality-proofs/src/verification/utils/native_bls.rs new file mode 100644 index 000000000..7e1ff6143 --- /dev/null +++ b/casper-finality-proofs/src/verification/utils/native_bls.rs @@ -0,0 +1,1576 @@ +// BLS Native + +use std::ops::{Add, Div, Mul, Neg, Sub}; + +use std::{str::FromStr, vec}; + +use num_bigint::{BigInt, BigUint, Sign, ToBigInt}; + +use super::big_arithmetic::{self, big_add, big_less_than}; + +pub fn modulus() -> BigUint { + BigUint::from_str("4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559787").unwrap() +} + +pub fn modulus_digits() -> Vec { + modulus().to_u32_digits() +} + +pub fn get_bls_12_381_parameter() -> BigUint { + BigUint::from_str("15132376222941642752").unwrap() +} + +pub fn get_negate(y: &[u32; 12]) -> [u32; 12] { + let y_bu = BigUint::new(y.to_vec()); + let neg = modulus() - y_bu; + 
get_u32_vec_from_literal(neg) +} + +pub fn get_g2_invert(z1: &[u32; 12], z2: &[u32; 12]) -> [[u32; 12]; 2] { + let fp2 = Fp2([Fp(z1.clone()), Fp(z2.clone())]); + [fp2.invert().0[0].0, fp2.invert().0[1].0] +} + +pub fn get_u32_carries(x: &[u32; 12], y: &[u32; 12]) -> [u32; 12] { + let mut carries = [0u32; 12]; + let mut prev_carry = 0; + for i in 0..12 { + if i != 0 { + prev_carry = carries[i - 1]; + } + let z = (x[i] as u64) + (y[i] as u64) + (prev_carry as u64); + println!( + "i-{:?}--x:: {:?}, y:: {:?}, z:: {:?}, carry:: {:?}", + i, + x[i], + y[i], + prev_carry, + (z >> 32) as u32 + ); + if i != 11 { + carries[i] = (z >> 32) as u32 + } + } + carries[11] = 0; + carries +} + +pub fn multiply_by_slice(x: &[u32; 12], y: u32) -> ([u32; 13], [u32; 12]) { + let mut res: [u32; 13] = [0u32; 13]; + let mut carries: [u32; 12] = [0u32; 12]; + let mut prev_carry = 0; + for i in 0..12 { + let temp = (x[i] as u64 * y as u64) + prev_carry as u64; + let temp_res = temp as u32; + let new_carry = (temp >> 32) as u32; + prev_carry = new_carry; + res[i] = temp_res; + carries[i] = prev_carry; + } + res[12] = prev_carry; + (res, carries) +} + +pub fn add_u32_slices(x: &[u32; 24], y: &[u32; 24]) -> ([u32; 24], [u32; 24]) { + let mut prev_carry = 0u32; + let mut res = [0u32; 24]; + let mut carries = [0u32; 24]; + for i in 0..24 { + let s = x[i] as u64 + y[i] as u64 + prev_carry as u64; + let sum = s as u32; + let carry = (s >> 32) as u32; + prev_carry = carry; + res[i] = sum; + carries[i] = carry; + } + (res, carries) +} + +pub fn add_u32_slices_12(x: &[u32; 12], y: &[u32; 12]) -> ([u32; 12], [u32; 12]) { + let mut prev_carry = 0u32; + let mut res = [0u32; 12]; + let mut carries = [0u32; 12]; + for i in 0..12 { + let s = x[i] as u64 + y[i] as u64 + prev_carry as u64; + let sum = s as u32; + let carry = (s >> 32) as u32; + prev_carry = carry; + res[i] = sum; + carries[i] = carry; + } + (res, carries) +} + +// assume x > y +pub fn sub_u32_slices(x: &[u32; 24], y: &[u32; 24]) -> ([u32; 24], 
[u32; 24]) { + let mut prev_borrow = 0u32; + let mut res = [0u32; 24]; + let mut borrows = [0u32; 24]; + for i in 0..24 { + if x[i] >= y[i] + prev_borrow { + res[i] = x[i] - y[i] - prev_borrow; + borrows[i] = 0; + prev_borrow = 0; + } else { + res[i] = ((1u64 << 32) + x[i] as u64 - y[i] as u64 - prev_borrow as u64) as u32; + borrows[i] = 1; + prev_borrow = 1; + } + } + (res, borrows) +} + +// assume x > y +pub fn sub_u32_slices_12(x: &[u32; 12], y: &[u32; 12]) -> ([u32; 12], [u32; 12]) { + let mut prev_borrow = 0u32; + let mut res = [0u32; 12]; + let mut borrows = [0u32; 12]; + for i in 0..12 { + if x[i] >= y[i] + prev_borrow { + res[i] = x[i] - y[i] - prev_borrow; + borrows[i] = 0; + prev_borrow = 0; + } else { + res[i] = ((1u64 << 32) + x[i] as u64 - y[i] as u64 - prev_borrow as u64) as u32; + borrows[i] = 1; + prev_borrow = 1; + } + } + assert_eq!(borrows[11], 0); + (res, borrows) +} + +pub fn mul_u32_slice_u32(x: &[u32; 12], y: u32) -> ([u32; 12], [u32; 12]) { + let mut prev_carry = 0u32; + let mut res = [0u32; 12]; + let mut carries = [0u32; 12]; + for i in 0..12 { + let tmp = x[i] as u64 * y as u64 + prev_carry as u64; + res[i] = tmp as u32; + carries[i] = (tmp >> 32) as u32; + prev_carry = carries[i]; + } + assert_eq!(prev_carry, 0); + (res, carries) +} + +pub fn get_bits_as_array(number: u32) -> [u32; 32] { + let mut result = [0u32; 32]; // Assuming a u32 has 32 bits + + for i in 0..32 { + // Shift the 1 bit to the rightmost position and perform bitwise AND + result[i] = ((number >> i) & 1) as u32; + } + + result +} + +pub fn add_u32_slices_1(x: &[u32; 24], y: &[u32; 25]) -> ([u32; 25], [u32; 24]) { + let mut x_padded = [0u32; 25]; + x_padded[0..24].copy_from_slice(x); + let mut prev_carry = 0u32; + let mut res = [0u32; 25]; + let mut carries = [0u32; 24]; + for i in 0..24 { + let s = x[i] as u64 + y[i] as u64 + prev_carry as u64; + let sum = s as u32; + let carry = (s >> 32) as u32; + prev_carry = carry; + res[i] = sum; + carries[i] = carry; + } + res[24] 
= prev_carry; + (res, carries) +} + +pub fn egcd(a: BigUint, b: BigUint) -> BigUint { + // if a == BigUint::from(0 as u32){ + // (b, BigUint::from(0 as u32), BigUint::from(1 as u32)) + // } else { + // let (g, y, x) = egcd(b.clone()%a.clone(), a.clone()); + // (g, x - (b.clone()*(y.clone()/a.clone())), y) + // } + let mut a_ = BigInt::from_biguint(Sign::Plus, a); + let mut b_ = BigInt::from_biguint(Sign::Plus, b); + + let mut x = BigInt::from_str("0").unwrap(); + let mut y = BigInt::from_str("1").unwrap(); + let mut u = BigInt::from_str("1").unwrap(); + let mut v = BigInt::from_str("0").unwrap(); + + let zero = BigInt::from_str("0").unwrap(); + while a_ != zero { + let q = b_.clone() / a_.clone(); + let r = b_ % a_.clone(); + let m = x - (u.clone() * q.clone()); + let n = y - (v.clone() * q); + b_ = a_; + a_ = r; + x = u; + y = v; + u = m; + v = n; + } + // println!("x {:?}", x); + let mod_bigint = modulus().to_bigint().unwrap(); + if x < 0.into() { + ((x % mod_bigint.clone()) + mod_bigint) + .to_biguint() + .unwrap() + } else { + (x % mod_bigint.clone()).to_biguint().unwrap() + } +} + +pub fn mod_inverse(a: BigUint, m: BigUint) -> BigUint { + egcd(a, m.clone()) + // x % m +} + +pub fn fp4_square(a: Fp2, b: Fp2) -> (Fp2, Fp2) { + let a2 = a * a; + let b2 = b * b; + (b2.mul_by_nonresidue() + a2, ((a + b) * (a + b)) - a2 - b2) +} + +pub fn get_u32_vec_from_literal(x: BigUint) -> [u32; 12] { + let mut x_u32_vec: Vec = x.to_u32_digits(); + while x_u32_vec.len() != 12 { + x_u32_vec.push(0 as u32); + } + x_u32_vec.try_into().unwrap() +} + +pub fn get_u32_vec_from_literal_ref(x: &BigUint) -> [u32; 12] { + let mut x_u32_vec: Vec = x.to_u32_digits(); + while x_u32_vec.len() != 12 { + x_u32_vec.push(0 as u32); + } + x_u32_vec.try_into().unwrap() +} + +pub fn get_selector_bits_from_u32(x: u32) -> [u32; 12] { + // assert!(x<=4096); + let mut res = [0u32; 12]; + let mut val = x.clone(); + for i in 0..12 { + res[i] = val & 1; + val = val >> 1; + } + res +} + +pub fn 
get_u32_vec_from_literal_24(x: BigUint) -> [u32; 24] { + let mut x_u32_vec: Vec = x.to_u32_digits(); + while x_u32_vec.len() != 24 { + x_u32_vec.push(0 as u32); + } + x_u32_vec.try_into().unwrap() +} + +pub fn get_u32_vec_from_literal_ref_24(x: &BigUint) -> [u32; 24] { + let mut x_u32_vec: Vec = x.to_u32_digits(); + while x_u32_vec.len() != 24 { + x_u32_vec.push(0 as u32); + } + x_u32_vec.try_into().unwrap() +} + +pub fn get_div_rem_modulus_from_biguint_12(x: BigUint) -> ([u32; 12], [u32; 12]) { + let rem = x.clone() % modulus(); + let div = x / modulus(); + (get_u32_vec_from_literal(div), get_u32_vec_from_literal(rem)) +} + +pub fn calc_qs(x: Fp2, y: Fp2, z: Fp2) -> (Fp2, Fp2, Fp2) { + let ax = x * z.invert(); + let ay = y * z.invert(); + + let qx = ax.clone(); + let qy = ay.clone(); + let qz = Fp2::one(); + (qx, qy, qz) +} + +pub fn calc_precomp_stuff_loop0(rx: Fp2, ry: Fp2, rz: Fp2) -> Vec { + // runs 1 loop subpart 0 + let t0 = ry * ry; + let t1 = rz * rz; + let x0 = t1.mul(Fp::get_fp_from_biguint(BigUint::from(3 as u32))); + + let t2 = x0.multiply_by_b(); + let t3 = t2.mul(Fp::get_fp_from_biguint(BigUint::from(3 as u32))); + let x1 = ry * rz; + let t4 = x1.mul(Fp::get_fp_from_biguint(BigUint::from(2 as u32))); + let x2 = t2 - t0; + let x3 = rx * rx; + let x4 = x3.mul(Fp::get_fp_from_biguint(BigUint::from(3 as u32))); + let x5 = -t4; + + let k = mod_inverse(BigUint::from(2 as u32), modulus()); + + let x6 = t0 - t3; + let x7 = rx * ry; + let x8 = x6 * x7; + + let x9 = t0 + t3; + let x10 = x9 * Fp::get_fp_from_biguint(k.clone()); + let x11 = x10 * x10; + + let x12 = t2 * t2; + let x13 = x12 * Fp::get_fp_from_biguint(BigUint::from(3 as u32)); + + let new_rx = x8 * Fp::get_fp_from_biguint(k.clone()); + let new_ry = x11 - x13; + let new_rz = t0 * t4; + + vec![ + new_rx, new_ry, new_rz, t0, t1, x0, t2, t3, x1, t4, x3, x2, x4, x5, x6, x7, x8, x9, x10, + x11, x12, x13, + ] +} + +pub fn calc_precomp_stuff_loop1(rx: Fp2, ry: Fp2, rz: Fp2, qx: Fp2, qy: Fp2) -> Vec { + let 
bit1_t0 = qy * rz; + let bit1_t1 = ry - bit1_t0; + // println!("bit1_t1__ {:?}", bit1_t1.to_biguint()); + let bit1_t2 = qx * rz; + let bit1_t3 = rx - bit1_t2; + // println!("t1__ {:?}", bit1_t3.to_biguint()); + let bit1_t4 = bit1_t1 * qx; + let bit1_t5 = bit1_t3 * qy; + let bit1_t6 = bit1_t4 - bit1_t5; + let bit1_t7 = -bit1_t1; + // println!("ell_coeff_1_0 {:?}", ell_coeff[1][0].to_biguint()); + // println!("ell_coeff_1_1 {:?}", ell_coeff[1][1].to_biguint()); + // println!("ell_coeff_1_2 {:?}", ell_coeff[1][2].to_biguint()); + let bit1_t8 = bit1_t3 * bit1_t3; + // println!("t2__ {:?}", bit1_t8.to_biguint()); + let bit1_t9 = bit1_t8 * bit1_t3; + // println!("t3__ {:?}", bit1_t9.to_biguint()); + let bit1_t10 = bit1_t8 * rx; + // println!("t4__ {:?}", bit1_t10.to_biguint()); + let bit1_t11 = bit1_t1 * bit1_t1; + let bit1_t12 = bit1_t11 * rz; + let bit1_t13 = bit1_t10 * Fp::get_fp_from_biguint(BigUint::from(2 as u32)); + let bit1_t14 = bit1_t9 - bit1_t13; + let bit1_t15 = bit1_t14 + bit1_t12; + // println!("t5__ {:?}", bit1_t15.to_biguint()); + let bit1_t16 = bit1_t10 - bit1_t15; + let bit1_t17 = bit1_t16 * bit1_t1; + let bit1_t18 = bit1_t9 * ry; + let new_rx = bit1_t3 * bit1_t15; + let new_ry = bit1_t17 - bit1_t18; + let new_rz = rz * bit1_t9; + + vec![ + new_rx, new_ry, new_rz, bit1_t0, bit1_t1, bit1_t2, bit1_t3, bit1_t4, bit1_t5, bit1_t6, + bit1_t7, bit1_t8, bit1_t9, bit1_t10, bit1_t11, bit1_t12, bit1_t13, bit1_t14, bit1_t15, + bit1_t16, bit1_t17, bit1_t18, + ] +} + +#[derive(Clone, Copy, Debug, PartialEq)] +pub struct Fp(pub(crate) [u32; 12]); + +impl Fp { + pub fn zero() -> Fp { + Fp([0; 12]) + } + + pub fn one() -> Fp { + let mut x = Fp([0; 12]); + x.0[0] = 1; + x + } + + pub fn get_fp_from_biguint(x: BigUint) -> Fp { + Fp(get_u32_vec_from_literal(x)) + } + + pub fn get_bitlen(&self) -> u64 { + BigUint::new(self.0.try_into().unwrap()).bits() + } + + pub fn get_bit(&self, idx: u64) -> bool { + BigUint::new(self.0.try_into().unwrap()).bit(idx) + } + + pub fn 
invert(&self) -> Self { + let rhs_buint = BigUint::new(self.0.try_into().unwrap()); + let inv = mod_inverse(rhs_buint, modulus()); + // println!("inv {:?}", inv); + Fp::get_fp_from_biguint(inv) + } + + pub fn to_biguint(&self) -> BigUint { + BigUint::new(self.0.to_vec()) + } +} + +impl Div for Fp { + type Output = Self; + + fn div(self, rhs: Self) -> Self::Output { + let rhs_buint = BigUint::new(rhs.0.try_into().unwrap()); + let inv = mod_inverse(rhs_buint, modulus()); + self * Fp::get_fp_from_biguint(inv) + } +} + +impl Add for Fp { + type Output = Self; + + fn add(self, rhs: Self) -> Self::Output { + add_fp(self, rhs) + } +} + +impl Mul for Fp { + type Output = Self; + + fn mul(self, rhs: Self) -> Self::Output { + // let x_b = BigUint::new(self.0.try_into().unwrap()); + // let y_b = BigUint::new(rhs.0.try_into().unwrap()); + // let z = (x_b * y_b).modpow(&BigUint::from_str("1").unwrap(), &modulus()); + // Fp(get_u32_vec_from_literal(z)) + mul_fp(self, rhs) + } +} + +impl Neg for Fp { + type Output = Self; + + fn neg(self) -> Self::Output { + let x: BigUint = BigUint::new(self.0.try_into().unwrap()); + Fp(get_u32_vec_from_literal(modulus() - x)) + } +} + +impl Sub for Fp { + type Output = Self; + + fn sub(self, rhs: Self) -> Self::Output { + // let x_b = BigUint::new(self.0.try_into().unwrap()); + // let y_b = BigUint::new(rhs.0.try_into().unwrap()); + // let z = (x_b - y_b + modulus()).modpow(&BigUint::from_str("1").unwrap(), &modulus()); + // Fp(get_u32_vec_from_literal(z)) + sub_fp(self, rhs) + } +} + +pub fn add_fp(x: Fp, y: Fp) -> Fp { + // let x_b = BigUint::new(x.0.try_into().unwrap()); + // let y_b = BigUint::new(y.0.try_into().unwrap()); + // let z = (x_b + y_b).modpow(&BigUint::from_str("1").unwrap(), &modulus()); + // Fp(get_u32_vec_from_literal(z)) + let x_plus_y = big_add(&x.0, &y.0); + let mut m = modulus_digits(); + m.push(0); + if big_less_than(&x_plus_y, &m) { + Fp(x_plus_y[..12].try_into().unwrap()) + } else { + let (x_plus_y_reduce, _) = 
big_arithmetic::big_sub(&x_plus_y, &m); + Fp(x_plus_y_reduce[..12].try_into().unwrap()) + } + // todo!() +} + +pub fn add_fp_without_reduction(x: Fp, y: Fp) -> [u32; 12] { + // let x_b = BigUint::new(x.0.try_into().unwrap()); + // let y_b = BigUint::new(y.0.try_into().unwrap()); + // let z = (x_b + y_b).modpow(&BigUint::from_str("1").unwrap(), &modulus()); + // Fp(get_u32_vec_from_literal(z)) + let x_plus_y = big_add(&x.0, &y.0); + get_u32_vec_from_literal(BigUint::new(x_plus_y)) + // todo!() +} + +pub fn mul_fp(x: Fp, y: Fp) -> Fp { + //println!("sub_fp x{:?}, y{:?}", x, y); + let x_b = BigUint::new(x.0.try_into().unwrap()); + let y_b = BigUint::new(y.0.try_into().unwrap()); + let z = (x_b * y_b).modpow(&BigUint::from_str("1").unwrap(), &modulus()); + //println!("z {:?} {:?}", z.to_u32_digits(), z.to_u32_digits().len()); + Fp(get_u32_vec_from_literal(z)) +} + +pub fn mul_fp_without_reduction(x: Fp, y: Fp) -> [u32; 24] { + let x_b = BigUint::new(x.0.try_into().unwrap()); + let y_b = BigUint::new(y.0.try_into().unwrap()); + let z = x_b * y_b; + get_u32_vec_from_literal_24(z) +} + +pub fn negate_fp(x: Fp) -> Fp { + let x: BigUint = BigUint::new(x.0.try_into().unwrap()); + Fp(get_u32_vec_from_literal(modulus() - x)) +} + +pub fn sub_fp(x: Fp, y: Fp) -> Fp { + // println!("sub_fp x{:?}, y{:?}", x, y); + let x_b = BigUint::new(x.0.try_into().unwrap()); + let y_b = BigUint::new(y.0.try_into().unwrap()); + let z = (modulus() + x_b - y_b).modpow(&BigUint::from_str("1").unwrap(), &modulus()); + // println!("sub_fp::{:?}-{:?}",z.to_u32_digits(), z.to_u32_digits().len()); + Fp(get_u32_vec_from_literal(z)) +} + +pub fn sum_of_products(a: Vec, b: Vec) -> Fp { + let acc = a.iter().zip(b.iter()).fold(Fp([0; 12]), |acc, (a_i, b_i)| { + add_fp(mul_fp(a_i.clone(), b_i.clone()), acc) + }); + acc +} + +#[derive(Clone, Copy, Debug, PartialEq)] +pub struct Fp2(pub [Fp; 2]); + +impl Fp2 { + pub fn zero() -> Fp2 { + Fp2([Fp::zero(), Fp::zero()]) + } + + pub fn one() -> Fp2 { + 
Fp2([Fp::one(), Fp::zero()]) + } + + pub fn non_residue() -> Fp { + Fp::get_fp_from_biguint(modulus() - BigUint::from(1 as u32)) + } + + pub fn multiply_by_b(&self) -> Fp2 { + let t0 = self.0[0].mul(Fp::get_fp_from_biguint(BigUint::from(4 as u32))); + let t1 = self.0[1].mul(Fp::get_fp_from_biguint(BigUint::from(4 as u32))); + Fp2([t0 - t1, t0 + t1]) + } + + pub fn mul_by_nonresidue(&self) -> Self { + let c0 = self.0[0]; + let c1 = self.0[1]; + Fp2([c0 - c1, c0 + c1]) + } + + pub fn invert(&self) -> Self { + let re = self.0[0]; + let im = self.0[1]; + let factor_fp = (re * re) + (im * im); + let factor = factor_fp.invert(); + Fp2([factor * re, factor * (-im)]) + } + + pub fn to_biguint(&self) -> [BigUint; 2] { + [ + BigUint::new(self.0[0].0.to_vec()), + BigUint::new(self.0[1].0.to_vec()), + ] + } + + pub fn get_u32_slice(&self) -> [[u32; 12]; 2] { + [self.0[0].0, self.0[1].0] + } +} + +impl Add for Fp2 { + type Output = Self; + + fn add(self, rhs: Self) -> Self::Output { + add_fp2(self, rhs) + } +} + +impl Mul for Fp2 { + type Output = Self; + + fn mul(self, rhs: Self) -> Self::Output { + mul_fp2(self, rhs) + } +} + +impl Sub for Fp2 { + type Output = Self; + fn sub(self, rhs: Self) -> Self::Output { + sub_fp2(self, rhs) + } +} + +impl Div for Fp2 { + type Output = Self; + fn div(self, rhs: Self) -> Self::Output { + self * rhs.invert() + } +} + +impl Fp2 { + pub fn roots_of_unity_8th() -> Vec { + vec![ + Fp2([Fp::one(), Fp::zero()]), + Fp2([Fp::zero(), Fp::one()]), + Fp2([Fp::get_fp_from_biguint(BigUint::from_str( + "1028732146235106349975324479215795277384839936929757896155643118032610843298655225875571310552543014690878354869257" + ).unwrap()); 2]), + Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str( + "1028732146235106349975324479215795277384839936929757896155643118032610843298655225875571310552543014690878354869257" + ).unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str( + 
"2973677408986561043442465346520108879172042883009249989176415018091420807192182638567116318576472649347015917690530" + ).unwrap()), + ]) + ] + } + + pub fn etas() -> Vec { + vec![ + Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str( + "1015919005498129635886032702454337503112659152043614931979881174103627376789972962005013361970813319613593700736144" + ).unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str( + "1244231661155348484223428017511856347821538750986231559855759541903146219579071812422210818684355842447591283616181" + ).unwrap()), + ]), + Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str( + "2758177894066318909194361808224047808735344068952776325476298594220885430911766052020476810444659821590302988943606" + ).unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str( + "1015919005498129635886032702454337503112659152043614931979881174103627376789972962005013361970813319613593700736144" + ).unwrap()), + ]), + Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str( + "1646015993121829755895883253076789309308090876275172350194834453434199515639474951814226234213676147507404483718679" + ).unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str( + "1637752706019426886789797193293828301565549384974986623510918743054325021588194075665960171838131772227885159387073" + ).unwrap()), + ]), + Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str( + "2364656849202240506627992632442075854991333434964021261821139393069706628902643788776727457290883891810009113172714" + ).unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str( + "1646015993121829755895883253076789309308090876275172350194834453434199515639474951814226234213676147507404483718679" + ).unwrap()), + ]), + ] + } +} + +impl Mul for Fp2 { + type Output = Fp2; + + fn mul(self, rhs: Fp) -> Self::Output { + // let mut ans = Fp2::zero(); + // let mut found_one = false; + // for i in (0..rhs.get_bitlen()).rev() { + // if found_one { + // ans = ans + ans; + // } + // let bit = rhs.get_bit(i); + // if bit { + // found_one = true; 
+ // ans = ans + self; + // } + // } + let fp2 = self.0; + + let ans = Fp2([fp2[0] * rhs, fp2[1] * rhs]); + ans + } +} + +impl Neg for Fp2 { + type Output = Self; + + fn neg(self) -> Self::Output { + Fp2([self.0[0].neg(), self.0[1].neg()]) + } +} + +pub fn sub_fp2(x: Fp2, y: Fp2) -> Fp2 { + Fp2([sub_fp(x.0[0], y.0[0]), sub_fp(x.0[1], y.0[1])]) +} + +pub fn add_fp2(x: Fp2, y: Fp2) -> Fp2 { + Fp2([add_fp(x.0[0], y.0[0]), add_fp(x.0[1], y.0[1])]) +} + +pub fn mul_fp2(x: Fp2, y: Fp2) -> Fp2 { + //println!("x:: {:?}", x); + //println!("y:: {:?}", y); + let c0 = sub_fp(mul_fp(x.0[0], y.0[0]), mul_fp(x.0[1], y.0[1])); + let c1 = add_fp(mul_fp(x.0[0], y.0[1]), mul_fp(x.0[1], y.0[0])); + Fp2([c0, c1]) +} + +// pub fn mul_fp2_without_reduction(x: Fp2, y: Fp2) -> Fp2 { + +// } + +#[derive(Clone, Copy, Debug)] +pub struct Fp6(pub(crate) [Fp; 6]); + +impl Fp6 { + pub fn invert(&self) -> Self { + let c0c1c2 = self; + let c0 = Fp2(c0c1c2.0[0..2].to_vec().try_into().unwrap()); + let c1 = Fp2(c0c1c2.0[2..4].to_vec().try_into().unwrap()); + let c2 = Fp2(c0c1c2.0[4..6].to_vec().try_into().unwrap()); + let t0 = (c0 * c0) - (c2 * c1).mul_by_nonresidue(); + let t1 = (c2 * c2).mul_by_nonresidue() - (c0 * c1); + let t2 = (c1 * c1) - (c0 * c2); + let t4 = (((c2 * t1) + (c1 * t2)).mul_by_nonresidue() + (c0 * t0)).invert(); + Fp6([(t4 * t0).0, (t4 * t1).0, (t4 * t2).0] + .concat() + .try_into() + .unwrap()) + } + + pub fn get_u32_slice(&self) -> [[u32; 12]; 6] { + self.0 + .iter() + .map(|f| f.0) + .collect::>() + .try_into() + .unwrap() + } + + pub fn print(&self) { + // println!("--- Printing Fp6 ---"); + // for i in 0..self.0.len() { + // let fp = Fp::get_fp_from_biguint(BigUint::new(self.0[i].0.to_vec())); + // println!("i -- {:?}",fp.to_biguint()); + // } + // println!("--- Printed Fp6 ---"); + } +} + +impl Add for Fp6 { + type Output = Self; + + fn add(self, rhs: Self) -> Self::Output { + add_fp6(self, rhs) + } +} + +impl Sub for Fp6 { + type Output = Self; + + fn sub(self, rhs: Self) 
-> Self::Output { + sub_fp6(self, rhs) + } +} + +impl Div for Fp6 { + type Output = Self; + + fn div(self, rhs: Self) -> Self::Output { + self * rhs.invert() + } +} + +impl Mul for Fp6 { + type Output = Self; + + fn mul(self, rhs: Self) -> Self::Output { + mul_fp6(self, rhs) + } +} + +impl Neg for Fp6 { + type Output = Self; + + fn neg(self) -> Self::Output { + let c0c1c2 = self; + let c0 = Fp2(c0c1c2.0[0..2].to_vec().try_into().unwrap()); + let c1 = Fp2(c0c1c2.0[2..4].to_vec().try_into().unwrap()); + let c2 = Fp2(c0c1c2.0[4..6].to_vec().try_into().unwrap()); + Fp6([c0.neg().0, c1.neg().0, c2.neg().0] + .concat() + .try_into() + .unwrap()) + } +} + +pub fn add_fp6(x: Fp6, y: Fp6) -> Fp6 { + Fp6([ + add_fp(x.0[0], y.0[0]), + add_fp(x.0[1], y.0[1]), + add_fp(x.0[2], y.0[2]), + add_fp(x.0[3], y.0[3]), + add_fp(x.0[4], y.0[4]), + add_fp(x.0[5], y.0[5]), + ]) +} + +pub fn sub_fp6(x: Fp6, y: Fp6) -> Fp6 { + Fp6([ + sub_fp(x.0[0], y.0[0]), + sub_fp(x.0[1], y.0[1]), + sub_fp(x.0[2], y.0[2]), + sub_fp(x.0[3], y.0[3]), + sub_fp(x.0[4], y.0[4]), + sub_fp(x.0[5], y.0[5]), + ]) +} +/* +Fp6 -> Fp2(c0), c1, c2 + + [c0.c0, c0.c1, c1.c0, c1.c1, c2.c0, c2.c1] + */ +pub fn mul_fp6(x: Fp6, y: Fp6) -> Fp6 { + let c0 = Fp2([x.0[0], x.0[1]]); + let c1 = Fp2([x.0[2], x.0[3]]); + let c2 = Fp2([x.0[4], x.0[5]]); + + let r0 = Fp2([y.0[0], y.0[1]]); + let r1 = Fp2([y.0[2], y.0[3]]); + let r2 = Fp2([y.0[4], y.0[5]]); + + let t0 = c0 * r0; + let t1 = c1 * r1; + let t2 = c2 * r2; + + let t3 = c1 + c2; + let t4 = r1 + r2; + let t5 = t3 * t4; + let t6 = t5 - t1; + let t7 = t6 - t2; + let t8 = t7.mul_by_nonresidue(); + let x = t8 + t0; + + let t9 = c0 + c1; + let t10 = r0 + r1; + let t11 = t9 * t10; + let t12 = t11 - t0; + let t13 = t12 - t1; + let t14 = t2.mul_by_nonresidue(); + let y = t13 + t14; + + let t15 = c0 + c2; + let t16 = r0 + r2; + let t17 = t15 * t16; + let t18 = t17 - t0; + let t19 = t18 - t2; + let z = t19 + t1; + + Fp6([x.0[0], x.0[1], y.0[0], y.0[1], z.0[0], z.0[1]]) +} + +pub fn 
mul_by_nonresidue(x: [Fp; 6]) -> Fp6 { + let mut ans: [Fp; 6] = [Fp::zero(); 6]; + let c0 = Fp2([x[4], x[5]]).mul_by_nonresidue(); + ans[0] = c0.0[0]; + ans[1] = c0.0[1]; + ans[2] = x[0]; + ans[3] = x[1]; + ans[4] = x[2]; + ans[5] = x[3]; + Fp6(ans) +} + +impl Fp6 { + pub fn multiply_by_01(&self, b0: Fp2, b1: Fp2) -> Self { + let c0 = Fp2(self.0[0..2].to_vec().try_into().unwrap()); + let c1 = Fp2(self.0[2..4].to_vec().try_into().unwrap()); + let c2 = Fp2(self.0[4..6].to_vec().try_into().unwrap()); + + let t0 = c0 * b0; + let t1 = c1 * b1; + + let t2 = c2 * b1; + let t3 = t2.mul_by_nonresidue(); + let x = t3 + t0; + + let t4 = b0 + b1; + let t5 = c0 + c1; + let t6 = t4 * t5; + let t7 = t6 - t0; + let y = t7 - t1; + + let t8 = c2 * b0; + let z = t8 + t1; + Fp6([x.0, y.0, z.0].concat().try_into().unwrap()) + } + + pub fn multiply_by_1(&self, b1: Fp2) -> Self { + let c0 = Fp2(self.0[0..2].to_vec().try_into().unwrap()); + let c1 = Fp2(self.0[2..4].to_vec().try_into().unwrap()); + let c2 = Fp2(self.0[4..6].to_vec().try_into().unwrap()); + + let t0 = c2 * b1; + let x = t0.mul_by_nonresidue(); + + let y = c0 * b1; + + let z = c1 * b1; + Fp6([x.0, y.0, z.0].concat().try_into().unwrap()) + } +} + +#[derive(Clone, Copy, Debug, PartialEq)] +pub struct Fp12(pub(crate) [Fp; 12]); + +impl Fp12 { + pub fn one() -> Fp12 { + let mut x = [Fp::zero(); 12]; + x[0] = Fp::one(); + Fp12(x) + } + + pub fn invert(&self) -> Self { + let c0 = Fp6(self.0[0..6].try_into().unwrap()); + let c1 = Fp6(self.0[6..12].try_into().unwrap()); + let t = (c0 * c0 - mul_by_nonresidue((c1 * c1).0)).invert(); + Fp12([(c0 * t).0, (-(c1 * t)).0].concat().try_into().unwrap()) + } + + pub fn print(&self) { + // println!("--- Printing Fp12 ---"); + // for i in 0..self.0.len() { + // let fp = Fp::get_fp_from_biguint(BigUint::new(self.0[i].0.to_vec())); + // println!("i -- {:?}",fp.to_biguint()); + // } + // println!("--- Printed Fp12 ---"); + } + + pub fn from_str(x: [&str; 12]) -> Self { + let mut ans: Fp12 = 
Fp12::one(); + for i in 0..12 { + let bu = Fp::get_fp_from_biguint(BigUint::from_str(x[i]).unwrap()); + ans.0[i] = bu; + } + ans + } + + pub fn get_u32_slice(&self) -> [[u32; 12]; 12] { + self.0 + .iter() + .map(|f| f.0) + .collect::>() + .try_into() + .unwrap() + } +} + +impl Add for Fp12 { + type Output = Self; + + fn add(self, rhs: Self) -> Self::Output { + add_fp12(self, rhs) + } +} + +impl Mul for Fp12 { + type Output = Self; + + fn mul(self, rhs: Self) -> Self::Output { + mul_fp_12(self, rhs) + } +} + +impl Div for Fp12 { + type Output = Self; + + fn div(self, rhs: Self) -> Self::Output { + self * rhs.invert() + } +} + +impl Neg for Fp12 { + type Output = Self; + + fn neg(self) -> Self::Output { + todo!() + } +} + +// impl Debug for Fp12 { +// fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { +// f.debug_tuple("Fp12").field(&self.0).finish() +// } +// } + +pub fn add_fp12(x: Fp12, y: Fp12) -> Fp12 { + let mut ans: [Fp; 12] = [Fp::zero(); 12]; + for i in 0..12 { + ans[i] = add_fp(x.0[i], y.0[i]); + } + Fp12(ans) +} + +pub fn mul_fp_12(x: Fp12, y: Fp12) -> Fp12 { + let c0 = Fp6(x.0[0..6].try_into().unwrap()); + let c1 = Fp6(x.0[6..12].try_into().unwrap()); + let r0 = Fp6(y.0[0..6].try_into().unwrap()); + let r1 = Fp6(y.0[6..12].try_into().unwrap()); + + let t0 = c0 * r0; + let t1 = c1 * r1; + let t2 = mul_by_nonresidue(t1.0); + let x = t0 + t2; + + let t3 = c0 + c1; + let t4 = r0 + r1; + let t5 = t3 * t4; + let t6 = t5 - t0; + let y = t6 - t1; + + Fp12([x.0, y.0].concat().try_into().unwrap()) +} + +pub trait Pow +where + Self: Copy + Mul, +{ + fn pow(&self, one: Self, exp: BigUint) -> Self { + if exp == 0u32.into() { + return one; + } + if exp == 1u32.into() { + return *self; + } + if exp.clone() % 2u32 == 1u32.into() { + return *self * self.pow(one, exp - 1u32); + } + let d = self.pow(one, exp >> 1); + d * d + } +} + +impl Pow for T where T: Copy + Mul {} + +impl Fp2 { + pub fn forbenius_coefficients() -> [Fp; 2] { + [ + 
Fp::get_fp_from_biguint(BigUint::from_str("1").unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str("4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559786").unwrap()), + ] + } + pub fn forbenius_map(&self, pow: usize) -> Self { + let constants = Fp2::forbenius_coefficients(); + Fp2([self.0[0], self.0[1] * constants[pow % 2]]) + } +} + +impl Fp6 { + pub fn forbenius_coefficients_1() -> [Fp2; 6] { + [ + Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str("1").unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str("0").unwrap()), + ]), + Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str("0").unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str("4002409555221667392624310435006688643935503118305586438271171395842971157480381377015405980053539358417135540939436").unwrap()), + ]), + Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str("793479390729215512621379701633421447060886740281060493010456487427281649075476305620758731620350").unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str("0").unwrap()), + ]), + Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str("0").unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str("1").unwrap()), + ]), + Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str("793479390729215512621379701633421447060886740281060493010456487427281649075476305620758731620350").unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str("0").unwrap()), + ]), + Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str("0").unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str("4002409555221667392624310435006688643935503118305586438271171395842971157480381377015405980053539358417135540939436").unwrap()), + ]), + ] + } + + pub fn forbenius_coefficients_2() -> [Fp2; 6] { + [ + Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str("1").unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str("0").unwrap()), + ]), + Fp2([ + 
Fp::get_fp_from_biguint(BigUint::from_str("4002409555221667392624310435006688643935503118305586438271171395842971157480381377015405980053539358417135540939437").unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str("0").unwrap()), + ]), + Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str("4002409555221667392624310435006688643935503118305586438271171395842971157480381377015405980053539358417135540939436").unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str("0").unwrap()), + ]), + Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str("4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559786").unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str("0").unwrap()), + ]), + Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str("793479390729215512621379701633421447060886740281060493010456487427281649075476305620758731620350").unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str("0").unwrap()), + ]), + Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str("0").unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str("793479390729215512621379701633421447060886740281060493010456487427281649075476305620758731620351").unwrap()), + ]), + ] + } + pub fn forbenius_map(&self, pow: usize) -> Self { + // println!("--- fp6 forbenius map ---"); + let fp6_frobenius_coefficients_1 = Fp6::forbenius_coefficients_1(); + + let fp6_frobenius_coefficients_2 = Fp6::forbenius_coefficients_2(); + self.print(); + let c0 = Fp2(self.0[0..2].to_vec().try_into().unwrap()); + // println!("c0 {:?}", c0.to_biguint()); + let c1 = Fp2(self.0[2..4].to_vec().try_into().unwrap()); + // println!("c1 {:?}", c0.to_biguint()); + let c2 = Fp2(self.0[4..6].to_vec().try_into().unwrap()); + // println!("c2 {:?}", c0.to_biguint()); + // println!("--- fp6 forbenius map ---"); + Fp6([ + c0.forbenius_map(pow).0, + (c1.forbenius_map(pow) * fp6_frobenius_coefficients_1[pow % 6]).0, + (c2.forbenius_map(pow) * fp6_frobenius_coefficients_2[pow % 6]).0, + ] + .concat() + 
.try_into() + .unwrap()) + } +} + +impl Fp12 { + pub fn forbenius_coefficients() -> [Fp2; 12] { + [ + Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str("1").unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str("0").unwrap()), + ]), + Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str("3850754370037169011952147076051364057158807420970682438676050522613628423219637725072182697113062777891589506424760").unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str("151655185184498381465642749684540099398075398968325446656007613510403227271200139370504932015952886146304766135027").unwrap()), + ]), + Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str("793479390729215512621379701633421447060886740281060493010456487427281649075476305620758731620351").unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str("0").unwrap()), + ]), + Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str("2973677408986561043442465346520108879172042883009249989176415018091420807192182638567116318576472649347015917690530").unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str("1028732146235106349975324479215795277384839936929757896155643118032610843298655225875571310552543014690878354869257").unwrap()), + ]), + Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str("793479390729215512621379701633421447060886740281060493010456487427281649075476305620758731620350").unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str("0").unwrap()), + ]), + Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str("3125332594171059424908108096204648978570118281977575435832422631601824034463382777937621250592425535493320683825557").unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str("877076961050607968509681729531255177986764537961432449499635504522207616027455086505066378536590128544573588734230").unwrap()), + ]), + Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str("4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559786").unwrap()), + 
Fp::get_fp_from_biguint(BigUint::from_str("0").unwrap()), + ]), + Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str("151655185184498381465642749684540099398075398968325446656007613510403227271200139370504932015952886146304766135027").unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str("3850754370037169011952147076051364057158807420970682438676050522613628423219637725072182697113062777891589506424760").unwrap()), + ]), + Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str("4002409555221667392624310435006688643935503118305586438271171395842971157480381377015405980053539358417135540939436").unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str("0").unwrap()), + ]), + Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str("1028732146235106349975324479215795277384839936929757896155643118032610843298655225875571310552543014690878354869257").unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str("2973677408986561043442465346520108879172042883009249989176415018091420807192182638567116318576472649347015917690530").unwrap()), + ]), + Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str("4002409555221667392624310435006688643935503118305586438271171395842971157480381377015405980053539358417135540939437").unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str("0").unwrap()), + ]), + Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str("877076961050607968509681729531255177986764537961432449499635504522207616027455086505066378536590128544573588734230").unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str("3125332594171059424908108096204648978570118281977575435832422631601824034463382777937621250592425535493320683825557").unwrap()), + ]), + ] + } + + pub fn forbenius_map(&self, pow: usize) -> Self { + // println!(" ---- forbenius - map -----"); + let fp12_forbenius_coefficients = Fp12::forbenius_coefficients(); + let r0 = Fp6(self.0[0..6].to_vec().try_into().unwrap()).forbenius_map(pow); + r0.print(); + let c0c1c2 = Fp6(self.0[6..12].to_vec().try_into().unwrap()).forbenius_map(pow); + 
c0c1c2.print(); + let c0 = Fp2(c0c1c2.0[0..2].to_vec().try_into().unwrap()); + // println!("c0 - {:?}", c0.to_biguint()); + let c1 = Fp2(c0c1c2.0[2..4].to_vec().try_into().unwrap()); + // println!("c1 - {:?}", c1.to_biguint()); + let c2 = Fp2(c0c1c2.0[4..6].to_vec().try_into().unwrap()); + // println!("c2 - {:?}", c2.to_biguint()); + let coeff = fp12_forbenius_coefficients[pow % 12]; + // println!("coeff - {:?}", coeff.to_biguint()); + Fp12( + [ + r0.0, + [(c0 * coeff).0, (c1 * coeff).0, (c2 * coeff).0] + .concat() + .try_into() + .unwrap(), + ] + .concat() + .try_into() + .unwrap(), + ) + } +} + +impl Fp12 { + pub fn multiply_by_014(&self, o0: Fp2, o1: Fp2, o4: Fp2) -> Self { + let c0 = Fp6(self.0[0..6].to_vec().try_into().unwrap()); + let c1 = Fp6(self.0[6..12].to_vec().try_into().unwrap()); + let t0 = c0.multiply_by_01(o0, o1); + let t1 = c1.multiply_by_1(o4); + let t2 = mul_by_nonresidue(t1.0); + let x = t2 + t0; + + let t3 = c1 + c0; + let t4 = o1 + o4; + let t5 = t3.multiply_by_01(o0, t4); + let t6 = t5 - t0; + let y = t6 - t1; + Fp12([x.0, y.0].concat().try_into().unwrap()) + } + + pub fn conjugate(&self) -> Self { + let mut x = self.0.clone(); + for i in 6..12 { + x[i] = -x[i]; + } + Fp12(x) + } + + pub fn cyclotomic_square(&self) -> Self { + let two = Fp::get_fp_from_biguint(BigUint::from(2 as u32)); + + let c0c0 = Fp2(self.0[0..2].try_into().unwrap()); + let c0c1 = Fp2(self.0[2..4].try_into().unwrap()); + let c0c2 = Fp2(self.0[4..6].try_into().unwrap()); + let c1c0 = Fp2(self.0[6..8].try_into().unwrap()); + let c1c1 = Fp2(self.0[8..10].try_into().unwrap()); + let c1c2 = Fp2(self.0[10..12].try_into().unwrap()); + + let t0 = fp4_square(c0c0, c1c1); + let t1 = fp4_square(c1c0, c0c2); + let t2 = fp4_square(c0c1, c1c2); + let t3 = t2.1.mul_by_nonresidue(); + + let t4 = t0.0 - c0c0; + let t5 = t4 * two; + let c0 = t5 + t0.0; + + let t6 = t1.0 - c0c1; + let t7 = t6 * two; + let c1 = t7 + t1.0; + + let t8 = t2.0 - c0c2; + let t9 = t8 * two; + let c2 = t9 + t2.0; 
+ + let t10 = t3 + c1c0; + let t11 = t10 * two; + let c3 = t11 + t3; + + let t12 = t0.1 + c1c1; + let t13 = t12 * two; + let c4 = t13 + t0.1; + + let t14 = t1.1 + c1c2; + let t15 = t14 * two; + let c5 = t15 + t1.1; + + Fp12( + [c0.0, c1.0, c2.0, c3.0, c4.0, c5.0] + .concat() + .try_into() + .unwrap(), + ) + } + + pub fn cyclotocmic_exponent(&self) -> Fp12 { + let mut z = Fp12::one(); + for i in (0..get_bls_12_381_parameter().bits()).rev() { + z = z.cyclotomic_square(); + if get_bls_12_381_parameter().bit(i) { + z = z * self.clone(); + } + } + z + } + + pub fn final_exponentiate(&self) -> Self { + let t_0 = self.forbenius_map(6); + let t_1 = t_0 / self.clone(); + let t_2 = t_1.forbenius_map(2); + let t_3 = t_2 * t_1; + let t_4 = t_3.cyclotocmic_exponent(); + let t_5 = t_4.conjugate(); + let t_6 = t_3.cyclotomic_square(); + let t_7 = t_6.conjugate(); + let t_8 = t_7 * t_5; + let t_9 = t_8.cyclotocmic_exponent(); + let t_10 = t_9.conjugate(); + let t_11 = t_10.cyclotocmic_exponent(); + let t_12 = t_11.conjugate(); + let t_13 = t_12.cyclotocmic_exponent(); + let t_14 = t_13.conjugate(); + let t_15 = t_5.cyclotomic_square(); + let t_16 = t_14 * t_15; + let t_17 = t_16.cyclotocmic_exponent(); + let t_18 = t_17.conjugate(); + let t_19 = t_5 * t_12; + let t_20 = t_19.forbenius_map(2); + let t_21 = t_10 * t_3; + let t_22 = t_21.forbenius_map(3); + let t_23 = t_3.conjugate(); + let t_24 = t_16 * t_23; + let t_25 = t_24.forbenius_map(1); + let t_26 = t_8.conjugate(); + let t_27 = t_18 * t_26; + let t_28 = t_27 * t_3; + let t_29 = t_20 * t_22; + let t_30 = t_29 * t_25; + let t_31 = t_30 * t_28; + t_31 + } +} + +pub fn inverse_fp2(x: Fp2) -> Fp2 { + let t0 = x.0[0] * x.0[0]; + let t1 = x.0[1] * x.0[1]; + let t2 = t0 - (t1 * Fp2::non_residue()); + let t3 = Fp::one() / t2; + Fp2([x.0[0] * t3, -(x.0[1] * t3)]) +} + +pub fn calc_pairing_precomp(x: Fp2, y: Fp2, z: Fp2) -> Vec<[Fp2; 3]> { + let ax = x * (z.invert()); + let ay = y * (z.invert()); + + let qx = ax.clone(); + let qy = 
ay.clone(); + let qz = Fp2::one(); + + let mut rx = qx.clone(); + let mut ry = qy.clone(); + let mut rz = qz.clone(); + + let mut ell_coeff: Vec<[Fp2; 3]> = Vec::<[Fp2; 3]>::new(); + + for i in (0..get_bls_12_381_parameter().bits() - 1).rev() { + let t0 = ry * ry; + let t1 = rz * rz; + let x0 = t1.mul(Fp::get_fp_from_biguint(BigUint::from(3 as u32))); + + let t2 = x0.multiply_by_b(); + let t3 = t2.mul(Fp::get_fp_from_biguint(BigUint::from(3 as u32))); + let x1 = ry * rz; + let t4 = x1.mul(Fp::get_fp_from_biguint(BigUint::from(2 as u32))); + let x2 = t2 - t0; + let x3 = rx * rx; + let x4 = x3.mul(Fp::get_fp_from_biguint(BigUint::from(3 as u32))); + + let x5 = -t4; + ell_coeff.push([x2, x4, x5]); + + let k = mod_inverse(BigUint::from(2 as u32), modulus()); + + let x6 = t0 - t3; + let x7 = rx * ry; + let x8 = x6 * x7; + + let x9 = t0 + t3; + let x10 = x9 * Fp::get_fp_from_biguint(k.clone()); + let x11 = x10 * x10; + + let x12 = t2 * t2; + let x13 = x12 * Fp::get_fp_from_biguint(BigUint::from(3 as u32)); + + rx = x8 * Fp::get_fp_from_biguint(k.clone()); + ry = x11 - x13; + rz = t0 * t4; + if get_bls_12_381_parameter().bit(i) { + let bit1_t0 = qy * rz; + let bit1_t1 = ry - bit1_t0; + let bit1_t2 = qx * rz; + let bit1_t3 = rx - bit1_t2; + let bit1_t4 = bit1_t1 * qx; + let bit1_t5 = bit1_t3 * qy; + let bit1_t6 = bit1_t4 - bit1_t5; + let bit1_t7 = -bit1_t1; + ell_coeff.push([bit1_t6, bit1_t7, bit1_t3]); + let bit1_t8 = bit1_t3 * bit1_t3; + let bit1_t9 = bit1_t8 * bit1_t3; + let bit1_t10 = bit1_t8 * rx; + let bit1_t11 = bit1_t1 * bit1_t1; + let bit1_t12 = bit1_t11 * rz; + let bit1_t13 = bit1_t10 * Fp::get_fp_from_biguint(BigUint::from(2 as u32)); + let bit1_t14 = bit1_t9 - bit1_t13; + let bit1_t15 = bit1_t14 + bit1_t12; + rx = bit1_t3 * bit1_t15; + let bit1_t16 = bit1_t10 - bit1_t15; + let bit1_t17 = bit1_t16 * bit1_t1; + let bit1_t18 = bit1_t9 * ry; + ry = bit1_t17 - bit1_t18; + rz = rz * bit1_t9; + } + } + return ell_coeff; +} + +pub fn miller_loop(g1_x: Fp, g1_y: Fp, 
g2_x: Fp2, g2_y: Fp2, g2_z: Fp2) -> Fp12 { + let precomputes = calc_pairing_precomp(g2_x, g2_y, g2_z); + // for i in 0..precomputes.len() { + // println!("{:?} ----", i); + // println!("precomputes calculated 1 - {:?}", precomputes[i][0].to_biguint()); + // println!("precomputes calculated 2 - {:?}", precomputes[i][1].to_biguint()); + // println!("precomputes calculated 3 - {:?}", precomputes[i][2].to_biguint()); + // } + // return Fp12::one(); + let px = g1_x.clone(); + let py = g1_y.clone(); + let mut f12 = Fp12::one(); + let mut j = 0; + + for i in (0..get_bls_12_381_parameter().bits() - 1).rev() { + let ell_coeffs = precomputes[j]; + f12 = f12.multiply_by_014(ell_coeffs[0], ell_coeffs[1] * px, ell_coeffs[2] * py); + if get_bls_12_381_parameter().bit(i) { + j += 1; + let ell_coeffs = precomputes[j]; + f12 = f12.multiply_by_014(ell_coeffs[0], ell_coeffs[1] * px, ell_coeffs[2] * py); + } + if i != 0 { + f12 = mul_fp_12(f12, f12); + } + j += 1; + } + f12.conjugate() +} + +pub fn pairing(p_x: Fp, p_y: Fp, q_x: Fp2, q_y: Fp2, q_z: Fp2) -> Fp12 { + let looped = miller_loop(p_x, p_y, q_x, q_y, q_z); + looped + // looped.final_exponentiate() +} + +pub fn verify_bls_signatures() -> bool { + // Public key + // Splits into little endian + let pk_x = BigUint::from_str("2620359726099670991095913421423408052907220385587653382880494211997835858894431070728023161812841650498384724513574").unwrap().to_u32_digits(); + let pk_y = BigUint::from_str("3516737663249789719313994746945990853755171862112391852604784999536233979171013701039178918880615112139780777770781").unwrap().to_u32_digits(); + // Hashed message in g2 + let hm_x1 = BigUint::from_str("2260803321181951703309420903406460477209912434020120381027413359130883713514969717876465885091628521232768207917010").unwrap().to_u32_digits(); + let hm_x2 = BigUint::from_str("2651754974217764549573984422821173864573267897233450902768900290919635595830847280035238812354259899816422437732519").unwrap().to_u32_digits(); + let hm_y1 = 
BigUint::from_str("98328085801950751198634977711657076320088798571641012335466428770177401024922163125657710674003178075431656844523").unwrap().to_u32_digits(); + let hm_y2 = BigUint::from_str("1156585784149709375944843577113354173925120574246839648967751052400396372157500751188298724114933365921247443786825").unwrap().to_u32_digits(); + let hm_z1 = BigUint::from_str("1").unwrap().to_u32_digits(); + let hm_z2 = BigUint::from_str("0").unwrap().to_u32_digits(); + // Generator + let gx = BigUint::from_str("3685416753713387016781088315183077757961620795782546409894578378688607592378376318836054947676345821548104185464507").unwrap().to_u32_digits(); + let gy = BigUint::from_str("1339506544944476473020471379941921221584933875938349620426543736416511423956333506472724655353366534992391756441569").unwrap().to_u32_digits(); + // Signature + let s_x1 = BigUint::from_str("1836830352577417292089156350591626007357750969609299199820146458689304398967104037069103513169938118550765216427090").unwrap().to_u32_digits(); + let s_x2 = BigUint::from_str("2100427494885604888487796981102940167438916035063712025295231442815788486916593575072180414962669967540847907858502").unwrap().to_u32_digits(); + let s_y1 = BigUint::from_str("2555154678035007654633840738122526356989849358171638629627190730328888205299908476410927833296830659413727831906911").unwrap().to_u32_digits(); + let s_y2 = BigUint::from_str("697448450483092846649680958149948400499140883635140106996999493850809967308993531752440334328367413010709405099565").unwrap().to_u32_digits(); + let s_z1 = BigUint::from_str("1").unwrap().to_u32_digits(); + let s_z2 = BigUint::from_str("0").unwrap().to_u32_digits(); + + // 1. 
negate Signature + let pk_x_negate = pk_x.clone(); + let pk_y_negate = (modulus() - BigUint::new(pk_y)).to_u32_digits(); + + let pk_x_neg_fp = Fp::get_fp_from_biguint(BigUint::new(pk_x_negate)); + let pk_y_neg_fp = Fp::get_fp_from_biguint(BigUint::new(pk_y_negate)); + + let hmx_fp2 = Fp2([ + Fp::get_fp_from_biguint(BigUint::new(hm_x1)), + Fp::get_fp_from_biguint(BigUint::new(hm_x2)), + ]); + let hmy_fp2 = Fp2([ + Fp::get_fp_from_biguint(BigUint::new(hm_y1)), + Fp::get_fp_from_biguint(BigUint::new(hm_y2)), + ]); + let hmz_fp2 = Fp2([ + Fp::get_fp_from_biguint(BigUint::new(hm_z1)), + Fp::get_fp_from_biguint(BigUint::new(hm_z2)), + ]); + + let sx_fp2 = Fp2([ + Fp::get_fp_from_biguint(BigUint::new(s_x1)), + Fp::get_fp_from_biguint(BigUint::new(s_x2)), + ]); + let sy_fp2 = Fp2([ + Fp::get_fp_from_biguint(BigUint::new(s_y1)), + Fp::get_fp_from_biguint(BigUint::new(s_y2)), + ]); + let sz_fp2 = Fp2([ + Fp::get_fp_from_biguint(BigUint::new(s_z1)), + Fp::get_fp_from_biguint(BigUint::new(s_z2)), + ]); + + let g_x = Fp::get_fp_from_biguint(BigUint::new(gx)); + let g_y = Fp::get_fp_from_biguint(BigUint::new(gy)); + // 2. 
P(pk_negate, Hm) + let e_p_hm = pairing(pk_x_neg_fp, pk_y_neg_fp, hmx_fp2, hmy_fp2, hmz_fp2); + let e_g_s = pairing(g_x, g_y, sx_fp2, sy_fp2, sz_fp2); + + let mu = e_p_hm * e_g_s; + + let mu_finaexp = mu.final_exponentiate(); + + mu_finaexp == Fp12::one() +} + +#[cfg(test)] +mod tests { + use std::str::FromStr; + + use num_bigint::BigUint; + use crate::verification::utils::native_bls::sub_u32_slices_12; + + use super::{get_u32_vec_from_literal, modulus, verify_bls_signatures, Fp12}; + + #[test] + pub fn test_bls_signature_verification() { + assert!(verify_bls_signatures()); + } + + #[test] + pub fn test_final_exponentiate() { + let aa = ["2181142506194812233868097821779361009807326315828153071050324314717744521676711650071190927260282422014627435089208", + "3266212670671256779826008414922395966600400122723332695666308996296105595418386213353825620535446475769829785237189", + "3280330655787598118299804758957910379684134784964426565939861302675766948066521588562898980898245868682162153155911", + "333668007718210311816046938245689395232794221928183840372182128979685996722059498232053963662509478803385469716056", + "1650925102445293819378017648160637800280351377141029658990698964033732511884552459036333864590686008335846481856882", + "3925133212240632255860280854235945320282874550806663137653784505923891479863770370026712801361887427462376126696706", + "2444089052091192833501409081021321360112867893942837175254954622703299880931587618210267154453853513743076365662283", + "3142914221549818039420055870398197863502329018278548609868118001898418737390067291084903575823960349378631910285921", + "1952057563719092278028425573632201081234877258097927010867141683896274170520489868686437644804596724295624637397077", + "254131389529427774765960554324483250584297364987873642087841623909520980093766889928789173976296059957431962608694", + "1385128161651935856764061834929068245137081648283968377947672499160305921464670953157912428887005620142387465559867", + 
"101302147352745188522496764263445345397483945567997375025250825330209385517139484882425580831299520200841767383756"]; + + let aa_fp12 = Fp12::from_str(aa); + let mu_finaexp = aa_fp12.final_exponentiate(); + mu_finaexp.print(); + assert_eq!(mu_finaexp, Fp12::one()) + } + + #[test] + fn test_subu32() { + let x: BigUint = BigUint::from_str("1").unwrap() << 381; + let y = modulus(); + let x_u32 = get_u32_vec_from_literal(x.clone()); + let y_u32 = get_u32_vec_from_literal(y.clone()); + let (res, _carries) = sub_u32_slices_12(&x_u32, &y_u32); + assert_eq!(x - y, BigUint::new(res.to_vec())); + } +} diff --git a/casper-finality-proofs/src/verification/utils/starky_utils.rs b/casper-finality-proofs/src/verification/utils/starky_utils.rs new file mode 100644 index 000000000..beff595c3 --- /dev/null +++ b/casper-finality-proofs/src/verification/utils/starky_utils.rs @@ -0,0 +1,35 @@ +use plonky2::{field::extension::Extendable, hash::hash_types::RichField}; + +pub fn assign_u32_12, const D: usize, const C: usize>( + trace: &mut Vec<[F; C]>, + row: usize, + start_col: usize, + val: [u32; 12], +) { + for i in 0..12 { + trace[row][start_col + i] = F::from_canonical_u32(val[i]); + } +} + +pub fn assign_u32_in_series, const D: usize, const C: usize>( + trace: &mut Vec<[F; C]>, + row: usize, + start_col: usize, + val: &[u32], +) { + for i in 0..val.len() { + trace[row][start_col + i] = F::from_canonical_u32(val[i]); + } +} + +pub fn assign_cols_from_prev, const D: usize, const C: usize>( + trace: &mut Vec<[F; C]>, + row: usize, + start_col: usize, + num_cols: usize, +) { + assert!(row >= 1); + for i in start_col..start_col + num_cols { + trace[row][start_col + i] = trace[row - 1][start_col + i]; + } +} diff --git a/casper-finality-proofs/src/verification/verify.rs b/casper-finality-proofs/src/verification/verify.rs new file mode 100644 index 000000000..83073f03a --- /dev/null +++ b/casper-finality-proofs/src/verification/verify.rs @@ -0,0 +1,490 @@ +use std::str::FromStr; + +use 
ark_bls12_381::G2Affine; +use num_bigint::BigUint; +use plonky2::{ + field::goldilocks_field::GoldilocksField, + plonk::config::{GenericConfig, PoseidonGoldilocksConfig}, +}; +use plonky2x::{ + backend::circuit::PlonkParameters, + frontend::{ + builder::CircuitBuilder, + uint::num::biguint::{BigUintTarget, CircuitBuilderBiguint}, + vars::ByteVariable, + }, +}; + +use crate::verification::{ + curves::{g1::PointG1Target, g2::PointG2Target}, + proofs::{ + final_exponentiate::FinalExponentiateStark, + miller_loop::MillerLoopStark, + proofs::{ + ec_aggregate_main, final_exponentiate_main, miller_loop_main, recursive_proof, + ProofTuple, + }, + }, + utils::native_bls::{calc_pairing_precomp, Fp, Fp12, Fp2}, +}; + +use super::{aggregation::hash_to_curve::hash_to_curve, proofs::ecc_aggregate::ECCAggStark}; + +const D: usize = 2; +type C = PoseidonGoldilocksConfig; +type F = >::F; + +type MlStark = MillerLoopStark; +type FeStark = FinalExponentiateStark; +type ECAggStark = ECCAggStark; + +pub fn verify_pubkeys_aggregation( + points: Vec<[Fp; 2]>, + res: [Fp; 2], + bits: Vec, +) -> ProofTuple { + let (stark_ec, proof_ec, config_ec) = ec_aggregate_main::(points, res, bits.clone()); + let recursive_ec = + recursive_proof::, C, D>(stark_ec, proof_ec, &config_ec, true); + + recursive_ec +} + +pub fn verify_miller_loop(x: Fp, y: Fp, q_x: Fp2, q_y: Fp2, q_z: Fp2) -> ProofTuple { + let (stark_ml, proof_ml, config_ml) = miller_loop_main::(x, y, q_x, q_y, q_z); + let recursive_ml = recursive_proof::(stark_ml, proof_ml, &config_ml, true); + + recursive_ml +} + +pub fn verify_final_exponentiation(f: Fp12) -> ProofTuple { + let (stark_final_exp, proof_final_exp, config_final_exp) = + final_exponentiate_main::(f); + let recursive_final_exp = recursive_proof::( + stark_final_exp, + proof_final_exp, + &config_final_exp, + true, + ); + + recursive_final_exp +} + +fn fp12_as_biguint_target, const D: usize>( + builder: &mut CircuitBuilder, + f_inputs: Vec, + i: usize, +) -> Vec { + let mut f = 
Vec::new(); + let mut i = i; + for _ in 0..12 { + f.push(builder.api.constant_biguint(&BigUint::new( + f_inputs[i..i + 12].iter().map(|x| x.0 as u32).collect(), + ))); + i += 12; + } + + f +} + +fn fp12_as_fp_limbs(f_inputs: Vec, i: usize) -> Vec { + let mut f = Vec::new(); + let mut i = i; + for _ in 0..12 { + f.push(Fp::get_fp_from_biguint(BigUint::new( + f_inputs[i..i + 12].iter().map(|x| x.0 as u32).collect(), + ))); + i += 12; + } + + f +} + +fn vec_limbs_to_fixed_array(v: Vec) -> [T; N] { + v.try_into() + .unwrap_or_else(|v: Vec| panic!("Expected a Vec of length {} but it was {}", N, v.len())) +} + +pub fn calc_ell_coeffs_and_generate_g2_point, const D: usize>( + builder: &mut CircuitBuilder, + g2_point: G2Affine, +) -> PointG2Target { + let ell_coeffs = calc_pairing_precomp( + Fp2([ + Fp::get_fp_from_biguint(g2_point.x.c0.to_string().parse::().unwrap()), + Fp::get_fp_from_biguint(g2_point.x.c1.to_string().parse::().unwrap()), + ]), + Fp2([ + Fp::get_fp_from_biguint(g2_point.y.c0.to_string().parse::().unwrap()), + Fp::get_fp_from_biguint(g2_point.y.c1.to_string().parse::().unwrap()), + ]), + Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str("1").unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str("0").unwrap()), + ]), + ); + + [ + [ + builder + .api + .constant_biguint(&ell_coeffs[0][0].0[0].to_biguint()), + builder + .api + .constant_biguint(&ell_coeffs[0][0].0[1].to_biguint()), + ], + [ + builder + .api + .constant_biguint(&ell_coeffs[0][1].0[0].to_biguint()), + builder + .api + .constant_biguint(&ell_coeffs[0][1].0[1].to_biguint()), + ], + ] +} + +pub fn verify_bls_signatures( + builder: &mut CircuitBuilder, D>, + first_ml_proof: ProofTuple, + second_ml_proof: ProofTuple, + g1_generator: &PointG1Target, + signature: &PointG2Target, + public_key: &PointG1Target, + hm_g2: &[ByteVariable], +) { + let hm_g2 = hash_to_curve(builder, hm_g2); + let first_ml_pub_inputs = first_ml_proof.0.public_inputs; + let second_ml_pub_inputs = 
second_ml_proof.0.public_inputs; + + // FIRST MILLER LOOP + let g1_x_input = builder.api.constant_biguint(&BigUint::new( + first_ml_pub_inputs[0..12] + .iter() + .map(|x| x.0 as u32) + .collect(), + )); + let g1_y_input = builder.api.constant_biguint(&BigUint::new( + first_ml_pub_inputs[12..24] + .iter() + .map(|x| x.0 as u32) + .collect(), + )); + + let g2_x_input_c0 = builder.api.constant_biguint(&BigUint::new( + first_ml_pub_inputs[24..36] + .iter() + .map(|x| x.0 as u32) + .collect(), + )); + let g2_x_input_c1 = builder.api.constant_biguint(&BigUint::new( + first_ml_pub_inputs[36..48] + .iter() + .map(|x| x.0 as u32) + .collect(), + )); + let g2_y_input_c0 = builder.api.constant_biguint(&BigUint::new( + first_ml_pub_inputs[48..60] + .iter() + .map(|x| x.0 as u32) + .collect(), + )); + let g2_y_input_c1 = builder.api.constant_biguint(&BigUint::new( + first_ml_pub_inputs[60..72] + .iter() + .map(|x| x.0 as u32) + .collect(), + )); + + builder.api.connect_biguint(&g1_generator[0], &g1_x_input); + builder.api.connect_biguint(&g1_generator[1], &g1_y_input); + + builder + .api + .connect_biguint(&signature[0][0], &g2_x_input_c0); + builder + .api + .connect_biguint(&signature[0][1], &g2_x_input_c1); + builder + .api + .connect_biguint(&signature[1][0], &g2_y_input_c0); + builder + .api + .connect_biguint(&signature[1][1], &g2_y_input_c1); + + let first_ml_r = fp12_as_fp_limbs(first_ml_pub_inputs, 4920); + let (_, proof_final_exp, _) = final_exponentiate_main::(Fp12( + vec_limbs_to_fixed_array::(first_ml_r.clone()), + )); + let first_fin_exp_pub_inputs = proof_final_exp.public_inputs; + let first_fin_exp_pub_inputs = fp12_as_biguint_target(builder, first_fin_exp_pub_inputs, 144); + + // SECOND MILLER LOOP + let g1_x_input = builder.api.constant_biguint(&BigUint::new( + second_ml_pub_inputs[0..12] + .iter() + .map(|x| x.0 as u32) + .collect(), + )); + let g1_y_input = builder.api.constant_biguint(&BigUint::new( + second_ml_pub_inputs[12..24] + .iter() + .map(|x| x.0 as 
u32) + .collect(), + )); + + let g2_x_input_c0 = builder.api.constant_biguint(&BigUint::new( + second_ml_pub_inputs[24..36] + .iter() + .map(|x| x.0 as u32) + .collect(), + )); + let g2_x_input_c1 = builder.api.constant_biguint(&BigUint::new( + second_ml_pub_inputs[36..48] + .iter() + .map(|x| x.0 as u32) + .collect(), + )); + let g2_y_input_c0 = builder.api.constant_biguint(&BigUint::new( + second_ml_pub_inputs[48..60] + .iter() + .map(|x| x.0 as u32) + .collect(), + )); + let g2_y_input_c1 = builder.api.constant_biguint(&BigUint::new( + second_ml_pub_inputs[60..72] + .iter() + .map(|x| x.0 as u32) + .collect(), + )); + + builder.api.connect_biguint(&public_key[0], &g1_x_input); + builder.api.connect_biguint(&public_key[1], &g1_y_input); + + builder.api.connect_biguint(&hm_g2[0][0], &g2_x_input_c0); + builder.api.connect_biguint(&hm_g2[0][1], &g2_x_input_c1); + builder.api.connect_biguint(&hm_g2[1][0], &g2_y_input_c0); + builder.api.connect_biguint(&hm_g2[1][1], &g2_y_input_c1); + + let second_ml_r = fp12_as_fp_limbs(second_ml_pub_inputs.clone(), 4920); + + let (_, proof_final_exp, _) = + final_exponentiate_main::(Fp12(vec_limbs_to_fixed_array::(second_ml_r))); + let second_fin_exp_pub_inputs = proof_final_exp.public_inputs; + let second_fin_exp_pub_inputs = fp12_as_biguint_target(builder, second_fin_exp_pub_inputs, 144); + + for i in 0..12 { + builder + .api + .connect_biguint(&first_fin_exp_pub_inputs[i], &second_fin_exp_pub_inputs[i]); + } +} + +#[cfg(test)] +mod tests { + use std::{str::FromStr, time::Instant}; + + use ark_bls12_381::{Fr, G1Affine, G2Affine}; + use ark_ec::AffineRepr; + use ark_std::UniformRand; + use num_bigint::BigUint; + use plonky2::{ + field::{goldilocks_field::GoldilocksField, types::Field}, + iop::witness::PartialWitness, + plonk::config::{GenericConfig, PoseidonGoldilocksConfig}, + }; + use plonky2x::frontend::{ + builder::DefaultBuilder, uint::num::biguint::CircuitBuilderBiguint, vars::ByteVariable, + }; + + use 
super::{calc_ell_coeffs_and_generate_g2_point, verify_pubkeys_aggregation}; + use crate::verification::{ + curves::{ + g1::{g1_ecc_aggregate, PointG1Target}, + g2::PointG2Target, + }, + proofs::miller_loop::MillerLoopStark, + utils::native_bls::{Fp, Fp2}, + verify::verify_bls_signatures, + }; + + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + type _MlStark = MillerLoopStark; + + use super::verify_miller_loop; + + #[test] + fn test_verify_proofs() { + use jemallocator::Jemalloc; + + #[global_allocator] + static GLOBAL: Jemalloc = Jemalloc; + let mut builder = DefaultBuilder::new(); + + /* Test purposes */ + let rng = &mut ark_std::rand::thread_rng(); + let g1 = G1Affine::generator(); + let sk: Fr = Fr::rand(rng); + let pk = Into::::into(g1 * sk); + let message = G2Affine::rand(rng); + let signature = Into::::into(message * sk); + /* Test purposes */ + let first_ml_proof = verify_miller_loop( + Fp::get_fp_from_biguint(g1.x.to_string().parse::().unwrap()), + Fp::get_fp_from_biguint(g1.y.to_string().parse::().unwrap()), + Fp2([ + Fp::get_fp_from_biguint(signature.x.c0.to_string().parse::().unwrap()), + Fp::get_fp_from_biguint(signature.x.c1.to_string().parse::().unwrap()), + ]), + Fp2([ + Fp::get_fp_from_biguint(signature.y.c0.to_string().parse::().unwrap()), + Fp::get_fp_from_biguint(signature.y.c1.to_string().parse::().unwrap()), + ]), + Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str("1").unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str("0").unwrap()), + ]), + ); + + let second_ml_proof = verify_miller_loop( + Fp::get_fp_from_biguint(pk.x.to_string().parse::().unwrap()), + Fp::get_fp_from_biguint(pk.y.to_string().parse::().unwrap()), + Fp2([ + Fp::get_fp_from_biguint(message.x.c0.to_string().parse::().unwrap()), + Fp::get_fp_from_biguint(message.x.c1.to_string().parse::().unwrap()), + ]), + Fp2([ + Fp::get_fp_from_biguint(message.y.c0.to_string().parse::().unwrap()), + 
Fp::get_fp_from_biguint(message.y.c1.to_string().parse::().unwrap()), + ]), + Fp2([ + Fp::get_fp_from_biguint(BigUint::from_str("1").unwrap()), + Fp::get_fp_from_biguint(BigUint::from_str("0").unwrap()), + ]), + ); + + // G1 GENERATOR POINT + let g1_generator: PointG1Target = [ + builder + .api + .constant_biguint(&g1.x.to_string().parse::().unwrap()), + builder + .api + .constant_biguint(&g1.y.to_string().parse::().unwrap()), + ]; + + // SIGNATURE + let signature: PointG2Target = + calc_ell_coeffs_and_generate_g2_point(&mut builder, signature); + + // PUBLIC KEY + let public_key: PointG1Target = [ + builder + .api + .constant_biguint(&pk.x.to_string().parse::().unwrap()), + builder + .api + .constant_biguint(&pk.y.to_string().parse::().unwrap()), + ]; + + // MESSAGE + let message: PointG2Target = calc_ell_coeffs_and_generate_g2_point(&mut builder, message); + + // MESSAGE in bytes + let message = [ + 103, 140, 163, 210, 238, 252, 75, 8, 227, 27, 60, 229, 125, 150, 241, 222, 217, 156, + 178, 17, 14, 199, 15, 172, 94, 179, 249, 0, 197, 206, 104, 200, 165, 253, 55, 147, 171, + 191, 118, 189, 133, 138, 2, 22, 237, 6, 62, 10, 68, 105, 208, 102, 66, 70, 170, 114, + 194, 80, 215, 5, 63, 95, 202, 1, 99, 153, 67, 115, 7, 122, 235, 255, 142, 44, 3, 65, + 190, 166, 218, 72, 230, 196, 24, 88, 146, 193, 211, 90, 37, 173, 71, 152, 21, 226, 89, + 79, 239, 81, 149, 135, 188, 51, 52, 116, 26, 30, 126, 31, 35, 240, 201, 101, 33, 61, + 220, 192, 86, 47, 214, 243, 224, 136, 50, 56, 42, 233, 148, 244, 203, 198, 195, 120, + 36, 221, 181, 53, 160, 58, 167, 131, 216, 183, 83, 232, 151, 87, 46, 54, 128, 123, 231, + 212, 130, 19, 28, 96, 108, 111, 137, 154, 40, 184, 74, 69, 100, 64, 177, 98, 248, 32, + 12, 97, 49, 187, 39, 159, 168, 247, 29, 246, 209, 110, 77, 73, 20, 23, 174, 143, 93, + 92, 162, 48, 134, 119, 213, 139, 234, 205, 91, 113, 204, 121, 57, 4, 41, 180, 144, 76, + 107, 59, 176, 43, 11, 127, 34, 38, 164, 9, 141, 78, 245, 175, 145, 112, 129, 109, 18, + 250, 85, 16, 124, 182, 242, 
158, 84, 219, 13, 207, 186, 82, 157, 132, 225, 236, 45, + 185, 228, 161, 169, 106, 25, 155, 251, 254, 223, + ] + .iter() + .map(|b| { + let b_v = builder.constant(GoldilocksField::from_canonical_u8(*b)); + ByteVariable::from_variable(&mut builder, b_v) + }) + .collect::>(); + + verify_bls_signatures( + &mut builder, + first_ml_proof, + second_ml_proof, + &g1_generator, + &signature, + &public_key, + &message, + ); + + // Build your circuit. + let circuit = builder.build(); + + // Write to the circuit input. + let input = circuit.input(); + + let s = Instant::now(); + // Generate a proof. + let (proof, output) = circuit.prove(&input); + println!("Time to generate a proof {:?}", s.elapsed()); + // Verify proof. + let s = Instant::now(); + circuit.verify(&proof, &input, &output); + println!("Time to verify proof {:?}", s.elapsed()); + } + + #[test] + fn test_pubkeys_aggregation() { + let circuit_config = + plonky2::plonk::circuit_data::CircuitConfig::standard_recursion_config(); + let mut builder = + plonky2::plonk::circuit_builder::CircuitBuilder::::new(circuit_config); + + let a_bigu = BigUint::from_str( + "1216495682195235861952885506871698490232894470117269383940381148575524314493849307811227440691167647909822763414941" + ).unwrap(); + let b_bigu = BigUint::from_str( + "2153848155426317245700560287567131132765685008362732985860101000686875894603366983854567186180519945327668975076337" + ).unwrap(); + + let a_fp = Fp::get_fp_from_biguint(a_bigu.clone()); + let b_fp = Fp::get_fp_from_biguint(b_bigu.clone()); + + let a_bigu_t = builder.constant_biguint(&a_bigu); + let b_bigu_t = builder.constant_biguint(&b_bigu); + + let ec_proof = verify_pubkeys_aggregation( + vec![[a_fp, b_fp], [a_fp, b_fp]], + [a_fp, b_fp], + vec![true, false], + ); + let point = [a_bigu_t, b_bigu_t]; + g1_ecc_aggregate(&mut builder, point.clone(), point); + + // If we are going to check the pubkey ec point + // let ec_proof_pub_inputs = ec_proof.0.public_inputs; + + // // + // let 
g1_pk_point_x_input = builder.constant_biguint(&BigUint::new( + // ec_proof_pub_inputs[0..12] + // .iter() + // .map(|x| x.0 as u32) + // .collect(), + // )); + // let g1_pk_point_y_input = builder.constant_biguint(&BigUint::new( + // ec_proof_pub_inputs[12..24] + // .iter() + // .map(|x| x.0 as u32) + // .collect(), + // )); + } +} diff --git a/casper-finality-proofs/src/weigh_justification_and_finalization/epoch_processing.rs b/casper-finality-proofs/src/weigh_justification_and_finalization/epoch_processing.rs index 373f7015c..20139f3d4 100644 --- a/casper-finality-proofs/src/weigh_justification_and_finalization/epoch_processing.rs +++ b/casper-finality-proofs/src/weigh_justification_and_finalization/epoch_processing.rs @@ -31,7 +31,7 @@ pub fn assert_epoch_is_not_genesis_epoch, const D: usize>( builder: &mut CircuitBuilder, epoch: Epoch, ) { - let one = builder.one(); + let one: U64Variable = builder.one(); let pred = builder.gte(epoch, one); assert_is_true(builder, pred); } diff --git a/shell.nix b/shell.nix index 7f3442748..94b1c3149 100644 --- a/shell.nix +++ b/shell.nix @@ -8,16 +8,15 @@ }: let inherit (inputs'.mcl-blockchain.legacyPackages) pkgs-with-rust-overlay rust-stable; inherit (pkgs-with-rust-overlay) rust-bin; - - rust-nightly = rust-bin.nightly."2023-06-12".default.override { - extensions = ["rust-src" "rust-analyzer"]; - }; + # rust-nightly = rust-bin.nightly."2024-01-21".default.override { + # extensions = ["rust-src" "rust-analyzer"]; + # }; in { devShells.default = with pkgs; let shell-pkgs = import ./libs/nix/common-shell-pkgs.nix {inherit pkgs rust-stable;}; in mkShell { - packages = [rust-nightly] ++ shell-pkgs; + packages = [cmake] ++ shell-pkgs; nativeBuildInputs = [pkg-config openssl]; diff --git a/yarn.lock b/yarn.lock index 1975b287c..aa5c2c086 100644 --- a/yarn.lock +++ b/yarn.lock @@ -1674,6 +1674,7 @@ __metadata: r1csfile: "npm:^0.0.40" snarkjs: 
"https://github.com/metacraft-labs/snarkjs.git#commit=091ee9c3744c660b0ecd961f9197a8249a8f7d3b" ts-node: "npm:^10.9.1" + tsx: "npm:^4.7.1" typescript: "npm:5.2.2" web3: "npm:1.10.4" languageName: unknown