diff --git a/Cargo.toml b/Cargo.toml index 0a0b6acae..c46403716 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,5 +7,6 @@ members = [ "storage-proofs/porep", "storage-proofs/post", "fil-proofs-tooling", - "sha2raw" + "sha2raw", + "phase2", ] diff --git a/filecoin-proofs/Cargo.toml b/filecoin-proofs/Cargo.toml index dd3593e1b..ba90f2ca7 100644 --- a/filecoin-proofs/Cargo.toml +++ b/filecoin-proofs/Cargo.toml @@ -11,7 +11,6 @@ readme = "README.md" [dependencies] storage-proofs = { version = "^5.0.0", path = "../storage-proofs", default-features = false } bitvec = "0.17" -chrono = "0.4" rand = "0.7" lazy_static = "1.2" memmap = "0.7" @@ -20,11 +19,9 @@ byteorder = "1" itertools = "0.9" serde = { version = "1.0", features = ["rc", "derive"] } serde_json = "1.0" -regex = "1.3.7" ff = { version = "0.2.3", package = "fff" } blake2b_simd = "0.5" bellperson = { version = "0.12", default-features = false } -clap = "2" log = "0.4.7" fil_logger = "0.1" env_proxy = "0.4" @@ -41,15 +38,13 @@ sha2 = "0.9.1" typenum = "1.11.2" bitintr = "0.3.0" gperftools = { version = "0.2", optional = true } -phase2 = { version = "0.11", package = "phase21", default-features = false } -simplelog = "0.8.0" -rand_chacha = "0.2.1" -dialoguer = "0.7.1" generic-array = "0.14.4" structopt = "0.3.12" humansize = "1.1.0" indicatif = "0.15.0" groupy = "0.3.0" +dialoguer = "0.7.1" +clap = "2.33.3" [dependencies.reqwest] version = "0.10" @@ -70,8 +65,8 @@ heap-profile = ["gperftools/heap"] simd = ["storage-proofs/simd"] asm = ["storage-proofs/asm"] gpu = ["storage-proofs/gpu", "bellperson/gpu"] -pairing = ["storage-proofs/pairing", "bellperson/pairing", "phase2/pairing"] -blst = ["storage-proofs/blst", "bellperson/blst", "phase2/blst"] +pairing = ["storage-proofs/pairing", "bellperson/pairing"] +blst = ["storage-proofs/blst", "bellperson/blst"] [[bench]] name = "preprocessing" diff --git a/phase2/Cargo.toml b/phase2/Cargo.toml new file mode 100644 index 000000000..a4847954f --- /dev/null +++ 
b/phase2/Cargo.toml @@ -0,0 +1,35 @@ +[package] +name = "filecoin-phase2" +version = "0.1.0" +description = "Phase2 for Filecoin circuits" +authors = ["dignifiedquire "] +license = "MIT OR Apache-2.0" +edition = "2018" +repository = "https://github.com/filecoin-project/rust-fil-proofs" +readme = "README.md" + + +[dependencies] +bellperson = { version = "0.12.0", default-features = false } +filecoin-proofs = { version = "5.4.0", path = "../filecoin-proofs", default-features = false } +storage-proofs = { version = "5.4.0", path = "../storage-proofs", default-features = false } +groupy = "0.3.1" +log = "0.4.11" +clap = "2.33.3" +byteorder = "1.3.4" +rand = "0.7.3" +rand_chacha = "0.2.2" +simplelog = "0.8.0" +dialoguer = "0.7.1" +hex = "0.4.2" +blake2b_simd = "0.5.11" +fff = "0.2.3" +rayon = "1.5.0" +num_cpus = "1.13.0" +crossbeam = "0.8.0" + +[features] +default = ["pairing", "gpu"] +gpu = ["bellperson/gpu", "filecoin-proofs/gpu", "storage-proofs/gpu"] +pairing = ["bellperson/pairing", "filecoin-proofs/pairing", "storage-proofs/pairing"] +blst = ["bellperson/blst", "filecoin-proofs/blst", "storage-proofs/blst"] diff --git a/phase2/LICENSE-APACHE b/phase2/LICENSE-APACHE new file mode 100644 index 000000000..f8e5e5ea0 --- /dev/null +++ b/phase2/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. \ No newline at end of file diff --git a/phase2/LICENSE-MIT b/phase2/LICENSE-MIT new file mode 100644 index 000000000..468cd79a8 --- /dev/null +++ b/phase2/LICENSE-MIT @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff --git a/phase2/README.md b/phase2/README.md new file mode 100644 index 000000000..b71c6f11b --- /dev/null +++ b/phase2/README.md @@ -0,0 +1,8 @@ +# Filecoin Phase2 + +Library and binary to run the 2nd phase of the trusted setup for the circuits. + +## License + +MIT or Apache 2.0 + diff --git a/phase2/src/lib.rs b/phase2/src/lib.rs new file mode 100644 index 000000000..229003c0b --- /dev/null +++ b/phase2/src/lib.rs @@ -0,0 +1,1619 @@ +//! # zk-SNARK MPCs, made easy. +//! +//! ## Make your circuit +//! +//! Grab the [`bellperson`](https://github.com/filecoin-project/bellman) crate. Bellman +//! provides a trait called `Circuit`, which you must implement +//! for your computation. +//! +//! Here's a silly example: proving you know the cube root of +//! a field element. +//! +//! ```rust +//! use fff::Field; +//! use bellperson::{ +//! Circuit, +//! ConstraintSystem, +//! SynthesisError, +//! bls::Engine, +//! }; +//! +//! struct CubeRoot { +//! cube_root: Option +//! } +//! +//! impl Circuit for CubeRoot { +//! fn synthesize>( +//! self, +//! cs: &mut CS +//! ) -> Result<(), SynthesisError> +//! { +//! // Witness the cube root +//! let root = cs.alloc(|| "root", || { +//! self.cube_root.ok_or(SynthesisError::AssignmentMissing) +//! })?; +//! +//! // Witness the square of the cube root +//! let square = cs.alloc(|| "square", || { +//! self.cube_root +//! .ok_or(SynthesisError::AssignmentMissing) +//! .map(|mut root| {root.square(); root }) +//! })?; +//! +//! // Enforce that `square` is root^2 +//! cs.enforce( +//! || "squaring", +//! |lc| lc + root, +//! |lc| lc + root, +//! |lc| lc + square +//! ); +//! +//! // Witness the cube, as a public input +//! let cube = cs.alloc_input(|| "cube", || { +//! self.cube_root +//! .ok_or(SynthesisError::AssignmentMissing) +//! .map(|root| { +//! let mut tmp = root; +//! tmp.square(); +//! tmp.mul_assign(&root); +//! tmp +//! }) +//! })?; +//! +//! // Enforce that `cube` is root^3 +//! // i.e. 
that `cube` is `root` * `square` +//! cs.enforce( +//! || "cubing", +//! |lc| lc + root, +//! |lc| lc + square, +//! |lc| lc + cube +//! ); +//! +//! Ok(()) +//! } +//! } +//! ``` +//! +//! ## Create some proofs +//! +//! Now that we have `CubeRoot` implementing `Circuit`, +//! let's create some parameters and make some proofs. +//! +//! ```rust,ignore +//! use bellperson::bls::{Bls12, Fr}; +//! use bellperson::groth16::{ +//! generate_random_parameters, +//! create_random_proof, +//! prepare_verifying_key, +//! verify_proof +//! }; +//! use rand::{OsRng, Rand}; +//! +//! let rng = &mut OsRng::new(); +//! +//! // Create public parameters for our circuit +//! let params = { +//! let circuit = CubeRoot:: { +//! cube_root: None +//! }; +//! +//! generate_random_parameters::( +//! circuit, +//! rng +//! ).unwrap() +//! }; +//! +//! // Prepare the verifying key for verification +//! let pvk = prepare_verifying_key(¶ms.vk); +//! +//! // Let's start making proofs! +//! for _ in 0..50 { +//! // Verifier picks a cube in the field. +//! // Let's just make a random one. +//! let root = Fr::rand(rng); +//! let mut cube = root; +//! cube.square(); +//! cube.mul_assign(&root); +//! +//! // Prover gets the cube, figures out the cube +//! // root, and makes the proof: +//! let proof = create_random_proof( +//! CubeRoot:: { +//! cube_root: Some(root) +//! }, ¶ms, rng +//! ).unwrap(); +//! +//! // Verifier checks the proof against the cube +//! assert!(verify_proof(&pvk, &proof, &[cube]).unwrap()); +//! } +//! ``` +//! ## Creating parameters +//! +//! Notice in the previous example that we created our zk-SNARK +//! parameters by calling `generate_random_parameters`. However, +//! if you wanted you could have called `generate_parameters` +//! with some secret numbers you chose, and kept them for +//! yourself. Given those numbers, you can create false proofs. +//! +//! In order to convince others you didn't, a multi-party +//! computation (MPC) can be used. 
The MPC has the property that +//! only one participant needs to be honest for the parameters to +//! be secure. This crate (`phase21`) is about creating parameters +//! securely using such an MPC. +//! +//! Let's start by using `phase21` to create some base parameters +//! for our circuit: +//! +//! ```rust,ignore +//! let mut params = phase21::MPCParameters::new(CubeRoot { +//! cube_root: None +//! }).unwrap(); +//! ``` +//! +//! The first time you try this, it will try to read a file like +//! `phase1radix2m2` from the current directory. You need to grab +//! that from the [Powers of Tau](https://lists.z.cash.foundation/pipermail/zapps-wg/2018/000362.html). +//! +//! These parameters are not safe to use; false proofs can be +//! created for them. Let's contribute some randomness to these +//! parameters. +//! +//! ```rust,ignore +//! // Contribute randomness to the parameters. Remember this hash, +//! // it's how we know our contribution is in the parameters! +//! let hash = params.contribute(rng); +//! ``` +//! +//! These parameters are now secure to use, so long as you weren't +//! malicious. That may not be convincing to others, so let them +//! contribute randomness too! `params` can be serialized and sent +//! elsewhere, where they can do the same thing and send new +//! parameters back to you. Only one person needs to be honest for +//! the final parameters to be secure. +//! +//! Once you're done setting up the parameters, you can verify the +//! parameters: +//! +//! ```rust,ignore +//! let contributions = params.verify(CubeRoot { +//! cube_root: None +//! }).expect("parameters should be valid!"); +//! +//! // We need to check the `contributions` to see if our `hash` +//! // is in it (see above, when we first contributed) +//! assert!(phase21::contains_contribution(&contributions, &hash)); +//! ``` +//! +//! Great, now if you're happy, grab the Groth16 `Parameters` with +//! `params.params()`, so that you can interact with the bellman APIs +//! 
just as before. +#![deny(clippy::all, clippy::perf, clippy::correctness, rust_2018_idioms)] + +pub mod small; + +use std::{ + fmt::{self, Debug, Formatter}, + fs::File, + io::{self, BufReader, Read, Write}, + sync::Arc, +}; + +use bellperson::bls::{ + Bls12, Engine, Fr, G1Affine, G1Projective, G1Uncompressed, G2Affine, G2Projective, + G2Uncompressed, PairingCurveAffine, +}; +use bellperson::{ + groth16::{Parameters, VerifyingKey}, + multicore::Worker, + Circuit, ConstraintSystem, Index, LinearCombination, SynthesisError, Variable, +}; +use blake2b_simd::State as Blake2b; +use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt}; +use fff::{Field, PrimeField}; +use groupy::{CurveAffine, CurveProjective, EncodedPoint, Wnaf}; +use log::{error, info}; +use rand::{Rng, SeedableRng}; +use rand_chacha::ChaChaRng; +use rayon::prelude::*; + +use crate::small::MPCSmall; + +/// This is our assembly structure that we'll use to synthesize the +/// circuit into a QAP. +struct KeypairAssembly { + num_inputs: usize, + num_aux: usize, + num_constraints: usize, + at_inputs: Vec>, + bt_inputs: Vec>, + ct_inputs: Vec>, + at_aux: Vec>, + bt_aux: Vec>, + ct_aux: Vec>, +} + +impl KeypairAssembly { + /// Returns the size (stack plus heap) of the `KeypairAssembly` in bytes. 
+ fn size(&self) -> usize { + use std::mem::{size_of, size_of_val}; + + let mut size = 3 * size_of::(); + size += 6 * size_of::>>(); + size += size_of_val::<[Vec<(E::Fr, usize)>]>(&self.at_inputs); + size += size_of_val::<[Vec<(E::Fr, usize)>]>(&self.bt_inputs); + size += size_of_val::<[Vec<(E::Fr, usize)>]>(&self.ct_inputs); + size += size_of_val::<[Vec<(E::Fr, usize)>]>(&self.at_aux); + size += size_of_val::<[Vec<(E::Fr, usize)>]>(&self.bt_aux); + size += size_of_val::<[Vec<(E::Fr, usize)>]>(&self.ct_aux); + + for el in self.at_inputs.iter() { + size += size_of_val::<[(E::Fr, usize)]>(el); + } + for el in self.bt_inputs.iter() { + size += size_of_val::<[(E::Fr, usize)]>(el); + } + for el in self.ct_inputs.iter() { + size += size_of_val::<[(E::Fr, usize)]>(el); + } + for el in self.at_aux.iter() { + size += size_of_val::<[(E::Fr, usize)]>(el); + } + for el in self.bt_aux.iter() { + size += size_of_val::<[(E::Fr, usize)]>(el); + } + for el in self.ct_aux.iter() { + size += size_of_val::<[(E::Fr, usize)]>(el); + } + + size + } +} + +impl ConstraintSystem for KeypairAssembly { + type Root = Self; + + fn alloc(&mut self, _: A, _: F) -> Result + where + F: FnOnce() -> Result, + A: FnOnce() -> AR, + AR: Into, + { + // There is no assignment, so we don't even invoke the + // function for obtaining one. + + let index = self.num_aux; + self.num_aux += 1; + + self.at_aux.push(vec![]); + self.bt_aux.push(vec![]); + self.ct_aux.push(vec![]); + + Ok(Variable::new_unchecked(Index::Aux(index))) + } + + fn alloc_input(&mut self, _: A, _: F) -> Result + where + F: FnOnce() -> Result, + A: FnOnce() -> AR, + AR: Into, + { + // There is no assignment, so we don't even invoke the + // function for obtaining one. 
+ + let index = self.num_inputs; + self.num_inputs += 1; + + self.at_inputs.push(vec![]); + self.bt_inputs.push(vec![]); + self.ct_inputs.push(vec![]); + + Ok(Variable::new_unchecked(Index::Input(index))) + } + + fn enforce(&mut self, _: A, a: LA, b: LB, c: LC) + where + A: FnOnce() -> AR, + AR: Into, + LA: FnOnce(LinearCombination) -> LinearCombination, + LB: FnOnce(LinearCombination) -> LinearCombination, + LC: FnOnce(LinearCombination) -> LinearCombination, + { + fn eval( + l: LinearCombination, + inputs: &mut [Vec<(E::Fr, usize)>], + aux: &mut [Vec<(E::Fr, usize)>], + this_constraint: usize, + ) { + for (&var, &coeff) in l.iter() { + match var.get_unchecked() { + Index::Input(id) => inputs[id].push((coeff, this_constraint)), + Index::Aux(id) => aux[id].push((coeff, this_constraint)), + } + } + } + + eval( + a(LinearCombination::zero()), + &mut self.at_inputs, + &mut self.at_aux, + self.num_constraints, + ); + eval( + b(LinearCombination::zero()), + &mut self.bt_inputs, + &mut self.bt_aux, + self.num_constraints, + ); + eval( + c(LinearCombination::zero()), + &mut self.ct_inputs, + &mut self.ct_aux, + self.num_constraints, + ); + + self.num_constraints += 1; + } + + fn push_namespace(&mut self, _: N) + where + NR: Into, + N: FnOnce() -> NR, + { + // Do nothing; we don't care about namespaces in this context. + } + + fn pop_namespace(&mut self) { + // Do nothing; we don't care about namespaces in this context. + } + + fn get_root(&mut self) -> &mut Self::Root { + self + } +} + +/// MPC parameters are just like bellman `Parameters` except, when serialized, +/// they contain a transcript of contributions at the end, which can be verified. +#[derive(Clone)] +pub struct MPCParameters { + params: Parameters, + cs_hash: [u8; 64], + contributions: Vec, +} + +// Required by `assert_eq!()`. 
+impl Debug for MPCParameters { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_struct("MPCParameters") + .field("params", &"") + .field("cs_hash", &self.cs_hash.to_vec()) + .field("contributions", &self.contributions.to_vec()) + .finish() + } +} + +impl PartialEq for MPCParameters { + fn eq(&self, other: &MPCParameters) -> bool { + self.params == other.params + && self.cs_hash[..] == other.cs_hash[..] + && self.contributions == other.contributions + } +} + +impl MPCParameters { + /// Create new Groth16 parameters (compatible with bellman) for a + /// given circuit. The resulting parameters are unsafe to use + /// until there are contributions (see `contribute()`). + pub fn new(circuit: C) -> Result + where + C: Circuit, + { + let mut assembly = KeypairAssembly { + num_inputs: 0, + num_aux: 0, + num_constraints: 0, + at_inputs: vec![], + bt_inputs: vec![], + ct_inputs: vec![], + at_aux: vec![], + bt_aux: vec![], + ct_aux: vec![], + }; + + // Allocate the "one" input variable + assembly.alloc_input(|| "", || Ok(Fr::one()))?; + + // Synthesize the circuit. + circuit.synthesize(&mut assembly)?; + + // Input constraints to ensure full density of IC query + // x * 0 = 0 + for i in 0..assembly.num_inputs { + assembly.enforce( + || "", + |lc| lc + Variable::new_unchecked(Index::Input(i)), + |lc| lc, + |lc| lc, + ); + } + + info!( + "phase2::MPCParameters::new() Constraint System: n_constraints={}, n_inputs={}, n_aux={}, memsize={}b", + assembly.num_constraints, + assembly.num_inputs, + assembly.num_aux, + assembly.size() + ); + + // Compute the size of our evaluation domain, `m = 2^exp`. 
+ let mut m = 1; + let mut exp = 0; + while m < assembly.num_constraints { + m *= 2; + exp += 1; + + // Powers of Tau ceremony can't support more than 2^30 + if exp > 30 { + return Err(SynthesisError::PolynomialDegreeTooLarge); + } + } + + // Try to load "phase1radix2m{}" + info!( + "phase2::MPCParameters::new() phase1.5_file=phase1radix2m{}", + exp + ); + let f = match File::open(format!("phase1radix2m{}", exp)) { + Ok(f) => f, + Err(e) => { + panic!("Couldn't load phase1radix2m{}: {:?}", exp, e); + } + }; + let f = &mut BufReader::with_capacity(1024 * 1024, f); + + let read_g1 = |reader: &mut BufReader| -> io::Result { + let mut repr = G1Uncompressed::empty(); + reader.read_exact(repr.as_mut())?; + + repr.into_affine_unchecked() + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) + .and_then(|e| { + if e.is_zero() { + Err(io::Error::new( + io::ErrorKind::InvalidData, + "point at infinity", + )) + } else { + Ok(e) + } + }) + }; + + let read_g2 = |reader: &mut BufReader| -> io::Result { + let mut repr = G2Uncompressed::empty(); + reader.read_exact(repr.as_mut())?; + + repr.into_affine_unchecked() + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) + .and_then(|e| { + if e.is_zero() { + Err(io::Error::new( + io::ErrorKind::InvalidData, + "point at infinity", + )) + } else { + Ok(e) + } + }) + }; + + let alpha = read_g1(f)?; + let beta_g1 = read_g1(f)?; + let beta_g2 = read_g2(f)?; + + info!("phase2::MPCParameters::new() reading coeffs_g1 from phase1.5 file"); + let mut coeffs_g1 = Vec::with_capacity(m); + for _ in 0..m { + coeffs_g1.push(read_g1(f)?); + } + + info!("phase2::MPCParameters::new() reading coeffs_g2 from phase1.5 file"); + let mut coeffs_g2 = Vec::with_capacity(m); + for _ in 0..m { + coeffs_g2.push(read_g2(f)?); + } + + info!("phase2::MPCParameters::new() reading alpha_coeffs_g1 from phase1.5 file"); + let mut alpha_coeffs_g1 = Vec::with_capacity(m); + for _ in 0..m { + alpha_coeffs_g1.push(read_g1(f)?); + } + + 
info!("phase2::MPCParameters::new() reading beta_coeffs_g1 from phase1.5 file"); + let mut beta_coeffs_g1 = Vec::with_capacity(m); + for _ in 0..m { + beta_coeffs_g1.push(read_g1(f)?); + } + + // These are `Arc` so that later it'll be easier + // to use multiexp during QAP evaluation (which + // requires a futures-based API) + let coeffs_g1 = Arc::new(coeffs_g1); + let coeffs_g2 = Arc::new(coeffs_g2); + let alpha_coeffs_g1 = Arc::new(alpha_coeffs_g1); + let beta_coeffs_g1 = Arc::new(beta_coeffs_g1); + + let mut ic = vec![G1Projective::zero(); assembly.num_inputs]; + info!("phase2::MPCParameters::new() initialized ic vector"); + let mut l = vec![G1Projective::zero(); assembly.num_aux]; + info!("phase2::MPCParameters::new() initialized l vector"); + let mut a_g1 = vec![G1Projective::zero(); assembly.num_inputs + assembly.num_aux]; + info!("phase2::MPCParameters::new() initialized a_g1 vector"); + let mut b_g1 = vec![G1Projective::zero(); assembly.num_inputs + assembly.num_aux]; + info!("phase2::MPCParameters::new() initialized b_g1 vector"); + let mut b_g2 = vec![G2Projective::zero(); assembly.num_inputs + assembly.num_aux]; + info!("phase2::MPCParameters::new() initialized b_g2 vector"); + + #[allow(clippy::too_many_arguments)] + fn eval( + // Lagrange coefficients for tau + coeffs_g1: Arc>, + coeffs_g2: Arc>, + alpha_coeffs_g1: Arc>, + beta_coeffs_g1: Arc>, + + // QAP polynomials + at: &[Vec<(Fr, usize)>], + bt: &[Vec<(Fr, usize)>], + ct: &[Vec<(Fr, usize)>], + + // Resulting evaluated QAP polynomials + a_g1: &mut [G1Projective], + b_g1: &mut [G1Projective], + b_g2: &mut [G2Projective], + ext: &mut [G1Projective], + + // Worker + worker: &Worker, + ) { + // Sanity check + assert_eq!(a_g1.len(), at.len()); + assert_eq!(a_g1.len(), bt.len()); + assert_eq!(a_g1.len(), ct.len()); + assert_eq!(a_g1.len(), b_g1.len()); + assert_eq!(a_g1.len(), b_g2.len()); + assert_eq!(a_g1.len(), ext.len()); + + // Evaluate polynomials in multiple threads + worker.scope(a_g1.len(), 
|scope, chunk| { + for ((((((a_g1, b_g1), b_g2), ext), at), bt), ct) in a_g1 + .chunks_mut(chunk) + .zip(b_g1.chunks_mut(chunk)) + .zip(b_g2.chunks_mut(chunk)) + .zip(ext.chunks_mut(chunk)) + .zip(at.chunks(chunk)) + .zip(bt.chunks(chunk)) + .zip(ct.chunks(chunk)) + { + let coeffs_g1 = coeffs_g1.clone(); + let coeffs_g2 = coeffs_g2.clone(); + let alpha_coeffs_g1 = alpha_coeffs_g1.clone(); + let beta_coeffs_g1 = beta_coeffs_g1.clone(); + + scope.spawn(move |_| { + for ((((((a_g1, b_g1), b_g2), ext), at), bt), ct) in a_g1 + .iter_mut() + .zip(b_g1.iter_mut()) + .zip(b_g2.iter_mut()) + .zip(ext.iter_mut()) + .zip(at.iter()) + .zip(bt.iter()) + .zip(ct.iter()) + { + for &(coeff, lag) in at { + a_g1.add_assign(&coeffs_g1[lag].mul(coeff)); + ext.add_assign(&beta_coeffs_g1[lag].mul(coeff)); + } + + for &(coeff, lag) in bt { + b_g1.add_assign(&coeffs_g1[lag].mul(coeff)); + b_g2.add_assign(&coeffs_g2[lag].mul(coeff)); + ext.add_assign(&alpha_coeffs_g1[lag].mul(coeff)); + } + + for &(coeff, lag) in ct { + ext.add_assign(&coeffs_g1[lag].mul(coeff)); + } + } + + // Batch normalize + G1Projective::batch_normalization(a_g1); + G1Projective::batch_normalization(b_g1); + G2Projective::batch_normalization(b_g2); + G1Projective::batch_normalization(ext); + }); + } + }); + } + + let worker = Worker::new(); + + // Evaluate for inputs. + info!("phase2::MPCParameters::new() evaluating polynomials for inputs"); + eval( + coeffs_g1.clone(), + coeffs_g2.clone(), + alpha_coeffs_g1.clone(), + beta_coeffs_g1.clone(), + &assembly.at_inputs, + &assembly.bt_inputs, + &assembly.ct_inputs, + &mut a_g1[0..assembly.num_inputs], + &mut b_g1[0..assembly.num_inputs], + &mut b_g2[0..assembly.num_inputs], + &mut ic, + &worker, + ); + + // Evaluate for auxillary variables. 
+ info!("phase2::MPCParameters::new() evaluating polynomials for auxillary variables"); + eval( + coeffs_g1.clone(), + coeffs_g2.clone(), + alpha_coeffs_g1.clone(), + beta_coeffs_g1.clone(), + &assembly.at_aux, + &assembly.bt_aux, + &assembly.ct_aux, + &mut a_g1[assembly.num_inputs..], + &mut b_g1[assembly.num_inputs..], + &mut b_g2[assembly.num_inputs..], + &mut l, + &worker, + ); + + // Don't allow any elements be unconstrained, so that + // the L query is always fully dense. + for e in l.iter() { + if e.is_zero() { + return Err(SynthesisError::UnconstrainedVariable); + } + } + + let vk = VerifyingKey { + alpha_g1: alpha, + beta_g1, + beta_g2, + gamma_g2: G2Affine::one(), + delta_g1: G1Affine::one(), + delta_g2: G2Affine::one(), + ic: ic.into_par_iter().map(|e| e.into_affine()).collect(), + }; + + // Reclaim the memory used by these vectors prior to reading in `h`. + drop(coeffs_g1); + drop(coeffs_g2); + drop(alpha_coeffs_g1); + drop(beta_coeffs_g1); + + info!("phase2::MPCParameters::new() reading h from phase1.5 file"); + let mut h = Vec::with_capacity(m - 1); + for _ in 0..(m - 1) { + h.push(read_g1(f)?); + } + + let params = Parameters { + vk, + h: Arc::new(h), + l: Arc::new(l.into_par_iter().map(|e| e.into_affine()).collect()), + + // Filter points at infinity away from A/B queries + a: Arc::new( + a_g1.into_par_iter() + .filter(|e| !e.is_zero()) + .map(|e| e.into_affine()) + .collect(), + ), + b_g1: Arc::new( + b_g1.into_par_iter() + .filter(|e| !e.is_zero()) + .map(|e| e.into_affine()) + .collect(), + ), + b_g2: Arc::new( + b_g2.into_par_iter() + .filter(|e| !e.is_zero()) + .map(|e| e.into_affine()) + .collect(), + ), + }; + + info!( + "phase2::MPCParameters::new() vector lengths: ic={}, h={}, l={}, a={}, b_g1={}, b_g2={}", + params.vk.ic.len(), + params.h.len(), + params.l.len(), + params.a.len(), + params.b_g1.len(), + params.b_g2.len() + ); + + let cs_hash = { + let sink = io::sink(); + let mut sink = HashWriter::new(sink); + + params.write(&mut 
sink).unwrap(); + + sink.into_hash() + }; + + Ok(MPCParameters { + params, + cs_hash, + contributions: vec![], + }) + } + + /// Get the underlying Groth16 `Parameters` + pub fn get_params(&self) -> &Parameters { + &self.params + } + + pub fn n_contributions(&self) -> usize { + self.contributions.len() + } + + /// Contributes some randomness to the parameters. Only one + /// contributor needs to be honest for the parameters to be + /// secure. + /// + /// This function returns a "hash" that is bound to the + /// contribution. Contributors can use this hash to make + /// sure their contribution is in the final parameters, by + /// checking to see if it appears in the output of + /// `MPCParameters::verify`. + pub fn contribute(&mut self, rng: &mut R) -> [u8; 64] { + // Generate a keypair + let (pubkey, privkey) = keypair(rng, self); + + fn batch_exp(bases: &mut [C], coeff: C::Scalar) { + let coeff = coeff.into_repr(); + + let mut projective = vec![C::Projective::zero(); bases.len()]; + let cpus = num_cpus::get(); + let chunk_size = if bases.len() < cpus { + 1 + } else { + bases.len() / cpus + }; + + // Perform wNAF over multiple cores, placing results into `projective`. 
+ crossbeam::thread::scope(|scope| { + for (bases, projective) in bases + .chunks_mut(chunk_size) + .zip(projective.chunks_mut(chunk_size)) + { + scope.spawn(move |_| { + let mut wnaf = Wnaf::new(); + + for (base, projective) in bases.iter_mut().zip(projective.iter_mut()) { + *projective = wnaf.base(base.into_projective(), 1).scalar(coeff); + } + + C::Projective::batch_normalization(projective); + projective + .iter() + .zip(bases.iter_mut()) + .for_each(|(projective, affine)| { + *affine = projective.into_affine(); + }); + }); + } + }) + .unwrap(); + } + + let delta_inv = privkey.delta.inverse().expect("nonzero"); + info!("phase2::MPCParameters::contribute() copying l"); + let mut l = (&self.params.l[..]).to_vec(); + info!("phase2::MPCParameters::contribute() copying h"); + let mut h = (&self.params.h[..]).to_vec(); + info!("phase2::MPCParameters::contribute() performing batch exponentiation of l"); + batch_exp(&mut l, delta_inv); + info!("phase2::MPCParameters::contribute() performing batch exponentiation of h"); + batch_exp(&mut h, delta_inv); + info!("phase2::MPCParameters::contribute() finished batch exponentiations"); + self.params.l = Arc::new(l); + self.params.h = Arc::new(h); + + self.params.vk.delta_g1 = self.params.vk.delta_g1.mul(privkey.delta).into_affine(); + self.params.vk.delta_g2 = self.params.vk.delta_g2.mul(privkey.delta).into_affine(); + + self.contributions.push(pubkey.clone()); + + // Calculate the hash of the public key and return it + { + let sink = io::sink(); + let mut sink = HashWriter::new(sink); + pubkey.write(&mut sink).unwrap(); + sink.into_hash() + } + } + + /// Verify the correctness of the parameters, given a circuit + /// instance. This will return all of the hashes that + /// contributors obtained when they ran + /// `MPCParameters::contribute`, for ensuring that contributions + /// exist in the final parameters. 
+ pub fn verify>(&self, circuit: C) -> Result, ()> { + let initial_params = MPCParameters::new(circuit).map_err(|_| ())?; + + // H/L will change, but should have same length + if initial_params.params.h.len() != self.params.h.len() { + error!("phase2::MPCParameters::verify() h's length has changed"); + return Err(()); + } + if initial_params.params.l.len() != self.params.l.len() { + error!("phase2::MPCParameters::verify() l's length has changed"); + return Err(()); + } + + // A/B_G1/B_G2 doesn't change at all + if initial_params.params.a != self.params.a { + error!("phase2::MPCParameters::verify() evaluated QAP a polynomial has changed"); + return Err(()); + } + if initial_params.params.b_g1 != self.params.b_g1 { + error!("phase2::MPCParameters::verify() evaluated QAP b_g1 polynomial has changed"); + return Err(()); + } + if initial_params.params.b_g2 != self.params.b_g2 { + error!("phase2::MPCParameters::verify() evaluated QAP b_g2 polynomial has changed"); + return Err(()); + } + + // alpha/beta/gamma don't change + if initial_params.params.vk.alpha_g1 != self.params.vk.alpha_g1 { + error!("phase2::MPCParameters::verify() vk's alpha has changed"); + return Err(()); + } + if initial_params.params.vk.beta_g1 != self.params.vk.beta_g1 { + error!("phase2::MPCParameters::verify() vk's beta_g1 has changed"); + return Err(()); + } + if initial_params.params.vk.beta_g2 != self.params.vk.beta_g2 { + error!("phase2::MPCParameters::verify() vk's beta_g2 has changed"); + return Err(()); + } + if initial_params.params.vk.gamma_g2 != self.params.vk.gamma_g2 { + error!("phase2::MPCParameters::verify() vk's gamma has changed"); + return Err(()); + } + + // IC shouldn't change, as gamma doesn't change + if initial_params.params.vk.ic != self.params.vk.ic { + error!("phase2::MPCParameters::verify() vk's ic has changed"); + return Err(()); + } + + // cs_hash should be the same + if initial_params.cs_hash[..] != self.cs_hash[..] 
{ + error!("phase2::MPCParameters::verify() cs_hash has changed"); + return Err(()); + } + + let sink = io::sink(); + let mut sink = HashWriter::new(sink); + sink.write_all(&initial_params.cs_hash[..]).unwrap(); + + let mut current_delta = G1Affine::one(); + let mut result = vec![]; + + for pubkey in &self.contributions { + let mut our_sink = sink.clone(); + our_sink + .write_all(pubkey.s.into_uncompressed().as_ref()) + .unwrap(); + our_sink + .write_all(pubkey.s_delta.into_uncompressed().as_ref()) + .unwrap(); + + pubkey.write(&mut sink).unwrap(); + + let h = our_sink.into_hash(); + + // The transcript must be consistent + if &pubkey.transcript[..] != h.as_ref() { + error!("phase2::MPCParameters::verify() transcripts differ"); + return Err(()); + } + + let r = hash_to_g2(h.as_ref()).into_affine(); + + // Check the signature of knowledge + if !same_ratio((r, pubkey.r_delta), (pubkey.s, pubkey.s_delta)) { + error!("phase2::MPCParameters::verify() pubkey's r and s were shifted by different deltas"); + return Err(()); + } + + // Check the change from the old delta is consistent + if !same_ratio((current_delta, pubkey.delta_after), (r, pubkey.r_delta)) { + error!("phase2::MPCParameters::verify() contribution's delta and r where shifted differently"); + return Err(()); + } + + current_delta = pubkey.delta_after; + + { + let sink = io::sink(); + let mut sink = HashWriter::new(sink); + pubkey.write(&mut sink).unwrap(); + result.push(sink.into_hash()); + } + } + + // Current parameters should have consistent delta in G1 + if current_delta != self.params.vk.delta_g1 { + error!("phase2::MPCParameters::verify() vk's delta_g1 differs from calculated delta"); + return Err(()); + } + + // Current parameters should have consistent delta in G2 + if !same_ratio( + (G1Affine::one(), current_delta), + (G2Affine::one(), self.params.vk.delta_g2), + ) { + error!("phase2::MPCParameters::verify() shift in vk's delta_g2 is inconsistent with calculated delta"); + return Err(()); + } + + // 
H and L queries should be updated with delta^-1 + if !same_ratio( + merge_pairs(&initial_params.params.h, &self.params.h), + (self.params.vk.delta_g2, G2Affine::one()), // reversed for inverse + ) { + error!("phase2::MPCParameters::verify() h queries have not shifted by delta^-1"); + return Err(()); + } + + if !same_ratio( + merge_pairs(&initial_params.params.l, &self.params.l), + (self.params.vk.delta_g2, G2Affine::one()), // reversed for inverse + ) { + error!("phase2::MPCParameters::verify() l queries have not shifted by delta^-1"); + return Err(()); + } + + Ok(result) + } + + /// Serialize these parameters. The serialized parameters + /// can be read by bellman as Groth16 `Parameters`. + pub fn write(&self, mut writer: W) -> io::Result<()> { + self.params.write(&mut writer)?; + writer.write_all(&self.cs_hash)?; + + writer.write_u32::(self.contributions.len() as u32)?; + for pubkey in &self.contributions { + pubkey.write(&mut writer)?; + } + + Ok(()) + } + + /// Serializes these parameters as `MPCSmall`. + pub fn write_small(&self, mut writer: W) -> io::Result<()> { + writer.write_all(self.params.vk.delta_g1.into_uncompressed().as_ref())?; + writer.write_all(self.params.vk.delta_g2.into_uncompressed().as_ref())?; + + writer.write_u32::(self.params.h.len() as u32)?; + for h in &*self.params.h { + writer.write_all(h.into_uncompressed().as_ref())?; + } + + writer.write_u32::(self.params.l.len() as u32)?; + for l in &*self.params.l { + writer.write_all(l.into_uncompressed().as_ref())?; + } + + writer.write_all(&self.cs_hash)?; + + writer.write_u32::(self.contributions.len() as u32)?; + for pubkey in &self.contributions { + pubkey.write(&mut writer)?; + } + + Ok(()) + } + + /// Deserialize these parameters. If `checked` is false, + /// we won't perform curve validity and group order + /// checks. 
+ pub fn read(mut reader: R, checked: bool) -> io::Result { + let params = Parameters::read(&mut reader, checked)?; + + let mut cs_hash = [0u8; 64]; + reader.read_exact(&mut cs_hash)?; + + let contributions_len = reader.read_u32::()? as usize; + + let mut contributions = vec![]; + for _ in 0..contributions_len { + contributions.push(PublicKey::read(&mut reader)?); + } + + info!( + "phase2::MPCParameters::read() vector lengths: ic={}, h={}, l={}, a={}, b_g1={}, \ + b_g2={}, contributions={}", + params.vk.ic.len(), + params.h.len(), + params.l.len(), + params.a.len(), + params.b_g1.len(), + params.b_g2.len(), + contributions.len(), + ); + + Ok(MPCParameters { + params, + cs_hash, + contributions, + }) + } + + // memcpy's the potentially large vectors behind Arc's (duplicates the arrays on the stack, + // does not increment ref-counts in `self`). + pub fn copy(&self) -> Self { + let mut params = self.clone(); + params.params.h = Arc::new((*self.params.h).clone()); + params.params.l = Arc::new((*self.params.l).clone()); + params.params.a = Arc::new((*self.params.a).clone()); + params.params.b_g1 = Arc::new((*self.params.b_g1).clone()); + params.params.b_g2 = Arc::new((*self.params.b_g2).clone()); + params + } + + // memcpy's the potentially large h and l vectors behind Arc's into a new `MPCSmall` (duplicates + // the h and l arrays on the stack, does not increment ref-counts for the h and l Arc's in `self`). + pub fn copy_small(&self) -> MPCSmall { + MPCSmall { + delta_g1: self.params.vk.delta_g1, + delta_g2: self.params.vk.delta_g2, + h: (*self.params.h).clone(), + l: (*self.params.l).clone(), + cs_hash: self.cs_hash, + contributions: self.contributions.clone(), + } + } + + // Updates `self` with a contribution (or contributions) that is in the `MPCSmall` params form. + // `MPCSmall` must contain at least one new contribution. This decrements the strong ref-counts + // by one for any Arc clones that were made from `self.h` and `self.l`. 
If either of `self`'s h + // and l Arc's have ref-count 1, then they will be dropped. + pub fn add_contrib(&mut self, contrib: MPCSmall) { + assert_eq!( + self.cs_hash[..], + contrib.cs_hash[..], + "large and small params have different cs_hash" + ); + + assert_eq!( + self.params.h.len(), + contrib.h.len(), + "large and small params have different h length" + ); + assert_eq!( + self.params.l.len(), + contrib.l.len(), + "large and small params have different l length" + ); + + assert!( + self.contributions.len() < contrib.contributions.len(), + "small params do not contain additional contributions" + ); + assert_eq!( + &self.contributions[..], + &contrib.contributions[..self.contributions.len()], + "small params cannot change prior contributions in large params" + ); + + // Unwrapping here is safe because we have already asserted that `contrib` contains at least + // one (new) contribution. + assert_eq!( + contrib.delta_g1, + contrib.contributions.last().unwrap().delta_after, + "small params are internally inconsistent wrt. G1 deltas" + ); + + let MPCSmall { + delta_g1, + delta_g2, + h, + l, + contributions, + .. + } = contrib; + self.params.vk.delta_g1 = delta_g1; + self.params.vk.delta_g2 = delta_g2; + self.params.h = Arc::new(h); + self.params.l = Arc::new(l); + self.contributions = contributions; + } + + // Returns true if a pair of large and small MPC params contain equal values. It is not required + // that `self`'s h and l Arc's point to the same memory locations as `small`'s non-Arc h and l + // vectors. + pub fn has_last_contrib(&self, small: &MPCSmall) -> bool { + self.params.vk.delta_g1 == small.delta_g1 + && self.params.vk.delta_g2 == small.delta_g2 + && *self.params.h == small.h + && *self.params.l == small.l + && self.cs_hash[..] == small.cs_hash[..] + && self.contributions == small.contributions + } +} + +/// This allows others to verify that you contributed. The hash produced +/// by `MPCParameters::contribute` is just a BLAKE2b hash of this object. 
+#[derive(Clone)] +struct PublicKey { + /// This is the delta (in G1) after the transformation, kept so that we + /// can check correctness of the public keys without having the entire + /// interstitial parameters for each contribution. + delta_after: G1Affine, + + /// Random element chosen by the contributor. + s: G1Affine, + + /// That element, taken to the contributor's secret delta. + s_delta: G1Affine, + + /// r is H(last_pubkey | s | s_delta), r_delta proves knowledge of delta + r_delta: G2Affine, + + /// Hash of the transcript (used for mapping to r) + transcript: [u8; 64], +} + +// Required by `assert_eq!()`. +impl Debug for PublicKey { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_struct("PublicKey") + .field("delta_after", &self.delta_after) + .field("s", &self.s) + .field("s_delta", &self.s_delta) + .field("r_delta", &self.r_delta) + .field("transcript", &self.transcript.to_vec()) + .finish() + } +} + +impl PublicKey { + fn write(&self, mut writer: W) -> io::Result<()> { + writer.write_all(self.delta_after.into_uncompressed().as_ref())?; + writer.write_all(self.s.into_uncompressed().as_ref())?; + writer.write_all(self.s_delta.into_uncompressed().as_ref())?; + writer.write_all(self.r_delta.into_uncompressed().as_ref())?; + writer.write_all(&self.transcript)?; + + Ok(()) + } + + fn read(mut reader: R) -> io::Result { + let mut g1_repr = G1Uncompressed::empty(); + let mut g2_repr = G2Uncompressed::empty(); + + reader.read_exact(g1_repr.as_mut())?; + let delta_after = g1_repr + .into_affine() + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + if delta_after.is_zero() { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + "point at infinity", + )); + } + + reader.read_exact(g1_repr.as_mut())?; + let s = g1_repr + .into_affine() + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + if s.is_zero() { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + "point at infinity", + )); + } + + 
reader.read_exact(g1_repr.as_mut())?; + let s_delta = g1_repr + .into_affine() + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + if s_delta.is_zero() { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + "point at infinity", + )); + } + + reader.read_exact(g2_repr.as_mut())?; + let r_delta = g2_repr + .into_affine() + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + if r_delta.is_zero() { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + "point at infinity", + )); + } + + let mut transcript = [0u8; 64]; + reader.read_exact(&mut transcript)?; + + Ok(PublicKey { + delta_after, + s, + s_delta, + r_delta, + transcript, + }) + } +} + +impl PartialEq for PublicKey { + fn eq(&self, other: &PublicKey) -> bool { + self.delta_after == other.delta_after + && self.s == other.s + && self.s_delta == other.s_delta + && self.r_delta == other.r_delta + && self.transcript[..] == other.transcript[..] + } +} + +/// Verify a contribution, given the old parameters and +/// the new parameters. Returns the hash of the contribution. +pub fn verify_contribution(before: &MPCParameters, after: &MPCParameters) -> Result<[u8; 64], ()> { + if after.contributions.len() != (before.contributions.len() + 1) { + error!( + "phase2::verify_contribution() 'after' params do not contain exactly one more \ + contribution than the 'before' params: n_contributions_before={}, \ + n_contributions_after={}", + before.contributions.len(), + after.contributions.len() + ); + return Err(()); + } + + // None of the previous transformations should change + if before.contributions[..] 
!= after.contributions[0..before.contributions.len()] { + error!("phase2::verify_contribution() 'after' params contributions differ from 'before' params contributions"); + return Err(()); + } + + // H/L will change, but should have same length + if before.params.h.len() != after.params.h.len() { + error!("phase2::verify_contribution() length of h has changed"); + return Err(()); + } + if before.params.l.len() != after.params.l.len() { + error!("phase2::verify_contribution() length of l has changed"); + return Err(()); + } + + // A/B_G1/B_G2 doesn't change at all + if before.params.a != after.params.a { + error!("phase2::verify_contribution() evaluated QAP a polynomial has changed"); + return Err(()); + } + if before.params.b_g1 != after.params.b_g1 { + error!("phase2::verify_contribution() evaluated QAP b_g1 polynomial has changed"); + return Err(()); + } + if before.params.b_g2 != after.params.b_g2 { + error!("phase2::verify_contribution() evaluated QAP b_g2 polynomial has changed"); + return Err(()); + } + + // alpha/beta/gamma don't change + if before.params.vk.alpha_g1 != after.params.vk.alpha_g1 { + error!("phase2::verify_contribution() vk's alpha_g1 hash changed"); + return Err(()); + } + if before.params.vk.beta_g1 != after.params.vk.beta_g1 { + error!("phase2::verify_contribution() vk's beta_g1 has changed"); + return Err(()); + } + if before.params.vk.beta_g2 != after.params.vk.beta_g2 { + error!("phase2::verify_contribution() vk's beta_g2 changed"); + return Err(()); + } + if before.params.vk.gamma_g2 != after.params.vk.gamma_g2 { + error!("phase2::verify_contribution() vk's gamma_g2 has changed"); + return Err(()); + } + + // IC shouldn't change, as gamma doesn't change + if before.params.vk.ic != after.params.vk.ic { + error!("phase2::verify_contribution() vk's ic has changed"); + return Err(()); + } + + // cs_hash should be the same + if before.cs_hash[..] != after.cs_hash[..] 
{ + error!("phase2::verify_contribution() cs_hash has changed"); + return Err(()); + } + + let sink = io::sink(); + let mut sink = HashWriter::new(sink); + sink.write_all(&before.cs_hash[..]).unwrap(); + + for pubkey in &before.contributions { + pubkey.write(&mut sink).unwrap(); + } + + let pubkey = after.contributions.last().unwrap(); + sink.write_all(pubkey.s.into_uncompressed().as_ref()) + .unwrap(); + sink.write_all(pubkey.s_delta.into_uncompressed().as_ref()) + .unwrap(); + + let h = sink.into_hash(); + + // The transcript must be consistent + if &pubkey.transcript[..] != h.as_ref() { + error!("phase2::verify_contribution() inconsistent transcript"); + return Err(()); + } + + let r = hash_to_g2(h.as_ref()).into_affine(); + + // Check the signature of knowledge + if !same_ratio((r, pubkey.r_delta), (pubkey.s, pubkey.s_delta)) { + error!("phase2::verify_contribution() contribution's r and s were shifted with different deltas"); + return Err(()); + } + + // Check the change from the old delta is consistent + if !same_ratio( + (before.params.vk.delta_g1, pubkey.delta_after), + (r, pubkey.r_delta), + ) { + error!("phase2::verify_contribution() contribution's delta and r where shifted with different delta"); + return Err(()); + } + + // Current parameters should have consistent delta in G1 + if pubkey.delta_after != after.params.vk.delta_g1 { + error!( + "phase2::verify_contribution() contribution's delta in G1 differs from vk's delta_g1" + ); + return Err(()); + } + + // Current parameters should have consistent delta in G2 + if !same_ratio( + (G1Affine::one(), pubkey.delta_after), + (G2Affine::one(), after.params.vk.delta_g2), + ) { + error!("phase2::verify_contribution() contribution's shift in delta (G1) is inconsistent with vk's shift in delta (G2)"); + return Err(()); + } + + // H and L queries should be updated with delta^-1 + if !same_ratio( + merge_pairs(&before.params.h, &after.params.h), + (after.params.vk.delta_g2, before.params.vk.delta_g2), // reversed 
for inverse + ) { + error!("phase2::verify_contribution() h was not updated by delta^-1"); + return Err(()); + } + if !same_ratio( + merge_pairs(&before.params.l, &after.params.l), + (after.params.vk.delta_g2, before.params.vk.delta_g2), // reversed for inverse + ) { + error!("phase2::verify_contribution() l was not updated by delta^-1"); + return Err(()); + } + + let sink = io::sink(); + let mut sink = HashWriter::new(sink); + pubkey.write(&mut sink).unwrap(); + + Ok(sink.into_hash()) +} + +/// Checks if pairs have the same ratio. +pub(crate) fn same_ratio(g1: (G1, G1), g2: (G1::Pair, G1::Pair)) -> bool { + g1.0.pairing_with(&g2.1) == g1.1.pairing_with(&g2.0) +} + +/// Computes a random linear combination over v1/v2. +/// +/// Checking that many pairs of elements are exponentiated by +/// the same `x` can be achieved (with high probability) with +/// the following technique: +/// +/// Given v1 = [a, b, c] and v2 = [as, bs, cs], compute +/// (a*r1 + b*r2 + c*r3, (as)*r1 + (bs)*r2 + (cs)*r3) for some +/// random r1, r2, r3. Given (g, g^s)... +/// +/// e(g, (as)*r1 + (bs)*r2 + (cs)*r3) = e(g^s, a*r1 + b*r2 + c*r3) +/// +/// ... with high probability. +pub(crate) fn merge_pairs(v1: &[G], v2: &[G]) -> (G, G) { + use rand::thread_rng; + use std::sync::Mutex; + + assert_eq!(v1.len(), v2.len()); + + let chunk = (v1.len() / num_cpus::get()) + 1; + + let s = Arc::new(Mutex::new(G::Projective::zero())); + let sx = Arc::new(Mutex::new(G::Projective::zero())); + + crossbeam::thread::scope(|scope| { + for (v1, v2) in v1.chunks(chunk).zip(v2.chunks(chunk)) { + let s = s.clone(); + let sx = sx.clone(); + + scope.spawn(move |_| { + // We do not need to be overly cautious of the RNG + // used for this check. 
+ let rng = &mut thread_rng(); + + let mut wnaf = Wnaf::new(); + let mut local_s = G::Projective::zero(); + let mut local_sx = G::Projective::zero(); + + for (v1, v2) in v1.iter().zip(v2.iter()) { + let rho = G::Scalar::random(rng); + let mut wnaf = wnaf.scalar(rho.into_repr()); + let v1 = wnaf.base(v1.into_projective()); + let v2 = wnaf.base(v2.into_projective()); + + local_s.add_assign(&v1); + local_sx.add_assign(&v2); + } + + s.lock().unwrap().add_assign(&local_s); + sx.lock().unwrap().add_assign(&local_sx); + }); + } + }) + .unwrap(); + + let s = s.lock().unwrap().into_affine(); + let sx = sx.lock().unwrap().into_affine(); + + (s, sx) +} + +/// This needs to be destroyed by at least one participant +/// for the final parameters to be secure. +struct PrivateKey { + delta: Fr, +} + +/// Compute a keypair, given the current parameters. Keypairs +/// cannot be reused for multiple contributions or contributions +/// in different parameters. +fn keypair(rng: &mut R, current: &MPCParameters) -> (PublicKey, PrivateKey) { + // Sample random delta + let delta: Fr = Fr::random(rng); + + // Compute delta s-pair in G1 + let s = G1Projective::random(rng).into_affine(); + let s_delta = s.mul(delta).into_affine(); + + // H(cs_hash | | s | s_delta) + let h = { + let sink = io::sink(); + let mut sink = HashWriter::new(sink); + + sink.write_all(¤t.cs_hash[..]).unwrap(); + for pubkey in ¤t.contributions { + pubkey.write(&mut sink).unwrap(); + } + sink.write_all(s.into_uncompressed().as_ref()).unwrap(); + sink.write_all(s_delta.into_uncompressed().as_ref()) + .unwrap(); + + sink.into_hash() + }; + + // This avoids making a weird assumption about the hash into the + // group. 
+ let transcript = h; + + // Compute delta s-pair in G2 + let r = hash_to_g2(&h).into_affine(); + let r_delta = r.mul(delta).into_affine(); + + ( + PublicKey { + delta_after: current.params.vk.delta_g1.mul(delta).into_affine(), + s, + s_delta, + r_delta, + transcript, + }, + PrivateKey { delta }, + ) +} + +/// Hashes to G2 using the first 32 bytes of `digest`. Panics if `digest` is less +/// than 32 bytes. +pub(crate) fn hash_to_g2(digest: &[u8]) -> G2Projective { + assert!(digest.len() >= 32); + + let mut seed = [0u8; 32]; + seed.copy_from_slice(&digest[..32]); + + G2Projective::random(&mut ChaChaRng::from_seed(seed)) +} + +/// Abstraction over a writer which hashes the data being written. +pub(crate) struct HashWriter { + writer: W, + hasher: Blake2b, +} + +impl Clone for HashWriter { + fn clone(&self) -> HashWriter { + HashWriter { + writer: io::sink(), + hasher: self.hasher.clone(), + } + } +} + +impl HashWriter { + /// Construct a new `HashWriter` given an existing `writer` by value. + pub fn new(writer: W) -> Self { + HashWriter { + writer, + hasher: Blake2b::new(), + } + } + + /// Destroy this writer and return the hash of what was written. + pub fn into_hash(self) -> [u8; 64] { + let mut tmp = [0u8; 64]; + tmp.copy_from_slice(self.hasher.finalize().as_ref()); + tmp + } +} + +impl Write for HashWriter { + fn write(&mut self, buf: &[u8]) -> io::Result { + let bytes = self.writer.write(buf)?; + + if bytes > 0 { + self.hasher.update(&buf[0..bytes]); + } + + Ok(bytes) + } + + fn flush(&mut self) -> io::Result<()> { + self.writer.flush() + } +} + +/// This is a cheap helper utility that exists purely +/// because Rust still doesn't have type-level integers +/// and so doesn't implement `PartialEq` for `[T; 64]` +pub fn contains_contribution(contributions: &[[u8; 64]], my_contribution: &[u8; 64]) -> bool { + for contrib in contributions { + if contrib[..] == my_contribution[..] 
{ + return true; + } + } + + false +} diff --git a/filecoin-proofs/src/bin/phase2.rs b/phase2/src/main.rs similarity index 99% rename from filecoin-proofs/src/bin/phase2.rs rename to phase2/src/main.rs index 5055c6cf3..20762593d 100644 --- a/filecoin-proofs/src/bin/phase2.rs +++ b/phase2/src/main.rs @@ -1,3 +1,5 @@ +#![deny(clippy::all, clippy::perf, clippy::correctness, rust_2018_idioms)] + use std::fmt::{self, Debug, Formatter}; use std::fs::{self, File, OpenOptions}; use std::io::{self, BufReader, BufWriter, Read, Seek, SeekFrom, Write}; @@ -13,6 +15,8 @@ use bellperson::bls::{Bls12, G1Affine, G1Uncompressed, G2Affine, G2Uncompressed} use bellperson::groth16; use byteorder::{BigEndian, ReadBytesExt}; use clap::{App, AppSettings, Arg, ArgGroup, SubCommand}; +use filecoin_phase2::small::{read_small_params_from_large_file, MPCSmall, Streamer}; +use filecoin_phase2::MPCParameters; use filecoin_proofs::constants::*; use filecoin_proofs::parameters::{ setup_params, window_post_public_params, winning_post_public_params, @@ -23,8 +27,6 @@ use filecoin_proofs::types::{ use filecoin_proofs::with_shape; use groupy::{CurveAffine, EncodedPoint}; use log::{error, info, warn}; -use phase2::small::{read_small_params_from_large_file, MPCSmall, Streamer}; -use phase2::MPCParameters; use rand::rngs::OsRng; use rand::{RngCore, SeedableRng}; use rand_chacha::ChaChaRng; @@ -313,7 +315,7 @@ fn blank_sdr_poseidon_params(sector_size: u64) -> PoRepPu }; let public_params = as CompoundProof< - StackedDrg, + StackedDrg<'_, Tree, Sha256Hasher>, _, >>::setup(&setup_params) .expect("public param setup failed"); @@ -377,7 +379,7 @@ fn create_initial_params( let start = Instant::now(); let public_params = blank_sdr_poseidon_params(sector_size.as_u64()); let circuit = as CompoundProof< - StackedDrg, + StackedDrg<'_, Tree, Sha256Hasher>, _, >>::blank_circuit(&public_params); dt_create_circuit = start.elapsed().as_secs(); @@ -390,7 +392,7 @@ fn create_initial_params( let start = Instant::now(); 
let public_params = blank_winning_post_poseidon_params::(sector_size.as_u64()); let circuit = as CompoundProof< - FallbackPoSt, + FallbackPoSt<'_, Tree>, FallbackPoStCircuit, >>::blank_circuit(&public_params); dt_create_circuit = start.elapsed().as_secs(); @@ -403,7 +405,7 @@ fn create_initial_params( let start = Instant::now(); let public_params = blank_window_post_poseidon_params::(sector_size.as_u64()); let circuit = as CompoundProof< - FallbackPoSt, + FallbackPoSt<'_, Tree>, FallbackPoStCircuit, >>::blank_circuit(&public_params); dt_create_circuit = start.elapsed().as_secs(); @@ -814,7 +816,7 @@ fn verify_contribution( info!("verifying contribution"); let start_verification = Instant::now(); - let calculated_contrib = phase2::small::verify_contribution_small( + let calculated_contrib = filecoin_phase2::small::verify_contribution_small( &before_params.expect("before params failure"), &after_params.expect("after params failure"), ) @@ -901,7 +903,7 @@ struct FileInfo { } impl Debug for FileInfo { - fn fmt(&self, f: &mut Formatter) -> fmt::Result { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.debug_struct("FileInfo") .field("delta_g1_offset", &self.delta_g1_offset) .field("delta_g1", &self.delta_g1) @@ -1065,7 +1067,7 @@ fn parameter_identifier(sector_size: u64, proof let public_params = blank_sdr_poseidon_params::(sector_size); as CacheableParameters< - StackedCircuit, + StackedCircuit<'_, Tree, Sha256Hasher>, _, >>::cache_identifier(&public_params) } diff --git a/phase2/src/small.rs b/phase2/src/small.rs new file mode 100644 index 000000000..4ea40c5ba --- /dev/null +++ b/phase2/src/small.rs @@ -0,0 +1,900 @@ +use std::fmt::{self, Debug, Formatter}; +use std::fs::File; +use std::io::{self, BufReader, BufWriter, Read, Seek, SeekFrom, Write}; +use std::mem::size_of; + +use bellperson::bls::{Fr, G1Affine, G1Projective, G1Uncompressed, G2Affine, G2Uncompressed}; +use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt}; +use fff::{Field, PrimeField}; +use 
groupy::{CurveAffine, CurveProjective, EncodedPoint, Wnaf}; +use log::{error, info}; +use rand::Rng; + +use crate::{hash_to_g2, merge_pairs, same_ratio, HashWriter, PrivateKey, PublicKey}; + +#[derive(Clone)] +pub struct MPCSmall { + // The Groth16 verification-key's deltas G1 and G2. For all non-initial parameters + // `delta_g1 == contributions.last().delta_after`. + pub(crate) delta_g1: G1Affine, + pub(crate) delta_g2: G2Affine, + + // The Groth16 parameter's h and l vectors. + pub(crate) h: Vec, + pub(crate) l: Vec, + + // The MPC parameter's constraint system digest and participant public-key set. + pub(crate) cs_hash: [u8; 64], + pub(crate) contributions: Vec, +} + +pub struct Streamer<'a> { + delta_g1: G1Affine, + delta_g2: G2Affine, + h_len_offset: u64, + cs_hash: [u8; 64], + contributions: Vec, + path: &'a str, + read_raw: bool, + write_raw: bool, +} + +impl<'a> Streamer<'a> { + // Create a new `Streamer` from small params file. + pub fn new(path: &'a str, read_raw: bool, write_raw: bool) -> io::Result> { + let mut file = File::open(path)?; + + let delta_g1: G1Affine = read_g1(&mut file)?; + let delta_g2: G2Affine = read_g2(&mut file)?; + let g1_size = size_of::(); + let g2_size = size_of::(); + + let chunk_element_read_size = if read_raw { + G1Affine::raw_fmt_size() + } else { + size_of::() + }; + let h_len_offset = g1_size + g2_size; + let h_len = file.read_u32::()? as usize; + file.seek(SeekFrom::Current((h_len * chunk_element_read_size) as i64))?; + + let l_len = file.read_u32::()? as usize; + file.seek(SeekFrom::Current((l_len * chunk_element_read_size) as i64))?; + let mut cs_hash = [0u8; 64]; + file.read_exact(&mut cs_hash)?; + + let contributions_len = file.read_u32::()? 
as usize; + let mut contributions = Vec::::with_capacity(contributions_len); + for _ in 0..contributions_len { + contributions.push(PublicKey::read(&mut file)?); + } + + let streamer = Streamer { + delta_g1, + delta_g2, + h_len_offset: h_len_offset as u64, + cs_hash, + contributions, + path, + read_raw, + write_raw, + }; + + Ok(streamer) + } + + // Create a new `Streamer` from large params file. + pub fn new_from_large_file( + path: &'a str, + read_raw: bool, + write_raw: bool, + ) -> io::Result> { + let mut file = File::open(path)?; + + /* + `MPCParameters` are serialized in the order: + vk.alpha_g1 + vk.beta_g1 + vk.beta_g2 + vk.gamma_g2 + vk.delta_g1 + vk.delta_g2 + vk.ic length (4 bytes) + vk.ic (G1) + h length (4 bytes) + h (G1) + l length (4 bytes) + l (G1) + a length (4 bytes) + a (G1) + b_g1 length (4 bytes) + b_g1 (G1) + b_g2 length (4 bytes) + b_g2 (G2) + cs_hash (64 bytes) + contributions length (4 bytes) + contributions (544 bytes per PublicKey) + */ + + let g1_size = size_of::() as u64; // 96 bytes + let g2_size = size_of::() as u64; // 192 bytes + + let chunk_element_read_size = if read_raw { + G1Affine::raw_fmt_size() as u64 + } else { + size_of::() as u64 + }; + + // Read delta_g1, delta_g2, and ic's length. + let delta_g1_offset = g1_size + g1_size + g2_size + g2_size; // + vk.alpha_g1 + vk.beta_g1 + vk.beta_g2 + vk.gamma_g2 + file.seek(SeekFrom::Start(delta_g1_offset)).unwrap(); + let delta_g1 = read_g1(&mut file)?; + let delta_g2 = read_g2(&mut file)?; + let ic_len = file.read_u32::()? as u64; + + // Read h's length. + let h_len_offset = delta_g1_offset + g1_size + g2_size + 4 + ic_len * g1_size; // + vk.delta_g1 + vk.delta_g2 + ic length + ic + file.seek(SeekFrom::Start(h_len_offset)).unwrap(); + let h_len = file.read_u32::()? as u64; + + // Read l's length. + let l_len_offset = h_len_offset + 4 + h_len * chunk_element_read_size; // + h length + h + file.seek(SeekFrom::Start(l_len_offset)).unwrap(); + let l_len = file.read_u32::()? 
as u64; + + // Read a's length. + let a_len_offset = l_len_offset + 4 + l_len * chunk_element_read_size; // + l length + l + file.seek(SeekFrom::Start(a_len_offset)).unwrap(); + let a_len = file.read_u32::()? as u64; + + // Read b_g1's length. + let b_g1_len_offset = a_len_offset + 4 + a_len * g1_size; // + a length + a + file.seek(SeekFrom::Start(b_g1_len_offset)).unwrap(); + let b_g1_len = file.read_u32::()? as u64; + + // Read b_g2's length. + let b_g2_len_offset = b_g1_len_offset + 4 + b_g1_len * g1_size; // + b_g1 length + b_g1 + file.seek(SeekFrom::Start(b_g2_len_offset)).unwrap(); + let b_g2_len = file.read_u32::()? as u64; + + // Read cs_hash. + let cs_hash_offset = b_g2_len_offset + 4 + b_g2_len * g2_size; // + b_g2 length + b_g2 + file.seek(SeekFrom::Start(cs_hash_offset)).unwrap(); + let mut cs_hash = [0u8; 64]; + file.read_exact(&mut cs_hash)?; + + // Read contribution's length. + let contributions_len = file.read_u32::()? as u64; + + // Read the contributions. + let contributions_offset = cs_hash_offset + 64 + 4; // + 64-byte cs_hash + contributions length + file.seek(SeekFrom::Start(contributions_offset)).unwrap(); + let mut contributions = Vec::::with_capacity(contributions_len as usize); + for _ in 0..contributions_len { + contributions.push(PublicKey::read(&mut file)?); + } + + let streamer = Streamer { + delta_g1, + delta_g2, + h_len_offset, + cs_hash, + contributions, + path, + read_raw, + write_raw, + }; + + Ok(streamer) + } + + pub fn contribute( + &mut self, + rng: &mut RR, + out_file: File, + chunk_size: usize, + ) -> io::Result<[u8; 64]> { + let chunk_element_read_size = if self.read_raw { + G1Affine::raw_fmt_size() + } else { + size_of::() + }; + let chunk_element_write_size = if self.write_raw { + G1Affine::raw_fmt_size() + } else { + size_of::() + }; + + let read_buf_size = chunk_element_read_size * chunk_size; + let write_buf_size = chunk_element_write_size * chunk_size; + + let file = File::open(self.path)?; + let mut reader = 
BufReader::with_capacity(read_buf_size, file); + let mut writer = BufWriter::with_capacity(write_buf_size, out_file); + + let (pubkey, privkey) = keypair(rng, &self.cs_hash, &self.contributions, &self.delta_g1); + + self.delta_g1 = self.delta_g1.mul(privkey.delta).into_affine(); + self.delta_g2 = self.delta_g2.mul(privkey.delta).into_affine(); + + let delta_inv = privkey.delta.inverse().expect("nonzero"); + + writer.write_all(self.delta_g1.into_uncompressed().as_ref())?; + writer.write_all(self.delta_g2.into_uncompressed().as_ref())?; + + { + reader.seek(SeekFrom::Start(self.h_len_offset))?; + let h_len = reader.read_u32::()?; + writer.write_u32::(h_len)?; + + let chunks_to_read = h_len as usize; + let mut chunks_read = 0; + let mut this_chunk_size = usize::min(chunk_size, chunks_to_read - chunks_read); + + let mut h_chunk = Vec::::with_capacity(this_chunk_size); + + info!("phase2::MPCParameters::contribute() beginning streaming h"); + while this_chunk_size > 0 { + for _ in 0..this_chunk_size { + h_chunk.push(load_g1(&mut reader, self.read_raw, false)?); + } + chunks_read += this_chunk_size; + + batch_exp(&mut h_chunk, delta_inv); + + for h in &h_chunk { + dump_g1(&mut writer, h, self.write_raw)?; + } + + this_chunk_size = usize::min(chunk_size, chunks_to_read - chunks_read); + h_chunk.truncate(0); + } + info!("phase2::MPCParameters::contribute() finished streaming h"); + } + { + let l_len = reader.read_u32::()?; + writer.write_u32::(l_len)?; + + let chunks_to_read = l_len as usize; + let mut chunks_read = 0; + let mut this_chunk_size = usize::min(chunk_size, chunks_to_read - chunks_read); + + let mut l_chunk = Vec::::new(); + info!("phase2::MPCParameters::contribute() beginning streaming l"); + while this_chunk_size > 0 { + for _ in 0..this_chunk_size { + l_chunk.push(load_g1(&mut reader, self.read_raw, false)?); + } + chunks_read += this_chunk_size; + + batch_exp(&mut l_chunk, delta_inv); + + for l in &l_chunk { + dump_g1(&mut writer, l, self.write_raw)?; + } + + 
this_chunk_size = usize::min(chunk_size, chunks_to_read - chunks_read); + l_chunk.truncate(0); + } + info!("phase2::MPCParameters::contribute() finished streaming l"); + } + + self.contributions.push(pubkey.clone()); + + writer.write_all(&self.cs_hash)?; + + writer.write_u32::(self.contributions.len() as u32)?; + + for pubkey in &self.contributions { + pubkey.write(&mut writer)?; + } + + { + let sink = io::sink(); + let mut sink = HashWriter::new(sink); + pubkey.write(&mut sink).unwrap(); + Ok(sink.into_hash()) + } + } + + /// Read from self and write out to `writer`, respecting own `read_raw` and `write_raw` flags but without otherwise changing data. + /// Useful for converting to and from raw format. + pub fn process(&mut self, out_file: File, chunk_size: usize) -> io::Result<()> { + let chunk_element_read_size = if self.read_raw { + G1Affine::raw_fmt_size() + } else { + size_of::() + }; + let chunk_element_write_size = if self.write_raw { + G1Affine::raw_fmt_size() + } else { + size_of::() + }; + + let read_buf_size = chunk_element_read_size * chunk_size; + let write_buf_size = chunk_element_write_size * chunk_size; + + let file = File::open(self.path)?; + let mut reader = BufReader::with_capacity(read_buf_size, file); + let mut writer = BufWriter::with_capacity(write_buf_size, out_file); + + writer.write_all(self.delta_g1.into_uncompressed().as_ref())?; + writer.write_all(self.delta_g2.into_uncompressed().as_ref())?; + + reader.seek(SeekFrom::Start(self.h_len_offset))?; + { + let h_len = reader.read_u32::()?; + writer.write_u32::(h_len)?; + + let chunks_to_read = h_len as usize; + let mut chunks_read = 0; + let mut this_chunk_size = usize::min(chunk_size, chunks_to_read - chunks_read); + + let mut h_chunk = Vec::::with_capacity(this_chunk_size); + + info!("phase2::MPCParameters::convert() beginning streaming h"); + while this_chunk_size > 0 { + for _ in 0..this_chunk_size { + h_chunk.push(load_g1(&mut reader, self.read_raw, false)?); + } + chunks_read += 
this_chunk_size; + + for h in &h_chunk { + dump_g1(&mut writer, h, self.write_raw)?; + } + + this_chunk_size = usize::min(chunk_size, chunks_to_read - chunks_read); + h_chunk.truncate(0); + } + info!("phase2::MPCParameters::convert() finished streaming h"); + } + + { + let l_len = reader.read_u32::()?; + writer.write_u32::(l_len)?; + + let chunks_to_read = l_len as usize; + let mut chunks_read = 0; + let mut this_chunk_size = usize::min(chunk_size, chunks_to_read - chunks_read); + + let mut l_chunk = Vec::::new(); + info!("phase2::MPCParameters::convert() beginning streaming l"); + while this_chunk_size > 0 { + for _ in 0..this_chunk_size { + l_chunk.push(load_g1(&mut reader, self.read_raw, false)?); + } + chunks_read += this_chunk_size; + + for l in &l_chunk { + dump_g1(&mut writer, l, self.write_raw)?; + } + + this_chunk_size = usize::min(chunk_size, chunks_to_read - chunks_read); + l_chunk.truncate(0); + } + info!("phase2::MPCParameters::convert() finished streaming l"); + } + + writer.write_all(&self.cs_hash)?; + + writer.write_u32::(self.contributions.len() as u32)?; + + for pubkey in &self.contributions { + pubkey.write(&mut writer)?; + } + Ok(()) + } +} + +// Required by `assert_eq!()`. +impl Debug for MPCSmall { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_struct("MPCSmall") + .field("delta_g1", &self.delta_g1) + .field("delta_g2", &self.delta_g2) + .field("h", &format!("", self.h.len())) + .field("l", &format!("", self.l.len())) + .field("cs_hash", &self.cs_hash.to_vec()) + .field( + "contributions", + &format!("", self.contributions.len()), + ) + .finish() + } +} + +impl PartialEq for MPCSmall { + fn eq(&self, other: &Self) -> bool { + self.h == other.h + && self.l == other.l + && self.delta_g1 == other.delta_g1 + && self.delta_g2 == other.delta_g2 + && self.cs_hash[..] == other.cs_hash[..] 
+ && self.contributions == other.contributions + } +} + +impl MPCSmall { + pub fn contribute(&mut self, rng: &mut R) -> [u8; 64] { + let (pubkey, privkey) = keypair(rng, &self.cs_hash, &self.contributions, &self.delta_g1); + + self.delta_g1 = self.delta_g1.mul(privkey.delta).into_affine(); + self.delta_g2 = self.delta_g2.mul(privkey.delta).into_affine(); + + let delta_inv = privkey.delta.inverse().expect("nonzero"); + + info!("phase2::MPCParameters::contribute() batch_exp of h"); + batch_exp(&mut self.h, delta_inv); + info!("phase2::MPCParameters::contribute() finished batch_exp of h"); + + info!("phase2::MPCParameters::contribute() batch_exp of l"); + batch_exp(&mut self.l, delta_inv); + info!("phase2::MPCParameters::contribute() finished batch_exp of l"); + + self.contributions.push(pubkey.clone()); + + { + let sink = io::sink(); + let mut sink = HashWriter::new(sink); + pubkey.write(&mut sink).unwrap(); + sink.into_hash() + } + } + + /// Deserialize these parameters. + pub fn read(mut reader: R, raw: bool, check_raw: bool) -> io::Result { + let delta_g1: G1Affine = read_g1(&mut reader)?; + let delta_g2: G2Affine = read_g2(&mut reader)?; + + let h_len = reader.read_u32::()? as usize; + let mut h = Vec::::with_capacity(h_len); + for _ in 0..h_len { + h.push(load_g1(&mut reader, raw, check_raw)?); + } + + let l_len = reader.read_u32::()? as usize; + let mut l = Vec::::with_capacity(l_len); + for _ in 0..l_len { + l.push(load_g1(&mut reader, raw, check_raw)?); + } + + let mut cs_hash = [0u8; 64]; + reader.read_exact(&mut cs_hash)?; + + let contributions_len = reader.read_u32::()? 
as usize; + let mut contributions = Vec::::with_capacity(contributions_len); + for _ in 0..contributions_len { + contributions.push(PublicKey::read(&mut reader)?); + } + + info!( + "phase2::MPCSmall::read() read vector lengths: h={}, l={}, contributions={}", + h.len(), + l.len(), + contributions.len(), + ); + + Ok(MPCSmall { + delta_g1, + delta_g2, + h, + l, + cs_hash, + contributions, + }) + } + + pub fn write(&self, mut writer: W) -> io::Result<()> { + writer.write_all(self.delta_g1.into_uncompressed().as_ref())?; + writer.write_all(self.delta_g2.into_uncompressed().as_ref())?; + + writer.write_u32::(self.h.len() as u32)?; + for h in &*self.h { + writer.write_all(h.into_uncompressed().as_ref())?; + } + + writer.write_u32::(self.l.len() as u32)?; + for l in &*self.l { + writer.write_all(l.into_uncompressed().as_ref())?; + } + + writer.write_all(&self.cs_hash)?; + + writer.write_u32::(self.contributions.len() as u32)?; + for pubkey in &self.contributions { + pubkey.write(&mut writer)?; + } + + Ok(()) + } +} + +fn keypair( + rng: &mut R, + prev_cs_hash: &[u8; 64], + prev_contributions: &[PublicKey], + prev_delta_g1: &G1Affine, +) -> (PublicKey, PrivateKey) { + // Sample random delta + let delta: Fr = Fr::random(rng); + + // Compute delta s-pair in G1 + let s = G1Projective::random(rng).into_affine(); + let s_delta: G1Affine = s.mul(delta).into_affine(); + + // H(cs_hash | | s | s_delta) + let h = { + let sink = io::sink(); + let mut sink = HashWriter::new(sink); + + sink.write_all(&prev_cs_hash[..]).unwrap(); + for pubkey in prev_contributions { + pubkey.write(&mut sink).unwrap(); + } + sink.write_all(s.into_uncompressed().as_ref()).unwrap(); + sink.write_all(s_delta.into_uncompressed().as_ref()) + .unwrap(); + + sink.into_hash() + }; + + // This avoids making a weird assumption about the hash into the + // group. 
+ let transcript = h; + + // Compute delta s-pair in G2 + let r: G2Affine = hash_to_g2(&h).into_affine(); + let r_delta: G2Affine = r.mul(delta).into_affine(); + + ( + PublicKey { + delta_after: prev_delta_g1.mul(delta).into_affine(), + s, + s_delta, + r_delta, + transcript, + }, + PrivateKey { delta }, + ) +} + +// Multiplies each element of `bases` by `coeff` (`coeff` is the number of times each base is added +// to itself when the curve group is written additively). +fn batch_exp(bases: &mut [G1Affine], coeff: Fr) { + let coeff = coeff.into_repr(); + + let cpus = num_cpus::get(); + let chunk_size = if bases.len() < cpus { + 1 + } else { + bases.len() / cpus + }; + + let mut products = vec![G1Projective::zero(); bases.len()]; + + // Multiply each base by `coeff`. + crossbeam::thread::scope(|scope| { + for (bases, products) in bases + .chunks_mut(chunk_size) + .zip(products.chunks_mut(chunk_size)) + { + scope.spawn(move |_| { + let mut wnaf = Wnaf::new(); + + for (base, products) in bases.iter_mut().zip(products.iter_mut()) { + *products = wnaf.base(base.into_projective(), 1).scalar(coeff); + } + // Normalize the projective products. + G1Projective::batch_normalization(products); + + bases + .iter_mut() + .zip(products.iter()) + .for_each(|(affine, projective)| { + *affine = projective.into_affine(); + }); + }); + } + }) + .unwrap(); +} + +pub fn verify_contribution_small(before: &MPCSmall, after: &MPCSmall) -> Result<[u8; 64], ()> { + // The after params must contain exactly one additonal contribution. + if before.contributions.len() + 1 != after.contributions.len() { + error!( + "phase2::verify_contribution_small() non-sequential contributions: + before.contributions.len()={}, \ + after.contributions.len()={}", + before.contributions.len(), + after.contributions.len() + ); + return Err(()); + } + + // Previous participant public keys should not change. + if before.contributions[..] 
!= after.contributions[..after.contributions.len() - 1] { + error!("phase2::verify_contribution_small() previous public keys have changed"); + return Err(()); + } + + let before_is_initial = before.contributions.is_empty(); + let after_pubkey = after.contributions.last().unwrap(); + + // Check that the before params' `delta_g1` and `delta_after` are the same value. + if before_is_initial { + if before.delta_g1 != G1Affine::one() || before.delta_g2 != G2Affine::one() { + error!( + "phase2::verify_contribution_small() initial params do not have identity deltas" + ); + } + } else { + let before_pubkey = before.contributions.last().unwrap(); + if before.delta_g1 != before_pubkey.delta_after { + error!("phase2::verify_contribution_small() before params' delta_g1 and delta_after are not equal"); + return Err(()); + } + }; + // Check that the after params' `delta_g1` and `delta_after` are the same value. + if after.delta_g1 != after_pubkey.delta_after { + error!("phase2::verify_contribution_small() after params' delta_g1 and delta_after are not equal"); + return Err(()); + } + + // h and l will change from the contribution, but should have same length. + if before.h.len() != after.h.len() { + error!("phase2::verify_contribution_small() length of h has changed"); + return Err(()); + } + if before.l.len() != after.l.len() { + error!("phase2::verify_contribution_small() length of l has changed"); + return Err(()); + } + + // cs_hash should be the same. + if before.cs_hash[..] != after.cs_hash[..] { + error!("phase2::verify_contribution_small() cs_hash has changed"); + return Err(()); + } + + // Calculate the expected after params transcript. 
+ let sink = io::sink(); + let mut sink = HashWriter::new(sink); + sink.write_all(&before.cs_hash[..]).unwrap(); + for pubkey in &before.contributions { + pubkey.write(&mut sink).unwrap(); + } + sink.write_all(after_pubkey.s.into_uncompressed().as_ref()) + .unwrap(); + sink.write_all(after_pubkey.s_delta.into_uncompressed().as_ref()) + .unwrap(); + let calculated_after_transcript = sink.into_hash(); + + // Check the after params transcript against its calculated transcript. + if &after_pubkey.transcript[..] != calculated_after_transcript.as_ref() { + error!("phase2::verify_contribution_small() inconsistent transcript"); + return Err(()); + } + + let after_r = hash_to_g2(&after_pubkey.transcript[..]).into_affine(); + + // Check the signature of knowledge. Check that the participant's r and s were shifted by the + // same factor. + if !same_ratio( + (after_r, after_pubkey.r_delta), + (after_pubkey.s, after_pubkey.s_delta), + ) { + error!("phase2::verify_contribution_small() participant's r and s were shifted by different deltas"); + return Err(()); + } + + // Check that delta_g1 and r were shifted by the same factor. + if !same_ratio( + (before.delta_g1, after.delta_g1), + (after_r, after_pubkey.r_delta), + ) { + error!("phase2::verify_contribution_small() participant's delta_g1 and r where shifted by different deltas"); + return Err(()); + } + + // Check that delta_g1 and delta_g2 were shifted by the same factor. + if !same_ratio( + (G1Affine::one(), after.delta_g1), + (G2Affine::one(), after.delta_g2), + ) { + error!("phase2::verify_contribution_small() delta_g1 and delta_g2 were shifted by different deltas"); + return Err(()); + } + + // h and l queries should be updated with `delta^-1`. 
+ if !same_ratio( + merge_pairs(&before.h, &after.h), + (after.delta_g2, before.delta_g2), // reversed for inverse + ) { + error!("phase2::verify_contribution_small() h was not updated by delta^-1"); + return Err(()); + } + if !same_ratio( + merge_pairs(&before.l, &after.l), + (after.delta_g2, before.delta_g2), // reversed for inverse + ) { + error!("phase2::verify_contribution_small() l was not updated by delta^-1"); + return Err(()); + } + + // Calculate the "after" participant's contribution hash. + let sink = io::sink(); + let mut sink = HashWriter::new(sink); + after_pubkey.write(&mut sink).unwrap(); + Ok(sink.into_hash()) +} + +#[inline] +pub fn read_g1(mut reader: R) -> io::Result { + let mut affine_bytes = G1Uncompressed::empty(); + reader.read_exact(affine_bytes.as_mut())?; + let affine = affine_bytes + .into_affine() + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + if affine.is_zero() { + let e = io::Error::new( + io::ErrorKind::InvalidData, + "deserialized G1Affine is point at infinity", + ); + Err(e) + } else { + Ok(affine) + } +} + +#[inline] +fn load_g1(mut reader: R, raw: bool, check_raw: bool) -> io::Result { + if raw { + if check_raw { + G1Affine::read_raw_checked(&mut reader) + } else { + G1Affine::read_raw(&mut reader) + } + } else { + read_g1(reader) + } +} + +#[inline] +fn dump_g1(mut writer: W, g1: &G1Affine, raw: bool) -> io::Result { + if raw { + g1.write_raw(&mut writer) + } else { + writer.write(g1.into_uncompressed().as_ref()) + } +} + +#[inline] +pub fn read_g2(mut reader: R) -> io::Result { + let mut affine_bytes = G2Uncompressed::empty(); + reader.read_exact(affine_bytes.as_mut())?; + let affine = affine_bytes + .into_affine() + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + if affine.is_zero() { + Err(io::Error::new( + io::ErrorKind::InvalidData, + "deserialized G2Affine is point at infinity", + )) + } else { + Ok(affine) + } +} + +pub fn read_small_params_from_large_file(large_path: &str) -> 
io::Result { + /* + `MPCParameters` are serialized in the order: + vk.alpha_g1 + vk.beta_g1 + vk.beta_g2 + vk.gamma_g2 + vk.delta_g1 + vk.delta_g2 + vk.ic length (4 bytes) + vk.ic (G1) + h length (4 bytes) + h (G1) + l length (4 bytes) + l (G1) + a length (4 bytes) + a (G1) + b_g1 length (4 bytes) + b_g1 (G1) + b_g2 length (4 bytes) + b_g2 (G2) + cs_hash (64 bytes) + contributions length (4 bytes) + contributions (544 bytes per PublicKey) + */ + + let g1_size = size_of::() as u64; // 96 bytes + let g2_size = size_of::() as u64; // 192 bytes + + let mut file = File::open(large_path)?; + + // Read delta_g1, delta_g2, and ic's length. + let delta_g1_offset = g1_size + g1_size + g2_size + g2_size; // + vk.alpha_g1 + vk.beta_g1 + vk.beta_g2 + vk.gamma_g2 + file.seek(SeekFrom::Start(delta_g1_offset)).unwrap(); + let delta_g1 = read_g1(&mut file)?; + let delta_g2 = read_g2(&mut file)?; + let ic_len = file.read_u32::()? as u64; + + // Read h's length. + let h_len_offset = delta_g1_offset + g1_size + g2_size + 4 + ic_len * g1_size; // + vk.delta_g1 + vk.delta_g2 + ic length + ic + file.seek(SeekFrom::Start(h_len_offset)).unwrap(); + let h_len = file.read_u32::()? as u64; + + // Read l's length. + let l_len_offset = h_len_offset + 4 + h_len * g1_size; // + h length + h + file.seek(SeekFrom::Start(l_len_offset)).unwrap(); + let l_len = file.read_u32::()? as u64; + + // Read a's length. + let a_len_offset = l_len_offset + 4 + l_len * g1_size; // + l length + l + file.seek(SeekFrom::Start(a_len_offset)).unwrap(); + let a_len = file.read_u32::()? as u64; + + // Read b_g1's length. + let b_g1_len_offset = a_len_offset + 4 + a_len * g1_size; // + a length + a + file.seek(SeekFrom::Start(b_g1_len_offset)).unwrap(); + let b_g1_len = file.read_u32::()? as u64; + + // Read b_g2's length. + let b_g2_len_offset = b_g1_len_offset + 4 + b_g1_len * g1_size; // + b_g1 length + b_g1 + file.seek(SeekFrom::Start(b_g2_len_offset)).unwrap(); + let b_g2_len = file.read_u32::()? 
as u64; + + // Read cs_hash. + let cs_hash_offset = b_g2_len_offset + 4 + b_g2_len * g2_size; // + b_g2 length + b_g2 + file.seek(SeekFrom::Start(cs_hash_offset)).unwrap(); + let mut cs_hash = [0u8; 64]; + file.read_exact(&mut cs_hash)?; + + // Read contribution's length. + let contributions_len = file.read_u32::()? as u64; + + drop(file); + + // Read the (potentially large) h, l, and contributions arrays using buffered io. + let file = File::open(large_path)?; + let mut reader = BufReader::with_capacity(1024 * 1024, file); + + // Read h. + let h_offset = h_len_offset + 4; // + h length + reader.seek(SeekFrom::Start(h_offset)).unwrap(); + let mut h = Vec::::with_capacity(h_len as usize); + for _ in 0..h_len { + h.push(read_g1(&mut reader)?); + } + + // Read l. Skip l's length because it was already read. + let _ = reader.read_u32::()? as u64; + let mut l = Vec::::with_capacity(l_len as usize); + for _ in 0..l_len { + l.push(read_g1(&mut reader)?); + } + + // Read the contributions. + let contributions_offset = cs_hash_offset + 64 + 4; // + 64-byte cs_hash + contributions length + reader.seek(SeekFrom::Start(contributions_offset)).unwrap(); + let mut contributions = Vec::::with_capacity(contributions_len as usize); + for _ in 0..contributions_len { + contributions.push(PublicKey::read(&mut reader)?); + } + + Ok(MPCSmall { + delta_g1, + delta_g2, + h, + l, + cs_hash, + contributions, + }) +} diff --git a/phase2/tests/large.rs b/phase2/tests/large.rs new file mode 100644 index 000000000..d04760c25 --- /dev/null +++ b/phase2/tests/large.rs @@ -0,0 +1,75 @@ +mod mimc; + +use std::path::Path; + +use bellperson::bls::{Bls12, Fr}; +use bellperson::groth16::{create_random_proof, prepare_verifying_key, verify_proof}; +use fff::Field; +use filecoin_phase2::{contains_contribution, verify_contribution, MPCParameters}; +use rand::thread_rng; + +use mimc::{mimc as mimc_hash, MiMCDemo, MIMC_ROUNDS}; + +// This test is marked as ignore because we haven't checked-in the phase1 file 
required for this +// test to pass when run via CI. To run this test you must have the correct phase1 params file in +// the top level directory of this crate. +#[test] +#[ignore] +fn test_large_params() { + assert!( + Path::new("./phase1radix2m10").exists(), + "the phase1 file `phase1radix2m10` must be in the crate's top level directory" + ); + + let rng = &mut thread_rng(); + + let constants = (0..MIMC_ROUNDS) + .map(|_| Fr::random(rng)) + .collect::>(); + + let circuit = MiMCDemo:: { + xl: None, + xr: None, + constants: &constants, + }; + + let mut params = MPCParameters::new(circuit).unwrap(); + let old_params = params.copy(); + params.contribute(rng); + + let first_contrib = verify_contribution(&old_params, ¶ms).expect("should verify"); + + let old_params = params.copy(); + params.contribute(rng); + + let second_contrib = verify_contribution(&old_params, ¶ms).expect("should verify"); + + let all_contributions = params + .verify(MiMCDemo:: { + xl: None, + xr: None, + constants: &constants, + }) + .unwrap(); + + assert!(contains_contribution(&all_contributions, &first_contrib)); + assert!(contains_contribution(&all_contributions, &second_contrib)); + + // Create a Groth16 proof using the generated parameters and verfy that the proof is valid. + let groth_params = params.get_params(); + + // Generate a random preimage and compute the image. 
+ let xl = Fr::random(rng); + let xr = Fr::random(rng); + let image = mimc_hash::(xl, xr, &constants); + + let circuit = MiMCDemo { + xl: Some(xl), + xr: Some(xr), + constants: &constants, + }; + let proof = create_random_proof(circuit, groth_params, rng).unwrap(); + + let pvk = prepare_verifying_key(&groth_params.vk); + assert!(verify_proof(&pvk, &proof, &[image]).unwrap()); +} diff --git a/phase2/tests/mimc/mod.rs b/phase2/tests/mimc/mod.rs new file mode 100644 index 000000000..913ff2866 --- /dev/null +++ b/phase2/tests/mimc/mod.rs @@ -0,0 +1,112 @@ +use bellperson::bls::Engine; +use bellperson::{Circuit, ConstraintSystem, SynthesisError}; +use fff::Field; + +pub const MIMC_ROUNDS: usize = 322; + +pub fn mimc(mut xl: E::Fr, mut xr: E::Fr, constants: &[E::Fr]) -> E::Fr { + assert_eq!(constants.len(), MIMC_ROUNDS); + + for i in 0..MIMC_ROUNDS { + let mut tmp1 = xl; + tmp1.add_assign(&constants[i]); + let mut tmp2 = tmp1; + tmp2.square(); + tmp2.mul_assign(&tmp1); + tmp2.add_assign(&xr); + xr = xl; + xl = tmp2; + } + + xl +} + +pub struct MiMCDemo<'a, E: Engine> { + pub xl: Option, + pub xr: Option, + pub constants: &'a [E::Fr], +} + +impl<'a, E: Engine> Circuit for MiMCDemo<'a, E> { + fn synthesize>(self, cs: &mut CS) -> Result<(), SynthesisError> { + assert_eq!(self.constants.len(), MIMC_ROUNDS); + + // Allocate the first component of the preimage. + let mut xl_value = self.xl; + let mut xl = cs.alloc( + || "preimage xl", + || xl_value.ok_or(SynthesisError::AssignmentMissing), + )?; + + // Allocate the second component of the preimage. 
+ let mut xr_value = self.xr; + let mut xr = cs.alloc( + || "preimage xr", + || xr_value.ok_or(SynthesisError::AssignmentMissing), + )?; + + for i in 0..MIMC_ROUNDS { + // xL, xR := xR + (xL + Ci)^3, xL + let cs = &mut cs.namespace(|| format!("round {}", i)); + + // tmp = (xL + Ci)^2 + let tmp_value = xl_value.map(|mut e| { + e.add_assign(&self.constants[i]); + e.square(); + e + }); + let tmp = cs.alloc( + || "tmp", + || tmp_value.ok_or(SynthesisError::AssignmentMissing), + )?; + + cs.enforce( + || "tmp = (xL + Ci)^2", + |lc| lc + xl + (self.constants[i], CS::one()), + |lc| lc + xl + (self.constants[i], CS::one()), + |lc| lc + tmp, + ); + + // new_xL = xR + (xL + Ci)^3 + // new_xL = xR + tmp * (xL + Ci) + // new_xL - xR = tmp * (xL + Ci) + let new_xl_value = xl_value.map(|mut e| { + e.add_assign(&self.constants[i]); + e.mul_assign(&tmp_value.unwrap()); + e.add_assign(&xr_value.unwrap()); + e + }); + + let new_xl = if i == (MIMC_ROUNDS - 1) { + // This is the last round, xL is our image and so + // we allocate a public input. + cs.alloc_input( + || "image", + || new_xl_value.ok_or(SynthesisError::AssignmentMissing), + )? + } else { + cs.alloc( + || "new_xl", + || new_xl_value.ok_or(SynthesisError::AssignmentMissing), + )? 
+            };
+
+            cs.enforce(
+                || "new_xL = xR + (xL + Ci)^3",
+                |lc| lc + tmp,
+                |lc| lc + xl + (self.constants[i], CS::one()),
+                |lc| lc + new_xl - xr,
+            );
+
+            // xR = xL
+            xr = xl;
+            xr_value = xl_value;
+
+            // xL = new_xL
+            xl = new_xl;
+            xl_value = new_xl_value;
+        }
+
+        Ok(())
+    }
+}
diff --git a/phase2/tests/small.rs b/phase2/tests/small.rs
new file mode 100644
index 000000000..0907da3f4
--- /dev/null
+++ b/phase2/tests/small.rs
@@ -0,0 +1,223 @@
+mod mimc;
+
+use std::fs::{remove_file, File};
+use std::io::{BufReader, BufWriter};
+use std::path::Path;
+
+use bellperson::bls::{Bls12, Fr};
+use bellperson::groth16::{create_random_proof, prepare_verifying_key, verify_proof};
+use fff::Field;
+use filecoin_phase2::small::{
+    read_small_params_from_large_file, verify_contribution_small, MPCSmall,
+};
+use filecoin_phase2::{verify_contribution, MPCParameters};
+use rand::{thread_rng, SeedableRng};
+use rand_chacha::ChaChaRng;
+
+use mimc::{mimc as mimc_hash, MiMCDemo, MIMC_ROUNDS};
+
+// This test is marked as ignore because we haven't checked-in the phase1 file required for this
+// test to pass when run via CI. To run this test you must have the correct phase1 params file in
+// the top level directory of this crate.
+#[test]
+#[ignore]
+fn test_mimc_small_params() {
+    assert!(
+        Path::new("./phase1radix2m10").exists(),
+        "the phase1 file `phase1radix2m10` must be in the crate's top level directory"
+    );
+
+    let constants = (0..MIMC_ROUNDS)
+        .map(|_| Fr::random(&mut thread_rng()))
+        .collect::<Vec<_>>();
+
+    let circuit = MiMCDemo::<Bls12> {
+        xl: None,
+        xr: None,
+        constants: &constants,
+    };
+
+    let mut rng_large = ChaChaRng::from_seed([0u8; 32]);
+    let mut rng_small = ChaChaRng::from_seed([0u8; 32]);
+
+    // Create the initial params.
+    let initial_large = MPCParameters::new(circuit).unwrap();
+    let initial_small = initial_large.copy_small();
+
+    let mut large_added = initial_large.copy();
+
+    // Make the first contributions.
+    let (first_large, first_large_contrib) = {
+        let mut params = initial_large.copy();
+        let contrib = params.contribute(&mut rng_large);
+        (params, contrib)
+    };
+    let (first_small, first_small_contrib) = {
+        let mut params = initial_small.clone();
+        let contrib = params.contribute(&mut rng_small);
+        (params, contrib)
+    };
+
+    // Verify the first contributions.
+    assert_eq!(
+        &first_small_contrib[..],
+        &first_large_contrib[..],
+        "first small and large contributions are not equal"
+    );
+
+    let verified_large = verify_contribution(&initial_large, &first_large)
+        .expect("first large verify_contribution() failed");
+    assert_eq!(
+        &first_large_contrib[..],
+        &verified_large[..],
+        "first large contribution does not match verified contribution"
+    );
+
+    let verified_small = verify_contribution_small(&initial_small, &first_small)
+        .expect("first small verify_contribution_small() failed");
+    assert_eq!(
+        &first_small_contrib[..],
+        &verified_small[..],
+        "first small contribution does not match verified contribution"
+    );
+
+    // Verify that the first large and small params are consistent.
+    assert!(first_large.has_last_contrib(&first_small));
+    large_added.add_contrib(first_small.clone());
+    assert_eq!(large_added, first_large);
+
+    // Make the second contributions.
+    let (second_large, second_large_contrib) = {
+        let mut params = first_large.copy();
+        let contrib = params.contribute(&mut rng_large);
+        (params, contrib)
+    };
+    let (second_small, second_small_contrib) = {
+        let mut params = first_small.clone();
+        let contrib = params.contribute(&mut rng_small);
+        (params, contrib)
+    };
+
+    // Verify the second contributions.
+    assert_eq!(
+        &second_small_contrib[..],
+        &second_large_contrib[..],
+        "second small and large contributions are not equal"
+    );
+
+    let verified_large = verify_contribution(&first_large, &second_large)
+        .expect("second large verify_contribution() failed");
+    assert_eq!(
+        &second_large_contrib[..],
+        &verified_large[..],
+        "second large contribution does not match verified contribution"
+    );
+
+    let verified_small = verify_contribution_small(&first_small, &second_small)
+        .expect("second small verify_contribution_small() failed");
+    assert_eq!(
+        &second_small_contrib[..],
+        &verified_small[..],
+        "second small contribution does not match verified contribution"
+    );
+
+    // Verify that the second large and small params are consistent.
+    assert!(second_large.has_last_contrib(&second_small));
+    large_added.add_contrib(second_small.clone());
+    assert_eq!(large_added, second_large);
+
+    // Verify large params against circuit.
+    let all_contributions = large_added
+        .verify(MiMCDemo::<Bls12> {
+            xl: None,
+            xr: None,
+            constants: &constants,
+        })
+        .unwrap();
+    assert_eq!(all_contributions.len(), 2);
+    assert_eq!(&all_contributions[0][..], &first_large_contrib[..]);
+    assert_eq!(&all_contributions[1][..], &second_large_contrib[..]);
+
+    // Verify the generated params against the circuit.
+    let groth_params = large_added.get_params();
+    let pvk = prepare_verifying_key(&groth_params.vk);
+    // Generate a random preimage and compute the image.
+    let xl = Fr::random(&mut thread_rng());
+    let xr = Fr::random(&mut thread_rng());
+    let image = mimc_hash::<Bls12>(xl, xr, &constants);
+    // Create an instance of the circuit (with the witness).
+    let circuit = MiMCDemo {
+        xl: Some(xl),
+        xr: Some(xr),
+        constants: &constants,
+    };
+    // Create a groth16 proof using the generated parameters.
+    let proof = create_random_proof(circuit, groth_params, &mut thread_rng())
+        .expect("failed to create Groth16 proof using MPC params");
+    assert!(verify_proof(&pvk, &proof, &[image]).unwrap());
+}
+
+// This test is marked as ignore because we haven't checked-in the phase1 file required for this
+// test to pass when run via CI. To run this test you must have the correct phase1 params file in
+// the top level directory of this crate.
+#[test]
+#[ignore]
+fn test_small_file_io() {
+    const LARGE_PATH: &str = "./tests/large_params";
+    const SMALL_PATH: &str = "./tests/small_params";
+
+    struct TestCleanup;
+
+    impl Drop for TestCleanup {
+        fn drop(&mut self) {
+            remove_file(LARGE_PATH).unwrap();
+            remove_file(SMALL_PATH).unwrap();
+        }
+    }
+
+    let _cleanup = TestCleanup;
+
+    assert!(
+        Path::new("./phase1radix2m10").exists(),
+        "the phase1 file `phase1radix2m10` must be in the crate's top level directory"
+    );
+
+    let constants = (0..MIMC_ROUNDS)
+        .map(|_| Fr::random(&mut thread_rng()))
+        .collect::<Vec<_>>();
+
+    let circuit = MiMCDemo::<Bls12> {
+        xl: None,
+        xr: None,
+        constants: &constants,
+    };
+
+    let large_params = MPCParameters::new(circuit).unwrap();
+    let small_params = large_params.copy_small();
+
+    {
+        let file = File::create(LARGE_PATH).unwrap();
+        let mut writer = BufWriter::with_capacity(1024 * 1024, file);
+        large_params.write(&mut writer).unwrap();
+    }
+    {
+        let file = File::create(SMALL_PATH).unwrap();
+        let mut writer = BufWriter::with_capacity(1024 * 1024, file);
+        small_params.write(&mut writer).unwrap();
+    }
+
+    // Test small param deserialisation.
+    {
+        let file = File::open(SMALL_PATH).unwrap();
+        let mut reader = BufReader::with_capacity(1024 * 1024, file);
+        let small_read = MPCSmall::read(&mut reader, false, false).unwrap();
+        assert_eq!(small_read, small_params);
+        assert!(large_params.has_last_contrib(&small_read));
+    };
+
+    // Test `read_small_params_from_large_file()`.
+    {
+        let small_read = read_small_params_from_large_file(LARGE_PATH).unwrap();
+        assert_eq!(small_read, small_params);
+    }
+}