diff --git a/src/liblelantus/lelantus_primitives.cpp b/src/liblelantus/lelantus_primitives.cpp
index 5221833fe4..77d31955dc 100644
--- a/src/liblelantus/lelantus_primitives.cpp
+++ b/src/liblelantus/lelantus_primitives.cpp
@@ -84,9 +84,9 @@ GroupElement LelantusPrimitives::double_commit(
 }
 
 void LelantusPrimitives::convert_to_sigma(
-        uint64_t num,
-        uint64_t n,
-        uint64_t m,
+        std::size_t num,
+        std::size_t n,
+        std::size_t m,
         std::vector<Scalar>& out) {
     out.reserve(n * m);
     Scalar one(uint64_t(1));
@@ -105,11 +105,11 @@ void LelantusPrimitives::convert_to_sigma(
     }
 }
 
-std::vector<uint64_t> LelantusPrimitives::convert_to_nal(
-        uint64_t num,
-        uint64_t n,
-        uint64_t m) {
-    std::vector<uint64_t> result;
+std::vector<std::size_t> LelantusPrimitives::convert_to_nal(
+        std::size_t num,
+        std::size_t n,
+        std::size_t m) {
+    std::vector<std::size_t> result;
     result.reserve(m);
     while (num != 0)
     {
diff --git a/src/liblelantus/lelantus_primitives.h b/src/liblelantus/lelantus_primitives.h
index a570246d35..7f28c34d0d 100644
--- a/src/liblelantus/lelantus_primitives.h
+++ b/src/liblelantus/lelantus_primitives.h
@@ -66,9 +66,9 @@ class LelantusPrimitives {
             const Scalar& r,
             GroupElement& result_out);
 
-    static void convert_to_sigma(uint64_t num, uint64_t n, uint64_t m, std::vector<Scalar>& out);
+    static void convert_to_sigma(std::size_t num, std::size_t n, std::size_t m, std::vector<Scalar>& out);
 
-    static std::vector<uint64_t> convert_to_nal(uint64_t num, uint64_t n, uint64_t m);
+    static std::vector<std::size_t> convert_to_nal(std::size_t num, std::size_t n, std::size_t m);
 
     static void generate_Lelantus_challenge(
             const std::vector<SigmaExtendedProof>& proofs,
diff --git a/src/liblelantus/sigmaextended_prover.cpp b/src/liblelantus/sigmaextended_prover.cpp
index 9b5b9e32aa..0c8db5b86f 100644
--- a/src/liblelantus/sigmaextended_prover.cpp
+++ b/src/liblelantus/sigmaextended_prover.cpp
@@ -5,17 +5,19 @@ namespace lelantus {
 SigmaExtendedProver::SigmaExtendedProver(
         const GroupElement& g,
         const std::vector<GroupElement>& h_gens,
-        uint64_t n,
-        uint64_t m)
+        std::size_t n,
+        std::size_t m)
     : g_(g)
     , h_(h_gens)
     , n_(n)
     , m_(m) {
 }
 
+// Generate the initial portion of a one-of-many proof
+// Internal prover state is maintained separately, in order to facilitate the last part of the proof later
 void SigmaExtendedProver::sigma_commit(
         const std::vector<GroupElement>& commits,
-        int l,
+        std::size_t l,
         const Scalar& rA,
         const Scalar& rB,
         const Scalar& rC,
@@ -26,8 +28,25 @@ void SigmaExtendedProver::sigma_commit(
         std::vector<GroupElement>& Yk,
         std::vector<Scalar>& sigma,
         SigmaExtendedProof& proof_out) {
+    // Sanity checks
+    if (n_ < 2 || m_ < 2) {
+        throw std::invalid_argument("Prover parameters are invalid");
+    }
+    std::size_t N = (std::size_t)pow(n_, m_);
     std::size_t setSize = commits.size();
-    assert(setSize > 0);
+    if (setSize == 0) {
+        throw std::invalid_argument("Cannot have empty commitment set");
+    }
+    if (setSize > N) {
+        throw std::invalid_argument("Commitment set is too large");
+    }
+    if (l >= setSize) {
+        throw std::invalid_argument("Signing index is out of range");
+    }
+    if (h_.size() != n_ * m_) {
+        throw std::invalid_argument("Generator vector size is invalid");
+    }
+
     LelantusPrimitives::convert_to_sigma(l, n_, m_, sigma);
 
     for (std::size_t k = 0; k < m_; ++k) {
@@ -70,13 +89,12 @@ void SigmaExtendedProver::sigma_commit(
     }
     LelantusPrimitives::commit(g_,h_, d, rD, proof_out.D_);
 
-    std::size_t N = setSize;
     std::vector<std::vector<Scalar>> P_i_k;
-    P_i_k.resize(N);
-    for (std::size_t i = 0; i < N - 1; ++i)
+    P_i_k.resize(setSize);
+    for (std::size_t i = 0; i < setSize - 1; ++i)
     {
         std::vector<Scalar>& coefficients = P_i_k[i];
-        std::vector<uint64_t> I = LelantusPrimitives::convert_to_nal(i, n_, m_);
+        std::vector<std::size_t> I = LelantusPrimitives::convert_to_nal(i, n_, m_);
         coefficients.push_back(a[I[0]]);
         coefficients.push_back(sigma[I[0]]);
         for (std::size_t j = 1; j < m_; ++j) {
@@ -102,17 +120,17 @@ void SigmaExtendedProver::sigma_commit(
      *      \right]
      */
 
-    std::vector<uint64_t> I = LelantusPrimitives::convert_to_nal(N-1, n_, m_);
-    std::vector<uint64_t> lj = LelantusPrimitives::convert_to_nal(l, n_, m_);
+    std::vector<std::size_t> I = LelantusPrimitives::convert_to_nal(setSize - 1, n_, m_);
+    std::vector<std::size_t> lj = LelantusPrimitives::convert_to_nal(l, n_, m_);
 
     std::vector<Scalar> p_i_sum;
     p_i_sum.emplace_back(one);
     std::vector<std::vector<Scalar>> partial_p_s;
 
     // Pre-calculate product parts and calculate p_s(x) at the same time, put the latter into p_i_sum
-    for (int j = m_ - 1; j >= 0; j--) {
+    for (std::ptrdiff_t j = m_ - 1; j >= 0; j--) {
         partial_p_s.push_back(p_i_sum);
-        LelantusPrimitives::new_factor(sigma[j * n_ + I[j]], a[j * n_ + I[j]], p_i_sum);
+        LelantusPrimitives::new_factor(sigma[j*n_ + I[j]], a[j*n_ + I[j]], p_i_sum);
     }
 
     for (std::size_t j = 0; j < m_; j++) {
@@ -131,15 +149,15 @@ void SigmaExtendedProver::sigma_commit(
             p_i_sum[j + k] += polynomial[k];
     }
 
-    P_i_k[N-1] = p_i_sum;
+    P_i_k[setSize - 1] = p_i_sum;
 
     proof_out.Gk_.reserve(m_);
     proof_out.Qk.reserve(m_);
     for (std::size_t k = 0; k < m_; ++k)
     {
         std::vector<Scalar> P_i;
-        P_i.reserve(N);
-        for (std::size_t i = 0; i < N; ++i){
+        P_i.reserve(setSize);
+        for (std::size_t i = 0; i < setSize; ++i){
             P_i.emplace_back(P_i_k[i][k]);
         }
         secp_primitives::MultiExponent mult(commits, P_i);
@@ -150,6 +168,7 @@ void SigmaExtendedProver::sigma_commit(
     }
 }
 
+// Generate the rest of the one-of-many proof
 void SigmaExtendedProver::sigma_response(
         const std::vector<Scalar>& sigma,
         const std::vector<Scalar>& a,
diff --git a/src/liblelantus/sigmaextended_prover.h b/src/liblelantus/sigmaextended_prover.h
index e923d7778c..fe2e79c914 100644
--- a/src/liblelantus/sigmaextended_prover.h
+++ b/src/liblelantus/sigmaextended_prover.h
@@ -9,11 +9,11 @@ class SigmaExtendedProver{
 
 public:
     SigmaExtendedProver(const GroupElement& g,
-                        const std::vector<GroupElement>& h_gens, uint64_t n, uint64_t m);
+                        const std::vector<GroupElement>& h_gens, std::size_t n, std::size_t m);
 
     void sigma_commit(
             const std::vector<GroupElement>& commits,
-            int l,
+            std::size_t l,
             const Scalar& rA,
             const Scalar& rB,
             const Scalar& rC,
@@ -43,8 +43,8 @@ class SigmaExtendedProver{
 private:
     GroupElement g_;
     std::vector<GroupElement> h_;
-    uint64_t n_;
-    uint64_t m_;
+    std::size_t n_;
+    std::size_t m_;
 };
 
 }//namespace lelantus
diff --git a/src/liblelantus/sigmaextended_verifier.cpp b/src/liblelantus/sigmaextended_verifier.cpp
index bda112101c..46dcceeb08 100644
--- a/src/liblelantus/sigmaextended_verifier.cpp
+++ b/src/liblelantus/sigmaextended_verifier.cpp
@@ -6,257 +6,311 @@ namespace lelantus {
 SigmaExtendedVerifier::SigmaExtendedVerifier(
         const GroupElement& g,
         const std::vector<GroupElement>& h_gens,
-        uint64_t n,
-        uint64_t m)
+        std::size_t n,
+        std::size_t m)
     : g_(g)
     , h_(h_gens)
     , n(n)
     , m(m){
 }
 
+// Verify a single one-of-many proof
+// In this case, there is an implied input set size
+bool SigmaExtendedVerifier::singleverify(
+        const std::vector<GroupElement>& commits,
+        const Scalar& x,
+        const Scalar& serial,
+        const SigmaExtendedProof& proof) const {
+    std::vector<Scalar> challenges = { x };
+    std::vector<Scalar> serials = { serial };
+    std::vector<std::size_t> setSizes = { };
+    std::vector<SigmaExtendedProof> proofs = { proof };
+
+    return verify(
+        commits,
+        challenges,
+        serials,
+        setSizes,
+        true,
+        false,
+        proofs
+    );
+}
+
+// Verify a single one-of-many proof
+// In this case, there is a specified set size
+bool SigmaExtendedVerifier::singleverify(
+        const std::vector<GroupElement>& commits,
+        const Scalar& x,
+        const Scalar& serial,
+        const std::size_t setSize,
+        const SigmaExtendedProof& proof) const {
+    std::vector<Scalar> challenges = { x };
+    std::vector<Scalar> serials = { serial };
+    std::vector<std::size_t> setSizes = { setSize };
+    std::vector<SigmaExtendedProof> proofs = { proof };
+
+    return verify(
+        commits,
+        challenges,
+        serials,
+        setSizes,
+        true,
+        true,
+        proofs
+    );
+}
+
+// Verify a batch of one-of-many proofs from the same transaction
+// In this case, there is a single common challenge and implied input set size
 bool SigmaExtendedVerifier::batchverify(
         const std::vector<GroupElement>& commits,
         const Scalar& x,
         const std::vector<Scalar>& serials,
         const std::vector<SigmaExtendedProof>& proofs) const {
-    int M = proofs.size();
-    int N = commits.size();
+    std::vector<Scalar> challenges = { x };
+    std::vector<std::size_t> setSizes = { };
+
+    return verify(
+        commits,
+        challenges,
+        serials,
+        setSizes,
+        true,
+        false,
+        proofs
+    );
+}
 
-    if (commits.empty()) {
-        LogPrintf("Sigma verification failed due to commits are empty.");
+// Verify a general batch of one-of-many proofs
+// In this case, each proof has a separate challenge and specified set size
+bool SigmaExtendedVerifier::batchverify(
+        const std::vector<GroupElement>& commits,
+        const std::vector<Scalar>& challenges,
+        const std::vector<Scalar>& serials,
+        const std::vector<std::size_t>& setSizes,
+        const std::vector<SigmaExtendedProof>& proofs) const {
+
+    return verify(
+        commits,
+        challenges,
+        serials,
+        setSizes,
+        false,
+        true,
+        proofs
+    );
+}
+
+// Verify a batch of one-of-many proofs
+bool SigmaExtendedVerifier::verify(
+        const std::vector<GroupElement>& commits,
+        const std::vector<Scalar>& challenges,
+        const std::vector<Scalar>& serials,
+        const std::vector<std::size_t>& setSizes,
+        const bool commonChallenge,
+        const bool specifiedSetSizes,
+        const std::vector<SigmaExtendedProof>& proofs) const {
+    // Sanity checks
+    if (n < 2 || m < 2) {
+        LogPrintf("Verifier parameters are invalid");
         return false;
     }
+    std::size_t M = proofs.size();
+    std::size_t N = (std::size_t)pow(n, m);
 
-    for(int t = 0; t < M; ++t) {
-        if(!membership_checks(proofs[t])) {
-            LogPrintf("Sigma verification failed due to membership checks failed.");
-            return false;
-        }
+    if (commits.size() == 0) {
+        LogPrintf("Cannot have empty commitment set");
+        return false;
     }
-
-    std::vector<std::vector<Scalar>> f_;
-    f_.resize(M);
-    for (int t = 0; t < M; ++t)
-    {
-        if(!compute_fs(proofs[t], x, f_[t]) || !abcd_checks(proofs[t], x, f_[t])) {
-            LogPrintf("Sigma verification failed due to f computations or abcd checks failed.");
-            return false;
-        }
+    if (commits.size() > N) {
+        LogPrintf("Commitment set is too large");
+        return false;
+    }
+    if (h_.size() != n * m) {
+        LogPrintf("Generator vector size is invalid");
+        return false;
+    }
+    if (serials.size() != M) {
+        LogPrintf("Invalid number of serials provided");
+        return false;
     }
 
-    std::vector<Scalar> y;
-    y.resize(M);
-    for (int t = 0; t < M; ++t)
-        y[t].randomize();
-
-    std::vector<Scalar> f_i_t;
-    f_i_t.resize(N);
-    GroupElement right;
-    Scalar exp;
-
-    std::vector<std::vector<uint64_t> > I_;
-    I_.resize(N);
-    for (int i = 0; i < N ; ++i)
-        I_[i] = LelantusPrimitives::convert_to_nal(i, n, m);
-
-    for (int t = 0; t < M; ++t)
-    {
-        right += (LelantusPrimitives::double_commit(g_, Scalar(uint64_t(0)), h_[1], proofs[t].zV_, h_[0], proofs[t].zR_)) * y[t];
-        Scalar e;
-
-        Scalar f_i(uint64_t(1));
-        std::vector<Scalar>::iterator ptr = f_i_t.begin();
-        compute_batch_fis(f_i, m, f_[t], y[t], e, ptr, ptr, ptr + N - 1);
-        /*
-         * Optimization for getting power for last 'commits' array element is done similarly to the one used in creating
-         * a proof. The fact that sum of any row in 'f' array is 'x' (challenge value) is used.
-         *
-         * Math (in TeX notation):
-         *
-         * \sum_{i=s+1}^{N-1} \prod_{j=0}^{m-1}f_{j,i_j} =
-         *   \sum_{j=0}^{m-1}
-         *     \left[
-         *       \left( \sum_{i=s_j+1}^{n-1}f_{j,i} \right)
-         *       \left( \prod_{k=j}^{m-1}f_{k,s_k} \right)
-         *       x^j
-         *     \right]
-         */
+    // For separate challenges, we must have enough
+    if (!commonChallenge && challenges.size() != M) {
+        LogPrintf("Invalid challenge vector size");
+        return false;
+    }
 
-        Scalar pow(uint64_t(1));
-        std::vector<Scalar> f_part_product;    // partial product of f array elements for lastIndex
-        for (int j = m - 1; j >= 0; j--) {
-            f_part_product.push_back(pow);
-            pow *= f_[t][j * n + I_[N - 1][j]];
-        }
+    // If we have specified set sizes, we must have enough
+    if (specifiedSetSizes && setSizes.size() != M) {
+        LogPrintf("Invalid set size vector size");
+        return false;
+    }
 
-        NthPower xj(x);
-        for (std::size_t j = 0; j < m; j++) {
-            Scalar fi_sum(uint64_t(0));
-            for (std::size_t i = I_[N - 1][j] + 1; i < n; i++)
-                fi_sum += f_[t][j*n + i];
-            pow += fi_sum * xj.pow * f_part_product[m - j - 1];
-            xj.go_next();
+    // All proof elements must be valid
+    for (std::size_t t = 0; t < M; ++t) {
+        if (!membership_checks(proofs[t])) {
+            LogPrintf("Sigma verification failed due to membership checks failed.");
+            return false;
         }
-
-        f_i_t[N - 1] += pow * y[t];
-        e += pow;
-
-        e *= serials[t] * y[t];
-        exp += e;
     }
 
-    secp_primitives::MultiExponent mult(commits, f_i_t);
-    GroupElement t1 = mult.get_multiple();
-
-    std::vector<Scalar> x_k_neg;
-    x_k_neg.reserve(m);
-    NthPower x_k(x);
-    for (uint64_t k = 0; k < m; ++k) {
-        x_k_neg.emplace_back(x_k.pow.negate());
-        x_k.go_next();
+    // Final batch multiscalar multiplication
+    Scalar g_scalar = Scalar(uint64_t(0)); // associated to g_
+    Scalar h1_scalar = Scalar(uint64_t(0)); // associated to h1
+    Scalar h2_scalar = Scalar(uint64_t(0)); // associated to h2
+    std::vector<Scalar> h_scalars; // associated to h_
+    std::vector<Scalar> commit_scalars; // associated to commitment list
+    h_scalars.reserve(n * m);
+    h_scalars.resize(n * m);
+    for (std::size_t i = 0; i < n * m; i++) {
+        h_scalars[i] = Scalar(uint64_t(0));
     }
-
-    GroupElement t2;
-    for (int t = 0; t < M; ++t) {
-        const std::vector<GroupElement>& Gk = proofs[t].Gk_;
-        const std::vector<GroupElement>& Qk = proofs[t].Qk;
-        GroupElement term;
-        for (std::size_t k = 0; k < m; ++k)
-        {
-            term += ((Gk[k] + Qk[k]) * x_k_neg[k]);
-        }
-        term *= y[t];
-        t2 += term;
+    commit_scalars.reserve(commits.size());
+    commit_scalars.resize(commits.size());
+    for (size_t i = 0; i < commits.size(); i++) {
+        commit_scalars[i] = Scalar(uint64_t(0));
     }
-    GroupElement left(t1 + t2);
-    right += g_ * exp;
-    if(left != right) {
-        LogPrintf("Sigma verification failed due to last check failed.");
-        return false;
+    // Set up the final batch elements
+    std::vector<GroupElement> points;
+    std::vector<Scalar> scalars;
+    std::size_t final_size = 3 + m * n + commits.size(); // g, h1, h2, (h_), (commits)
+    for (std::size_t t = 0; t < M; t++) {
+        final_size += 4 + proofs[t].Gk_.size() + proofs[t].Qk.size(); // A, B, C, D, (G), (Q)
+    }
+    points.reserve(final_size);
+    scalars.reserve(final_size);
+
+    // Index decomposition, which is common among all proofs
+    std::vector<std::vector<std::size_t> > I_;
+    I_.reserve(commits.size());
+    I_.resize(commits.size());
+    for (std::size_t i = 0; i < commits.size(); i++) {
+        I_[i] = LelantusPrimitives::convert_to_nal(i, n, m);
     }
-    return true;
-}
 
+    // Process all proofs
+    for (std::size_t t = 0; t < M; t++) {
+        SigmaExtendedProof proof = proofs[t];
 
-bool SigmaExtendedVerifier::batchverify(
-        const std::vector<GroupElement>& commits,
-        const std::vector<Scalar>& challenges,
-        const std::vector<Scalar>& serials,
-        const std::vector<std::size_t>& setSizes,
-        const std::vector<SigmaExtendedProof>& proofs) const {
-    int M = proofs.size();
-    int N = commits.size();
+        // The challenge depends on whether or not we're in common mode
+        Scalar x;
+        if (commonChallenge) {
+            x = challenges[0];
+        }
+        else {
+            x = challenges[t];
+        }
 
-    if (commits.empty())
-        return false;
+        // Generate random verifier weights
+        Scalar w1, w2, w3;
+        w1.randomize();
+        w2.randomize();
+        w3.randomize();
 
-    for(int t = 0; t < M; ++t)
-        if(!membership_checks(proofs[t]))
-            return false;
-    std::vector<std::vector<Scalar>> f_;
-    f_.resize(M);
-    for (int t = 0; t < M; ++t)
-    {
-        if(!compute_fs(proofs[t], challenges[t], f_[t]) || !abcd_checks(proofs[t], challenges[t], f_[t]))
+        // Reconstruct f-matrix
+        std::vector<Scalar> f_;
+        if (!compute_fs(proof, x, f_)) {
+            LogPrintf("Invalid matrix reconstruction");
             return false;
-    }
-
-    std::vector<Scalar> y;
-    y.resize(M);
-    for (int t = 0; t < M; ++t)
-        y[t].randomize();
+        }
 
-    std::vector<Scalar> f_i_t;
-    f_i_t.resize(N);
-    GroupElement right;
-    Scalar exp;
+        // Effective set size
+        std::size_t setSize;
+        if (!specifiedSetSizes) {
+            setSize = commits.size();
+        }
+        else {
+            setSize = setSizes[t];
+        }
 
-    std::vector<std::vector<uint64_t> > I_;
-    I_.resize(N);
-    for (int i = 0; i < N ; ++i)
-        I_[i] = LelantusPrimitives::convert_to_nal(i, n, m);
+        // A, B, C, D (and associated commitments)
+        points.emplace_back(proof.A_);
+        scalars.emplace_back(w1.negate());
+        points.emplace_back(proof.B_);
+        scalars.emplace_back(x.negate() * w1);
+        points.emplace_back(proof.C_);
+        scalars.emplace_back(x.negate() * w2);
+        points.emplace_back(proof.D_);
+        scalars.emplace_back(w2.negate());
+
+        g_scalar += proof.ZA_ * w1 + proof.ZC_ * w2;
+        for (std::size_t i = 0; i < m * n; i++) {
+            h_scalars[i] += f_[i] * (w1 + (x - f_[i]) * w2);
+        }
 
-    for (int t = 0; t < M; ++t)
-    {
-        right += (LelantusPrimitives::double_commit(g_, Scalar(uint64_t(0)), h_[1], proofs[t].zV_, h_[0], proofs[t].zR_)) * y[t];
-        Scalar e;
-        size_t size = setSizes[t];
-        size_t start = N - size;
+        // Input sets
+        h1_scalar += proof.zV_ * w3.negate();
+        h2_scalar += proof.zR_ * w3.negate();
 
         Scalar f_i(uint64_t(1));
-        std::vector<Scalar>::iterator ptr = f_i_t.begin() + start;
-        compute_batch_fis(f_i, m, f_[t], y[t], e, ptr, ptr, ptr + size - 1);
-
-        /*
-         * Optimization for getting power for last 'commits' array element is done similarly to the one used in creating
-         * a proof. The fact that sum of any row in 'f' array is 'x' (challenge value) is used.
-         *
-         * Math (in TeX notation):
-         *
-         * \sum_{i=s+1}^{N-1} \prod_{j=0}^{m-1}f_{j,i_j} =
-         *   \sum_{j=0}^{m-1}
-         *     \left[
-         *       \left( \sum_{i=s_j+1}^{n-1}f_{j,i} \right)
-         *       \left( \prod_{k=j}^{m-1}f_{k,s_k} \right)
-         *       x^j
-         *     \right]
-         */
+        Scalar e;
+        std::vector<Scalar>::iterator ptr;
+        if (!specifiedSetSizes) {
+            ptr = commit_scalars.begin();
+            compute_batch_fis(f_i, m, f_, w3, e, ptr, ptr, ptr + setSize - 1);
+        }
+        else {
+            ptr = commit_scalars.begin() + commits.size() - setSize;
+            compute_batch_fis(f_i, m, f_, w3, e, ptr, ptr, ptr + setSize - 1);
+        }
 
         Scalar pow(uint64_t(1));
-        std::vector<Scalar> f_part_product;    // partial product of f array elements for lastIndex
-        for (int j = m - 1; j >= 0; j--) {
+        std::vector<Scalar> f_part_product;
+        for (std::ptrdiff_t j = m - 1; j >= 0; j--) {
             f_part_product.push_back(pow);
-            pow *= f_[t][j * n + I_[size - 1][j]];
+            pow *= f_[j*n + I_[setSize - 1][j]];
         }
 
-        NthPower xj(challenges[t]);
+        NthPower xj(x);
         for (std::size_t j = 0; j < m; j++) {
             Scalar fi_sum(uint64_t(0));
-            for (std::size_t i = I_[size - 1][j] + 1; i < n; i++)
-                fi_sum += f_[t][j*n + i];
+            for (std::size_t i = I_[setSize - 1][j] + 1; i < n; i++)
+                fi_sum += f_[j*n + i];
             pow += fi_sum * xj.pow * f_part_product[m - j - 1];
             xj.go_next();
         }
 
-        f_i_t[N - 1] += pow * y[t];
+        commit_scalars[commits.size() - 1] += pow * w3;
         e += pow;
 
-        e *= serials[t] * y[t];
-        exp += e;
-    }
-
-    secp_primitives::MultiExponent mult(commits, f_i_t);
-    GroupElement t1 = mult.get_multiple();
+        e *= serials[t] * w3.negate();
+        g_scalar += e;
 
-    std::vector<std::vector<Scalar>> x_t_k_neg;
-    x_t_k_neg.resize(M);
-    for (int t = 0; t < M; ++t) {
-        x_t_k_neg[t].reserve(m);
-        NthPower x_k(challenges[t]);
-        for (uint64_t k = 0; k < m; ++k) {
-            x_t_k_neg[t].emplace_back(x_k.pow.negate());
+        NthPower x_k(x);
+        for (std::size_t k = 0; k < m; k++) {
+            points.emplace_back(proof.Gk_[k]);
+            scalars.emplace_back(x_k.pow.negate() * w3);
+            points.emplace_back(proof.Qk[k]);
+            scalars.emplace_back(x_k.pow.negate() * w3);
             x_k.go_next();
         }
     }
 
-    GroupElement t2;
-    for (int t = 0; t < M; ++t) {
-        const std::vector<GroupElement>& Gk = proofs[t].Gk_;
-        const std::vector<GroupElement>& Qk = proofs[t].Qk;
-        GroupElement term;
-        for (std::size_t k = 0; k < m; ++k)
-        {
-            term += ((Gk[k] + Qk[k]) * x_t_k_neg[t][k]);
-        }
-        term *= y[t];
-        t2 += term;
+
+    // Add common generators
+    points.emplace_back(g_);
+    scalars.emplace_back(g_scalar);
+    points.emplace_back(h_[1]);
+    scalars.emplace_back(h1_scalar);
+    points.emplace_back(h_[0]);
+    scalars.emplace_back(h2_scalar);
+    for (std::size_t i = 0; i < m * n; i++) {
+        points.emplace_back(h_[i]);
+        scalars.emplace_back(h_scalars[i]);
+    }
+    for (std::size_t i = 0; i < commits.size(); i++) {
+        points.emplace_back(commits[i]);
+        scalars.emplace_back(commit_scalars[i]);
     }
-    GroupElement left(t1 + t2);
-    right += g_ * exp;
-    if (left != right)
-        return false;
-    return true;
+
+    // Verify the batch
+    secp_primitives::MultiExponent result(points, scalars);
+    if (result.get_multiple().isInfinity()) {
+        return true;
+    }
+    return false;
 }
 
 bool SigmaExtendedVerifier::membership_checks(const SigmaExtendedProof& proof) const {
@@ -299,7 +353,7 @@ bool SigmaExtendedVerifier::compute_fs(
         const SigmaExtendedProof& proof,
         const Scalar& x,
         std::vector<Scalar>& f_) const {
-    for(unsigned int j = 0; j < proof.f_.size(); ++j) {
+    for (std::size_t j = 0; j < proof.f_.size(); ++j) {
         if(proof.f_[j] == x)
             return false;
     }
@@ -309,8 +363,8 @@ bool SigmaExtendedVerifier::compute_fs(
     {
         f_.push_back(Scalar(uint64_t(0)));
         Scalar temp;
-        int k = n - 1;
-        for (int i = 0; i < k; ++i)
+        std::size_t k = n - 1;
+        for (std::size_t i = 0; i < k; ++i)
         {
             temp += proof.f_[j * k + i];
             f_.emplace_back(proof.f_[j * k + i]);
@@ -320,26 +374,6 @@ bool SigmaExtendedVerifier::compute_fs(
     return true;
 }
 
-bool SigmaExtendedVerifier::abcd_checks(
-        const SigmaExtendedProof& proof,
-        const Scalar& x,
-        const std::vector<Scalar>& f_) const {
-    Scalar c;
-    c.randomize();
-
-    // Aggregating two checks into one, B^x * A = Comm(..) and C^x * D = Comm(..)
-    std::vector<Scalar> f_plus_f_prime;
-    f_plus_f_prime.reserve(f_.size());
-    for (std::size_t i = 0; i < f_.size(); i++)
-        f_plus_f_prime.emplace_back(f_[i] * c + f_[i] * (x - f_[i]));
-
-    GroupElement right;
-    LelantusPrimitives::commit(g_, h_, f_plus_f_prime, proof.ZA_ * c + proof.ZC_, right);
-    if (((proof.B_ * x + proof.A_) * c + proof.C_ * x + proof.D_) != right)
-        return false;
-    return true;
-}
-
 void SigmaExtendedVerifier::compute_fis(int j, const std::vector<Scalar>& f, std::vector<Scalar>& f_i_) const {
     Scalar f_i(uint64_t(1));
     std::vector<Scalar>::iterator ptr = f_i_.begin();
@@ -362,7 +396,7 @@ void SigmaExtendedVerifier::compute_fis(
 
     Scalar t;
 
-    for (int i = 0; i < n; i++)
+    for (std::size_t i = 0; i < n; i++)
     {
         t = f[j * n + i];
         t *= f_i;
@@ -392,7 +426,7 @@ void SigmaExtendedVerifier::compute_batch_fis(
 
     Scalar t;
 
-    for (int i = 0; i < n; i++)
+    for (std::size_t i = 0; i < n; i++)
     {
         t = f[j * n + i];
         t *= f_i;
diff --git a/src/liblelantus/sigmaextended_verifier.h b/src/liblelantus/sigmaextended_verifier.h
index 260d73d34c..f0521a41da 100644
--- a/src/liblelantus/sigmaextended_verifier.h
+++ b/src/liblelantus/sigmaextended_verifier.h
@@ -10,16 +10,31 @@ class SigmaExtendedVerifier{
 public:
     SigmaExtendedVerifier(const GroupElement& g,
                           const std::vector<GroupElement>& h_gens,
-                          uint64_t n, uint64_t m_);
+                          std::size_t n_, std::size_t m_);
-    //gets initial double-blinded Pedersen commitments,
-    //verifies proofs from single transaction, where set size and challenge are the same
+    // Verify a single one-of-many proof
+    // In this case, there is an implied input set size
+    bool singleverify(const std::vector<GroupElement>& commits,
+                      const Scalar& x,
+                      const Scalar& serial,
+                      const SigmaExtendedProof& proof) const;
+
+    // Verify a single one-of-many proof
+    // In this case, there is a specified set size
+    bool singleverify(const std::vector<GroupElement>& commits,
+                      const Scalar& x,
+                      const Scalar& serial,
+                      const size_t setSize,
+                      const SigmaExtendedProof& proof) const;
+
+    // Verify a batch of one-of-many proofs from the same transaction
+    // In this case, there is a single common challenge and implied input set size
     bool batchverify(const std::vector<GroupElement>& commits,
                      const Scalar& x,
                      const std::vector<Scalar>& serials,
                      const std::vector<SigmaExtendedProof>& proofs) const;
 
-    //gets initial double-blinded Pedersen commitments
-    //verifies proofs from different transactions, where set sizes and challenges are different
+    // Verify a general batch of one-of-many proofs
+    // In this case, each proof has a separate challenge and specified set size
     bool batchverify(const std::vector<GroupElement>& commits,
                      const std::vector<Scalar>& challenges,
                      const std::vector<Scalar>& serials,
@@ -27,16 +42,20 @@ class SigmaExtendedVerifier{
                      const std::vector<SigmaExtendedProof>& proofs) const;
 
 private:
+    // Utility function that actually performs verification
+    bool verify(const std::vector<GroupElement>& commits,
+                const std::vector<Scalar>& challenges,
+                const std::vector<Scalar>& serials,
+                const std::vector<std::size_t>& setSizes,
+                const bool commonChallenge,
+                const bool specifiedSetSizes,
+                const std::vector<SigmaExtendedProof>& proofs) const;
     //auxiliary functions
     bool membership_checks(const SigmaExtendedProof& proof) const;
     bool compute_fs(
             const SigmaExtendedProof& proof,
             const Scalar& x,
             std::vector<Scalar>& f_) const;
-    bool abcd_checks(
-            const SigmaExtendedProof& proof,
-            const Scalar& x,
-            const std::vector<Scalar>& f_) const;
 
     void compute_fis(int j, const std::vector<Scalar>& f, std::vector<Scalar>& f_i_) const;
     void compute_fis(
@@ -58,8 +77,8 @@ class SigmaExtendedVerifier{
 private:
     GroupElement g_;
     std::vector<GroupElement> h_;
-    uint64_t n;
-    uint64_t m;
+    std::size_t n;
+    std::size_t m;
 };
 
 } // namespace lelantus
diff --git a/src/liblelantus/test/sigma_extended_test.cpp b/src/liblelantus/test/sigma_extended_test.cpp
index 25d8628444..14ffbe5eb5 100644
--- a/src/liblelantus/test/sigma_extended_test.cpp
+++ b/src/liblelantus/test/sigma_extended_test.cpp
@@ -11,14 +11,14 @@ class SigmaExtendedTests : public LelantusTestingSetup {
 public:
     struct Secret {
     public:
-        Secret(int l) : l(l) {
+        Secret(std::size_t l) : l(l) {
             s.randomize();
             v.randomize();
             r.randomize();
         }
 
     public:
-        int l;
+        std::size_t l;
         Scalar s, v, r;
     };
 
@@ -31,7 +31,7 @@ class SigmaExtendedTests : public LelantusTestingSetup {
     SigmaExtendedTests() {}
 
 public:
-    void GenerateParams(int _N, int _n, int _m = 0) {
+    void GenerateParams(std::size_t _N, std::size_t _n, std::size_t _m = 0) {
         N = _N;
         n = _n;
         m = _m;
@@ -40,7 +40,7 @@ class SigmaExtendedTests : public LelantusTestingSetup {
                 throw std::logic_error("Try to get value of m from invalid n");
             }
 
-            m = std::round(log(N) / log(n));
+            m = (std::size_t)std::round(log(N) / log(n));
         }
 
         h_gens = RandomizeGroupElements(n * m);
@@ -50,7 +50,7 @@ class SigmaExtendedTests : public LelantusTestingSetup {
     void GenerateBatchProof(
         Prover &prover,
         std::vector<GroupElement> const &coins,
-        int l,
+        std::size_t l,
         Scalar const &s,
         Scalar const &v,
         Scalar const &r,
@@ -86,9 +86,9 @@ class SigmaExtendedTests : public LelantusTestingSetup {
     }
 
 public:
-    int N;
-    int n;
-    int m;
+    std::size_t N;
+    std::size_t n;
+    std::size_t m;
 
     std::vector<GroupElement> h_gens;
     GroupElement g;
@@ -96,6 +96,60 @@ BOOST_FIXTURE_TEST_SUITE(lelantus_sigma_tests, SigmaExtendedTests)
 
+BOOST_AUTO_TEST_CASE(one_out_of_N_variable_batch)
+{
+    GenerateParams(64, 4);
+
+    std::size_t commit_size = 60; // require padding
+    auto commits = RandomizeGroupElements(commit_size);
+
+    // Generate
+    std::vector<Secret> secrets;
+    std::vector<std::size_t> indexes = { 0, 1, 3, 59 };
+    std::vector<std::size_t> set_sizes = { 60, 60, 59, 16 };
+
+    for (auto index : indexes) {
+        secrets.emplace_back(index);
+
+        auto &s = secrets.back();
+
+        commits[index] = Primitives::double_commit(
+            g, s.s, h_gens[1], s.v, h_gens[0], s.r
+        );
+    }
+
+    Prover prover(g, h_gens, n, m);
+    Verifier verifier(g, h_gens, n, m);
+    std::vector<SigmaExtendedProof> proofs;
+    std::vector<Scalar> serials;
+    std::vector<Scalar> challenges;
+
+    for (std::size_t i = 0; i < indexes.size(); i++) {
+        Scalar x;
+        x.randomize();
+        proofs.emplace_back();
+        serials.push_back(secrets[i].s);
+        std::vector<GroupElement> commits_(commits.begin() + commit_size - set_sizes[i], commits.end());
+        GenerateBatchProof(
+            prover,
+            commits_,
+            secrets[i].l - (commit_size - set_sizes[i]),
+            secrets[i].s,
+            secrets[i].v,
+            secrets[i].r,
+            x,
+            proofs.back()
+        );
+        challenges.emplace_back(x);
+
+        // Verify individual proofs as a sanity check
+        BOOST_CHECK(verifier.singleverify(commits, x, secrets[i].s, set_sizes[i], proofs.back()));
+        BOOST_CHECK(verifier.singleverify(commits_, x, secrets[i].s, proofs.back()));
+    }
+
+    BOOST_CHECK(verifier.batchverify(commits, challenges, serials, set_sizes, proofs));
+}
+
 BOOST_AUTO_TEST_CASE(one_out_of_N_batch)
 {
     GenerateParams(16, 4);
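
For context on how the new entry points compose: both singleverify() overloads and both batchverify() overloads forward to the private verify() shown above, which folds every check into one multiscalar multiplication weighted by the random scalars w1, w2, w3. A minimal usage sketch follows, mirroring the one_out_of_N_variable_batch test; the helper name check_all, the include path, and the explicit secp_primitives:: qualifications are illustrative assumptions, not part of this patch.

#include <cstddef>
#include <vector>
#include "liblelantus/sigmaextended_verifier.h"

// Check each proof on its own against the padded set, then fold all proofs
// into a single batch verification (one multiscalar multiplication).
bool check_all(
        const lelantus::SigmaExtendedVerifier& verifier,
        const std::vector<secp_primitives::GroupElement>& commits,   // shared, padded commitment set
        const std::vector<secp_primitives::Scalar>& challenges,      // one challenge per proof
        const std::vector<secp_primitives::Scalar>& serials,         // one serial per proof
        const std::vector<std::size_t>& set_sizes,                   // effective set size per proof
        const std::vector<lelantus::SigmaExtendedProof>& proofs) {
    for (std::size_t i = 0; i < proofs.size(); i++) {
        // Single verification with an explicit set size
        if (!verifier.singleverify(commits, challenges[i], serials[i], set_sizes[i], proofs[i]))
            return false;
    }
    // Batch verification with per-proof challenges and set sizes
    return verifier.batchverify(commits, challenges, serials, set_sizes, proofs);
}

Note the alignment convention this relies on: a proof over a smaller set is aligned to the end of the shared commitment list, since verify() starts its commitment scalars at commits.size() - setSize and the test builds each prover view from commits.begin() + commit_size - set_sizes[i] through commits.end().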
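The variable set-size handling also leans on the base-n digit decomposition performed by convert_to_nal (and, in Scalar form, by convert_to_sigma). A self-contained sketch of that decomposition, written independently of the patch and intended only to illustrate the idea:

#include <cstddef>
#include <vector>

// Decompose num into m base-n digits, least significant digit first,
// padding with zeros up to m digits.
std::vector<std::size_t> decompose(std::size_t num, std::size_t n, std::size_t m) {
    std::vector<std::size_t> digits;
    digits.reserve(m);
    while (num != 0) {
        digits.push_back(num % n);
        num /= n;
    }
    digits.resize(m); // zero-pad to m digits
    return digits;
}

With the test parameters n = 4 and m = 3 (so N = 64), the largest signing index used above, 59, decomposes as 3 + 2*4 + 3*16, i.e. digits {3, 2, 3}.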