Refactor/remove query proof serialize #415

Draft · wants to merge 2 commits into base: main
34 changes: 33 additions & 1 deletion crates/proof-of-sql/src/base/commitment/mod.rs
@@ -16,7 +16,7 @@ mod vec_commitment_ext;
pub use vec_commitment_ext::{NumColumnsMismatch, VecCommitmentExt};

mod column_bounds;
use super::scalar::Curve25519Scalar;
use super::{proof::Transcript, scalar::Curve25519Scalar};
pub use column_bounds::{Bounds, ColumnBounds, NegativeBounds};

mod column_commitment_metadata;
@@ -89,6 +89,13 @@ pub trait Commitment:
offset: usize,
setup: &Self::PublicSetup<'_>,
) -> Vec<Self>;

/// Appends the commitment to the given transcript.
///
/// # Arguments
///
/// * `transcript` - The transcript to append the commitment to.
fn append_to_transcript(&self, transcript: &mut impl Transcript);
}

impl Commitment for RistrettoPoint {
@@ -127,9 +134,34 @@ impl Commitment for RistrettoPoint {
) -> Vec<Self> {
unimplemented!()
}

fn append_to_transcript(&self, transcript: &mut impl Transcript) {
transcript.extend_as_le([self.compress().to_bytes()]);
}
}

mod commitment_evaluation_proof;
pub use commitment_evaluation_proof::CommitmentEvaluationProof;
#[cfg(test)]
pub(crate) mod commitment_evaluation_proof_test;

#[cfg(test)]
mod tests {
use super::*;
use crate::base::proof::{Keccak256Transcript, Transcript};
use curve25519_dalek::{constants::RISTRETTO_BASEPOINT_POINT, ristretto::RistrettoPoint};

#[test]
fn we_can_append_different_ristretto_point_commitments_and_get_different_transcripts() {
let commitment1 = RistrettoPoint::default();
let commitment2 = RISTRETTO_BASEPOINT_POINT;

let mut transcript1 = Keccak256Transcript::new();
let mut transcript2 = Keccak256Transcript::new();

commitment1.append_to_transcript(&mut transcript1);
commitment2.append_to_transcript(&mut transcript2);

assert_ne!(transcript1.challenge_as_le(), transcript2.challenge_as_le());
}
}
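A minimal caller-side sketch of the new `Commitment::append_to_transcript` method, for context. It assumes the crate paths used in the test above (`Keccak256Transcript`, `Transcript`) are reachable from outside the module and that `challenge_as_le` returns the 32-byte challenge; it is an illustration, not code in this PR.

```rust
use curve25519_dalek::{constants::RISTRETTO_BASEPOINT_POINT, ristretto::RistrettoPoint};
use proof_of_sql::base::{
    commitment::Commitment,
    proof::{Keccak256Transcript, Transcript},
};

/// Hypothetical helper: bind a batch of commitments to a Fiat-Shamir transcript
/// in order, then draw a single challenge from the resulting state.
fn challenge_over_commitments(commitments: &[RistrettoPoint]) -> [u8; 32] {
    let mut transcript = Keccak256Transcript::new();
    for commitment in commitments {
        // For RistrettoPoint, this appends the compressed 32-byte encoding (see the impl above).
        commitment.append_to_transcript(&mut transcript);
    }
    transcript.challenge_as_le()
}

fn main() {
    let a = challenge_over_commitments(&[RistrettoPoint::default()]);
    let b = challenge_over_commitments(&[RISTRETTO_BASEPOINT_POINT]);
    // Distinct commitments must yield distinct challenges, as the new tests assert.
    assert_ne!(a, b);
}
```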
24 changes: 24 additions & 0 deletions crates/proof-of-sql/src/base/commitment/naive_commitment.rs
@@ -159,6 +159,10 @@ impl Commitment for NaiveCommitment {
})
.collect()
}

fn append_to_transcript(&self, transcript: &mut impl crate::base::proof::Transcript) {
transcript.extend_as_le(self.0.iter().map(Into::<[u64; 4]>::into));
}
}

#[allow(clippy::similar_names)]
@@ -194,3 +198,23 @@ fn we_can_compute_commitments_from_commitable_columns_with_offset() {
let commitments = NaiveCommitment::compute_commitments(committable_columns, 1, &());
assert_eq!(commitments[0].0, column_a_scalars);
}

#[cfg(test)]
mod tests {
use super::*;
use crate::base::proof::{Keccak256Transcript, Transcript};

#[test]
fn we_can_append_different_naive_commitments_and_get_different_transcripts() {
let commitment1 = NaiveCommitment(vec![TestScalar::from(1), TestScalar::from(2)]);
let commitment2 = NaiveCommitment(vec![TestScalar::from(3), TestScalar::from(4)]);

let mut transcript1 = Keccak256Transcript::new();
let mut transcript2 = Keccak256Transcript::new();

commitment1.append_to_transcript(&mut transcript1);
commitment2.append_to_transcript(&mut transcript2);

assert_ne!(transcript1.challenge_as_le(), transcript2.challenge_as_le());
}
}
13 changes: 11 additions & 2 deletions crates/proof-of-sql/src/base/proof/transcript.rs
@@ -13,6 +13,15 @@ pub trait Transcript {
fn new() -> Self;
/// Appends the provided messages by appending the reversed raw bytes (i.e. assuming the message is big-endian)
fn extend_as_be<M: FromBytes + AsBytes>(&mut self, messages: impl IntoIterator<Item = M>);
/// Appends the provided messages by appending the reversed raw bytes (i.e. assuming the message is big-endian)
fn extend_as_be_from_refs<'a, M: FromBytes + AsBytes + 'a + Copy>(
&mut self,
messages: impl IntoIterator<Item = &'a M>,
) {
self.extend_as_be(messages.into_iter().copied());
}
/// Appends the provided messages by appending the raw bytes (i.e. assuming the message is little-endian)
fn extend_as_le<M: AsBytes>(&mut self, messages: impl IntoIterator<Item = M>);
/// Appends the provided messages by appending the raw bytes (i.e. assuming the message is little-endian)
fn extend_as_le_from_refs<'a, M: AsBytes + 'a + ?Sized>(
&mut self,
@@ -51,9 +60,9 @@ pub trait Transcript {
/// This allows for interoperability between transcript types.
fn wrap_transcript<T: Transcript, R>(&mut self, op: impl FnOnce(&mut T) -> R) -> R {
let mut transcript = T::new();
transcript.extend_as_le_from_refs([&self.challenge_as_le()]);
transcript.extend_as_le([self.challenge_as_le()]);
let result = op(&mut transcript);
self.extend_as_le_from_refs([&transcript.challenge_as_le()]);
self.extend_as_le([transcript.challenge_as_le()]);
result
}
}
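A quick sketch of what the new `extend_as_be_from_refs` default method provides: it copies out of an iterator of references and delegates to the owned `extend_as_be`, so both append identical bytes (`1u16` is appended as `[0, 1]` big-endian, whereas `extend_as_le` would append `[1, 0]`). The import path and the challenge comparison below are assumptions based on the tests in this PR.

```rust
use proof_of_sql::base::proof::{Keccak256Transcript, Transcript};

fn refs_and_owned_appends_agree() -> bool {
    let values = [1u16, 1000, 2];

    // Owned variant: appends 00 01, 03 E8, 00 02 (big-endian byte order).
    let mut by_value = Keccak256Transcript::new();
    by_value.extend_as_be(values);

    // New reference variant: copies each &u16 and delegates to extend_as_be.
    let mut by_ref = Keccak256Transcript::new();
    by_ref.extend_as_be_from_refs(&values);

    // Identical appended bytes imply identical challenges.
    by_value.challenge_as_le() == by_ref.challenge_as_le()
}
```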
5 changes: 5 additions & 0 deletions crates/proof-of-sql/src/base/proof/transcript_core.rs
@@ -36,6 +36,11 @@ impl<T: TranscriptCore> Transcript for T {
self.raw_append(bytes);
});
}
fn extend_as_le<M: AsBytes>(&mut self, messages: impl IntoIterator<Item = M>) {
messages
.into_iter()
.for_each(|message| self.raw_append(message.as_bytes()));
}
fn extend_as_le_from_refs<'a, M: AsBytes + 'a + ?Sized>(
&mut self,
messages: impl IntoIterator<Item = &'a M>,
13 changes: 13 additions & 0 deletions crates/proof-of-sql/src/base/proof/transcript_core_test.rs
@@ -16,6 +16,19 @@ fn we_can_add_values_to_the_transcript_in_big_endian_form() {

#[test]
fn we_can_add_values_to_the_transcript_in_little_endian_form() {
let mut transcript1: T = TranscriptCore::new();
transcript1.extend_as_le([1u16, 1000, 2]);

let mut transcript2: T = TranscriptCore::new();
transcript2.raw_append(&[1, 0]);
transcript2.raw_append(&[232, 3]);
transcript2.raw_append(&[2, 0]);

assert_eq!(transcript1.raw_challenge(), transcript2.raw_challenge());
}

#[test]
fn we_can_add_values_to_the_transcript_in_little_endian_form_from_refs() {
let mut transcript1: T = TranscriptCore::new();
transcript1.extend_as_le_from_refs(&[1u16, 1000, 2]);

26 changes: 24 additions & 2 deletions crates/proof-of-sql/src/proof_primitive/dory/dory_commitment.rs
@@ -96,19 +96,26 @@ impl Commitment for DoryCommitment {
) -> Vec<Self> {
super::compute_dory_commitments(committable_columns, offset, setup)
}

fn append_to_transcript(&self, transcript: &mut impl crate::base::proof::Transcript) {
transcript.extend_canonical_serialize_as_le(&self.0);
}
}

#[cfg(test)]
mod tests {
use super::*;
use super::{DoryCommitment, DoryProverPublicSetup, DoryScalar, GT};
use crate::{
base::{
commitment::{NumColumnsMismatch, VecCommitmentExt},
commitment::{Commitment, NumColumnsMismatch, VecCommitmentExt},
database::{Column, OwnedColumn},
proof::{Keccak256Transcript, Transcript},
},
proof_primitive::dory::{rand_util::test_rng, ProverSetup, PublicParameters},
};
use ark_ec::pairing::Pairing;
use ark_ff::UniformRand;
use rand::{rngs::StdRng, SeedableRng};

#[test]
fn we_can_convert_from_columns() {
@@ -484,4 +491,19 @@ mod tests {
Err(NumColumnsMismatch)
));
}

#[test]
fn we_can_append_different_dory_commitments_and_get_different_transcripts() {
let mut rng = StdRng::seed_from_u64(42);
let commitment1 = DoryCommitment(GT::rand(&mut rng));
let commitment2 = DoryCommitment(GT::rand(&mut rng));

let mut transcript1 = Keccak256Transcript::new();
let mut transcript2 = Keccak256Transcript::new();

commitment1.append_to_transcript(&mut transcript1);
commitment2.append_to_transcript(&mut transcript2);

assert_ne!(transcript1.challenge_as_le(), transcript2.challenge_as_le());
}
}
crates/proof-of-sql/src/proof_primitive/dory/dynamic_dory_commitment.rs
@@ -82,4 +82,34 @@ impl Commitment for DynamicDoryCommitment {
) -> Vec<Self> {
super::compute_dynamic_dory_commitments(committable_columns, offset, setup)
}

fn append_to_transcript(&self, transcript: &mut impl crate::base::proof::Transcript) {
transcript.extend_canonical_serialize_as_le(&self.0);
}
}

#[cfg(test)]
mod tests {
use super::{DynamicDoryCommitment, GT};
use crate::base::{
commitment::Commitment,
proof::{Keccak256Transcript, Transcript},
};
use ark_ff::UniformRand;
use rand::{rngs::StdRng, SeedableRng};

#[test]
fn we_can_append_different_dynamic_dory_commitments_and_get_different_transcripts() {
let mut rng = StdRng::seed_from_u64(42);
let commitment1 = DynamicDoryCommitment(GT::rand(&mut rng));
let commitment2 = DynamicDoryCommitment(GT::rand(&mut rng));

let mut transcript1 = Keccak256Transcript::new();
let mut transcript2 = Keccak256Transcript::new();

commitment1.append_to_transcript(&mut transcript1);
commitment2.append_to_transcript(&mut transcript2);

assert_ne!(transcript1.challenge_as_le(), transcript2.challenge_as_le());
}
}
64 changes: 47 additions & 17 deletions crates/proof-of-sql/src/sql/proof/query_proof.rs
@@ -1,18 +1,20 @@
use super::{
CountBuilder, FinalRoundBuilder, ProofCounts, ProofPlan, ProvableQueryResult, QueryResult,
SumcheckMleEvaluations, SumcheckRandomScalars, VerificationBuilder,
CountBuilder, FinalRoundBuilder, ProofCounts, ProofPlan, QueryResult, SumcheckMleEvaluations,
SumcheckRandomScalars, VerificationBuilder,
};
use crate::{
base::{
bit::BitDistribution,
commitment::CommitmentEvaluationProof,
commitment::{Commitment, CommitmentEvaluationProof},
database::{
ColumnRef, CommitmentAccessor, DataAccessor, MetadataAccessor, Table, TableRef,
ColumnRef, CommitmentAccessor, DataAccessor, MetadataAccessor, OwnedColumn, OwnedTable,
Table, TableRef,
},
map::{IndexMap, IndexSet},
math::log2_up,
polynomial::{compute_evaluation_vector, CompositePolynomialInfo},
proof::{Keccak256Transcript, ProofError, Transcript},
scalar::Scalar,
},
proof_primitive::sumcheck::SumcheckProof,
sql::proof::{FirstRoundBuilder, QueryData},
@@ -73,7 +75,7 @@ impl<CP: CommitmentEvaluationProof> QueryProof<CP> {
expr: &(impl ProofPlan + Serialize),
accessor: &impl DataAccessor<CP::Scalar>,
setup: &CP::ProverPublicSetup<'_>,
) -> (Self, ProvableQueryResult) {
) -> (Self, OwnedTable<CP::Scalar>) {
let (min_row_num, max_row_num) = get_index_range(accessor, &expr.get_table_references());
let initial_range_length = max_row_num - min_row_num;
let alloc = Bump::new();
@@ -95,6 +97,7 @@ impl<CP: CommitmentEvaluationProof> QueryProof<CP> {
// Prover First Round: Evaluate the query && get the right number of post result challenges
let mut first_round_builder = FirstRoundBuilder::new();
let query_result = expr.first_round_evaluate(&mut first_round_builder, &alloc, &table_map);
let owned_table_result = OwnedTable::from(&query_result);
let provable_result = query_result.into();
let one_evaluation_lengths = first_round_builder.one_evaluation_lengths();

@@ -111,7 +114,7 @@ impl<CP: CommitmentEvaluationProof> QueryProof<CP> {
// construct a transcript for the proof
let mut transcript: Keccak256Transcript = make_transcript(
expr,
&provable_result,
&owned_table_result,
range_length,
min_row_num,
one_evaluation_lengths,
@@ -207,10 +210,9 @@ impl<CP: CommitmentEvaluationProof> QueryProof<CP> {
self,
expr: &(impl ProofPlan + Serialize),
accessor: &impl CommitmentAccessor<CP::Commitment>,
result: ProvableQueryResult,
result: OwnedTable<CP::Scalar>,
setup: &CP::VerifierPublicSetup<'_>,
) -> QueryResult<CP::Scalar> {
let owned_table_result = result.to_owned_table(&expr.get_column_result_fields())?;
let table_refs = expr.get_table_references();
let (min_row_num, _) = get_index_range(accessor, &table_refs);
let num_sumcheck_variables = cmp::max(log2_up(self.range_length), 1);
@@ -340,11 +342,11 @@ impl<CP: CommitmentEvaluationProof> QueryProof<CP> {
let verifier_evaluations = expr.verifier_evaluate(
&mut builder,
&evaluation_accessor,
Some(&owned_table_result),
Some(&result),
&one_eval_map,
)?;
// compute the evaluation of the result MLEs
let result_evaluations = owned_table_result.mle_evaluations(&subclaim.evaluation_point);
let result_evaluations = result.mle_evaluations(&subclaim.evaluation_point);
// check the evaluation of the result MLEs
if verifier_evaluations.column_evals() != result_evaluations {
Err(ProofError::VerificationError {
@@ -378,7 +380,7 @@ impl<CP: CommitmentEvaluationProof> QueryProof<CP> {

let verification_hash = transcript.challenge_as_le();
Ok(QueryData {
table: owned_table_result,
table: result,
verification_hash,
})
}
@@ -414,27 +416,55 @@ impl<CP: CommitmentEvaluationProof> QueryProof<CP> {
/// This function returns a transcript. The transcript is a record
/// of all the operations and data involved in creating a proof.
/// ```
fn make_transcript<T: Transcript>(
fn make_transcript<S: Scalar, T: Transcript>(
expr: &(impl ProofPlan + Serialize),
result: &ProvableQueryResult,
result: &OwnedTable<S>,
range_length: usize,
min_row_num: usize,
one_evaluation_lengths: &[usize],
) -> T {
let mut transcript = T::new();
transcript.extend_serialize_as_le(result);
for (name, column) in result.inner_table() {
transcript.extend_as_le_from_refs([name.as_str()]);
match column {
OwnedColumn::Boolean(col) => transcript.extend_as_be(col.iter().map(|&b| u8::from(b))),
OwnedColumn::TinyInt(col) => transcript.extend_as_be_from_refs(col),
OwnedColumn::SmallInt(col) => transcript.extend_as_be_from_refs(col),
OwnedColumn::Int(col) => transcript.extend_as_be_from_refs(col),
OwnedColumn::BigInt(col) => transcript.extend_as_be_from_refs(col),
OwnedColumn::VarChar(col) => {
transcript.extend_as_le_from_refs(col.iter().map(String::as_str));
}
OwnedColumn::Int128(col) => transcript.extend_as_be_from_refs(col),
OwnedColumn::Decimal75(precision, scale, col) => {
transcript.extend_as_be([precision.value()]);
transcript.extend_as_be([*scale]);
transcript.extend_as_be(col.iter().map(|&s| Into::<[u64; 4]>::into(s)));
}
OwnedColumn::Scalar(col) => {
transcript.extend_as_be(col.iter().map(|&s| Into::<[u64; 4]>::into(s)));
}
OwnedColumn::TimestampTZ(po_sqltime_unit, po_sqltime_zone, col) => {
transcript.extend_as_be([u64::from(*po_sqltime_unit)]);
transcript.extend_as_be([po_sqltime_zone.offset()]);
transcript.extend_as_be_from_refs(col);
}
}
}
transcript.extend_serialize_as_le(expr);
transcript.extend_serialize_as_le(&range_length);
transcript.extend_serialize_as_le(&min_row_num);
transcript.extend_serialize_as_le(one_evaluation_lengths);
transcript
}

fn extend_transcript<C: serde::Serialize>(
fn extend_transcript<C: Commitment>(
transcript: &mut impl Transcript,
commitments: &C,
commitments: &[C],
bit_distributions: &[BitDistribution],
) {
transcript.extend_serialize_as_le(commitments);
for commitment in commitments {
commitment.append_to_transcript(transcript);
}
transcript.extend_serialize_as_le(bit_distributions);
}
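To summarize the result-hashing change above: `make_transcript` now absorbs the decoded `OwnedTable` column by column (name first, then values) instead of serde-serializing a `ProvableQueryResult`, and `extend_transcript` appends each commitment via `append_to_transcript` rather than serializing the whole slice. The sketch below illustrates the per-column absorption pattern for two column kinds only; the helper functions are hypothetical, the import path is assumed, and the real code handles every `OwnedColumn` variant as shown in the diff.

```rust
use proof_of_sql::base::proof::{Keccak256Transcript, Transcript};

// Hypothetical helpers mirroring the per-column absorption pattern in make_transcript.
fn absorb_int_column(transcript: &mut impl Transcript, name: &str, values: &[i32]) {
    // Column name as raw little-endian bytes, then each value in big-endian form.
    transcript.extend_as_le_from_refs([name]);
    transcript.extend_as_be_from_refs(values);
}

fn absorb_varchar_column(transcript: &mut impl Transcript, name: &str, values: &[String]) {
    transcript.extend_as_le_from_refs([name]);
    transcript.extend_as_le_from_refs(values.iter().map(String::as_str));
}

fn main() {
    let mut transcript = Keccak256Transcript::new();
    absorb_int_column(&mut transcript, "a", &[1, 2, 3]);
    absorb_varchar_column(&mut transcript, "b", &["x".to_string(), "y".to_string()]);
    // Prover and verifier rebuild the same transcript from the same decoded table,
    // so the drawn challenge binds the proof to the claimed query result.
    let _challenge = transcript.challenge_as_le();
}
```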