From 43f6bee34b62ee7a11fe51fe5c91ad1832fd4663 Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Thu, 30 Nov 2023 00:21:26 +0000 Subject: [PATCH 1/6] working batch verification --- core/data.go | 7 + core/encoding.go | 3 + core/encoding/encoder.go | 46 +++++++ core/encoding/mock_encoder.go | 6 + core/validator.go | 105 ++++++++++++++ node/node.go | 20 +-- pkg/encoding/kzgEncoder/multiframe.go | 191 ++++++++++++++++++++++++++ 7 files changed, 359 insertions(+), 19 deletions(-) create mode 100644 pkg/encoding/kzgEncoder/multiframe.go diff --git a/core/data.go b/core/data.go index 2bcac82bfc..6017722d6c 100644 --- a/core/data.go +++ b/core/data.go @@ -160,3 +160,10 @@ func (cb Bundles) Serialize() ([][][]byte, error) { } return data, nil } + +type Sample struct { + Commitment *Commitment + Chunk *Chunk // contain proof and coeffs + EvalIndex ChunkNumber + BlobIndex int +} diff --git a/core/encoding.go b/core/encoding.go index ea1b07eaeb..089e007e7b 100644 --- a/core/encoding.go +++ b/core/encoding.go @@ -38,6 +38,9 @@ type Encoder interface { // VerifyChunks takes in the chunks, indices, commitments, and encoding parameters and returns an error if the chunks are invalid. VerifyChunks(chunks []*Chunk, indices []ChunkNumber, commitments BlobCommitments, params EncodingParams) error + // VerifyBatch takes in + UniversalVerifyChunks(params EncodingParams, samples []Sample, numBlobs int) error + // VerifyBlobLength takes in the commitments and returns an error if the blob length is invalid. 
VerifyBlobLength(commitments BlobCommitments) error diff --git a/core/encoding/encoder.go b/core/encoding/encoder.go index d5cad8fb3f..98bf29b868 100644 --- a/core/encoding/encoder.go +++ b/core/encoding/encoder.go @@ -2,6 +2,9 @@ package encoding import ( "crypto/sha256" + "errors" + "fmt" + "log" "github.com/Layr-Labs/eigenda/core" "github.com/Layr-Labs/eigenda/pkg/encoding/encoder" @@ -60,6 +63,7 @@ func (e *Encoder) Encode(data []byte, params core.EncodingParams) (core.BlobComm } } encParams := toEncParams(params) + fmt.Println("encParams", encParams) enc, err := e.EncoderGroup.GetKzgEncoder(encParams) if err != nil { @@ -78,6 +82,24 @@ func (e *Encoder) Encode(data []byte, params core.EncodingParams) (core.BlobComm Coeffs: frame.Coeffs, Proof: frame.Proof, } + + q, _ := encoder.GetLeadingCosetIndex(uint64(ind), uint64(len(chunks))) + lc := enc.Fs.ExpandedRootsOfUnity[uint64(q)] + ok := frame.Verify(enc.Ks, commit, &lc) + if !ok { + log.Fatalf("Proof %v failed\n", ind) + } else { + + fmt.Println("proof", frame.Proof.String()) + fmt.Println("commitment", commit.String()) + for i := 0; i < len(frame.Coeffs); i++ { + fmt.Printf("%v ", frame.Coeffs[i].String()) + } + fmt.Println("q", q, lc.String()) + + fmt.Println("***************tested frame and pass") + } + } length := uint(len(encoder.ToFrArray(data))) @@ -131,6 +153,30 @@ func (e *Encoder) VerifyChunks(chunks []*core.Chunk, indices []core.ChunkNumber, } +// convert struct understandable by the crypto library +func (e *Encoder) UniversalVerifyChunks(params core.EncodingParams, samplesCore []core.Sample, numBlobs int) error { + encParams := toEncParams(params) + + samples := make([]kzgEncoder.Sample, len(samplesCore)) + + for i, sc := range samplesCore { + sample := kzgEncoder.Sample{ + Commitment: *sc.Commitment.G1Point, + Proof: sc.Chunk.Proof, + Row: sc.BlobIndex, + Coeffs: sc.Chunk.Coeffs, + X: sc.EvalIndex, + } + samples[i] = sample + } + + if e.EncoderGroup.UniversalVerify(encParams, samples, numBlobs) { + 
return nil + } else { + return errors.New("Universal Verify wrong") + } +} + // Decode takes in the chunks, indices, and encoding parameters and returns the decoded blob // The result is trimmed to the given maxInputSize. func (e *Encoder) Decode(chunks []*core.Chunk, indices []core.ChunkNumber, params core.EncodingParams, maxInputSize uint64) ([]byte, error) { diff --git a/core/encoding/mock_encoder.go b/core/encoding/mock_encoder.go index 4dc1d4f1ea..be36b30a26 100644 --- a/core/encoding/mock_encoder.go +++ b/core/encoding/mock_encoder.go @@ -27,6 +27,12 @@ func (e *MockEncoder) VerifyChunks(chunks []*core.Chunk, indices []core.ChunkNum return args.Error(0) } +func (e *MockEncoder) UniversalVerifyChunks(params core.EncodingParams, samples []core.Sample, numBlobs int) error { + args := e.Called(params, samples, numBlobs) + time.Sleep(e.Delay) + return args.Error(0) +} + func (e *MockEncoder) VerifyBlobLength(commitments core.BlobCommitments) error { args := e.Called(commitments) diff --git a/core/validator.go b/core/validator.go index e5ebc83bbe..dcb5ddd1a9 100644 --- a/core/validator.go +++ b/core/validator.go @@ -2,6 +2,7 @@ package core import ( "errors" + "fmt" ) var ( @@ -10,6 +11,7 @@ var ( ) type ChunkValidator interface { + ValidateBatch([]*BlobMessage, *OperatorState) error ValidateBlob(*BlobMessage, *OperatorState) error UpdateOperatorID(OperatorID) } @@ -114,3 +116,106 @@ func (v *chunkValidator) ValidateBlob(blob *BlobMessage, operatorState *Operator func (v *chunkValidator) UpdateOperatorID(operatorID OperatorID) { v.operatorID = operatorID } + +func (v *chunkValidator) ValidateBatch(blobs []*BlobMessage, operatorState *OperatorState) error { + + batchGroup := make(map[EncodingParams][]Sample) + numBlobMap := make(map[EncodingParams]int) + + for i, blob := range blobs { + if len(blob.Bundles) != len(blob.BlobHeader.QuorumInfos) { + return errors.New("number of bundles does not match number of quorums") + } + + // Validate the blob length + err := 
v.encoder.VerifyBlobLength(blob.BlobHeader.BlobCommitments) + if err != nil { + return err + } + // for each quorum + for _, quorumHeader := range blob.BlobHeader.QuorumInfos { + // Check if the operator is a member of the quorum + if _, ok := operatorState.Operators[quorumHeader.QuorumID]; !ok { + continue + } + + // Get the assignments for the quorum + assignment, info, err := v.assignment.GetOperatorAssignment( + operatorState, + quorumHeader.QuorumID, + quorumHeader.QuantizationFactor, + v.operatorID, + ) + if err != nil { + return err + } + + // Validate the number of chunks + if assignment.NumChunks == 0 { + continue + } + if assignment.NumChunks != uint(len(blob.Bundles[quorumHeader.QuorumID])) { + return errors.New("number of chunks does not match assignment") + } + + chunkLength, err := v.assignment.GetChunkLengthFromHeader(operatorState, quorumHeader) + if err != nil { + return err + } + + // Get the chunk length + chunks := blob.Bundles[quorumHeader.QuorumID] + for _, chunk := range chunks { + if uint(chunk.Length()) != chunkLength { + return ErrChunkLengthMismatch + } + } + + // Validate the chunk length + numOperators := uint(len(operatorState.Operators[quorumHeader.QuorumID])) + if chunkLength*quorumHeader.QuantizationFactor*numOperators != quorumHeader.EncodedBlobLength { + return ErrInvalidHeader + } + + // Get Encoding Params + params := EncodingParams{ChunkLength: chunkLength, NumChunks: info.TotalChunks} + + // ToDo add a struct + _, ok := batchGroup[params] + if !ok { + batchGroup[params] = make([]Sample, 0) + numBlobMap[params] = 1 + } else { + numBlobMap[params] += 1 + } + + // Check the received chunks against the commitment + indices := assignment.GetIndices() + fmt.Println("indices", indices) + samples := make([]Sample, 0) + for ind := range chunks { + sample := Sample{ + Commitment: blob.BlobHeader.BlobCommitments.Commitment, + Chunk: chunks[ind], + EvalIndex: uint(indices[ind]), + BlobIndex: i, + } + samples = append(samples, sample) + } 
+ batchGroup[params] = append(batchGroup[params], samples...) + } + } + + // ToDo parallelize + fmt.Println("num batchGroup", len(batchGroup)) + for params, samples := range batchGroup { + numBlobs, _ := numBlobMap[params] + err := v.encoder.UniversalVerifyChunks(params, samples, numBlobs) + if err != nil { + return err + } + } + + return nil + +} diff --git a/node/node.go b/node/node.go index a43037bb70..654c21a61e 100644 --- a/node/node.go +++ b/node/node.go @@ -25,7 +25,6 @@ import ( "github.com/Layr-Labs/eigensdk-go/nodeapi" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/gammazero/workerpool" ) const ( @@ -322,24 +321,7 @@ func (n *Node) ValidateBatch(ctx context.Context, header *core.BatchHeader, blob return err } - pool := workerpool.New(n.Config.NumBatchValidators) - out := make(chan error, len(blobs)) - for _, blob := range blobs { - blob := blob - pool.Submit(func() { - n.validateBlob(ctx, blob, operatorState, out) - }) - } - - for i := 0; i < len(blobs); i++ { - err := <-out - if err != nil { - return err - } - } - - return nil - + return n.Validator.ValidateBatch(blobs, operatorState) } func (n *Node) updateSocketAddress(ctx context.Context, newSocketAddr string) { diff --git a/pkg/encoding/kzgEncoder/multiframe.go b/pkg/encoding/kzgEncoder/multiframe.go new file mode 100644 index 0000000000..ec82bc60b8 --- /dev/null +++ b/pkg/encoding/kzgEncoder/multiframe.go @@ -0,0 +1,191 @@ +package kzgEncoder + +import ( + "fmt" + "log" + + rs "github.com/Layr-Labs/eigenda/pkg/encoding/encoder" + kzg "github.com/Layr-Labs/eigenda/pkg/kzg" + bls "github.com/Layr-Labs/eigenda/pkg/kzg/bn254" +) + +type Sample struct { + Commitment bls.G1Point + Proof bls.G1Point + Row int + Coeffs []bls.Fr + X uint // X is int , at which index is evaluated +} + +// m is number of blob +func (group *KzgEncoderGroup) UniversalVerify(params rs.EncodingParams, samples []Sample, m int) bool { + verifier, _ := 
group.GetKzgVerifier(params) + ks := verifier.Ks + + for ind, s := range samples { + q, err := rs.GetLeadingCosetIndex( + uint64(s.X), + params.NumChunks, + ) + if err != nil { + return false + } + + lc := ks.FFTSettings.ExpandedRootsOfUnity[uint64(q)] + + ok := SingleVerify(ks, &s.Commitment, &lc, s.Coeffs, s.Proof) + if !ok { + fmt.Println("proof", s.Proof.String()) + fmt.Println("commitment", s.Commitment.String()) + + for i := 0; i < len(s.Coeffs); i++ { + fmt.Printf("%v ", s.Coeffs[i].String()) + } + fmt.Println("q", q, lc.String()) + + log.Fatalf("Proof %v failed\n", ind) + } else { + + fmt.Println("&&&&&&&&&&&&&&&&&&tested frame and pass", ind) + } + } + + D := len(samples[0].Coeffs) // chunkLen + + n := len(samples) + + rInt := uint64(22894) + var r bls.Fr + bls.AsFr(&r, rInt) + + randomsFr := make([]bls.Fr, n) + bls.AsFr(&randomsFr[0], rInt) + + // lhs + var tmp bls.Fr + + // power of r + for j := 0; j < n-1; j++ { + bls.MulModFr(&randomsFr[j+1], &randomsFr[j], &r) + } + + // array of proofs + proofs := make([]bls.G1Point, n) + for i := 0; i < n; i++ { + bls.CopyG1(&proofs[i], &samples[i].Proof) + } + + fmt.Printf("len proof %v len ran %v\n", len(proofs), len(randomsFr)) + // lhs g1 + lhsG1 := bls.LinCombG1(proofs, randomsFr) + + // lhs g2 + lhsG2 := &ks.Srs.G2[D] + + // rhs g2 + rhsG2 := &bls.GenG2 + + // rhs g1 + // get commitments + commits := make([]bls.G1Point, m) + //for k := 0 ; k < n ; k++ { + // commits[k] = samples[k].Commitment + //} + // get coeffs + ftCoeffs := make([]bls.Fr, m) + for k := 0; k < n; k++ { + s := samples[k] + row := s.Row + bls.AddModFr(&ftCoeffs[row], &ftCoeffs[row], &randomsFr[k]) + bls.CopyG1(&commits[row], &s.Commitment) + } + fmt.Printf("len commit %v len coeff %v\n", len(commits), len(ftCoeffs)) + + ftG1 := bls.LinCombG1(commits, ftCoeffs) + + // second term + stCoeffs := make([]bls.Fr, D) + for k := 0; k < n; k++ { + coeffs := samples[k].Coeffs + + rk := randomsFr[k] + for j := 0; j < D; j++ { + bls.MulModFr(&tmp, 
&coeffs[j], &rk) + bls.AddModFr(&stCoeffs[j], &stCoeffs[j], &tmp) + } + } + stG1 := bls.LinCombG1(ks.Srs.G1[:D], stCoeffs) + + // third term + ttCoeffs := make([]bls.Fr, n) + + // get leading coset powers + leadingDs := make([]bls.Fr, n) + + for k := 0; k < n; k++ { + x, err := rs.GetLeadingCosetIndex( + uint64(samples[k].X), + params.NumChunks, + ) + if err != nil { + return false + } + + h := ks.ExpandedRootsOfUnity[x] + var hPow bls.Fr + bls.CopyFr(&hPow, &bls.ONE) + + for j := 0; j < D; j++ { + bls.MulModFr(&tmp, &hPow, &h) + bls.CopyFr(&hPow, &tmp) + } + bls.CopyFr(&leadingDs[k], &hPow) + } + + // + for k := 0; k < n; k++ { + rk := randomsFr[k] + bls.MulModFr(&ttCoeffs[k], &rk, &leadingDs[k]) + } + ttG1 := bls.LinCombG1(proofs, ttCoeffs) + + var rhsG1 bls.G1Point + bls.SubG1(&rhsG1, ftG1, stG1) + bls.AddG1(&rhsG1, &rhsG1, ttG1) + + return bls.PairingsVerify(lhsG1, lhsG2, &rhsG1, rhsG2) +} + +func SingleVerify(ks *kzg.KZGSettings, commitment *bls.G1Point, x *bls.Fr, coeffs []bls.Fr, proof bls.G1Point) bool { + var xPow bls.Fr + bls.CopyFr(&xPow, &bls.ONE) + + var tmp bls.Fr + for i := 0; i < len(coeffs); i++ { + bls.MulModFr(&tmp, &xPow, x) + bls.CopyFr(&xPow, &tmp) + } + + // [x^n]_2 + var xn2 bls.G2Point + bls.MulG2(&xn2, &bls.GenG2, &xPow) + + // [s^n - x^n]_2 + var xnMinusYn bls.G2Point + bls.SubG2(&xnMinusYn, &ks.Srs.G2[len(coeffs)], &xn2) + + // [interpolation_polynomial(s)]_1 + is1 := bls.LinCombG1(ks.Srs.G1[:len(coeffs)], coeffs) + // [commitment - interpolation_polynomial(s)]_1 = [commit]_1 - [interpolation_polynomial(s)]_1 + var commitMinusInterpolation bls.G1Point + bls.SubG1(&commitMinusInterpolation, commitment, is1) + + // Verify the pairing equation + // + // e([commitment - interpolation_polynomial(s)], [1]) = e([proof], [s^n - x^n]) + // equivalent to + // e([commitment - interpolation_polynomial]^(-1), [1]) * e([proof], [s^n - x^n]) = 1_T + // + + return bls.PairingsVerify(&commitMinusInterpolation, &bls.GenG2, &proof, &xnMinusYn) +} From 
0352b05e4b3e9980ca4e08137bae99a91b218522 Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Thu, 30 Nov 2023 02:29:43 +0000 Subject: [PATCH 2/6] organize data struct and add fiat shamir --- core/data.go | 9 +- core/encoding/encoder.go | 26 +---- core/validator.go | 33 +++---- pkg/encoding/kzgEncoder/multiframe.go | 137 ++++++++++++++------------ pkg/kzg/bn254/bignum_gnark.go | 8 ++ 5 files changed, 103 insertions(+), 110 deletions(-) diff --git a/core/data.go b/core/data.go index 6017722d6c..cd518e7354 100644 --- a/core/data.go +++ b/core/data.go @@ -161,9 +161,16 @@ func (cb Bundles) Serialize() ([][][]byte, error) { return data, nil } +// Sample is a chunk with associated metadata used by the Universal Batch Verifier type Sample struct { Commitment *Commitment - Chunk *Chunk // contain proof and coeffs + Chunk *Chunk EvalIndex ChunkNumber BlobIndex int } + +// SubBatch is a part of the whole Batch with identical Encoding Parameters, i.e. (ChunkLen, NumChunk) +type SubBatch struct { + Samples []Sample + NumBlobs int +} diff --git a/core/encoding/encoder.go b/core/encoding/encoder.go index 98bf29b868..241dd709ea 100644 --- a/core/encoding/encoder.go +++ b/core/encoding/encoder.go @@ -2,9 +2,7 @@ package encoding import ( "crypto/sha256" - "errors" "fmt" - "log" "github.com/Layr-Labs/eigenda/core" "github.com/Layr-Labs/eigenda/pkg/encoding/encoder" @@ -82,24 +80,6 @@ func (e *Encoder) Encode(data []byte, params core.EncodingParams) (core.BlobComm Coeffs: frame.Coeffs, Proof: frame.Proof, } - - q, _ := encoder.GetLeadingCosetIndex(uint64(ind), uint64(len(chunks))) - lc := enc.Fs.ExpandedRootsOfUnity[uint64(q)] - ok := frame.Verify(enc.Ks, commit, &lc) - if !ok { - log.Fatalf("Proof %v failed\n", ind) - } else { - - fmt.Println("proof", frame.Proof.String()) - fmt.Println("commitment", commit.String()) - for i := 0; i < len(frame.Coeffs); i++ { - fmt.Printf("%v ", frame.Coeffs[i].String()) - } - fmt.Println("q", q, lc.String()) - - fmt.Println("***************tested frame 
and pass") - } - } length := uint(len(encoder.ToFrArray(data))) @@ -170,11 +150,7 @@ func (e *Encoder) UniversalVerifyChunks(params core.EncodingParams, samplesCore samples[i] = sample } - if e.EncoderGroup.UniversalVerify(encParams, samples, numBlobs) { - return nil - } else { - return errors.New("Universal Verify wrong") - } + return e.EncoderGroup.UniversalVerify(encParams, samples, numBlobs) } // Decode takes in the chunks, indices, and encoding parameters and returns the decoded blob diff --git a/core/validator.go b/core/validator.go index dcb5ddd1a9..5f2457eecd 100644 --- a/core/validator.go +++ b/core/validator.go @@ -2,7 +2,6 @@ package core import ( "errors" - "fmt" ) var ( @@ -119,8 +118,7 @@ func (v *chunkValidator) UpdateOperatorID(operatorID OperatorID) { func (v *chunkValidator) ValidateBatch(blobs []*BlobMessage, operatorState *OperatorState) error { - batchGroup := make(map[EncodingParams][]Sample) - numBlobMap := make(map[EncodingParams]int) + subBatchMap := make(map[EncodingParams]SubBatch) for i, blob := range blobs { if len(blob.Bundles) != len(blob.BlobHeader.QuorumInfos) { @@ -180,18 +178,8 @@ func (v *chunkValidator) ValidateBatch(blobs []*BlobMessage, operatorState *Oper // Get Encoding Params params := EncodingParams{ChunkLength: chunkLength, NumChunks: info.TotalChunks} - // ToDo add a struct - _, ok := batchGroup[params] - if !ok { - batchGroup[params] = make([]Sample, 0) - numBlobMap[params] = 1 - } else { - numBlobMap[params] += 1 - } - // Check the received chunks against the commitment indices := assignment.GetIndices() - fmt.Println("indices", indices) samples := make([]Sample, 0) for ind := range chunks { sample := Sample{ @@ -202,15 +190,22 @@ func (v *chunkValidator) ValidateBatch(blobs []*BlobMessage, operatorState *Oper } samples = append(samples, sample) } - batchGroup[params] = append(batchGroup[params], samples...) 
+ + // Sort into subBatch + subBatch, ok := subBatchMap[params] + if !ok { + subBatch.Samples = samples + subBatch.NumBlobs = 1 + } else { + subBatch.Samples = append(subBatch.Samples, samples...) + subBatch.NumBlobs += 1 + } } } - // ToDo parallelize - fmt.Println("num batchGroup", len(batchGroup)) - for params, samples := range batchGroup { - numBlobs, _ := numBlobMap[params] - err := v.encoder.UniversalVerifyChunks(params, samples, numBlobs) + // ToDo add parallelization for verification for each subBatch + for params, subBatch := range subBatchMap { + err := v.encoder.UniversalVerifyChunks(params, subBatch.Samples, subBatch.NumBlobs) if err != nil { return err } diff --git a/pkg/encoding/kzgEncoder/multiframe.go b/pkg/encoding/kzgEncoder/multiframe.go index ec82bc60b8..f1e80e0e6f 100644 --- a/pkg/encoding/kzgEncoder/multiframe.go +++ b/pkg/encoding/kzgEncoder/multiframe.go @@ -1,11 +1,12 @@ package kzgEncoder import ( + "bytes" + "encoding/gob" + "errors" "fmt" - "log" rs "github.com/Layr-Labs/eigenda/pkg/encoding/encoder" - kzg "github.com/Layr-Labs/eigenda/pkg/kzg" bls "github.com/Layr-Labs/eigenda/pkg/kzg/bn254" ) @@ -17,49 +18,58 @@ type Sample struct { X uint // X is int , at which index is evaluated } -// m is number of blob -func (group *KzgEncoderGroup) UniversalVerify(params rs.EncodingParams, samples []Sample, m int) bool { - verifier, _ := group.GetKzgVerifier(params) - ks := verifier.Ks - - for ind, s := range samples { - q, err := rs.GetLeadingCosetIndex( - uint64(s.X), - params.NumChunks, - ) - if err != nil { - return false - } +// generate a random value using Fiat Shamir transform +func GenRandomness(params rs.EncodingParams, samples []Sample, m int) (bls.Fr, error) { - lc := ks.FFTSettings.ExpandedRootsOfUnity[uint64(q)] + var buffer bytes.Buffer + enc := gob.NewEncoder(&buffer) + err := enc.Encode(samples) + if err != nil { + return bls.ZERO, err + } - ok := SingleVerify(ks, &s.Commitment, &lc, s.Coeffs, s.Proof) - if !ok { - 
fmt.Println("proof", s.Proof.String()) - fmt.Println("commitment", s.Commitment.String()) + err = enc.Encode(params) + if err != nil { + return bls.ZERO, err + } - for i := 0; i < len(s.Coeffs); i++ { - fmt.Printf("%v ", s.Coeffs[i].String()) - } - fmt.Println("q", q, lc.String()) + err = enc.Encode(m) + if err != nil { + return bls.ZERO, err + } - log.Fatalf("Proof %v failed\n", ind) - } else { + var randomFr bls.Fr - fmt.Println("&&&&&&&&&&&&&&&&&&tested frame and pass", ind) - } + err = bls.HashToSingleField(&randomFr, buffer.Bytes()) + if err != nil { + return bls.ZERO, err } + return randomFr, nil +} - D := len(samples[0].Coeffs) // chunkLen +// m is number of blob +func (group *KzgEncoderGroup) UniversalVerify(params rs.EncodingParams, samples []Sample, m int) error { + verifier, _ := group.GetKzgVerifier(params) + ks := verifier.Ks + + D := params.ChunkLen n := len(samples) - rInt := uint64(22894) - var r bls.Fr - bls.AsFr(&r, rInt) + //rInt := uint64(22894) + //var r bls.Fr + //bls.AsFr(&r, rInt) + + r, err := GenRandomness(params, samples, m) + if err != nil { + return err + } randomsFr := make([]bls.Fr, n) - bls.AsFr(&randomsFr[0], rInt) + //bls.AsFr(&randomsFr[0], rInt) + bls.CopyFr(&randomsFr[0], &r) + + fmt.Println("random", r.String()) // lhs var tmp bls.Fr @@ -109,7 +119,7 @@ func (group *KzgEncoderGroup) UniversalVerify(params rs.EncodingParams, samples coeffs := samples[k].Coeffs rk := randomsFr[k] - for j := 0; j < D; j++ { + for j := uint64(0); j < D; j++ { bls.MulModFr(&tmp, &coeffs[j], &rk) bls.AddModFr(&stCoeffs[j], &stCoeffs[j], &tmp) } @@ -128,14 +138,14 @@ func (group *KzgEncoderGroup) UniversalVerify(params rs.EncodingParams, samples params.NumChunks, ) if err != nil { - return false + return err } h := ks.ExpandedRootsOfUnity[x] var hPow bls.Fr bls.CopyFr(&hPow, &bls.ONE) - for j := 0; j < D; j++ { + for j := uint64(0); j < D; j++ { bls.MulModFr(&tmp, &hPow, &h) bls.CopyFr(&hPow, &tmp) } @@ -153,39 +163,36 @@ func (group *KzgEncoderGroup) 
UniversalVerify(params rs.EncodingParams, samples bls.SubG1(&rhsG1, ftG1, stG1) bls.AddG1(&rhsG1, &rhsG1, ttG1) - return bls.PairingsVerify(lhsG1, lhsG2, &rhsG1, rhsG2) + if bls.PairingsVerify(lhsG1, lhsG2, &rhsG1, rhsG2) { + return nil + } else { + return errors.New("Universal Verify Incorrect paring") + } } -func SingleVerify(ks *kzg.KZGSettings, commitment *bls.G1Point, x *bls.Fr, coeffs []bls.Fr, proof bls.G1Point) bool { - var xPow bls.Fr - bls.CopyFr(&xPow, &bls.ONE) +//func SingleVerify(ks *kzg.KZGSettings, commitment *bls.G1Point, x *bls.Fr, coeffs []bls.Fr, proof bls.G1Point) bool { +// var xPow bls.Fr +// bls.CopyFr(&xPow, &bls.ONE) - var tmp bls.Fr - for i := 0; i < len(coeffs); i++ { - bls.MulModFr(&tmp, &xPow, x) - bls.CopyFr(&xPow, &tmp) - } - - // [x^n]_2 - var xn2 bls.G2Point - bls.MulG2(&xn2, &bls.GenG2, &xPow) +// var tmp bls.Fr +// for i := 0; i < len(coeffs); i++ { +// bls.MulModFr(&tmp, &xPow, x) +// bls.CopyFr(&xPow, &tmp) +// } - // [s^n - x^n]_2 - var xnMinusYn bls.G2Point - bls.SubG2(&xnMinusYn, &ks.Srs.G2[len(coeffs)], &xn2) +// [x^n]_2 +// var xn2 bls.G2Point +// bls.MulG2(&xn2, &bls.GenG2, &xPow) - // [interpolation_polynomial(s)]_1 - is1 := bls.LinCombG1(ks.Srs.G1[:len(coeffs)], coeffs) - // [commitment - interpolation_polynomial(s)]_1 = [commit]_1 - [interpolation_polynomial(s)]_1 - var commitMinusInterpolation bls.G1Point - bls.SubG1(&commitMinusInterpolation, commitment, is1) +// [s^n - x^n]_2 +// var xnMinusYn bls.G2Point +// bls.SubG2(&xnMinusYn, &ks.Srs.G2[len(coeffs)], &xn2) - // Verify the pairing equation - // - // e([commitment - interpolation_polynomial(s)], [1]) = e([proof], [s^n - x^n]) - // equivalent to - // e([commitment - interpolation_polynomial]^(-1), [1]) * e([proof], [s^n - x^n]) = 1_T - // +// [interpolation_polynomial(s)]_1 +// is1 := bls.LinCombG1(ks.Srs.G1[:len(coeffs)], coeffs) +// [commitment - interpolation_polynomial(s)]_1 = [commit]_1 - [interpolation_polynomial(s)]_1 +// var commitMinusInterpolation 
bls.G1Point +// bls.SubG1(&commitMinusInterpolation, commitment, is1) - return bls.PairingsVerify(&commitMinusInterpolation, &bls.GenG2, &proof, &xnMinusYn) -} +// return bls.PairingsVerify(&commitMinusInterpolation, &bls.GenG2, &proof, &xnMinusYn) +//} diff --git a/pkg/kzg/bn254/bignum_gnark.go b/pkg/kzg/bn254/bignum_gnark.go index cd44540acc..c7f2827573 100644 --- a/pkg/kzg/bn254/bignum_gnark.go +++ b/pkg/kzg/bn254/bignum_gnark.go @@ -50,6 +50,14 @@ func AsFr(dst *Fr, i uint64) { (*fr.Element)(dst).SetUint64(i) } +func HashToSingleField(dst *Fr, msg []byte) error { + DST := []byte("-") + randomFr, err := fr.Hash(msg, DST, 1) + randomFrBytes := (randomFr[0]).Bytes() + FrSetBytes(dst, randomFrBytes[:]) + return err +} + func FrStr(b *Fr) string { if b == nil { return "" From 327699f83881e006881de2c2949e2e2a9cf7b685 Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Thu, 30 Nov 2023 03:04:20 +0000 Subject: [PATCH 3/6] code refactor --- core/validator.go | 210 ++++++++++++++++++++-------------------------- 1 file changed, 91 insertions(+), 119 deletions(-) diff --git a/core/validator.go b/core/validator.go index 5f2457eecd..0497b45c88 100644 --- a/core/validator.go +++ b/core/validator.go @@ -7,6 +7,7 @@ import ( var ( ErrChunkLengthMismatch = errors.New("chunk length mismatch") ErrInvalidHeader = errors.New("invalid header") + ErrBlobQuorumSkip = errors.New("blob skipped for a quorum before verification") ) type ChunkValidator interface { @@ -32,81 +33,91 @@ func NewChunkValidator(enc Encoder, asgn AssignmentCoordinator, cst ChainState, } } -func (v *chunkValidator) ValidateBlob(blob *BlobMessage, operatorState *OperatorState) error { - if len(blob.Bundles) != len(blob.BlobHeader.QuorumInfos) { - return errors.New("number of bundles does not match number of quorums") +func (v *chunkValidator) preprocessBlob(quorumHeader *BlobQuorumInfo, blob *BlobMessage, operatorState *OperatorState) ([]*Chunk, *Assignment, *EncodingParams, error) { + if quorumHeader.AdversaryThreshold >= 
quorumHeader.QuorumThreshold { + return nil, nil, nil, errors.New("invalid header: quorum threshold does not exceed adversary threshold") } - // Validate the blob length - err := v.encoder.VerifyBlobLength(blob.BlobHeader.BlobCommitments) - if err != nil { - return err + // Check if the operator is a member of the quorum + if _, ok := operatorState.Operators[quorumHeader.QuorumID]; !ok { + return nil, nil, nil, ErrBlobQuorumSkip } - for _, quorumHeader := range blob.BlobHeader.QuorumInfos { + // Get the assignments for the quorum + assignment, info, err := v.assignment.GetOperatorAssignment(operatorState, quorumHeader.QuorumID, quorumHeader.QuantizationFactor, v.operatorID) + if err != nil { + return nil, nil, nil, err + } - if quorumHeader.AdversaryThreshold >= quorumHeader.QuorumThreshold { - return errors.New("invalid header: quorum threshold does not exceed adversary threshold") - } + // Validate the number of chunks + if assignment.NumChunks == 0 { + return nil, nil, nil, ErrBlobQuorumSkip + } + if assignment.NumChunks != uint(len(blob.Bundles[quorumHeader.QuorumID])) { + return nil, nil, nil, errors.New("number of chunks does not match assignment") + } - // Check if the operator is a member of the quorum - if _, ok := operatorState.Operators[quorumHeader.QuorumID]; !ok { - continue - } + chunkLength, err := v.assignment.GetChunkLengthFromHeader(operatorState, quorumHeader) + if err != nil { + return nil, nil, nil, err + } - // Get the assignments for the quorum - assignment, info, err := v.assignment.GetOperatorAssignment(operatorState, quorumHeader.QuorumID, quorumHeader.QuantizationFactor, v.operatorID) - if err != nil { - return err - } + // Validate the chunkLength against the quorum and adversary threshold parameters + numOperators := uint(len(operatorState.Operators[quorumHeader.QuorumID])) + minChunkLength, err := v.assignment.GetMinimumChunkLength(numOperators, blob.BlobHeader.BlobCommitments.Length, quorumHeader.QuantizationFactor, 
quorumHeader.QuorumThreshold, quorumHeader.AdversaryThreshold) + if err != nil { + return nil, nil, nil, err + } + params, err := GetEncodingParams(minChunkLength, info.TotalChunks) + if err != nil { + return nil, nil, nil, err + } - // Validate the number of chunks - if assignment.NumChunks == 0 { - continue - } - if assignment.NumChunks != uint(len(blob.Bundles[quorumHeader.QuorumID])) { - return errors.New("number of chunks does not match assignment") - } + if params.ChunkLength != chunkLength { + return nil, nil, nil, errors.New("number of chunks does not match assignment") + } - chunkLength, err := v.assignment.GetChunkLengthFromHeader(operatorState, quorumHeader) - if err != nil { - return err + // Get the chunk length + chunks := blob.Bundles[quorumHeader.QuorumID] + for _, chunk := range chunks { + if uint(chunk.Length()) != chunkLength { + return nil, nil, nil, ErrChunkLengthMismatch } + } - // Validate the chunkLength against the quorum and adversary threshold parameters - numOperators := uint(len(operatorState.Operators[quorumHeader.QuorumID])) - minChunkLength, err := v.assignment.GetMinimumChunkLength(numOperators, blob.BlobHeader.BlobCommitments.Length, quorumHeader.QuantizationFactor, quorumHeader.QuorumThreshold, quorumHeader.AdversaryThreshold) - if err != nil { - return err - } - params, err := GetEncodingParams(minChunkLength, info.TotalChunks) - if err != nil { - return err - } + // Validate the chunk length + if chunkLength*quorumHeader.QuantizationFactor*numOperators != quorumHeader.EncodedBlobLength { + return nil, nil, nil, ErrInvalidHeader + } - if params.ChunkLength != chunkLength { - return errors.New("number of chunks does not match assignment") - } + return chunks, &assignment, ¶ms, nil +} - // Get the chunk length - chunks := blob.Bundles[quorumHeader.QuorumID] - for _, chunk := range chunks { - if uint(chunk.Length()) != chunkLength { - return ErrChunkLengthMismatch - } - } +func (v *chunkValidator) ValidateBlob(blob *BlobMessage, 
operatorState *OperatorState) error { + if len(blob.Bundles) != len(blob.BlobHeader.QuorumInfos) { + return errors.New("number of bundles does not match number of quorums") + } - // Validate the chunk length - if chunkLength*quorumHeader.QuantizationFactor*numOperators != quorumHeader.EncodedBlobLength { - return ErrInvalidHeader - } + // Validate the blob length + err := v.encoder.VerifyBlobLength(blob.BlobHeader.BlobCommitments) + if err != nil { + return err + } - // Check the received chunks against the commitment - err = v.encoder.VerifyChunks(chunks, assignment.GetIndices(), blob.BlobHeader.BlobCommitments, params) - if err != nil { + for _, quorumHeader := range blob.BlobHeader.QuorumInfos { + // preprocess validation info + chunks, assignment, params, err := v.preprocessBlob(quorumHeader, blob, operatorState) + if err == ErrBlobQuorumSkip { + continue + } else if err != nil { return err + } else { + // Check the received chunks against the commitment + err = v.encoder.VerifyChunks(chunks, assignment.GetIndices(), blob.BlobHeader.BlobCommitments, *params) + if err != nil { + return err + } } - } return nil @@ -133,72 +144,33 @@ func (v *chunkValidator) ValidateBatch(blobs []*BlobMessage, operatorState *Oper // for each quorum for _, quorumHeader := range blob.BlobHeader.QuorumInfos { // Check if the operator is a member of the quorum - if _, ok := operatorState.Operators[quorumHeader.QuorumID]; !ok { + chunks, assignment, params, err := v.preprocessBlob(quorumHeader, blob, operatorState) + if err == ErrBlobQuorumSkip { continue - } - - // Get the assignments for the quorum - assignment, info, err := v.assignment.GetOperatorAssignment( - operatorState, - quorumHeader.QuorumID, - quorumHeader.QuantizationFactor, - v.operatorID, - ) - if err != nil { - return err - } - - // Validate the number of chunks - if assignment.NumChunks == 0 { - continue - } - if assignment.NumChunks != uint(len(blob.Bundles[quorumHeader.QuorumID])) { - return errors.New("number of 
chunks does not match assignment") - } - - chunkLength, err := v.assignment.GetChunkLengthFromHeader(operatorState, quorumHeader) - if err != nil { + } else if err != nil { return err - } - - // Get the chunk length - chunks := blob.Bundles[quorumHeader.QuorumID] - for _, chunk := range chunks { - if uint(chunk.Length()) != chunkLength { - return ErrChunkLengthMismatch + } else { + // Check the received chunks against the commitment + indices := assignment.GetIndices() + samples := make([]Sample, len(chunks)) + for ind := range chunks { + samples[ind] = Sample{ + Commitment: blob.BlobHeader.BlobCommitments.Commitment, + Chunk: chunks[ind], + EvalIndex: uint(indices[ind]), + BlobIndex: i, + } } - } - - // Validate the chunk length - numOperators := uint(len(operatorState.Operators[quorumHeader.QuorumID])) - if chunkLength*quorumHeader.QuantizationFactor*numOperators != quorumHeader.EncodedBlobLength { - return ErrInvalidHeader - } - // Get Encoding Params - params := EncodingParams{ChunkLength: chunkLength, NumChunks: info.TotalChunks} - - // Check the received chunks against the commitment - indices := assignment.GetIndices() - samples := make([]Sample, 0) - for ind := range chunks { - sample := Sample{ - Commitment: blob.BlobHeader.BlobCommitments.Commitment, - Chunk: chunks[ind], - EvalIndex: uint(indices[ind]), - BlobIndex: i, + // Add into subBatch + subBatch, ok := subBatchMap[*params] + if !ok { + subBatch.Samples = samples + subBatch.NumBlobs = 1 + } else { + subBatch.Samples = append(subBatch.Samples, samples...) + subBatch.NumBlobs += 1 } - samples = append(samples, sample) - } - - // Sort into subBatch - subBatch, ok := subBatchMap[params] - if !ok { - subBatch.Samples = samples - subBatch.NumBlobs = 1 - } else { - subBatch.Samples = append(subBatch.Samples, samples...) 
- subBatch.NumBlobs += 1 } } } From 8a8123f630856214ef9342a6b7c1737ecdc9a986 Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Thu, 30 Nov 2023 07:53:22 +0000 Subject: [PATCH 4/6] add batch verification parallelization, fix bug --- core/data.go | 8 +++--- core/encoding/encoder.go | 5 +--- core/validator.go | 37 ++++++++++++++++++------- pkg/encoding/kzgEncoder/multiframe.go | 40 +-------------------------- 4 files changed, 33 insertions(+), 57 deletions(-) diff --git a/core/data.go b/core/data.go index cd518e7354..75fbce5e5b 100644 --- a/core/data.go +++ b/core/data.go @@ -163,10 +163,10 @@ func (cb Bundles) Serialize() ([][][]byte, error) { // Sample is a chunk with associated metadata used by the Universal Batch Verifier type Sample struct { - Commitment *Commitment - Chunk *Chunk - EvalIndex ChunkNumber - BlobIndex int + Commitment *Commitment + Chunk *Chunk + AssignmentIndex ChunkNumber + BlobIndex int } // SubBatch is a part of the whole Batch with identical Encoding Parameters, i.e. (ChunkLen, NumChunk) diff --git a/core/encoding/encoder.go b/core/encoding/encoder.go index 241dd709ea..ca76b84ecb 100644 --- a/core/encoding/encoder.go +++ b/core/encoding/encoder.go @@ -2,7 +2,6 @@ package encoding import ( "crypto/sha256" - "fmt" "github.com/Layr-Labs/eigenda/core" "github.com/Layr-Labs/eigenda/pkg/encoding/encoder" @@ -61,7 +60,6 @@ func (e *Encoder) Encode(data []byte, params core.EncodingParams) (core.BlobComm } } encParams := toEncParams(params) - fmt.Println("encParams", encParams) enc, err := e.EncoderGroup.GetKzgEncoder(encParams) if err != nil { @@ -136,7 +134,6 @@ func (e *Encoder) VerifyChunks(chunks []*core.Chunk, indices []core.ChunkNumber, // convert struct understandable by the crypto library func (e *Encoder) UniversalVerifyChunks(params core.EncodingParams, samplesCore []core.Sample, numBlobs int) error { encParams := toEncParams(params) - samples := make([]kzgEncoder.Sample, len(samplesCore)) for i, sc := range samplesCore { @@ -145,7 +142,7 @@ func 
(e *Encoder) UniversalVerifyChunks(params core.EncodingParams, samplesCore Proof: sc.Chunk.Proof, Row: sc.BlobIndex, Coeffs: sc.Chunk.Coeffs, - X: sc.EvalIndex, + X: sc.AssignmentIndex, } samples[i] = sample } diff --git a/core/validator.go b/core/validator.go index 0497b45c88..a57550a0e2 100644 --- a/core/validator.go +++ b/core/validator.go @@ -128,7 +128,6 @@ func (v *chunkValidator) UpdateOperatorID(operatorID OperatorID) { } func (v *chunkValidator) ValidateBatch(blobs []*BlobMessage, operatorState *OperatorState) error { - subBatchMap := make(map[EncodingParams]SubBatch) for i, blob := range blobs { @@ -155,18 +154,19 @@ func (v *chunkValidator) ValidateBatch(blobs []*BlobMessage, operatorState *Oper samples := make([]Sample, len(chunks)) for ind := range chunks { samples[ind] = Sample{ - Commitment: blob.BlobHeader.BlobCommitments.Commitment, - Chunk: chunks[ind], - EvalIndex: uint(indices[ind]), - BlobIndex: i, + Commitment: blob.BlobHeader.BlobCommitments.Commitment, + Chunk: chunks[ind], + AssignmentIndex: uint(indices[ind]), + BlobIndex: i, } } - // Add into subBatch subBatch, ok := subBatchMap[*params] if !ok { - subBatch.Samples = samples - subBatch.NumBlobs = 1 + subBatchMap[*params] = SubBatch{ + Samples: samples, + NumBlobs: 1, + } } else { subBatch.Samples = append(subBatch.Samples, samples...) 
subBatch.NumBlobs += 1 @@ -175,14 +175,31 @@ func (v *chunkValidator) ValidateBatch(blobs []*BlobMessage, operatorState *Oper } } - // ToDo add parallelization for verification for each subBatch + numSubBatch := len(subBatchMap) + out := make(chan error, numSubBatch) for params, subBatch := range subBatchMap { - err := v.encoder.UniversalVerifyChunks(params, subBatch.Samples, subBatch.NumBlobs) + params := params + subBatch := subBatch + go v.UniversalVerifyWorker(params, &subBatch, out) + } + + for i := 0; i < numSubBatch; i++ { + err := <-out if err != nil { return err } } return nil +} + +func (v *chunkValidator) UniversalVerifyWorker(params EncodingParams, subBatch *SubBatch, out chan error) { + + err := v.encoder.UniversalVerifyChunks(params, subBatch.Samples, subBatch.NumBlobs) + if err != nil { + out <- err + return + } + out <- nil } diff --git a/pkg/encoding/kzgEncoder/multiframe.go b/pkg/encoding/kzgEncoder/multiframe.go index f1e80e0e6f..462b8fd663 100644 --- a/pkg/encoding/kzgEncoder/multiframe.go +++ b/pkg/encoding/kzgEncoder/multiframe.go @@ -55,10 +55,7 @@ func (group *KzgEncoderGroup) UniversalVerify(params rs.EncodingParams, samples D := params.ChunkLen n := len(samples) - - //rInt := uint64(22894) - //var r bls.Fr - //bls.AsFr(&r, rInt) + fmt.Printf("Batch verify %v frames of %v symbols out of %v blobs \n", n, params.ChunkLen, m) r, err := GenRandomness(params, samples, m) if err != nil { @@ -66,11 +63,8 @@ func (group *KzgEncoderGroup) UniversalVerify(params rs.EncodingParams, samples } randomsFr := make([]bls.Fr, n) - //bls.AsFr(&randomsFr[0], rInt) bls.CopyFr(&randomsFr[0], &r) - fmt.Println("random", r.String()) - // lhs var tmp bls.Fr @@ -85,7 +79,6 @@ func (group *KzgEncoderGroup) UniversalVerify(params rs.EncodingParams, samples bls.CopyG1(&proofs[i], &samples[i].Proof) } - fmt.Printf("len proof %v len ran %v\n", len(proofs), len(randomsFr)) // lhs g1 lhsG1 := bls.LinCombG1(proofs, randomsFr) @@ -98,9 +91,6 @@ func (group *KzgEncoderGroup) 
UniversalVerify(params rs.EncodingParams, samples // rhs g1 // get commitments commits := make([]bls.G1Point, m) - //for k := 0 ; k < n ; k++ { - // commits[k] = samples[k].Commitment - //} // get coeffs ftCoeffs := make([]bls.Fr, m) for k := 0; k < n; k++ { @@ -109,7 +99,6 @@ func (group *KzgEncoderGroup) UniversalVerify(params rs.EncodingParams, samples bls.AddModFr(&ftCoeffs[row], &ftCoeffs[row], &randomsFr[k]) bls.CopyG1(&commits[row], &s.Commitment) } - fmt.Printf("len commit %v len coeff %v\n", len(commits), len(ftCoeffs)) ftG1 := bls.LinCombG1(commits, ftCoeffs) @@ -169,30 +158,3 @@ func (group *KzgEncoderGroup) UniversalVerify(params rs.EncodingParams, samples return errors.New("Universal Verify Incorrect paring") } } - -//func SingleVerify(ks *kzg.KZGSettings, commitment *bls.G1Point, x *bls.Fr, coeffs []bls.Fr, proof bls.G1Point) bool { -// var xPow bls.Fr -// bls.CopyFr(&xPow, &bls.ONE) - -// var tmp bls.Fr -// for i := 0; i < len(coeffs); i++ { -// bls.MulModFr(&tmp, &xPow, x) -// bls.CopyFr(&xPow, &tmp) -// } - -// [x^n]_2 -// var xn2 bls.G2Point -// bls.MulG2(&xn2, &bls.GenG2, &xPow) - -// [s^n - x^n]_2 -// var xnMinusYn bls.G2Point -// bls.SubG2(&xnMinusYn, &ks.Srs.G2[len(coeffs)], &xn2) - -// [interpolation_polynomial(s)]_1 -// is1 := bls.LinCombG1(ks.Srs.G1[:len(coeffs)], coeffs) -// [commitment - interpolation_polynomial(s)]_1 = [commit]_1 - [interpolation_polynomial(s)]_1 -// var commitMinusInterpolation bls.G1Point -// bls.SubG1(&commitMinusInterpolation, commitment, is1) - -// return bls.PairingsVerify(&commitMinusInterpolation, &bls.GenG2, &proof, &xnMinusYn) -//} From 65fc825441cfba990b27630e606bfa4d9318503f Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Thu, 30 Nov 2023 17:46:25 +0000 Subject: [PATCH 5/6] add comments --- core/validator.go | 6 ++++-- pkg/encoding/kzgEncoder/multiframe.go | 16 ++++++++++++---- 2 files changed, 16 insertions(+), 6 deletions(-) diff --git a/core/validator.go b/core/validator.go index a57550a0e2..d181b0e865 100644 
--- a/core/validator.go +++ b/core/validator.go @@ -33,6 +33,7 @@ func NewChunkValidator(enc Encoder, asgn AssignmentCoordinator, cst ChainState, } } +// preprocessBlob for each Quorum func (v *chunkValidator) preprocessBlob(quorumHeader *BlobQuorumInfo, blob *BlobMessage, operatorState *OperatorState) ([]*Chunk, *Assignment, *EncodingParams, error) { if quorumHeader.AdversaryThreshold >= quorumHeader.QuorumThreshold { return nil, nil, nil, errors.New("invalid header: quorum threshold does not exceed adversary threshold") @@ -175,12 +176,13 @@ func (v *chunkValidator) ValidateBatch(blobs []*BlobMessage, operatorState *Oper } } + // Parallelize the universal verification for each subBatch numSubBatch := len(subBatchMap) out := make(chan error, numSubBatch) for params, subBatch := range subBatchMap { params := params subBatch := subBatch - go v.UniversalVerifyWorker(params, &subBatch, out) + go v.universalVerifyWorker(params, &subBatch, out) } for i := 0; i < numSubBatch; i++ { @@ -193,7 +195,7 @@ func (v *chunkValidator) ValidateBatch(blobs []*BlobMessage, operatorState *Oper return nil } -func (v *chunkValidator) UniversalVerifyWorker(params EncodingParams, subBatch *SubBatch, out chan error) { +func (v *chunkValidator) universalVerifyWorker(params EncodingParams, subBatch *SubBatch, out chan error) { err := v.encoder.UniversalVerifyChunks(params, subBatch.Samples, subBatch.NumBlobs) if err != nil { diff --git a/pkg/encoding/kzgEncoder/multiframe.go b/pkg/encoding/kzgEncoder/multiframe.go index 462b8fd663..474bc35a67 100644 --- a/pkg/encoding/kzgEncoder/multiframe.go +++ b/pkg/encoding/kzgEncoder/multiframe.go @@ -10,12 +10,14 @@ import ( bls "github.com/Layr-Labs/eigenda/pkg/kzg/bn254" ) +// Sample is the basic unit for a verification +// A blob may contain multiple Samples type Sample struct { Commitment bls.G1Point Proof bls.G1Point - Row int + Row int // corresponds to a row in the verification matrix Coeffs []bls.Fr - X uint // X is int , at which index is 
evaluated + X uint // X is the assignment index of the chunk } // generate a random value using Fiat Shamir transform @@ -47,7 +49,12 @@ func GenRandomness(params rs.EncodingParams, samples []Sample, m int) (bls.Fr, e return randomFr, nil } -// m is number of blob +// UniversalVerify implements batch verification on a set of chunks given the same chunk dimension (chunkLen, numChunk). +// The details are given in the Ethereum Research post whose authors are George Kadianakis, Ansgar Dietrichs, Dankrad Feist +// https://ethresear.ch/t/a-universal-verification-equation-for-data-availability-sampling/13240 +// +// m is the number of blobs; samples is a list of chunks +// Inside the code, ft stands for first term; st for the second term; tt for the third term func (group *KzgEncoderGroup) UniversalVerify(params rs.EncodingParams, samples []Sample, m int) error { verifier, _ := group.GetKzgVerifier(params) ks := verifier.Ks @@ -122,6 +129,8 @@ func (group *KzgEncoderGroup) UniversalVerify(params rs.EncodingParams, samples leadingDs := make([]bls.Fr, n) for k := 0; k < n; k++ { + // It is important to obtain the leading coset here, + // as the params from the eigenda Core might not have NumChunks as a power of 2 x, err := rs.GetLeadingCosetIndex( uint64(samples[k].X), params.NumChunks, @@ -141,7 +150,6 @@ func (group *KzgEncoderGroup) UniversalVerify(params rs.EncodingParams, samples bls.CopyFr(&leadingDs[k], &hPow) } - // for k := 0; k < n; k++ { rk := randomsFr[k] bls.MulModFr(&ttCoeffs[k], &rk, &leadingDs[k]) From e87d8e308e18b231ed04a1db7699ceb1b2ad7706 Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Thu, 30 Nov 2023 18:00:58 +0000 Subject: [PATCH 6/6] fix lint --- core/mock/validator.go | 5 +++++ node/node.go | 18 +++++++++--------- 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/core/mock/validator.go b/core/mock/validator.go index e8b953f392..a66d38b3de 100644 --- a/core/mock/validator.go +++ b/core/mock/validator.go @@ -23,6 +23,11 @@ func NewMockChunkValidator() 
*MockChunkValidator { return &MockChunkValidator{} } +func (v *MockChunkValidator) ValidateBatch(blobs []*core.BlobMessage, operatorState *core.OperatorState) error { + args := v.Called(blobs, operatorState) + return args.Error(0) +} + func (v *MockChunkValidator) ValidateBlob(blob *core.BlobMessage, operatorState *core.OperatorState) error { args := v.Called(blob, operatorState) return args.Error(0) diff --git a/node/node.go b/node/node.go index 654c21a61e..77d65b9272 100644 --- a/node/node.go +++ b/node/node.go @@ -430,12 +430,12 @@ func buildSdkClients(config *Config, logger common.Logger) (*constructor.Clients return sdkClients, nil } -func (n *Node) validateBlob(ctx context.Context, blob *core.BlobMessage, operatorState *core.OperatorState, out chan error) { - err := n.Validator.ValidateBlob(blob, operatorState) - if err != nil { - out <- err - return - } - - out <- nil -} +//func (n *Node) validateBlob(ctx context.Context, blob *core.BlobMessage, operatorState *core.OperatorState, out chan error) { +// err := n.Validator.ValidateBlob(blob, operatorState) +// if err != nil { +// out <- err +// return +// } + +// out <- nil +//}