diff --git a/core/test/core_test.go b/core/test/core_test.go index 5311697f88..396b0f0424 100644 --- a/core/test/core_test.go +++ b/core/test/core_test.go @@ -67,82 +67,88 @@ func makeTestBlob(t *testing.T, length int, securityParams []*core.SecurityParam return blob } -// prepareBatch takes in a single blob, encodes it, generates the associated assignments, and the batch header. +// prepareBatch takes in multiple blob, encodes them, generates the associated assignments, and the batch header. // These are the products that a disperser will need in order to disperse data to the DA nodes. -func prepareBatch(t *testing.T, cst core.IndexedChainState, blob core.Blob, quorumIndex uint, quantizationFactor uint, bn uint) (core.EncodedBlob, core.BatchHeader) { +func prepareBatch(t *testing.T, cst core.IndexedChainState, blobs []core.Blob, quorumIndex uint, quantizationFactor uint, bn uint) ([]core.EncodedBlob, core.BatchHeader) { - quorumID := blob.RequestHeader.SecurityParams[quorumIndex].QuorumID - quorums := []core.QuorumID{quorumID} - - state, err := cst.GetOperatorState(context.Background(), bn, quorums) - if err != nil { - t.Fatal(err) + batchHeader := core.BatchHeader{ + ReferenceBlockNumber: bn, + BatchRoot: [32]byte{}, } - assignments, info, err := asn.GetAssignments(state, quorumID, quantizationFactor) - if err != nil { - t.Fatal(err) - } + numBlob := len(blobs) + var encodedBlobs []core.EncodedBlob = make([]core.EncodedBlob, numBlob) - blobSize := uint(len(blob.Data)) - blobLength := core.GetBlobLength(blobSize) - adversaryThreshold := blob.RequestHeader.SecurityParams[quorumIndex].AdversaryThreshold - quorumThreshold := blob.RequestHeader.SecurityParams[quorumIndex].QuorumThreshold + for z, blob := range blobs { + quorumID := blob.RequestHeader.SecurityParams[quorumIndex].QuorumID + quorums := []core.QuorumID{quorumID} - numOperators := uint(len(state.Operators[quorumID])) + state, err := cst.GetOperatorState(context.Background(), bn, quorums) + if err != nil { 
+ t.Fatal(err) + } - chunkLength, err := asn.GetMinimumChunkLength(numOperators, blobLength, quantizationFactor, quorumThreshold, adversaryThreshold) - if err != nil { - t.Fatal(err) - } + assignments, info, err := asn.GetAssignments(state, quorumID, quantizationFactor) + if err != nil { + t.Fatal(err) + } - params, err := core.GetEncodingParams(chunkLength, info.TotalChunks) - if err != nil { - t.Fatal(err) - } + blobSize := uint(len(blob.Data)) + blobLength := core.GetBlobLength(blobSize) + adversaryThreshold := blob.RequestHeader.SecurityParams[quorumIndex].AdversaryThreshold + quorumThreshold := blob.RequestHeader.SecurityParams[quorumIndex].QuorumThreshold - commitments, chunks, err := enc.Encode(blob.Data, params) - if err != nil { - t.Fatal(err) - } + numOperators := uint(len(state.Operators[quorumID])) - quorumHeader := &core.BlobQuorumInfo{ - SecurityParam: core.SecurityParam{ - QuorumID: quorumID, - AdversaryThreshold: adversaryThreshold, - QuorumThreshold: quorumThreshold, - }, - QuantizationFactor: quantizationFactor, - EncodedBlobLength: params.ChunkLength * quantizationFactor * numOperators, - } + chunkLength, err := asn.GetMinimumChunkLength(numOperators, blobLength, quantizationFactor, quorumThreshold, adversaryThreshold) + if err != nil { + t.Fatal(err) + } - blobHeader := &core.BlobHeader{ - BlobCommitments: core.BlobCommitments{ - Commitment: commitments.Commitment, - LengthProof: commitments.LengthProof, - Length: commitments.Length, - }, - QuorumInfos: []*core.BlobQuorumInfo{quorumHeader}, - } + params, err := core.GetEncodingParams(chunkLength, info.TotalChunks) + if err != nil { + t.Fatal(err) + } - batchHeader := core.BatchHeader{ - ReferenceBlockNumber: bn, - BatchRoot: [32]byte{}, - } + commitments, chunks, err := enc.Encode(blob.Data, params) + if err != nil { + t.Fatal(err) + } - var encodedBlob core.EncodedBlob = make(map[core.OperatorID]*core.BlobMessage, len(assignments)) + quorumHeader := &core.BlobQuorumInfo{ + SecurityParam: 
core.SecurityParam{ + QuorumID: quorumID, + AdversaryThreshold: adversaryThreshold, + QuorumThreshold: quorumThreshold, + }, + QuantizationFactor: quantizationFactor, + EncodedBlobLength: params.ChunkLength * quantizationFactor * numOperators, + } - for id, assignment := range assignments { - bundles := map[core.QuorumID]core.Bundle{ - quorumID: chunks[assignment.StartIndex : assignment.StartIndex+assignment.NumChunks], + blobHeader := &core.BlobHeader{ + BlobCommitments: core.BlobCommitments{ + Commitment: commitments.Commitment, + LengthProof: commitments.LengthProof, + Length: commitments.Length, + }, + QuorumInfos: []*core.BlobQuorumInfo{quorumHeader}, } - encodedBlob[id] = &core.BlobMessage{ - BlobHeader: blobHeader, - Bundles: bundles, + + var encodedBlob core.EncodedBlob = make(map[core.OperatorID]*core.BlobMessage, len(assignments)) + for id, assignment := range assignments { + bundles := map[core.QuorumID]core.Bundle{ + quorumID: chunks[assignment.StartIndex : assignment.StartIndex+assignment.NumChunks], + } + encodedBlob[id] = &core.BlobMessage{ + BlobHeader: blobHeader, + Bundles: bundles, + } } + encodedBlobs[z] = encodedBlob + } - return encodedBlob, batchHeader + return encodedBlobs, batchHeader } @@ -155,11 +161,30 @@ func checkBatch(t *testing.T, cst core.IndexedChainState, encodedBlob core.Encod state, _ := cst.GetIndexedOperatorState(context.Background(), header.ReferenceBlockNumber, quorums) for id := range state.IndexedOperators { - + val.UpdateOperatorID(id) blobMessage := encodedBlob[id] + err := val.ValidateBlob(blobMessage, state.OperatorState) + assert.NoError(t, err) + } + +} + +// checkBatchByUniversalVerifier runs the verification logic for each DA node in the current OperatorState, and returns an error if any of +// the DA nodes' validation checks fails +func checkBatchByUniversalVerifier(t *testing.T, cst core.IndexedChainState, encodedBlobs []core.EncodedBlob, header core.BatchHeader) { + val := core.NewChunkValidator(enc, asn, cst, 
[32]byte{}) + + quorums := []core.QuorumID{0} + state, _ := cst.GetIndexedOperatorState(context.Background(), header.ReferenceBlockNumber, quorums) + numBlob := len(encodedBlobs) + for id := range state.IndexedOperators { val.UpdateOperatorID(id) - err := val.ValidateBlob(blobMessage, state.OperatorState) + var blobMessages []*core.BlobMessage = make([]*core.BlobMessage, numBlob) + for z, encodedBlob := range encodedBlobs { + blobMessages[z] = encodedBlob[id] + } + err := val.ValidateBatch(blobMessages, state.OperatorState) assert.NoError(t, err) } @@ -167,6 +192,7 @@ func checkBatch(t *testing.T, cst core.IndexedChainState, encodedBlob core.Encod func TestCoreLibrary(t *testing.T) { + numBlob := 5 // must be greater than 0 blobLengths := []int{1, 64, 1000} quantizationFactors := []uint{1, 10} operatorCounts := []uint{1, 2, 4, 10, 30} @@ -184,28 +210,43 @@ func TestCoreLibrary(t *testing.T) { }, } - for _, blobLength := range blobLengths { - for _, quantizationFactor := range quantizationFactors { - for _, operatorCount := range operatorCounts { - for _, securityParam := range securityParams { + quorumIndex := uint(0) + bn := uint(0) - t.Run(fmt.Sprintf("blobLength=%v, quantizationFactor=%v, operatorCount=%v, securityParams=%v", blobLength, quantizationFactor, operatorCount, securityParam), func(t *testing.T) { + for _, operatorCount := range operatorCounts { + cst, err := mock.NewChainDataMock(core.OperatorIndex(operatorCount)) + assert.NoError(t, err) - blob := makeTestBlob(t, blobLength, []*core.SecurityParam{securityParam}) + // batch can only be tested per operatorCount, because the assignment would be wrong otherwise + for _, blobLength := range blobLengths { + batches := make([]core.EncodedBlob, 0) + batchHeader := core.BatchHeader{ + ReferenceBlockNumber: bn, + BatchRoot: [32]byte{}, + } + for _, quantizationFactor := range quantizationFactors { + for _, securityParam := range securityParams { - cst, err := 
mock.NewChainDataMock(core.OperatorIndex(operatorCount)) - assert.NoError(t, err) + t.Run(fmt.Sprintf("blobLength=%v, quantizationFactor=%v, operatorCount=%v, securityParams=%v", blobLength, quantizationFactor, operatorCount, securityParam), func(t *testing.T) { - quorumIndex := uint(0) - bn := uint(0) + blobs := make([]core.Blob, numBlob) + for i := 0; i < numBlob; i++ { + blobs[i] = makeTestBlob(t, blobLength, []*core.SecurityParam{securityParam}) + } - batch, header := prepareBatch(t, cst, blob, quorumIndex, quantizationFactor, bn) + batch, header := prepareBatch(t, cst, blobs, quorumIndex, quantizationFactor, bn) + batches = append(batches, batch...) - checkBatch(t, cst, batch, header) + checkBatch(t, cst, batch[0], header) }) } + } + t.Run(fmt.Sprintf("universal verifier operatorCount=%v over %v blobs", operatorCount, len(batches)), func(t *testing.T) { + checkBatchByUniversalVerifier(t, cst, batches, batchHeader) + }) } + } } diff --git a/core/validator.go b/core/validator.go index d181b0e865..04a016b6f5 100644 --- a/core/validator.go +++ b/core/validator.go @@ -129,9 +129,9 @@ func (v *chunkValidator) UpdateOperatorID(operatorID OperatorID) { } func (v *chunkValidator) ValidateBatch(blobs []*BlobMessage, operatorState *OperatorState) error { - subBatchMap := make(map[EncodingParams]SubBatch) + subBatchMap := make(map[EncodingParams]*SubBatch) - for i, blob := range blobs { + for _, blob := range blobs { if len(blob.Bundles) != len(blob.BlobHeader.QuorumInfos) { return errors.New("number of bundles does not match number of quorums") } @@ -151,6 +151,12 @@ func (v *chunkValidator) ValidateBatch(blobs []*BlobMessage, operatorState *Oper return err } else { // Check the received chunks against the commitment + blobIndex := 0 + subBatch, ok := subBatchMap[*params] + if ok { + blobIndex = subBatch.NumBlobs + } + indices := assignment.GetIndices() samples := make([]Sample, len(chunks)) for ind := range chunks { @@ -158,13 +164,13 @@ func (v *chunkValidator) 
ValidateBatch(blobs []*BlobMessage, operatorState *Oper Commitment: blob.BlobHeader.BlobCommitments.Commitment, Chunk: chunks[ind], AssignmentIndex: uint(indices[ind]), - BlobIndex: i, + BlobIndex: blobIndex, } } - // Add into subBatch - subBatch, ok := subBatchMap[*params] + + // update subBatch if !ok { - subBatchMap[*params] = SubBatch{ + subBatchMap[*params] = &SubBatch{ Samples: samples, NumBlobs: 1, } @@ -182,7 +188,7 @@ func (v *chunkValidator) ValidateBatch(blobs []*BlobMessage, operatorState *Oper for params, subBatch := range subBatchMap { params := params subBatch := subBatch - go v.universalVerifyWorker(params, &subBatch, out) + go v.universalVerifyWorker(params, subBatch, out) } for i := 0; i < numSubBatch; i++ { diff --git a/pkg/encoding/kzgEncoder/multiframe.go b/pkg/encoding/kzgEncoder/multiframe.go index 474bc35a67..da48ed6167 100644 --- a/pkg/encoding/kzgEncoder/multiframe.go +++ b/pkg/encoding/kzgEncoder/multiframe.go @@ -56,6 +56,14 @@ func GenRandomness(params rs.EncodingParams, samples []Sample, m int) (bls.Fr, e // m is number of blob, samples is a list of chunks // Inside the code, ft stands for first term; st for the second term; tt for the third term func (group *KzgEncoderGroup) UniversalVerify(params rs.EncodingParams, samples []Sample, m int) error { + // precheck + for i, s := range samples { + if s.Row >= m { + fmt.Printf("sample %v has %v Row, but there are only %v blobs\n", i, s.Row, m) + return errors.New("sample.Row and numBlob are inconsistent") + } + } + verifier, _ := group.GetKzgVerifier(params) ks := verifier.Ks diff --git a/pkg/encoding/kzgEncoder/multiframe_test.go b/pkg/encoding/kzgEncoder/multiframe_test.go new file mode 100644 index 0000000000..a87e0af813 --- /dev/null +++ b/pkg/encoding/kzgEncoder/multiframe_test.go @@ -0,0 +1,50 @@ +package kzgEncoder + +import ( + "testing" + + rs "github.com/Layr-Labs/eigenda/pkg/encoding/encoder" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + 
+func TestUniversalVerify(t *testing.T) { + teardownSuite := setupSuite(t) + defer teardownSuite(t) + + group, _ := NewKzgEncoderGroup(kzgConfig) + params := rs.GetEncodingParams(numSys, numPar, uint64(len(GETTYSBURG_ADDRESS_BYTES))) + enc, err := group.NewKzgEncoder(params) + require.Nil(t, err) + + numBlob := 5 + samples := make([]Sample, 0) + for z := 0; z < numBlob; z++ { + inputFr := rs.ToFrArray(GETTYSBURG_ADDRESS_BYTES) + + commit, _, frames, fIndices, err := enc.Encode(inputFr) + require.Nil(t, err) + + // create samples + for i := 0; i < len(frames); i++ { + f := frames[i] + j := fIndices[i] + + q, err := rs.GetLeadingCosetIndex(uint64(i), numSys+numPar) + require.Nil(t, err) + + assert.Equal(t, j, q, "leading coset inconsistency") + + sample := Sample{ + Commitment: commit, + Proof: f.Proof, + Row: z, + Coeffs: f.Coeffs, + X: i, + } + samples = append(samples, sample) + } + } + + assert.Nil(t, group.UniversalVerify(params, samples, numBlob), "universal batch verification failed\n") +}