Commit

working batch verification
Ubuntu committed Nov 30, 2023
1 parent c146f1c commit 43f6bee
Showing 7 changed files with 359 additions and 19 deletions.
7 changes: 7 additions & 0 deletions core/data.go
@@ -160,3 +160,10 @@ func (cb Bundles) Serialize() ([][][]byte, error) {
}
return data, nil
}

type Sample struct {
Commitment *Commitment
Chunk *Chunk // contain proof and coeffs
EvalIndex ChunkNumber
BlobIndex int
}
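
For orientation, each Sample ties one received chunk back to the commitment of the blob it was encoded from, the evaluation index its proof opens at, and the blob's position within the batch. A minimal sketch of how a caller might assemble the samples for a single blob follows; the helper name and its arguments are hypothetical, not part of this commit.

// Hypothetical helper, assumed to sit alongside the Sample struct in the core
// package: pair each chunk of one blob with its commitment and eval index.
func makeSamples(commitment *Commitment, chunks []*Chunk, indices []ChunkNumber, blobIndex int) []Sample {
	samples := make([]Sample, 0, len(chunks))
	for i, chunk := range chunks {
		samples = append(samples, Sample{
			Commitment: commitment,
			Chunk:      chunk,
			EvalIndex:  indices[i],
			BlobIndex:  blobIndex,
		})
	}
	return samples
}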
3 changes: 3 additions & 0 deletions core/encoding.go
@@ -38,6 +38,9 @@ type Encoder interface {
// VerifyChunks takes in the chunks, indices, commitments, and encoding parameters and returns an error if the chunks are invalid.
VerifyChunks(chunks []*Chunk, indices []ChunkNumber, commitments BlobCommitments, params EncodingParams) error

// UniversalVerifyChunks takes in the encoding parameters, a batch of samples (possibly drawn from many blobs), and the number of blobs, and returns an error if any chunk proof in the batch is invalid.
UniversalVerifyChunks(params EncodingParams, samples []Sample, numBlobs int) error

// VerifyBlobLength takes in the commitments and returns an error if the blob length is invalid.
VerifyBlobLength(commitments BlobCommitments) error

46 changes: 46 additions & 0 deletions core/encoding/encoder.go
@@ -2,6 +2,9 @@ package encoding

import (
"crypto/sha256"
"errors"
"fmt"
"log"

"github.com/Layr-Labs/eigenda/core"
"github.com/Layr-Labs/eigenda/pkg/encoding/encoder"
@@ -60,6 +63,7 @@ func (e *Encoder) Encode(data []byte, params core.EncodingParams) (core.BlobComm
}
}
encParams := toEncParams(params)
fmt.Println("encParams", encParams)

enc, err := e.EncoderGroup.GetKzgEncoder(encParams)
if err != nil {
@@ -78,6 +82,24 @@ func (e *Encoder) Encode(data []byte, params core.EncodingParams) (core.BlobComm
Coeffs: frame.Coeffs,
Proof: frame.Proof,
}

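// Self-check each produced frame: recompute its leading coset and verify the
// proof against the blob commitment; log.Fatalf aborts the process on failure.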
q, _ := encoder.GetLeadingCosetIndex(uint64(ind), uint64(len(chunks)))
lc := enc.Fs.ExpandedRootsOfUnity[uint64(q)]
ok := frame.Verify(enc.Ks, commit, &lc)
if !ok {
log.Fatalf("Proof %v failed\n", ind)
} else {

fmt.Println("proof", frame.Proof.String())
fmt.Println("commitment", commit.String())
for i := 0; i < len(frame.Coeffs); i++ {
fmt.Printf("%v ", frame.Coeffs[i].String())
}
fmt.Println("q", q, lc.String())

fmt.Println("***************tested frame and pass")
}

}

length := uint(len(encoder.ToFrArray(data)))
@@ -131,6 +153,30 @@ func (e *Encoder) VerifyChunks(chunks []*core.Chunk, indices []core.ChunkNumber,

}

// UniversalVerifyChunks converts the samples into the structs understood by the crypto library and runs a single batched verification over all of them
func (e *Encoder) UniversalVerifyChunks(params core.EncodingParams, samplesCore []core.Sample, numBlobs int) error {
encParams := toEncParams(params)

samples := make([]kzgEncoder.Sample, len(samplesCore))

for i, sc := range samplesCore {
sample := kzgEncoder.Sample{
Commitment: *sc.Commitment.G1Point,
Proof: sc.Chunk.Proof,
Row: sc.BlobIndex,
Coeffs: sc.Chunk.Coeffs,
X: sc.EvalIndex,
}
samples[i] = sample
}

if !e.EncoderGroup.UniversalVerify(encParams, samples, numBlobs) {
return errors.New("universal batch verification failed")
}
return nil
}

// Decode takes in the chunks, indices, and encoding parameters and returns the decoded blob
// The result is trimmed to the given maxInputSize.
func (e *Encoder) Decode(chunks []*core.Chunk, indices []core.ChunkNumber, params core.EncodingParams, maxInputSize uint64) ([]byte, error) {
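UniversalVerifyChunks reports only that some proof in the batch is bad, so a caller that needs to identify the offending blob can fall back to the per-blob VerifyChunks path. A sketch of that pattern under assumed input shapes; the blobChunks struct and helper below are illustrative, not part of this commit.

package example

import (
	"errors"
	"fmt"

	"github.com/Layr-Labs/eigenda/core"
)

// blobChunks is a hypothetical container for one blob's verification inputs.
type blobChunks struct {
	commitments core.BlobCommitments
	chunks      []*core.Chunk
	indices     []core.ChunkNumber
}

// verifyWithFallback batches every chunk into one UniversalVerifyChunks call and,
// only if that fails, re-verifies blob by blob to locate the culprit.
func verifyWithFallback(enc core.Encoder, params core.EncodingParams, blobs []blobChunks) error {
	samples := make([]core.Sample, 0)
	for blobIndex, b := range blobs {
		for i, chunk := range b.chunks {
			samples = append(samples, core.Sample{
				Commitment: b.commitments.Commitment,
				Chunk:      chunk,
				EvalIndex:  b.indices[i],
				BlobIndex:  blobIndex,
			})
		}
	}
	if err := enc.UniversalVerifyChunks(params, samples, len(blobs)); err == nil {
		return nil
	}
	for i, b := range blobs {
		if err := enc.VerifyChunks(b.chunks, b.indices, b.commitments, params); err != nil {
			return fmt.Errorf("blob %d failed verification: %w", i, err)
		}
	}
	return errors.New("batched verification failed but every blob verified individually")
}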
6 changes: 6 additions & 0 deletions core/encoding/mock_encoder.go
@@ -27,6 +27,12 @@ func (e *MockEncoder) VerifyChunks(chunks []*core.Chunk, indices []core.ChunkNum
return args.Error(0)
}

func (e *MockEncoder) UniversalVerifyChunks(params core.EncodingParams, samples []core.Sample, numBlobs int) error {
args := e.Called(params, samples, numBlobs)
time.Sleep(e.Delay)
return args.Error(0)
}

func (e *MockEncoder) VerifyBlobLength(commitments core.BlobCommitments) error {

args := e.Called(commitments)
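A minimal test sketch for the new mock method, assuming (as the e.Called pattern suggests) that MockEncoder embeds testify's mock.Mock and that the import path matches the file location; neither is shown in this diff.

package encoding_test

import (
	"testing"

	"github.com/Layr-Labs/eigenda/core"
	coreencoding "github.com/Layr-Labs/eigenda/core/encoding"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
)

func TestUniversalVerifyChunksMock(t *testing.T) {
	enc := &coreencoding.MockEncoder{}
	// Stub the batched verification to succeed regardless of its arguments.
	enc.On("UniversalVerifyChunks", mock.Anything, mock.Anything, mock.Anything).Return(nil)

	err := enc.UniversalVerifyChunks(core.EncodingParams{}, []core.Sample{}, 0)
	assert.NoError(t, err)
	enc.AssertExpectations(t)
}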
105 changes: 105 additions & 0 deletions core/validator.go
@@ -2,6 +2,7 @@ package core

import (
"errors"
"fmt"
)

var (
@@ -10,6 +11,7 @@ var (
)

type ChunkValidator interface {
ValidateBatch([]*BlobMessage, *OperatorState) error
ValidateBlob(*BlobMessage, *OperatorState) error
UpdateOperatorID(OperatorID)
}
@@ -114,3 +116,106 @@ func (v *chunkValidator) ValidateBlob(blob *BlobMessage, operatorState *Operator
func (v *chunkValidator) UpdateOperatorID(operatorID OperatorID) {
v.operatorID = operatorID
}

func (v *chunkValidator) ValidateBatch(blobs []*BlobMessage, operatorState *OperatorState) error {

batchGroup := make(map[EncodingParams][]Sample)
numBlobMap := make(map[EncodingParams]int)

for i, blob := range blobs {
if len(blob.Bundles) != len(blob.BlobHeader.QuorumInfos) {
return errors.New("number of bundles does not match number of quorums")
}

// Validate the blob length
err := v.encoder.VerifyBlobLength(blob.BlobHeader.BlobCommitments)
if err != nil {
return err
}
// for each quorum
for _, quorumHeader := range blob.BlobHeader.QuorumInfos {
// Check if the operator is a member of the quorum
if _, ok := operatorState.Operators[quorumHeader.QuorumID]; !ok {
continue
}

// Get the assignments for the quorum
assignment, info, err := v.assignment.GetOperatorAssignment(
operatorState,
quorumHeader.QuorumID,
quorumHeader.QuantizationFactor,
v.operatorID,
)
if err != nil {
return err
}

// Validate the number of chunks
if assignment.NumChunks == 0 {
continue
}
if assignment.NumChunks != uint(len(blob.Bundles[quorumHeader.QuorumID])) {
return errors.New("number of chunks does not match assignment")
}

chunkLength, err := v.assignment.GetChunkLengthFromHeader(operatorState, quorumHeader)
if err != nil {
return err
}

// Get the chunk length
chunks := blob.Bundles[quorumHeader.QuorumID]
for _, chunk := range chunks {
if uint(chunk.Length()) != chunkLength {
return ErrChunkLengthMismatch
}
}

// Validate the chunk length
numOperators := uint(len(operatorState.Operators[quorumHeader.QuorumID]))
if chunkLength*quorumHeader.QuantizationFactor*numOperators != quorumHeader.EncodedBlobLength {
return ErrInvalidHeader
}

// Get Encoding Params
params := EncodingParams{ChunkLength: chunkLength, NumChunks: info.TotalChunks}

// TODO: wrap each params group's samples and blob count in a single struct
_, ok := batchGroup[params]
if !ok {
batchGroup[params] = make([]Sample, 0)
numBlobMap[params] = 1
} else {
numBlobMap[params] += 1
}

// Check the received chunks against the commitment
indices := assignment.GetIndices()
fmt.Println("indices", indices)
samples := make([]Sample, 0)
for ind := range chunks {
sample := Sample{
Commitment: blob.BlobHeader.BlobCommitments.Commitment,
Chunk: chunks[ind],
EvalIndex: uint(indices[ind]),
BlobIndex: i,
}
samples = append(samples, sample)
}
batchGroup[params] = append(batchGroup[params], samples...)
}
}

// TODO: parallelize the per-params verifications (see the sketch after this function)
fmt.Println("num batchGroup", len(batchGroup))
for params, samples := range batchGroup {
numBlobs := numBlobMap[params]
err := v.encoder.UniversalVerifyChunks(params, samples, numBlobs)
if err != nil {
return err
}
}

return nil
}
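
The per-parameter verifications in the final loop are independent of one another, so the TODO above could be addressed with an errgroup. A possible sketch, assuming an extra import of golang.org/x/sync/errgroup; this is not part of this commit.

// Hypothetical replacement for the sequential loop at the end of ValidateBatch.
var g errgroup.Group
for params, samples := range batchGroup {
	params, samples := params, samples // capture loop variables (pre-Go 1.22)
	numBlobs := numBlobMap[params]
	g.Go(func() error {
		return v.encoder.UniversalVerifyChunks(params, samples, numBlobs)
	})
}
// Wait blocks until every verification goroutine finishes and returns the
// first non-nil error, if any.
return g.Wait()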
20 changes: 1 addition & 19 deletions node/node.go
@@ -25,7 +25,6 @@ import (
"github.com/Layr-Labs/eigensdk-go/nodeapi"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/gammazero/workerpool"
)

const (
@@ -322,24 +321,7 @@ func (n *Node) ValidateBatch(ctx context.Context, header *core.BatchHeader, blob
return err
}

pool := workerpool.New(n.Config.NumBatchValidators)
out := make(chan error, len(blobs))
for _, blob := range blobs {
blob := blob
pool.Submit(func() {
n.validateBlob(ctx, blob, operatorState, out)
})
}

for i := 0; i < len(blobs); i++ {
err := <-out
if err != nil {
return err
}
}

return nil

return n.Validator.ValidateBatch(blobs, operatorState)
}

func (n *Node) updateSocketAddress(ctx context.Context, newSocketAddr string) {