Skip to content

Commit

Permalink
Refactor encoder (2 of N) (Layr-Labs#269)
Browse files Browse the repository at this point in the history
  • Loading branch information
mooselumph authored Feb 23, 2024
1 parent d080f52 commit 3ed5953
Show file tree
Hide file tree
Showing 95 changed files with 1,201 additions and 1,168 deletions.
3 changes: 2 additions & 1 deletion clients/disperser_client.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ import (
disperser_rpc "github.com/Layr-Labs/eigenda/api/grpc/disperser"
"github.com/Layr-Labs/eigenda/core"
"github.com/Layr-Labs/eigenda/disperser"
"github.com/Layr-Labs/eigenda/encoding"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
Expand Down Expand Up @@ -155,7 +156,7 @@ func (c *disperserClient) DisperseBlobAuthenticated(ctx context.Context, data []
}

authHeader := core.BlobAuthHeader{
BlobCommitments: core.BlobCommitments{},
BlobCommitments: encoding.BlobCommitments{},
AccountID: "",
Nonce: authHeaderReply.BlobAuthHeader.ChallengeParameter,
}
Expand Down
7 changes: 4 additions & 3 deletions clients/node_client.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ import (

"github.com/Layr-Labs/eigenda/api/grpc/node"
"github.com/Layr-Labs/eigenda/core"
"github.com/Layr-Labs/eigenda/encoding"
node_utils "github.com/Layr-Labs/eigenda/node/grpc"
"github.com/wealdtech/go-merkletree"
"google.golang.org/grpc"
Expand All @@ -14,7 +15,7 @@ import (

type RetrievedChunks struct {
OperatorID core.OperatorID
Chunks []*core.Chunk
Chunks []*encoding.Frame
Err error
}

Expand Down Expand Up @@ -117,9 +118,9 @@ func (c client) GetChunks(
return
}

chunks := make([]*core.Chunk, len(reply.GetChunks()))
chunks := make([]*encoding.Frame, len(reply.GetChunks()))
for i, data := range reply.GetChunks() {
chunk, err := new(core.Chunk).Deserialize(data)
chunk, err := new(encoding.Frame).Deserialize(data)
if err != nil {
chunksChan <- RetrievedChunks{
OperatorID: opID,
Expand Down
26 changes: 12 additions & 14 deletions clients/retrieval_client.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ import (

"github.com/Layr-Labs/eigenda/common"
"github.com/Layr-Labs/eigenda/core"
"github.com/Layr-Labs/eigenda/encoding"
"github.com/Layr-Labs/eigenda/pkg/kzg/bn254"
"github.com/gammazero/workerpool"
"github.com/wealdtech/go-merkletree"
Expand All @@ -27,7 +28,7 @@ type retrievalClient struct {
indexedChainState core.IndexedChainState
assignmentCoordinator core.AssignmentCoordinator
nodeClient NodeClient
encoder core.Encoder
verifier encoding.Verifier
numConnections int
}

Expand All @@ -38,7 +39,7 @@ func NewRetrievalClient(
chainState core.IndexedChainState,
assignmentCoordinator core.AssignmentCoordinator,
nodeClient NodeClient,
encoder core.Encoder,
verifier encoding.Verifier,
numConnections int,
) (*retrievalClient, error) {

Expand All @@ -47,7 +48,7 @@ func NewRetrievalClient(
indexedChainState: chainState,
assignmentCoordinator: assignmentCoordinator,
nodeClient: nodeClient,
encoder: encoder,
verifier: verifier,
numConnections: numConnections,
}, nil
}
Expand Down Expand Up @@ -115,14 +116,14 @@ func (r *retrievalClient) RetrieveBlob(
}

// Validate the blob length
err = r.encoder.VerifyBlobLength(blobHeader.BlobCommitments)
err = r.verifier.VerifyBlobLength(blobHeader.BlobCommitments)
if err != nil {
return nil, err
}

// Validate the commitments are equivalent
commitmentBatch := []core.BlobCommitments{blobHeader.BlobCommitments}
err = r.encoder.VerifyCommitEquivalenceBatch(commitmentBatch)
commitmentBatch := []encoding.BlobCommitments{blobHeader.BlobCommitments}
err = r.verifier.VerifyCommitEquivalenceBatch(commitmentBatch)
if err != nil {
return nil, err
}
Expand All @@ -143,13 +144,10 @@ func (r *retrievalClient) RetrieveBlob(
})
}

encodingParams, err := core.GetEncodingParams(quorumHeader.ChunkLength, info.TotalChunks)
if err != nil {
return nil, err
}
encodingParams := encoding.ParamsFromMins(quorumHeader.ChunkLength, info.TotalChunks)

var chunks []*core.Chunk
var indices []core.ChunkNumber
var chunks []*encoding.Frame
var indices []encoding.ChunkNumber
// TODO(ian-shim): if we gathered enough chunks, cancel remaining RPC calls
for i := 0; i < len(operators); i++ {
reply := <-chunksChan
Expand All @@ -162,7 +160,7 @@ func (r *retrievalClient) RetrieveBlob(
return nil, fmt.Errorf("no assignment to operator %v", reply.OperatorID)
}

err = r.encoder.VerifyChunks(reply.Chunks, assignment.GetIndices(), blobHeader.BlobCommitments, encodingParams)
err = r.verifier.VerifyFrames(reply.Chunks, assignment.GetIndices(), blobHeader.BlobCommitments, encodingParams)
if err != nil {
r.logger.Error("failed to verify chunks from operator", "operator", reply.OperatorID, "err", err)
continue
Expand All @@ -174,5 +172,5 @@ func (r *retrievalClient) RetrieveBlob(
indices = append(indices, assignment.GetIndices()...)
}

return r.encoder.Decode(chunks, indices, encodingParams, uint64(blobHeader.Length)*bn254.BYTES_PER_COEFFICIENT)
return r.verifier.Decode(chunks, indices, encodingParams, uint64(blobHeader.Length)*bn254.BYTES_PER_COEFFICIENT)
}
32 changes: 13 additions & 19 deletions clients/tests/retrieval_client_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -10,9 +10,9 @@ import (
clientsmock "github.com/Layr-Labs/eigenda/clients/mock"
"github.com/Layr-Labs/eigenda/common/logging"
"github.com/Layr-Labs/eigenda/core"
"github.com/Layr-Labs/eigenda/core/encoding"
coreindexer "github.com/Layr-Labs/eigenda/core/indexer"
coremock "github.com/Layr-Labs/eigenda/core/mock"
"github.com/Layr-Labs/eigenda/encoding"
"github.com/Layr-Labs/eigenda/encoding/kzgrs"
"github.com/Layr-Labs/eigenda/encoding/kzgrs/prover"
"github.com/Layr-Labs/eigenda/encoding/kzgrs/verifier"
Expand All @@ -26,7 +26,7 @@ import (

const numOperators = 10

func makeTestEncoder() (core.Encoder, error) {
func makeTestComponents() (encoding.Prover, encoding.Verifier, error) {
config := &kzgrs.KzgConfig{
G1Path: "../../inabox/resources/kzg/g1.point",
G2Path: "../../inabox/resources/kzg/g2.point",
Expand All @@ -36,20 +36,17 @@ func makeTestEncoder() (core.Encoder, error) {
NumWorker: uint64(runtime.GOMAXPROCS(0)),
}

kzgEncoderGroup, err := prover.NewProver(config, true)
p, err := prover.NewProver(config, true)
if err != nil {
return nil, err
return nil, nil, err
}

kzgVerifierGroup, err := verifier.NewVerifier(config, true)
v, err := verifier.NewVerifier(config, true)
if err != nil {
return nil, err
return nil, nil, err
}

return &encoding.Encoder{
EncoderGroup: kzgEncoderGroup,
VerifierGroup: kzgVerifierGroup,
}, nil
return p, v, nil
}

var (
Expand Down Expand Up @@ -82,7 +79,7 @@ func setup(t *testing.T) {

nodeClient = clientsmock.NewNodeClient()
coordinator = &core.StdAssignmentCoordinator{}
encoder, err := makeTestEncoder()
p, v, err := makeTestComponents()
if err != nil {
t.Fatal(err)
}
Expand All @@ -99,7 +96,7 @@ func setup(t *testing.T) {
panic("failed to create a new indexed chain state")
}

retrievalClient, err = clients.NewRetrievalClient(logger, ics, coordinator, nodeClient, encoder, 2)
retrievalClient, err = clients.NewRetrievalClient(logger, ics, coordinator, nodeClient, v, 2)
if err != nil {
panic("failed to create a new retrieval client")
}
Expand Down Expand Up @@ -132,7 +129,7 @@ func setup(t *testing.T) {
}

blobSize := uint(len(blob.Data))
blobLength := core.GetBlobLength(uint(blobSize))
blobLength := encoding.GetBlobLength(uint(blobSize))

chunkLength, err := coordinator.CalculateChunkLength(operatorState, blobLength, 0, securityParams[0])
if err != nil {
Expand All @@ -153,18 +150,15 @@ func setup(t *testing.T) {
t.Fatal(err)
}

params, err := core.GetEncodingParams(chunkLength, info.TotalChunks)
if err != nil {
t.Fatal(err)
}
params := encoding.ParamsFromMins(chunkLength, info.TotalChunks)

commitments, chunks, err := encoder.Encode(blob.Data, params)
commitments, chunks, err := p.EncodeAndProve(blob.Data, params)
if err != nil {
t.Fatal(err)
}

blobHeader = &core.BlobHeader{
BlobCommitments: core.BlobCommitments{
BlobCommitments: encoding.BlobCommitments{
Commitment: commitments.Commitment,
LengthCommitment: commitments.LengthCommitment,
LengthProof: commitments.LengthProof,
Expand Down
3 changes: 2 additions & 1 deletion core/assignment_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ import (

"github.com/Layr-Labs/eigenda/core"
"github.com/Layr-Labs/eigenda/core/mock"
"github.com/Layr-Labs/eigenda/encoding"
"github.com/stretchr/testify/assert"
)

Expand Down Expand Up @@ -88,7 +89,7 @@ func TestOperatorAssignments(t *testing.T) {
assert.Equal(t, assignment, expectedAssignments[operatorID])

header := &core.BlobHeader{
BlobCommitments: core.BlobCommitments{
BlobCommitments: encoding.BlobCommitments{
Length: blobLength,
},
QuorumInfos: []*core.BlobQuorumInfo{quorumInfo},
Expand Down
5 changes: 3 additions & 2 deletions core/auth/auth_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ import (

"github.com/Layr-Labs/eigenda/core"
"github.com/Layr-Labs/eigenda/core/auth"
"github.com/Layr-Labs/eigenda/encoding"
"github.com/stretchr/testify/assert"
)

Expand All @@ -19,7 +20,7 @@ func TestAuthentication(t *testing.T) {
signer := auth.NewSigner(privateKeyHex)

testHeader := core.BlobAuthHeader{
BlobCommitments: core.BlobCommitments{},
BlobCommitments: encoding.BlobCommitments{},
AccountID: signer.GetAccountID(),
Nonce: rand.Uint32(),
AuthenticationData: []byte{},
Expand All @@ -46,7 +47,7 @@ func TestAuthenticationFail(t *testing.T) {
signer := auth.NewSigner(privateKeyHex)

testHeader := core.BlobAuthHeader{
BlobCommitments: core.BlobCommitments{},
BlobCommitments: encoding.BlobCommitments{},
AccountID: signer.GetAccountID(),
Nonce: rand.Uint32(),
AuthenticationData: []byte{},
Expand Down
50 changes: 4 additions & 46 deletions core/data.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ import (
"fmt"

"github.com/Layr-Labs/eigenda/common"
"github.com/Layr-Labs/eigenda/encoding"
"github.com/Layr-Labs/eigenda/pkg/kzg/bn254"
)

Expand Down Expand Up @@ -58,7 +59,7 @@ type Blob struct {
// multiple times (Replay attack).
type BlobAuthHeader struct {
// Commitments
BlobCommitments `json:"commitments"`
encoding.BlobCommitments `json:"commitments"`
// AccountID is the account that is paying for the blob to be stored. AccountID is hexadecimal representation of the ECDSA public key
AccountID AccountID `json:"account_id"`
// Nonce
Expand Down Expand Up @@ -99,7 +100,7 @@ type BlobQuorumInfo struct {

// BlobHeader contains all metadata related to a blob including commitments and parameters for encoding
type BlobHeader struct {
BlobCommitments
encoding.BlobCommitments
// QuorumInfos contains the quorum specific parameters for the blob
QuorumInfos []*BlobQuorumInfo

Expand All @@ -126,14 +127,6 @@ func (b *BlobHeader) EncodedSizeAllQuorums() int64 {
return size
}

// BlobCommitments contains the blob's commitment, degree proof, and the actual degree.
type BlobCommitments struct {
Commitment *G1Commitment `json:"commitment"`
LengthCommitment *G2Commitment `json:"length_commitment"`
LengthProof *LengthProof `json:"length_proof"`
Length uint `json:"length"`
}

// Batch
// A batch is a collection of blobs. DA nodes receive and attest to the blobs in a batch together to amortize signature verification costs

Expand All @@ -148,28 +141,8 @@ type BatchHeader struct {
// EncodedBlob contains the messages to be sent to a group of DA nodes corresponding to a single blob
type EncodedBlob = map[OperatorID]*BlobMessage

// Chunks

// Chunk is the smallest unit that is distributed to DA nodes, including both data and the associated polynomial opening proofs.
// A chunk corresponds to a set of evaluations of the global polynomial whose coefficients are used to construct the blob Commitment.
type Chunk struct {
// The Coeffs field contains the coefficients of the polynomial which interpolates these evaluations. This is the same as the
// interpolating polynomial, I(X), used in the KZG multi-reveal (https://dankradfeist.de/ethereum/2020/06/16/kate-polynomial-commitments.html#multiproofs)
Coeffs []Symbol
Proof Proof
}

func (c *Chunk) Length() int {
return len(c.Coeffs)
}

// Returns the size of chunk in bytes.
func (c *Chunk) Size() int {
return c.Length() * bn254.BYTES_PER_COEFFICIENT
}

// A Bundle is the collection of chunks associated with a single blob, for a single operator and a single quorum.
type Bundle = []*Chunk
type Bundle = []*encoding.Frame

// Bundles is the collection of bundles associated with a single blob and a single operator.
type Bundles map[QuorumID]Bundle
Expand Down Expand Up @@ -205,18 +178,3 @@ func (cb Bundles) Size() int64 {
}
return size
}

// Sample is a chunk with associated metadata used by the Universal Batch Verifier
type Sample struct {
Commitment *G1Commitment
Chunk *Chunk
AssignmentIndex ChunkNumber
BlobIndex int
}

// SubBatch is a part of the whole Batch with identical Encoding Parameters, i.e. (ChunkLen, NumChunk)
// Blobs with the same encoding parameters are collected in a single subBatch
type SubBatch struct {
Samples []Sample
NumBlobs int
}
Loading

0 comments on commit 3ed5953

Please sign in to comment.