diff --git a/Makefile b/Makefile index 29fdac1b..ef00d5c2 100644 --- a/Makefile +++ b/Makefile @@ -11,8 +11,8 @@ LDFLAGSSTRING +=-X main.Date=$(BUILD_TIME) LDFLAGSSTRING +=-X main.Version=$(GIT_TAG) LDFLAGS := -ldflags "$(LDFLAGSSTRING)" -E2ETEST = INTEGRATION=true go test -timeout 1m -v ./e2e -parallel 4 -deploy-config ../.devnet/devnetL1.json -HOLESKYTEST = TESTNET=true go test -timeout 50m -v ./e2e -parallel 4 -deploy-config ../.devnet/devnetL1.json +E2ETEST = INTEGRATION=true go test -timeout 1m ./e2e -parallel 4 -deploy-config ../.devnet/devnetL1.json +HOLESKYTEST = TESTNET=true go test -timeout 50m ./e2e -parallel 4 -deploy-config ../.devnet/devnetL1.json .PHONY: eigenda-proxy eigenda-proxy: diff --git a/cmd/server/entrypoint.go b/cmd/server/entrypoint.go index 3f6eac2a..66975d65 100644 --- a/cmd/server/entrypoint.go +++ b/cmd/server/entrypoint.go @@ -4,6 +4,7 @@ import ( "context" "fmt" + "github.com/Layr-Labs/eigenda-proxy/flags" "github.com/Layr-Labs/eigenda-proxy/metrics" "github.com/Layr-Labs/eigenda-proxy/server" "github.com/urfave/cli/v2" @@ -24,15 +25,14 @@ func StartProxySvr(cliCtx *cli.Context) error { ctx, ctxCancel := context.WithCancel(cliCtx.Context) defer ctxCancel() - m := metrics.NewMetrics("default") - log.Info("Initializing EigenDA proxy server...") daRouter, err := server.LoadStoreRouter(ctx, cfg, log) if err != nil { return fmt.Errorf("failed to create store: %w", err) } - server := server.NewServer(cliCtx.String(server.ListenAddrFlagName), cliCtx.Int(server.PortFlagName), daRouter, log, m) + m := metrics.NewMetrics("default") + server := server.NewServer(cliCtx.String(flags.ListenAddrFlagName), cliCtx.Int(flags.PortFlagName), daRouter, log, m) if err := server.Start(); err != nil { return fmt.Errorf("failed to start the DA server: %w", err) diff --git a/cmd/server/main.go b/cmd/server/main.go index 910a6112..3bb73a4c 100644 --- a/cmd/server/main.go +++ b/cmd/server/main.go @@ -8,8 +8,8 @@ import ( "github.com/joho/godotenv" 
"github.com/urfave/cli/v2" + "github.com/Layr-Labs/eigenda-proxy/flags" "github.com/Layr-Labs/eigenda-proxy/metrics" - "github.com/Layr-Labs/eigenda-proxy/server" "github.com/ethereum-optimism/optimism/op-service/cliapp" oplog "github.com/ethereum-optimism/optimism/op-service/log" "github.com/ethereum-optimism/optimism/op-service/metrics/doc" @@ -26,7 +26,7 @@ func main() { oplog.SetupDefaults() app := cli.NewApp() - app.Flags = cliapp.ProtectFlags(server.Flags) + app.Flags = cliapp.ProtectFlags(flags.Flags) app.Version = Version app.Name = "eigenda-proxy" app.Usage = "EigenDA Proxy Sidecar Service" diff --git a/e2e/server_test.go b/e2e/server_test.go index b3d2c1a8..2b9ebc24 100644 --- a/e2e/server_test.go +++ b/e2e/server_test.go @@ -110,7 +110,7 @@ func TestKeccak256CommitmentRequestErrorsWhenS3NotSet(t *testing.T) { testCfg.UseKeccak256ModeS3 = true tsConfig := e2e.TestSuiteConfig(t, testCfg) - tsConfig.S3Config.Endpoint = "" + tsConfig.EigenDAConfig.S3Config.Endpoint = "" ts, kill := e2e.CreateTestSuite(t, tsConfig) defer kill() @@ -393,7 +393,7 @@ func TestProxyServerCachingWithRedis(t *testing.T) { require.Equal(t, testPreimage, preimage) // ensure that read was from cache - redStats, err := ts.Server.GetStoreStats(store.Redis) + redStats, err := ts.Server.GetStoreStats(store.RedisBackendType) require.NoError(t, err) require.Equal(t, 1, redStats.Reads) diff --git a/e2e/setup.go b/e2e/setup.go index b91cdd67..02071569 100644 --- a/e2e/setup.go +++ b/e2e/setup.go @@ -4,13 +4,19 @@ import ( "context" "fmt" "os" + "runtime" "testing" "time" "github.com/Layr-Labs/eigenda-proxy/metrics" "github.com/Layr-Labs/eigenda-proxy/server" - "github.com/Layr-Labs/eigenda-proxy/store" + "github.com/Layr-Labs/eigenda-proxy/store/generated_key/memstore" + "github.com/Layr-Labs/eigenda-proxy/store/precomputed_key/redis" + "github.com/Layr-Labs/eigenda-proxy/store/precomputed_key/s3" + "github.com/Layr-Labs/eigenda-proxy/utils" + "github.com/Layr-Labs/eigenda-proxy/verify" 
"github.com/Layr-Labs/eigenda/api/clients" + "github.com/Layr-Labs/eigenda/encoding/kzg" "github.com/ethereum/go-ethereum/log" "github.com/minio/minio-go/v7" "github.com/minio/minio-go/v7/pkg/credentials" @@ -53,15 +59,15 @@ func TestConfig(useMemory bool) *Cfg { } func createRedisConfig(eigendaCfg server.Config) server.CLIConfig { + eigendaCfg.RedisConfig = redis.Config{ + Endpoint: "127.0.0.1:9001", + Password: "", + DB: 0, + Eviction: 10 * time.Minute, + Profile: true, + } return server.CLIConfig{ EigenDAConfig: eigendaCfg, - RedisCfg: store.RedisConfig{ - Endpoint: "127.0.0.1:9001", - Password: "", - DB: 0, - Eviction: 10 * time.Minute, - Profile: true, - }, } } @@ -70,18 +76,18 @@ func createS3Config(eigendaCfg server.Config) server.CLIConfig { bucketName := "eigenda-proxy-test-" + RandString(10) createS3Bucket(bucketName) + eigendaCfg.S3Config = s3.Config{ + Profiling: true, + Bucket: bucketName, + Path: "", + Endpoint: "localhost:4566", + AccessKeySecret: "minioadmin", + AccessKeyID: "minioadmin", + CredentialType: s3.CredentialTypeStatic, + Backup: false, + } return server.CLIConfig{ EigenDAConfig: eigendaCfg, - S3Config: store.S3Config{ - Profiling: true, - Bucket: bucketName, - Path: "", - Endpoint: "localhost:4566", - AccessKeySecret: "minioadmin", - AccessKeyID: "minioadmin", - S3CredentialType: store.S3CredentialStatic, - Backup: false, - }, } } @@ -105,28 +111,39 @@ func TestSuiteConfig(t *testing.T, testCfg *Cfg) server.CLIConfig { pollInterval = time.Minute * 1 } + maxBlobLengthBytes, err := utils.ParseBytesAmount("16mib") + require.NoError(t, err) eigendaCfg := server.Config{ - ClientConfig: clients.EigenDAClientConfig{ + EdaClientConfig: clients.EigenDAClientConfig{ RPC: holeskyDA, StatusQueryTimeout: time.Minute * 45, StatusQueryRetryInterval: pollInterval, DisableTLS: false, SignerPrivateKeyHex: pk, }, - EthRPC: ethRPC, - SvcManagerAddr: "0xD4A7E1Bd8015057293f0D0A557088c286942e84b", // incompatible with non holeskly networks - CacheDir: 
"../resources/SRSTables", - G1Path: "../resources/g1.point", - MaxBlobLength: "16mib", - G2PowerOfTauPath: "../resources/g2.point.powerOf2", - PutBlobEncodingVersion: 0x00, - MemstoreEnabled: testCfg.UseMemory, - MemstoreBlobExpiration: testCfg.Expiration, - EthConfirmationDepth: 0, + VerifierConfig: verify.Config{ + VerifyCerts: false, + RPCURL: ethRPC, + SvcManagerAddr: "0xD4A7E1Bd8015057293f0D0A557088c286942e84b", // incompatible with non holeskly networks + EthConfirmationDepth: 0, + KzgConfig: &kzg.KzgConfig{ + G1Path: "../resources/g1.point", + G2PowerOf2Path: "../resources/g2.point.powerOf2", + CacheDir: "../resources/SRSTables", + SRSOrder: 268435456, + SRSNumberToLoad: maxBlobLengthBytes / 32, + NumWorker: uint64(runtime.GOMAXPROCS(0)), + }, + }, + MemstoreEnabled: testCfg.UseMemory, + MemstoreConfig: memstore.Config{ + BlobExpiration: testCfg.Expiration, + MaxBlobSizeBytes: maxBlobLengthBytes, + }, } if testCfg.UseMemory { - eigendaCfg.ClientConfig.SignerPrivateKeyHex = "0000000000000000000100000000000000000000000000000000000000000000" + eigendaCfg.EdaClientConfig.SignerPrivateKeyHex = "0000000000000000000100000000000000000000000000000000000000000000" } var cfg server.CLIConfig diff --git a/flags/eigendaflags/cli.go b/flags/eigendaflags/cli.go new file mode 100644 index 00000000..7106afcd --- /dev/null +++ b/flags/eigendaflags/cli.go @@ -0,0 +1,121 @@ +package eigendaflags + +import ( + "time" + + "github.com/Layr-Labs/eigenda/api/clients" + "github.com/Layr-Labs/eigenda/api/clients/codecs" + "github.com/urfave/cli/v2" +) + +// TODO: we should eventually move all of these flags into the eigenda repo + +var ( + DisperserRPCFlagName = withFlagPrefix("disperser-rpc") + StatusQueryRetryIntervalFlagName = withFlagPrefix("status-query-retry-interval") + StatusQueryTimeoutFlagName = withFlagPrefix("status-query-timeout") + DisableTLSFlagName = withFlagPrefix("disable-tls") + ResponseTimeoutFlagName = withFlagPrefix("response-timeout") + CustomQuorumIDsFlagName = 
withFlagPrefix("custom-quorum-ids") + SignerPrivateKeyHexFlagName = withFlagPrefix("signer-private-key-hex") + PutBlobEncodingVersionFlagName = withFlagPrefix("put-blob-encoding-version") + DisablePointVerificationModeFlagName = withFlagPrefix("disable-point-verification-mode") + WaitForFinalizationFlagName = withFlagPrefix("wait-for-finalization") +) + +func withFlagPrefix(s string) string { + return "eigenda." + s +} + +func withEnvPrefix(envPrefix, s string) []string { + return []string{envPrefix + "_EIGENDA_" + s} +} + +// CLIFlags ... used for EigenDA client configuration +func CLIFlags(envPrefix, category string) []cli.Flag { + return []cli.Flag{ + &cli.StringFlag{ + Name: DisperserRPCFlagName, + Usage: "RPC endpoint of the EigenDA disperser.", + EnvVars: withEnvPrefix(envPrefix, "DISPERSER_RPC"), + Category: category, + }, + &cli.DurationFlag{ + Name: StatusQueryTimeoutFlagName, + Usage: "Duration to wait for a blob to finalize after being sent for dispersal. Default is 30 minutes.", + Value: 30 * time.Minute, + EnvVars: withEnvPrefix(envPrefix, "STATUS_QUERY_TIMEOUT"), + Category: category, + }, + &cli.DurationFlag{ + Name: StatusQueryRetryIntervalFlagName, + Usage: "Interval between retries when awaiting network blob finalization. Default is 5 seconds.", + Value: 5 * time.Second, + EnvVars: withEnvPrefix(envPrefix, "STATUS_QUERY_INTERVAL"), + Category: category, + }, + &cli.BoolFlag{ + Name: DisableTLSFlagName, + Usage: "Disable TLS for gRPC communication with the EigenDA disperser. Default is false.", + Value: false, + EnvVars: withEnvPrefix(envPrefix, "GRPC_DISABLE_TLS"), + Category: category, + }, + &cli.DurationFlag{ + Name: ResponseTimeoutFlagName, + Usage: "Total time to wait for a response from the EigenDA disperser. 
Default is 60 seconds.", + Value: 60 * time.Second, + EnvVars: withEnvPrefix(envPrefix, "RESPONSE_TIMEOUT"), + Category: category, + }, + &cli.UintSliceFlag{ + Name: CustomQuorumIDsFlagName, + Usage: "Custom quorum IDs for writing blobs. Should not include default quorums 0 or 1.", + Value: cli.NewUintSlice(), + EnvVars: withEnvPrefix(envPrefix, "CUSTOM_QUORUM_IDS"), + Category: category, + }, + &cli.StringFlag{ + Name: SignerPrivateKeyHexFlagName, + Usage: "Hex-encoded signer private key. This key should not be associated with an Ethereum address holding any funds.", + EnvVars: withEnvPrefix(envPrefix, "SIGNER_PRIVATE_KEY_HEX"), + Category: category, + }, + &cli.UintFlag{ + Name: PutBlobEncodingVersionFlagName, + Usage: "Blob encoding version to use when writing blobs from the high-level interface.", + EnvVars: withEnvPrefix(envPrefix, "PUT_BLOB_ENCODING_VERSION"), + Value: 0, + Category: category, + }, + &cli.BoolFlag{ + Name: DisablePointVerificationModeFlagName, + Usage: "Disable point verification mode. This mode performs IFFT on data before writing and FFT on data after reading. 
Disabling requires supplying the entire blob for verification against the KZG commitment.", + EnvVars: withEnvPrefix(envPrefix, "DISABLE_POINT_VERIFICATION_MODE"), + Value: false, + Category: category, + }, + &cli.BoolFlag{ + Name: WaitForFinalizationFlagName, + Usage: "Wait for blob finalization before returning from PutBlob.", + EnvVars: withEnvPrefix(envPrefix, "WAIT_FOR_FINALIZATION"), + Value: false, + Category: category, + }, + } +} + +func ReadConfig(ctx *cli.Context) clients.EigenDAClientConfig { + return clients.EigenDAClientConfig{ + RPC: ctx.String(DisperserRPCFlagName), + StatusQueryRetryInterval: ctx.Duration(StatusQueryRetryIntervalFlagName), + StatusQueryTimeout: ctx.Duration(StatusQueryTimeoutFlagName), + DisableTLS: ctx.Bool(DisableTLSFlagName), + ResponseTimeout: ctx.Duration(ResponseTimeoutFlagName), + CustomQuorumIDs: ctx.UintSlice(CustomQuorumIDsFlagName), + SignerPrivateKeyHex: ctx.String(SignerPrivateKeyHexFlagName), + PutBlobEncodingVersion: codecs.BlobEncodingVersion(ctx.Uint(PutBlobEncodingVersionFlagName)), + DisablePointVerificationMode: ctx.Bool(DisablePointVerificationModeFlagName), + WaitForFinalization: ctx.Bool(WaitForFinalizationFlagName), + } +} diff --git a/flags/flags.go b/flags/flags.go new file mode 100644 index 00000000..445dbfd3 --- /dev/null +++ b/flags/flags.go @@ -0,0 +1,80 @@ +package flags + +import ( + "github.com/Layr-Labs/eigenda-proxy/flags/eigendaflags" + "github.com/Layr-Labs/eigenda-proxy/store/generated_key/memstore" + "github.com/Layr-Labs/eigenda-proxy/store/precomputed_key/redis" + "github.com/Layr-Labs/eigenda-proxy/store/precomputed_key/s3" + "github.com/urfave/cli/v2" + + opservice "github.com/ethereum-optimism/optimism/op-service" + oplog "github.com/ethereum-optimism/optimism/op-service/log" + opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" +) + +const ( + EigenDAClientCategory = "EigenDA Client" + MemstoreFlagsCategory = "Memstore (replaces EigenDA when enabled)" + RedisCategory = 
"Redis Cache/Fallback" + S3Category = "S3 Cache/Fallback" +) + +const ( + ListenAddrFlagName = "addr" + PortFlagName = "port" + + // routing flags + FallbackTargetsFlagName = "routing.fallback-targets" + CacheTargetsFlagName = "routing.cache-targets" +) + +const EnvVarPrefix = "EIGENDA_PROXY" + +func prefixEnvVars(name string) []string { + return opservice.PrefixEnvVar(EnvVarPrefix, name) +} + +func CLIFlags() []cli.Flag { + // TODO: Decompose all flags into constituent parts based on their respective category / usage + flags := []cli.Flag{ + &cli.StringFlag{ + Name: ListenAddrFlagName, + Usage: "server listening address", + Value: "0.0.0.0", + EnvVars: prefixEnvVars("ADDR"), + }, + &cli.IntFlag{ + Name: PortFlagName, + Usage: "server listening port", + Value: 3100, + EnvVars: prefixEnvVars("PORT"), + }, + &cli.StringSliceFlag{ + Name: FallbackTargetsFlagName, + Usage: "List of read fallback targets to rollover to if cert can't be read from EigenDA.", + Value: cli.NewStringSlice(), + EnvVars: prefixEnvVars("FALLBACK_TARGETS"), + }, + &cli.StringSliceFlag{ + Name: CacheTargetsFlagName, + Usage: "List of caching targets to use fast reads from EigenDA.", + Value: cli.NewStringSlice(), + EnvVars: prefixEnvVars("CACHE_TARGETS"), + }, + } + + return flags +} + +// Flags contains the list of configuration options available to the binary. +var Flags = []cli.Flag{} + +func init() { + Flags = CLIFlags() + Flags = append(Flags, oplog.CLIFlags(EnvVarPrefix)...) + Flags = append(Flags, opmetrics.CLIFlags(EnvVarPrefix)...) + Flags = append(Flags, eigendaflags.CLIFlags(EnvVarPrefix, EigenDAClientCategory)...) + Flags = append(Flags, redis.CLIFlags(EnvVarPrefix, RedisCategory)...) + Flags = append(Flags, s3.CLIFlags(EnvVarPrefix, S3Category)...) + Flags = append(Flags, memstore.CLIFlags(EnvVarPrefix, MemstoreFlagsCategory)...) 
+} diff --git a/mocks/router.go b/mocks/router.go index 97c171fc..f1ca67c5 100644 --- a/mocks/router.go +++ b/mocks/router.go @@ -80,10 +80,10 @@ func (mr *MockIRouterMockRecorder) Get(arg0, arg1, arg2 interface{}) *gomock.Cal } // GetEigenDAStore mocks base method. -func (m *MockIRouter) GetEigenDAStore() store.KeyGeneratedStore { +func (m *MockIRouter) GetEigenDAStore() store.GeneratedKeyStore { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetEigenDAStore") - ret0, _ := ret[0].(store.KeyGeneratedStore) + ret0, _ := ret[0].(store.GeneratedKeyStore) return ret0 } diff --git a/server/config.go b/server/config.go index 2193e61a..4cb0c8bf 100644 --- a/server/config.go +++ b/server/config.go @@ -2,218 +2,50 @@ package server import ( "fmt" - "runtime" - "time" + "github.com/urfave/cli/v2" + + "github.com/Layr-Labs/eigenda-proxy/flags" + "github.com/Layr-Labs/eigenda-proxy/flags/eigendaflags" "github.com/Layr-Labs/eigenda-proxy/store" + "github.com/Layr-Labs/eigenda-proxy/store/generated_key/memstore" + "github.com/Layr-Labs/eigenda-proxy/store/precomputed_key/redis" + "github.com/Layr-Labs/eigenda-proxy/store/precomputed_key/s3" "github.com/Layr-Labs/eigenda-proxy/utils" "github.com/Layr-Labs/eigenda-proxy/verify" "github.com/Layr-Labs/eigenda/api/clients" - "github.com/Layr-Labs/eigenda/api/clients/codecs" - "github.com/Layr-Labs/eigenda/encoding/kzg" - "github.com/urfave/cli/v2" -) - -const ( - // eigenda client flags - EigenDADisperserRPCFlagName = "eigenda-disperser-rpc" - StatusQueryRetryIntervalFlagName = "eigenda-status-query-retry-interval" - StatusQueryTimeoutFlagName = "eigenda-status-query-timeout" - DisableTLSFlagName = "eigenda-disable-tls" - ResponseTimeoutFlagName = "eigenda-response-timeout" - CustomQuorumIDsFlagName = "eigenda-custom-quorum-ids" - SignerPrivateKeyHexFlagName = "eigenda-signer-private-key-hex" - PutBlobEncodingVersionFlagName = "eigenda-put-blob-encoding-version" - DisablePointVerificationModeFlagName = 
"eigenda-disable-point-verification-mode" - - // cert verification flags - CertVerificationEnabledFlagName = "eigenda-cert-verification-enabled" - EthRPCFlagName = "eigenda-eth-rpc" - SvcManagerAddrFlagName = "eigenda-svc-manager-addr" - EthConfirmationDepthFlagName = "eigenda-eth-confirmation-depth" - - // kzg flags - G1PathFlagName = "eigenda-g1-path" - G2TauFlagName = "eigenda-g2-tau-path" - CachePathFlagName = "eigenda-cache-path" - MaxBlobLengthFlagName = "eigenda-max-blob-length" - - // memstore flags - MemstoreFlagName = "memstore.enabled" - MemstoreExpirationFlagName = "memstore.expiration" - MemstorePutLatencyFlagName = "memstore.put-latency" - MemstoreGetLatencyFlagName = "memstore.get-latency" - - // redis client flags - RedisEndpointFlagName = "redis.endpoint" - RedisPasswordFlagName = "redis.password" - RedisDBFlagName = "redis.db" - RedisEvictionFlagName = "redis.eviction" - - // S3 client flags - S3CredentialTypeFlagName = "s3.credential-type" // #nosec G101 - S3BucketFlagName = "s3.bucket" // #nosec G101 - S3PathFlagName = "s3.path" - S3EndpointFlagName = "s3.endpoint" - S3AccessKeyIDFlagName = "s3.access-key-id" // #nosec G101 - S3AccessKeySecretFlagName = "s3.access-key-secret" // #nosec G101 - S3BackupFlagName = "s3.backup" - S3TimeoutFlagName = "s3.timeout" - - // routing flags - FallbackTargets = "routing.fallback-targets" - CacheTargets = "routing.cache-targets" -) -const ( - BytesPerSymbol = 31 - MaxCodingRatio = 8 -) - -var ( - MaxSRSPoints = 1 << 28 // 2^28 - MaxAllowedBlobSize = uint64(MaxSRSPoints * BytesPerSymbol / MaxCodingRatio) + opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" ) type Config struct { - // eigenda - ClientConfig clients.EigenDAClientConfig - - // the blob encoding version to use when writing blobs from the high level interface. 
- PutBlobEncodingVersion codecs.BlobEncodingVersion - - // eth verification vars - // TODO: right now verification and confirmation depth are tightly coupled - // we should decouple them - CertVerificationEnabled bool - EthRPC string - SvcManagerAddr string - EthConfirmationDepth int64 - - // kzg vars - CacheDir string - G1Path string - G2Path string - G2PowerOfTauPath string - - // size constraints - MaxBlobLength string - maxBlobLengthBytes uint64 + EdaClientConfig clients.EigenDAClientConfig + VerifierConfig verify.Config - // memstore - MemstoreEnabled bool - MemstoreBlobExpiration time.Duration - MemstoreGetLatency time.Duration - MemstorePutLatency time.Duration + MemstoreEnabled bool + MemstoreConfig memstore.Config // routing FallbackTargets []string CacheTargets []string // secondary storage - RedisCfg store.RedisConfig - S3Config store.S3Config -} - -// GetMaxBlobLength ... returns the maximum blob length in bytes -func (cfg *Config) GetMaxBlobLength() (uint64, error) { - if cfg.maxBlobLengthBytes == 0 { - numBytes, err := utils.ParseBytesAmount(cfg.MaxBlobLength) - if err != nil { - return 0, err - } - - if numBytes > MaxAllowedBlobSize { - return 0, fmt.Errorf("excluding disperser constraints on max blob size, SRS points constrain the maxBlobLength configuration parameter to be less than than %d bytes", MaxAllowedBlobSize) - } - - cfg.maxBlobLengthBytes = numBytes - } - - return cfg.maxBlobLengthBytes, nil -} - -// VerificationCfg ... 
returns certificate config used to verify blobs from eigenda -func (cfg *Config) VerificationCfg() *verify.Config { - numBytes, err := cfg.GetMaxBlobLength() - if err != nil { - panic(fmt.Errorf("failed to read max blob length: %w", err)) - } - - kzgCfg := &kzg.KzgConfig{ - G1Path: cfg.G1Path, - G2PowerOf2Path: cfg.G2PowerOfTauPath, - CacheDir: cfg.CacheDir, - SRSOrder: 268435456, // 2 ^ 32 - SRSNumberToLoad: numBytes / 32, // # of fr.Elements - NumWorker: uint64(runtime.GOMAXPROCS(0)), // #nosec G115 - } - - return &verify.Config{ - KzgConfig: kzgCfg, - VerifyCerts: cfg.CertVerificationEnabled, - RPCURL: cfg.EthRPC, - SvcManagerAddr: cfg.SvcManagerAddr, - EthConfirmationDepth: uint64(cfg.EthConfirmationDepth), // #nosec G115 - } + RedisConfig redis.Config + S3Config s3.Config } // ReadConfig ... parses the Config from the provided flags or environment variables. func ReadConfig(ctx *cli.Context) Config { - cfg := Config{ - RedisCfg: store.RedisConfig{ - Endpoint: ctx.String(RedisEndpointFlagName), - Password: ctx.String(RedisPasswordFlagName), - DB: ctx.Int(RedisDBFlagName), - Eviction: ctx.Duration(RedisEvictionFlagName), - }, - S3Config: store.S3Config{ - S3CredentialType: store.StringToS3CredentialType(ctx.String(S3CredentialTypeFlagName)), - Bucket: ctx.String(S3BucketFlagName), - Path: ctx.String(S3PathFlagName), - Endpoint: ctx.String(S3EndpointFlagName), - AccessKeyID: ctx.String(S3AccessKeyIDFlagName), - AccessKeySecret: ctx.String(S3AccessKeySecretFlagName), - Backup: ctx.Bool(S3BackupFlagName), - Timeout: ctx.Duration(S3TimeoutFlagName), - }, - ClientConfig: clients.EigenDAClientConfig{ - RPC: ctx.String(EigenDADisperserRPCFlagName), - StatusQueryRetryInterval: ctx.Duration(StatusQueryRetryIntervalFlagName), - StatusQueryTimeout: ctx.Duration(StatusQueryTimeoutFlagName), - DisableTLS: ctx.Bool(DisableTLSFlagName), - ResponseTimeout: ctx.Duration(ResponseTimeoutFlagName), - CustomQuorumIDs: ctx.UintSlice(CustomQuorumIDsFlagName), - SignerPrivateKeyHex: 
ctx.String(SignerPrivateKeyHexFlagName), - PutBlobEncodingVersion: codecs.BlobEncodingVersion(ctx.Uint(PutBlobEncodingVersionFlagName)), - DisablePointVerificationMode: ctx.Bool(DisablePointVerificationModeFlagName), - }, - G1Path: ctx.String(G1PathFlagName), - G2PowerOfTauPath: ctx.String(G2TauFlagName), - CacheDir: ctx.String(CachePathFlagName), - CertVerificationEnabled: ctx.Bool(CertVerificationEnabledFlagName), - MaxBlobLength: ctx.String(MaxBlobLengthFlagName), - SvcManagerAddr: ctx.String(SvcManagerAddrFlagName), - EthRPC: ctx.String(EthRPCFlagName), - EthConfirmationDepth: ctx.Int64(EthConfirmationDepthFlagName), - MemstoreEnabled: ctx.Bool(MemstoreFlagName), - MemstoreBlobExpiration: ctx.Duration(MemstoreExpirationFlagName), - MemstoreGetLatency: ctx.Duration(MemstoreGetLatencyFlagName), - MemstorePutLatency: ctx.Duration(MemstorePutLatencyFlagName), - FallbackTargets: ctx.StringSlice(FallbackTargets), - CacheTargets: ctx.StringSlice(CacheTargets), - } - // the eigenda client can only wait for 0 confirmations or finality - // the da-proxy has a more fine-grained notion of confirmation depth - // we use -1 to let the da client wait for finality, and then need to set the confirmation depth - // for the da-proxy to 0 (because negative confirmation depth doesn't mean anything and leads to errors) - // TODO: should the eigenda-client implement this feature for us instead? - if cfg.EthConfirmationDepth < 0 { - cfg.ClientConfig.WaitForFinalization = true - cfg.EthConfirmationDepth = 0 + return Config{ + RedisConfig: redis.ReadConfig(ctx), + S3Config: s3.ReadConfig(ctx), + EdaClientConfig: eigendaflags.ReadConfig(ctx), + VerifierConfig: verify.ReadConfig(ctx), + MemstoreEnabled: ctx.Bool(memstore.EnabledFlagName), + MemstoreConfig: memstore.ReadConfig(ctx), + FallbackTargets: ctx.StringSlice(flags.FallbackTargetsFlagName), + CacheTargets: ctx.StringSlice(flags.CacheTargetsFlagName), } - - return cfg } // checkTargets ... 
verifies that a backend target slice is constructed correctly @@ -237,47 +69,40 @@ func (cfg *Config) checkTargets(targets []string) error { // Check ... verifies that configuration values are adequately set func (cfg *Config) Check() error { - l, err := cfg.GetMaxBlobLength() - if err != nil { - return err - } - - if l == 0 { - return fmt.Errorf("max blob length is 0") - } - if !cfg.MemstoreEnabled { - if cfg.ClientConfig.RPC == "" { + if cfg.EdaClientConfig.RPC == "" { return fmt.Errorf("using eigenda backend (memstore.enabled=false) but eigenda disperser rpc url is not set") } } - if cfg.CertVerificationEnabled { + // cert verification is enabled + // TODO: move this verification logic to verify/cli.go + if cfg.VerifierConfig.VerifyCerts { if cfg.MemstoreEnabled { return fmt.Errorf("cannot enable cert verification when memstore is enabled") } - if cfg.EthRPC == "" { + if cfg.VerifierConfig.RPCURL == "" { return fmt.Errorf("cert verification enabled but eth rpc is not set") } - if cfg.SvcManagerAddr == "" { + if cfg.VerifierConfig.SvcManagerAddr == "" { return fmt.Errorf("cert verification enabled but svc manager address is not set") } } - if cfg.S3Config.S3CredentialType == store.S3CredentialUnknown && cfg.S3Config.Endpoint != "" { + if cfg.S3Config.CredentialType == s3.CredentialTypeUnknown && cfg.S3Config.Endpoint != "" { return fmt.Errorf("s3 credential type must be set") } - if cfg.S3Config.S3CredentialType == store.S3CredentialStatic { + if cfg.S3Config.CredentialType == s3.CredentialTypeStatic { if cfg.S3Config.Endpoint != "" && (cfg.S3Config.AccessKeyID == "" || cfg.S3Config.AccessKeySecret == "") { return fmt.Errorf("s3 endpoint is set, but access key id or access key secret is not set") } } - if cfg.RedisCfg.Endpoint == "" && cfg.RedisCfg.Password != "" { + if cfg.RedisConfig.Endpoint == "" && cfg.RedisConfig.Password != "" { return fmt.Errorf("redis password is set, but endpoint is not") } - err = cfg.checkTargets(cfg.FallbackTargets) + err := 
cfg.checkTargets(cfg.FallbackTargets) if err != nil { return err } @@ -297,230 +122,23 @@ func (cfg *Config) Check() error { return nil } -// s3Flags ... used for S3 backend configuration -func s3Flags() []cli.Flag { - return []cli.Flag{ - &cli.StringFlag{ - Name: S3CredentialTypeFlagName, - Usage: "The way to authenticate to S3, options are [iam, static]", - EnvVars: prefixEnvVars("S3_CREDENTIAL_TYPE"), - }, - &cli.StringFlag{ - Name: S3BucketFlagName, - Usage: "bucket name for S3 storage", - EnvVars: prefixEnvVars("S3_BUCKET"), - }, - &cli.StringFlag{ - Name: S3PathFlagName, - Usage: "path for S3 storage", - EnvVars: prefixEnvVars("S3_PATH"), - }, - &cli.StringFlag{ - Name: S3EndpointFlagName, - Usage: "endpoint for S3 storage", - Value: "", - EnvVars: prefixEnvVars("S3_ENDPOINT"), - }, - &cli.StringFlag{ - Name: S3AccessKeyIDFlagName, - Usage: "access key id for S3 storage", - Value: "", - EnvVars: prefixEnvVars("S3_ACCESS_KEY_ID"), - }, - &cli.StringFlag{ - Name: S3AccessKeySecretFlagName, - Usage: "access key secret for S3 storage", - Value: "", - EnvVars: prefixEnvVars("S3_ACCESS_KEY_SECRET"), - }, - &cli.BoolFlag{ - Name: S3BackupFlagName, - Usage: "whether to use S3 as a backup store to ensure resiliency in case of EigenDA read failure", - Value: false, - EnvVars: prefixEnvVars("S3_BACKUP"), - }, - &cli.DurationFlag{ - Name: S3TimeoutFlagName, - Usage: "timeout for S3 storage operations (e.g. get, put)", - Value: 5 * time.Second, - EnvVars: prefixEnvVars("S3_TIMEOUT"), - }, - } +type CLIConfig struct { + EigenDAConfig Config + MetricsCfg opmetrics.CLIConfig } -// redisFlags ... 
used for Redis backend configuration -func redisFlags() []cli.Flag { - return []cli.Flag{ - &cli.StringFlag{ - Name: RedisEndpointFlagName, - Usage: "Redis endpoint", - EnvVars: prefixEnvVars("REDIS_ENDPOINT"), - }, - &cli.StringFlag{ - Name: RedisPasswordFlagName, - Usage: "Redis password", - EnvVars: prefixEnvVars("REDIS_PASSWORD"), - }, - &cli.IntFlag{ - Name: RedisDBFlagName, - Usage: "Redis database", - Value: 0, - EnvVars: prefixEnvVars("REDIS_DB"), - }, - &cli.DurationFlag{ - Name: RedisEvictionFlagName, - Usage: "Redis eviction time", - Value: 24 * time.Hour, - EnvVars: prefixEnvVars("REDIS_EVICTION"), - }, +func ReadCLIConfig(ctx *cli.Context) CLIConfig { + config := ReadConfig(ctx) + return CLIConfig{ + EigenDAConfig: config, + MetricsCfg: opmetrics.ReadCLIConfig(ctx), } } -func CLIFlags() []cli.Flag { - // TODO: Decompose all flags into constituent parts based on their respective category / usage - flags := []cli.Flag{ - &cli.StringFlag{ - Name: EigenDADisperserRPCFlagName, - Usage: "RPC endpoint of the EigenDA disperser.", - EnvVars: prefixEnvVars("EIGENDA_DISPERSER_RPC"), - }, - &cli.DurationFlag{ - Name: StatusQueryTimeoutFlagName, - Usage: "Duration to wait for a blob to finalize after being sent for dispersal. Default is 30 minutes.", - Value: 30 * time.Minute, - EnvVars: prefixEnvVars("STATUS_QUERY_TIMEOUT"), - }, - &cli.DurationFlag{ - Name: StatusQueryRetryIntervalFlagName, - Usage: "Interval between retries when awaiting network blob finalization. Default is 5 seconds.", - Value: 5 * time.Second, - EnvVars: prefixEnvVars("STATUS_QUERY_INTERVAL"), - }, - &cli.BoolFlag{ - Name: DisableTLSFlagName, - Usage: "Disable TLS for gRPC communication with the EigenDA disperser. Default is false.", - Value: false, - EnvVars: prefixEnvVars("GRPC_DISABLE_TLS"), - }, - &cli.DurationFlag{ - Name: ResponseTimeoutFlagName, - Usage: "Total time to wait for a response from the EigenDA disperser. 
Default is 60 seconds.", - Value: 60 * time.Second, - EnvVars: prefixEnvVars("RESPONSE_TIMEOUT"), - }, - &cli.UintSliceFlag{ - Name: CustomQuorumIDsFlagName, - Usage: "Custom quorum IDs for writing blobs. Should not include default quorums 0 or 1.", - Value: cli.NewUintSlice(), - EnvVars: prefixEnvVars("CUSTOM_QUORUM_IDS"), - }, - &cli.StringFlag{ - Name: SignerPrivateKeyHexFlagName, - Usage: "Hex-encoded signer private key. This key should not be associated with an Ethereum address holding any funds.", - EnvVars: prefixEnvVars("SIGNER_PRIVATE_KEY_HEX"), - }, - &cli.UintFlag{ - Name: PutBlobEncodingVersionFlagName, - Usage: "Blob encoding version to use when writing blobs from the high-level interface.", - EnvVars: prefixEnvVars("PUT_BLOB_ENCODING_VERSION"), - Value: 0, - }, - &cli.BoolFlag{ - Name: DisablePointVerificationModeFlagName, - Usage: "Disable point verification mode. This mode performs IFFT on data before writing and FFT on data after reading. Disabling requires supplying the entire blob for verification against the KZG commitment.", - EnvVars: prefixEnvVars("DISABLE_POINT_VERIFICATION_MODE"), - Value: false, - }, - &cli.StringFlag{ - Name: MaxBlobLengthFlagName, - Usage: "Maximum blob length to be written or read from EigenDA. Determines the number of SRS points loaded into memory for KZG commitments. Example units: '30MiB', '4Kb', '30MB'. 
Maximum size slightly exceeds 1GB.", - EnvVars: prefixEnvVars("MAX_BLOB_LENGTH"), - Value: "16MiB", - }, - &cli.StringFlag{ - Name: G1PathFlagName, - Usage: "Directory path to g1.point file.", - EnvVars: prefixEnvVars("TARGET_KZG_G1_PATH"), - Value: "resources/g1.point", - }, - &cli.StringFlag{ - Name: G2TauFlagName, - Usage: "Directory path to g2.point.powerOf2 file.", - EnvVars: prefixEnvVars("TARGET_G2_TAU_PATH"), - Value: "resources/g2.point.powerOf2", - }, - &cli.StringFlag{ - Name: CachePathFlagName, - Usage: "Directory path to SRS tables for caching.", - EnvVars: prefixEnvVars("TARGET_CACHE_PATH"), - Value: "resources/SRSTables/", - }, - &cli.BoolFlag{ - Name: CertVerificationEnabledFlagName, - Usage: "Whether to verify certificates received from EigenDA disperser.", - EnvVars: prefixEnvVars("CERT_VERIFICATION_ENABLED"), - // TODO: ideally we'd want this to be turned on by default when eigenda backend is used (memstore.enabled=false) - Value: false, - }, - &cli.StringFlag{ - Name: EthRPCFlagName, - Usage: "JSON RPC node endpoint for the Ethereum network used for finalizing DA blobs.\n" + - "See available list here: https://docs.eigenlayer.xyz/eigenda/networks/\n" + - fmt.Sprintf("Mandatory when %s is true.", CertVerificationEnabledFlagName), - EnvVars: prefixEnvVars("ETH_RPC"), - }, - &cli.StringFlag{ - Name: SvcManagerAddrFlagName, - Usage: "The deployed EigenDA service manager address.\n" + - "The list can be found here: https://github.com/Layr-Labs/eigenlayer-middleware/?tab=readme-ov-file#current-mainnet-deployment\n" + - fmt.Sprintf("Mandatory when %s is true.", CertVerificationEnabledFlagName), - EnvVars: prefixEnvVars("SERVICE_MANAGER_ADDR"), - }, - &cli.Int64Flag{ - Name: EthConfirmationDepthFlagName, - Usage: "The number of Ethereum blocks to wait before considering a submitted blob's DA batch submission confirmed.\n" + - "`0` means wait for inclusion only. 
`-1` means wait for finality.", - EnvVars: prefixEnvVars("ETH_CONFIRMATION_DEPTH"), - Value: -1, - }, - &cli.BoolFlag{ - Name: MemstoreFlagName, - Usage: "Whether to use mem-store for DA logic.", - EnvVars: prefixEnvVars("MEMSTORE_ENABLED"), - }, - &cli.DurationFlag{ - Name: MemstoreExpirationFlagName, - Usage: "Duration that a mem-store blob/commitment pair are allowed to live.", - Value: 25 * time.Minute, - EnvVars: prefixEnvVars("MEMSTORE_EXPIRATION"), - }, - &cli.DurationFlag{ - Name: MemstorePutLatencyFlagName, - Usage: "Artificial latency added for memstore backend to mimic EigenDA's dispersal latency.", - Value: 0, - EnvVars: prefixEnvVars("MEMSTORE_PUT_LATENCY"), - }, - &cli.DurationFlag{ - Name: MemstoreGetLatencyFlagName, - Usage: "Artificial latency added for memstore backend to mimic EigenDA's retrieval latency.", - Value: 0, - EnvVars: prefixEnvVars("MEMSTORE_GET_LATENCY"), - }, - &cli.StringSliceFlag{ - Name: FallbackTargets, - Usage: "List of read fallback targets to rollover to if cert can't be read from EigenDA.", - Value: cli.NewStringSlice(), - EnvVars: prefixEnvVars("FALLBACK_TARGETS"), - }, - &cli.StringSliceFlag{ - Name: CacheTargets, - Usage: "List of caching targets to use fast reads from EigenDA.", - Value: cli.NewStringSlice(), - EnvVars: prefixEnvVars("CACHE_TARGETS"), - }, +func (c CLIConfig) Check() error { + err := c.EigenDAConfig.Check() + if err != nil { + return err } - - flags = append(flags, s3Flags()...) - flags = append(flags, redisFlags()...) 
- return flags + return nil } diff --git a/server/config_test.go b/server/config_test.go index 36ac6244..4d747dd0 100644 --- a/server/config_test.go +++ b/server/config_test.go @@ -4,27 +4,36 @@ import ( "testing" "time" - "github.com/Layr-Labs/eigenda-proxy/store" + "github.com/Layr-Labs/eigenda-proxy/store/generated_key/memstore" + "github.com/Layr-Labs/eigenda-proxy/store/precomputed_key/redis" + "github.com/Layr-Labs/eigenda-proxy/store/precomputed_key/s3" + "github.com/Layr-Labs/eigenda-proxy/utils" + "github.com/Layr-Labs/eigenda-proxy/verify" "github.com/Layr-Labs/eigenda/api/clients" + "github.com/Layr-Labs/eigenda/encoding/kzg" "github.com/stretchr/testify/require" ) func validCfg() *Config { + maxBlobLengthBytes, err := utils.ParseBytesAmount("2MiB") + if err != nil { + panic(err) + } return &Config{ - RedisCfg: store.RedisConfig{ + RedisConfig: redis.Config{ Endpoint: "localhost:6379", Password: "password", DB: 0, Eviction: 10 * time.Minute, }, - S3Config: store.S3Config{ + S3Config: s3.Config{ Bucket: "test-bucket", Path: "", Endpoint: "http://localhost:9000", AccessKeyID: "access-key-id", AccessKeySecret: "access-key-secret", }, - ClientConfig: clients.EigenDAClientConfig{ + EdaClientConfig: clients.EigenDAClientConfig{ RPC: "http://localhost:8545", StatusQueryRetryInterval: 5 * time.Second, StatusQueryTimeout: 30 * time.Minute, @@ -35,16 +44,22 @@ func validCfg() *Config { PutBlobEncodingVersion: 0, DisablePointVerificationMode: false, }, - G1Path: "path/to/g1", - G2PowerOfTauPath: "path/to/g2", - CacheDir: "path/to/cache", - MaxBlobLength: "2MiB", - CertVerificationEnabled: false, - SvcManagerAddr: "0x1234567890abcdef", - EthRPC: "http://localhost:8545", - EthConfirmationDepth: 12, - MemstoreEnabled: true, - MemstoreBlobExpiration: 25 * time.Minute, + VerifierConfig: verify.Config{ + KzgConfig: &kzg.KzgConfig{ + G1Path: "path/to/g1", + G2PowerOf2Path: "path/to/g2", + CacheDir: "path/to/cache", + SRSOrder: maxBlobLengthBytes / 32, + }, + VerifyCerts: 
false, + SvcManagerAddr: "0x1234567890abcdef", + RPCURL: "http://localhost:8545", + EthConfirmationDepth: 12, + }, + MemstoreEnabled: true, + MemstoreConfig: memstore.Config{ + BlobExpiration: 25 * time.Minute, + }, } } @@ -56,22 +71,6 @@ func TestConfigVerification(t *testing.T) { require.NoError(t, err) }) - t.Run("InvalidMaxBlobLength", func(t *testing.T) { - cfg := validCfg() - cfg.MaxBlobLength = "0kzg" - - err := cfg.Check() - require.Error(t, err) - }) - - t.Run("0MaxBlobLength", func(t *testing.T) { - cfg := validCfg() - cfg.MaxBlobLength = "0kib" - - err := cfg.Check() - require.Error(t, err) - }) - t.Run("CertVerificationEnabled", func(t *testing.T) { // when eigenDABackend is enabled (memstore.enabled = false), // some extra fields are required. @@ -79,8 +78,8 @@ func TestConfigVerification(t *testing.T) { cfg := validCfg() // cert verification only makes sense when memstore is disabled (we use eigenda as backend) cfg.MemstoreEnabled = false - cfg.CertVerificationEnabled = true - cfg.SvcManagerAddr = "" + cfg.VerifierConfig.VerifyCerts = true + cfg.VerifierConfig.SvcManagerAddr = "" err := cfg.Check() require.Error(t, err) @@ -90,8 +89,8 @@ func TestConfigVerification(t *testing.T) { cfg := validCfg() // cert verification only makes sense when memstore is disabled (we use eigenda as backend) cfg.MemstoreEnabled = false - cfg.CertVerificationEnabled = true - cfg.EthRPC = "" + cfg.VerifierConfig.VerifyCerts = true + cfg.VerifierConfig.RPCURL = "" err := cfg.Check() require.Error(t, err) @@ -100,7 +99,7 @@ func TestConfigVerification(t *testing.T) { t.Run("CantDoCertVerificationWhenMemstoreEnabled", func(t *testing.T) { cfg := validCfg() cfg.MemstoreEnabled = true - cfg.CertVerificationEnabled = true + cfg.VerifierConfig.VerifyCerts = true err := cfg.Check() require.Error(t, err) @@ -110,7 +109,7 @@ func TestConfigVerification(t *testing.T) { t.Run("MissingS3AccessKeys", func(t *testing.T) { cfg := validCfg() - cfg.S3Config.S3CredentialType = 
store.S3CredentialStatic + cfg.S3Config.CredentialType = s3.CredentialTypeStatic cfg.S3Config.Endpoint = "http://localhost:9000" cfg.S3Config.AccessKeyID = "" @@ -121,7 +120,7 @@ func TestConfigVerification(t *testing.T) { t.Run("MissingS3Credential", func(t *testing.T) { cfg := validCfg() - cfg.S3Config.S3CredentialType = store.S3CredentialUnknown + cfg.S3Config.CredentialType = s3.CredentialTypeUnknown err := cfg.Check() require.Error(t, err) @@ -129,7 +128,7 @@ func TestConfigVerification(t *testing.T) { t.Run("MissingEigenDADisperserRPC", func(t *testing.T) { cfg := validCfg() - cfg.ClientConfig.RPC = "" + cfg.EdaClientConfig.RPC = "" cfg.MemstoreEnabled = false err := cfg.Check() @@ -186,7 +185,7 @@ func TestConfigVerification(t *testing.T) { t.Run("BadRedisConfiguration", func(t *testing.T) { cfg := validCfg() - cfg.RedisCfg.Endpoint = "" + cfg.RedisConfig.Endpoint = "" err := cfg.Check() require.Error(t, err) diff --git a/server/flags.go b/server/flags.go deleted file mode 100644 index 9046e3b7..00000000 --- a/server/flags.go +++ /dev/null @@ -1,68 +0,0 @@ -package server - -import ( - "github.com/Layr-Labs/eigenda-proxy/store" - "github.com/urfave/cli/v2" - - opservice "github.com/ethereum-optimism/optimism/op-service" - oplog "github.com/ethereum-optimism/optimism/op-service/log" - opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" -) - -const ( - ListenAddrFlagName = "addr" - PortFlagName = "port" -) - -const EnvVarPrefix = "EIGENDA_PROXY" - -func prefixEnvVars(name string) []string { - return opservice.PrefixEnvVar(EnvVarPrefix, name) -} - -// Flags contains the list of configuration options available to the binary. 
-var Flags = []cli.Flag{ - &cli.StringFlag{ - Name: ListenAddrFlagName, - Usage: "server listening address", - Value: "0.0.0.0", - EnvVars: prefixEnvVars("ADDR"), - }, - &cli.IntFlag{ - Name: PortFlagName, - Usage: "server listening port", - Value: 3100, - EnvVars: prefixEnvVars("PORT"), - }, -} - -func init() { - Flags = append(Flags, oplog.CLIFlags(EnvVarPrefix)...) - Flags = append(Flags, CLIFlags()...) - Flags = append(Flags, opmetrics.CLIFlags(EnvVarPrefix)...) -} - -type CLIConfig struct { - RedisCfg store.RedisConfig - S3Config store.S3Config - EigenDAConfig Config - MetricsCfg opmetrics.CLIConfig -} - -func ReadCLIConfig(ctx *cli.Context) CLIConfig { - config := ReadConfig(ctx) - return CLIConfig{ - RedisCfg: config.RedisCfg, - EigenDAConfig: config, - MetricsCfg: opmetrics.ReadCLIConfig(ctx), - S3Config: config.S3Config, - } -} - -func (c CLIConfig) Check() error { - err := c.EigenDAConfig.Check() - if err != nil { - return err - } - return nil -} diff --git a/server/load_store.go b/server/load_store.go index 53e798ca..0c042b1b 100644 --- a/server/load_store.go +++ b/server/load_store.go @@ -5,26 +5,30 @@ import ( "fmt" "github.com/Layr-Labs/eigenda-proxy/store" + "github.com/Layr-Labs/eigenda-proxy/store/generated_key/eigenda" + "github.com/Layr-Labs/eigenda-proxy/store/generated_key/memstore" + "github.com/Layr-Labs/eigenda-proxy/store/precomputed_key/redis" + "github.com/Layr-Labs/eigenda-proxy/store/precomputed_key/s3" "github.com/Layr-Labs/eigenda-proxy/verify" "github.com/Layr-Labs/eigenda/api/clients" "github.com/ethereum/go-ethereum/log" ) // populateTargets ... 
creates a list of storage backends based on the provided target strings -func populateTargets(targets []string, s3 store.PrecomputedKeyStore, redis *store.RedStore) []store.PrecomputedKeyStore { +func populateTargets(targets []string, s3 store.PrecomputedKeyStore, redis *redis.Store) []store.PrecomputedKeyStore { stores := make([]store.PrecomputedKeyStore, len(targets)) for i, f := range targets { b := store.StringToBackendType(f) switch b { - case store.Redis: + case store.RedisBackendType: stores[i] = redis - case store.S3: + case store.S3BackendType: stores[i] = s3 - case store.EigenDA, store.Memory: + case store.EigenDABackendType, store.MemoryBackendType: panic(fmt.Sprintf("Invalid target for fallback: %s", f)) case store.Unknown: @@ -42,21 +46,21 @@ func populateTargets(targets []string, s3 store.PrecomputedKeyStore, redis *stor func LoadStoreRouter(ctx context.Context, cfg CLIConfig, log log.Logger) (store.IRouter, error) { // create S3 backend store (if enabled) var err error - var s3 store.PrecomputedKeyStore - var redis *store.RedStore + var s3Store store.PrecomputedKeyStore + var redisStore *redis.Store - if cfg.S3Config.Bucket != "" && cfg.S3Config.Endpoint != "" { + if cfg.EigenDAConfig.S3Config.Bucket != "" && cfg.EigenDAConfig.S3Config.Endpoint != "" { log.Info("Using S3 backend") - s3, err = store.NewS3(cfg.S3Config) + s3Store, err = s3.NewS3(cfg.EigenDAConfig.S3Config) if err != nil { return nil, fmt.Errorf("failed to create S3 store: %w", err) } } - if cfg.RedisCfg.Endpoint != "" { + if cfg.EigenDAConfig.RedisConfig.Endpoint != "" { log.Info("Using Redis backend") // create Redis backend store - redis, err = store.NewRedisStore(&cfg.RedisCfg) + redisStore, err = redis.NewStore(&cfg.EigenDAConfig.RedisConfig) if err != nil { return nil, fmt.Errorf("failed to create Redis store: %w", err) } @@ -64,9 +68,9 @@ func LoadStoreRouter(ctx context.Context, cfg CLIConfig, log log.Logger) (store. 
// create cert/data verification type daCfg := cfg.EigenDAConfig - vCfg := daCfg.VerificationCfg() + vCfg := daCfg.VerifierConfig - verifier, err := verify.NewVerifier(vCfg, log) + verifier, err := verify.NewVerifier(&vCfg, log) if err != nil { return nil, fmt.Errorf("failed to create verifier: %w", err) } @@ -77,37 +81,27 @@ func LoadStoreRouter(ctx context.Context, cfg CLIConfig, log log.Logger) (store. log.Warn("Verification disabled") } - maxBlobLength, err := daCfg.GetMaxBlobLength() - if err != nil { - return nil, err - } - // create EigenDA backend store - var eigenda store.KeyGeneratedStore + var eigenDA store.GeneratedKeyStore if cfg.EigenDAConfig.MemstoreEnabled { log.Info("Using mem-store backend for EigenDA") - eigenda, err = store.NewMemStore(ctx, verifier, log, store.MemStoreConfig{ - MaxBlobSizeBytes: maxBlobLength, - BlobExpiration: cfg.EigenDAConfig.MemstoreBlobExpiration, - PutLatency: cfg.EigenDAConfig.MemstorePutLatency, - GetLatency: cfg.EigenDAConfig.MemstoreGetLatency, - }) + eigenDA, err = memstore.New(ctx, verifier, log, cfg.EigenDAConfig.MemstoreConfig) } else { var client *clients.EigenDAClient log.Info("Using EigenDA backend") - client, err = clients.NewEigenDAClient(log.With("subsystem", "eigenda-client"), daCfg.ClientConfig) + client, err = clients.NewEigenDAClient(log.With("subsystem", "eigenda-client"), daCfg.EdaClientConfig) if err != nil { return nil, err } - eigenda, err = store.NewEigenDAStore( + eigenDA, err = eigenda.NewStore( client, verifier, log, - &store.EigenDAStoreConfig{ - MaxBlobSizeBytes: maxBlobLength, - EthConfirmationDepth: uint64(cfg.EigenDAConfig.EthConfirmationDepth), // #nosec G115 - StatusQueryTimeout: cfg.EigenDAConfig.ClientConfig.StatusQueryTimeout, + &eigenda.StoreConfig{ + MaxBlobSizeBytes: cfg.EigenDAConfig.MemstoreConfig.MaxBlobSizeBytes, + EthConfirmationDepth: cfg.EigenDAConfig.VerifierConfig.EthConfirmationDepth, + StatusQueryTimeout: cfg.EigenDAConfig.EdaClientConfig.StatusQueryTimeout, }, ) } @@ 
-117,9 +111,9 @@ func LoadStoreRouter(ctx context.Context, cfg CLIConfig, log log.Logger) (store. } // determine read fallbacks - fallbacks := populateTargets(cfg.EigenDAConfig.FallbackTargets, s3, redis) - caches := populateTargets(cfg.EigenDAConfig.CacheTargets, s3, redis) + fallbacks := populateTargets(cfg.EigenDAConfig.FallbackTargets, s3Store, redisStore) + caches := populateTargets(cfg.EigenDAConfig.CacheTargets, s3Store, redisStore) - log.Info("Creating storage router", "eigenda backend type", eigenda != nil, "s3 backend type", s3 != nil) - return store.NewRouter(eigenda, s3, log, caches, fallbacks) + log.Info("Creating storage router", "eigenda backend type", eigenDA != nil, "s3 backend type", s3Store != nil) + return store.NewRouter(eigenDA, s3Store, log, caches, fallbacks) } diff --git a/store/eigenda.go b/store/generated_key/eigenda/eigenda.go similarity index 82% rename from store/eigenda.go rename to store/generated_key/eigenda/eigenda.go index c1811a05..05e8a4f8 100644 --- a/store/eigenda.go +++ b/store/generated_key/eigenda/eigenda.go @@ -1,4 +1,4 @@ -package store +package eigenda import ( "context" @@ -6,13 +6,14 @@ import ( "fmt" "time" + "github.com/Layr-Labs/eigenda-proxy/store" "github.com/Layr-Labs/eigenda-proxy/verify" "github.com/Layr-Labs/eigenda/api/clients" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" ) -type EigenDAStoreConfig struct { +type StoreConfig struct { MaxBlobSizeBytes uint64 // the # of Ethereum blocks to wait after the EigenDA L1BlockReference # before attempting to verify // & accredit a blob @@ -22,19 +23,19 @@ type EigenDAStoreConfig struct { StatusQueryTimeout time.Duration } -// EigenDAStore does storage interactions and verifications for blobs with DA. -type EigenDAStore struct { +// Store does storage interactions and verifications for blobs with DA. 
+type Store struct { client *clients.EigenDAClient verifier *verify.Verifier - cfg *EigenDAStoreConfig + cfg *StoreConfig log log.Logger } -var _ KeyGeneratedStore = (*EigenDAStore)(nil) +var _ store.GeneratedKeyStore = (*Store)(nil) -func NewEigenDAStore(client *clients.EigenDAClient, - v *verify.Verifier, log log.Logger, cfg *EigenDAStoreConfig) (*EigenDAStore, error) { - return &EigenDAStore{ +func NewStore(client *clients.EigenDAClient, + v *verify.Verifier, log log.Logger, cfg *StoreConfig) (*Store, error) { + return &Store{ client: client, verifier: v, log: log, @@ -44,7 +45,7 @@ func NewEigenDAStore(client *clients.EigenDAClient, // Get fetches a blob from DA using certificate fields and verifies blob // against commitment to ensure data is valid and non-tampered. -func (e EigenDAStore) Get(ctx context.Context, key []byte) ([]byte, error) { +func (e Store) Get(ctx context.Context, key []byte) ([]byte, error) { var cert verify.Certificate err := rlp.DecodeBytes(key, &cert) if err != nil { @@ -60,13 +61,13 @@ func (e EigenDAStore) Get(ctx context.Context, key []byte) ([]byte, error) { } // Put disperses a blob for some pre-image and returns the associated RLP encoded certificate commit. 
-func (e EigenDAStore) Put(ctx context.Context, value []byte) ([]byte, error) { +func (e Store) Put(ctx context.Context, value []byte) ([]byte, error) { encodedBlob, err := e.client.GetCodec().EncodeBlob(value) if err != nil { return nil, fmt.Errorf("EigenDA client failed to re-encode blob: %w", err) } if uint64(len(encodedBlob)) > e.cfg.MaxBlobSizeBytes { - return nil, fmt.Errorf("%w: blob length %d, max blob size %d", ErrProxyOversizedBlob, len(value), e.cfg.MaxBlobSizeBytes) + return nil, fmt.Errorf("%w: blob length %d, max blob size %d", store.ErrProxyOversizedBlob, len(value), e.cfg.MaxBlobSizeBytes) } dispersalStart := time.Now() @@ -116,18 +117,18 @@ func (e EigenDAStore) Put(ctx context.Context, value []byte) ([]byte, error) { } // Entries are a no-op for EigenDA Store -func (e EigenDAStore) Stats() *Stats { +func (e Store) Stats() *store.Stats { return nil } // Backend returns the backend type for EigenDA Store -func (e EigenDAStore) BackendType() BackendType { - return EigenDA +func (e Store) BackendType() store.BackendType { + return store.EigenDABackendType } // Key is used to recover certificate fields and that verifies blob // against commitment to ensure data is valid and non-tampered. 
-func (e EigenDAStore) Verify(key []byte, value []byte) error { +func (e Store) Verify(key []byte, value []byte) error { var cert verify.Certificate err := rlp.DecodeBytes(key, &cert) if err != nil { diff --git a/store/generated_key/memstore/cli.go b/store/generated_key/memstore/cli.go new file mode 100644 index 00000000..250482e4 --- /dev/null +++ b/store/generated_key/memstore/cli.go @@ -0,0 +1,70 @@ +package memstore + +import ( + "time", + + "github.com/Layr-Labs/eigenda-proxy/verify" + "github.com/urfave/cli/v2" +) + +var ( + EnabledFlagName = withFlagPrefix("enabled") + ExpirationFlagName = withFlagPrefix("expiration") + PutLatencyFlagName = withFlagPrefix("put-latency") + GetLatencyFlagName = withFlagPrefix("get-latency") +) + +func withFlagPrefix(s string) string { + return "memstore." + s +} + +func withEnvPrefix(envPrefix, s string) []string { + return []string{envPrefix + "_MEMSTORE_" + s} +} + +// CLIFlags ... used for memstore configuration +// category is used to group the flags in the help output (see https://cli.urfave.org/v2/examples/flags/#grouping) +func CLIFlags(envPrefix, category string) []cli.Flag { + return []cli.Flag{ + &cli.BoolFlag{ + Name: EnabledFlagName, + Usage: "Whether to use mem-store for DA logic.", + EnvVars: withEnvPrefix(envPrefix, "ENABLED"), + Category: category, + }, + &cli.DurationFlag{ + Name: ExpirationFlagName, + Usage: "Duration that a memstore blob/commitment pair is allowed to live.", + Value: 25 * time.Minute, + EnvVars: withEnvPrefix(envPrefix, "EXPIRATION"), + Category: category, + }, + &cli.DurationFlag{ + Name: PutLatencyFlagName, + Usage: "Artificial latency added for memstore backend to mimic EigenDA's dispersal latency.", + Value: 0, + EnvVars: withEnvPrefix(envPrefix, "PUT_LATENCY"), + Category: category, + }, + &cli.DurationFlag{ + Name: GetLatencyFlagName, + Usage: "Artificial latency added for memstore backend to mimic EigenDA's retrieval latency.", + Value: 0, + EnvVars: withEnvPrefix(envPrefix, 
"GET_LATENCY"), + Category: category, + }, + } +} + +func ReadConfig(ctx *cli.Context) Config { + return Config{ + // TODO: there has to be a better way to get MaxBlobLengthBytes + // right now we get it from the verifier cli, but there's probably a way to share flags more nicely? + // maybe use a duplicate but hidden flag in memstore category, and set it using the action by reading + // from the other flag? + MaxBlobSizeBytes: verify.MaxBlobLengthBytes, + BlobExpiration: ctx.Duration(ExpirationFlagName), + PutLatency: ctx.Duration(PutLatencyFlagName), + GetLatency: ctx.Duration(GetLatencyFlagName), + } +} diff --git a/store/memory.go b/store/generated_key/memstore/memstore.go similarity index 92% rename from store/memory.go rename to store/generated_key/memstore/memstore.go index 03b80f70..a25eac6f 100644 --- a/store/memory.go +++ b/store/generated_key/memstore/memstore.go @@ -1,4 +1,4 @@ -package store +package memstore import ( "context" @@ -11,6 +11,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" + "github.com/Layr-Labs/eigenda-proxy/store" "github.com/Layr-Labs/eigenda-proxy/verify" "github.com/Layr-Labs/eigenda/api/clients/codecs" "github.com/Layr-Labs/eigenda/api/grpc/common" @@ -22,7 +23,7 @@ const ( DefaultPruneInterval = 500 * time.Millisecond ) -type MemStoreConfig struct { +type Config struct { MaxBlobSizeBytes uint64 BlobExpiration time.Duration // artificial latency added for memstore backend to mimic eigenda's latency @@ -38,7 +39,7 @@ EigenDA operators. type MemStore struct { sync.RWMutex - config MemStoreConfig + config Config l log.Logger keyStarts map[string]time.Time store map[string][]byte @@ -48,11 +49,11 @@ type MemStore struct { reads int } -var _ KeyGeneratedStore = (*MemStore)(nil) +var _ store.GeneratedKeyStore = (*MemStore)(nil) -// NewMemStore ... constructor -func NewMemStore( - ctx context.Context, verifier *verify.Verifier, l log.Logger, config MemStoreConfig, +// New ... 
constructor +func New( + ctx context.Context, verifier *verify.Verifier, l log.Logger, config Config, ) (*MemStore, error) { store := &MemStore{ l: l, @@ -134,7 +135,7 @@ func (e *MemStore) Get(_ context.Context, commit []byte) ([]byte, error) { func (e *MemStore) Put(_ context.Context, value []byte) ([]byte, error) { time.Sleep(e.config.PutLatency) if uint64(len(value)) > e.config.MaxBlobSizeBytes { - return nil, fmt.Errorf("%w: blob length %d, max blob size %d", ErrProxyOversizedBlob, len(value), e.config.MaxBlobSizeBytes) + return nil, fmt.Errorf("%w: blob length %d, max blob size %d", store.ErrProxyOversizedBlob, len(value), e.config.MaxBlobSizeBytes) } e.Lock() @@ -222,15 +223,15 @@ func (e *MemStore) Verify(_, _ []byte) error { } // Stats ... returns the current usage metrics of the in-memory key-value data store. -func (e *MemStore) Stats() *Stats { +func (e *MemStore) Stats() *store.Stats { e.RLock() defer e.RUnlock() - return &Stats{ + return &store.Stats{ Entries: len(e.store), Reads: e.reads, } } -func (e *MemStore) BackendType() BackendType { - return Memory +func (e *MemStore) BackendType() store.BackendType { + return store.MemoryBackendType } diff --git a/store/memory_test.go b/store/generated_key/memstore/memstore_test.go similarity index 88% rename from store/memory_test.go rename to store/generated_key/memstore/memstore_test.go index 63103fba..a8030515 100644 --- a/store/memory_test.go +++ b/store/generated_key/memstore/memstore_test.go @@ -1,4 +1,4 @@ -package store +package memstore import ( "context" @@ -16,8 +16,8 @@ const ( testPreimage = "Four score and seven years ago" ) -func getDefaultMemStoreTestConfig() MemStoreConfig { - return MemStoreConfig{ +func getDefaultMemStoreTestConfig() Config { + return Config{ MaxBlobSizeBytes: 1024 * 1024, BlobExpiration: 0, PutLatency: 0, @@ -29,9 +29,9 @@ func getDefaultVerifierTestConfig() *verify.Config { return &verify.Config{ VerifyCerts: false, KzgConfig: &kzg.KzgConfig{ - G1Path: 
"../resources/g1.point", - G2PowerOf2Path: "../resources/g2.point.powerOf2", - CacheDir: "../resources/SRSTables", + G1Path: "../../../resources/g1.point", + G2PowerOf2Path: "../../../resources/g2.point.powerOf2", + CacheDir: "../../..resources/SRSTables", SRSOrder: 3000, SRSNumberToLoad: 3000, NumWorker: uint64(runtime.GOMAXPROCS(0)), @@ -46,7 +46,7 @@ func TestGetSet(t *testing.T) { verifier, err := verify.NewVerifier(getDefaultVerifierTestConfig(), nil) require.NoError(t, err) - ms, err := NewMemStore( + ms, err := New( ctx, verifier, log.New(), @@ -75,7 +75,7 @@ func TestExpiration(t *testing.T) { memstoreConfig := getDefaultMemStoreTestConfig() memstoreConfig.BlobExpiration = 10 * time.Millisecond - ms, err := NewMemStore( + ms, err := New( ctx, verifier, log.New(), @@ -111,7 +111,7 @@ func TestLatency(t *testing.T) { config := getDefaultMemStoreTestConfig() config.PutLatency = putLatency config.GetLatency = getLatency - ms, err := NewMemStore(ctx, verifier, log.New(), config) + ms, err := New(ctx, verifier, log.New(), config) require.NoError(t, err) diff --git a/store/precomputed_key/redis/cli.go b/store/precomputed_key/redis/cli.go new file mode 100644 index 00000000..423d6f8f --- /dev/null +++ b/store/precomputed_key/redis/cli.go @@ -0,0 +1,64 @@ +package redis + +import ( + "time" + + "github.com/urfave/cli/v2" +) + +var ( + EndpointFlagName = withFlagPrefix("endpoint") + PasswordFlagName = withFlagPrefix("password") + DBFlagName = withFlagPrefix("db") + EvictionFlagName = withFlagPrefix("eviction") +) + +func withFlagPrefix(s string) string { + return "redis." + s +} + +func withEnvPrefix(envPrefix, s string) []string { + return []string{envPrefix + "_REDIS_" + s} +} + +// CLIFlags ... 
used for Redis backend configuration +// category is used to group the flags in the help output (see https://cli.urfave.org/v2/examples/flags/#grouping) +func CLIFlags(envPrefix, category string) []cli.Flag { + return []cli.Flag{ + &cli.StringFlag{ + Name: EndpointFlagName, + Usage: "Redis endpoint", + EnvVars: withEnvPrefix(envPrefix, "ENDPOINT"), + Category: category, + }, + &cli.StringFlag{ + Name: PasswordFlagName, + Usage: "Redis password", + EnvVars: withEnvPrefix(envPrefix, "PASSWORD"), + Category: category, + }, + &cli.IntFlag{ + Name: DBFlagName, + Usage: "Redis database", + Value: 0, + EnvVars: withEnvPrefix(envPrefix, "DB"), + Category: category, + }, + &cli.DurationFlag{ + Name: EvictionFlagName, + Usage: "Redis eviction time", + Value: 24 * time.Hour, + EnvVars: withEnvPrefix(envPrefix, "EVICTION"), + Category: category, + }, + } +} + +func ReadConfig(ctx *cli.Context) Config { + return Config{ + Endpoint: ctx.String(EndpointFlagName), + Password: ctx.String(PasswordFlagName), + DB: ctx.Int(DBFlagName), + Eviction: ctx.Duration(EvictionFlagName), + } +} diff --git a/store/redis.go b/store/precomputed_key/redis/redis.go similarity index 67% rename from store/redis.go rename to store/precomputed_key/redis/redis.go index 4985698e..6b2975ac 100644 --- a/store/redis.go +++ b/store/precomputed_key/redis/redis.go @@ -1,4 +1,4 @@ -package store +package redis import ( "context" @@ -6,11 +6,12 @@ import ( "fmt" "time" + "github.com/Layr-Labs/eigenda-proxy/store" "github.com/go-redis/redis/v8" ) -// RedisConfig ... user configurable -type RedisConfig struct { +// Config ... user configurable +type Config struct { Endpoint string Password string DB int @@ -18,8 +19,8 @@ type RedisConfig struct { Profile bool } -// RedStore ... Redis storage backend implementation (This not safe for concurrent usage) -type RedStore struct { +// Store ... 
Redis storage backend implementation (This not safe for concurrent usage) +type Store struct { eviction time.Duration client *redis.Client @@ -29,10 +30,10 @@ type RedStore struct { entries int } -var _ PrecomputedKeyStore = (*RedStore)(nil) +var _ store.PrecomputedKeyStore = (*Store)(nil) -// NewRedisStore ... constructor -func NewRedisStore(cfg *RedisConfig) (*RedStore, error) { +// NewStore ... constructor +func NewStore(cfg *Config) (*Store, error) { client := redis.NewClient(&redis.Options{ Addr: cfg.Endpoint, Password: cfg.Password, @@ -48,7 +49,7 @@ func NewRedisStore(cfg *RedisConfig) (*RedStore, error) { return nil, fmt.Errorf("failed to ping redis server: %w", cmd.Err()) } - return &RedStore{ + return &Store{ eviction: cfg.Eviction, client: client, profile: cfg.Profile, @@ -58,7 +59,7 @@ func NewRedisStore(cfg *RedisConfig) (*RedStore, error) { // Get ... retrieves a value from the Redis store. Returns nil if the key is not found vs. an error // if the key is found but the value is not retrievable. -func (r *RedStore) Get(ctx context.Context, key []byte) ([]byte, error) { +func (r *Store) Get(ctx context.Context, key []byte) ([]byte, error) { value, err := r.client.Get(ctx, string(key)).Result() if errors.Is(err, redis.Nil) { // key DNE return nil, nil @@ -75,7 +76,7 @@ func (r *RedStore) Get(ctx context.Context, key []byte) ([]byte, error) { } // Put ... 
inserts a value into the Redis store -func (r *RedStore) Put(ctx context.Context, key []byte, value []byte) error { +func (r *Store) Put(ctx context.Context, key []byte, value []byte) error { err := r.client.Set(ctx, string(key), string(value), r.eviction).Err() if err == nil && r.profile { r.entries++ @@ -84,16 +85,16 @@ func (r *RedStore) Put(ctx context.Context, key []byte, value []byte) error { return err } -func (r *RedStore) Verify(_ []byte, _ []byte) error { +func (r *Store) Verify(_ []byte, _ []byte) error { return nil } -func (r *RedStore) BackendType() BackendType { - return Redis +func (r *Store) BackendType() store.BackendType { + return store.RedisBackendType } -func (r *RedStore) Stats() *Stats { - return &Stats{ +func (r *Store) Stats() *store.Stats { + return &store.Stats{ Entries: r.entries, Reads: r.reads, } diff --git a/store/precomputed_key/s3/cli.go b/store/precomputed_key/s3/cli.go new file mode 100644 index 00000000..4e2d0be2 --- /dev/null +++ b/store/precomputed_key/s3/cli.go @@ -0,0 +1,96 @@ +package s3 + +import ( + "time" + + "github.com/urfave/cli/v2" +) + +var ( + EndpointFlagName = withFlagPrefix("endpoint") + CredentialTypeFlagName = withFlagPrefix("credential-type") + AccessKeyIDFlagName = withFlagPrefix("access-key-id") // #nosec G101 + AccessKeySecretFlagName = withFlagPrefix("access-key-secret") // #nosec G101 + BucketFlagName = withFlagPrefix("bucket") + PathFlagName = withFlagPrefix("path") + BackupFlagName = withFlagPrefix("backup") + TimeoutFlagName = withFlagPrefix("timeout") +) + +func withFlagPrefix(s string) string { + return "s3." + s +} + +func withEnvPrefix(envPrefix, s string) []string { + return []string{envPrefix + "_S3_" + s} +} + +// CLIFlags ... 
used for S3 backend configuration +// category is used to group the flags in the help output (see https://cli.urfave.org/v2/examples/flags/#grouping) +func CLIFlags(envPrefix, category string) []cli.Flag { + return []cli.Flag{ + &cli.StringFlag{ + Name: EndpointFlagName, + Usage: "endpoint for S3 storage", + EnvVars: withEnvPrefix(envPrefix, "ENDPOINT"), + Category: category, + }, + &cli.StringFlag{ + Name: CredentialTypeFlagName, + Usage: "The way to authenticate to S3, options are [iam, static]", + EnvVars: withEnvPrefix(envPrefix, "CREDENTIAL_TYPE"), + Category: category, + }, + &cli.StringFlag{ + Name: AccessKeyIDFlagName, + Usage: "access key id for S3 storage", + EnvVars: withEnvPrefix(envPrefix, "ACCESS_KEY_ID"), + Category: category, + }, + &cli.StringFlag{ + Name: AccessKeySecretFlagName, + Usage: "access key secret for S3 storage", + EnvVars: withEnvPrefix(envPrefix, "ACCESS_KEY_SECRET"), + Category: category, + }, + &cli.StringFlag{ + Name: BucketFlagName, + Usage: "bucket name for S3 storage", + EnvVars: withEnvPrefix(envPrefix, "BUCKET"), + Category: category, + }, + &cli.StringFlag{ + Name: PathFlagName, + Usage: "path for S3 storage", + EnvVars: withEnvPrefix(envPrefix, "PATH"), + Category: category, + }, + &cli.BoolFlag{ + Name: BackupFlagName, + Usage: "whether to use S3 as a backup store to ensure resiliency in case of EigenDA read failure", + Value: false, + EnvVars: withEnvPrefix(envPrefix, "BACKUP"), + Category: category, + }, + &cli.DurationFlag{ + Name: TimeoutFlagName, + Usage: "timeout for S3 storage operations (e.g. 
get, put)", + Value: 5 * time.Second, + EnvVars: withEnvPrefix(envPrefix, "TIMEOUT"), + Category: category, + }, + } +} + +func ReadConfig(ctx *cli.Context) Config { + return Config{ + CredentialType: StringToCredentialType(ctx.String(CredentialTypeFlagName)), + Endpoint: ctx.String(EndpointFlagName), + AccessKeyID: ctx.String(AccessKeyIDFlagName), + AccessKeySecret: ctx.String(AccessKeySecretFlagName), + Bucket: ctx.String(BucketFlagName), + Path: ctx.String(PathFlagName), + Backup: ctx.Bool(BackupFlagName), + Timeout: ctx.Duration(TimeoutFlagName), + } +} diff --git a/store/s3.go b/store/precomputed_key/s3/s3.go similarity index 55% rename from store/s3.go rename to store/precomputed_key/s3/s3.go index bfcd1802..6213245d 100644 --- a/store/s3.go +++ b/store/precomputed_key/s3/s3.go @@ -1,4 +1,4 @@ -package store +package s3 import ( "bytes" @@ -9,6 +9,7 @@ import ( "path" "time" + "github.com/Layr-Labs/eigenda-proxy/store" "github.com/ethereum/go-ethereum/crypto" "github.com/minio/minio-go/v7" @@ -16,44 +17,44 @@ import ( ) const ( - S3CredentialStatic S3CredentialType = "static" - S3CredentialIAM S3CredentialType = "iam" - S3CredentialUnknown S3CredentialType = "unknown" + CredentialTypeStatic CredentialType = "static" + CredentialTypeIAM CredentialType = "iam" + CredentialTypeUnknown CredentialType = "unknown" ) -func StringToS3CredentialType(s string) S3CredentialType { +func StringToCredentialType(s string) CredentialType { switch s { case "static": - return S3CredentialStatic + return CredentialTypeStatic case "iam": - return S3CredentialIAM + return CredentialTypeIAM default: - return S3CredentialUnknown + return CredentialTypeUnknown } } -var _ PrecomputedKeyStore = (*S3Store)(nil) - -type S3CredentialType string -type S3Config struct { - S3CredentialType S3CredentialType - Bucket string - Path string - Endpoint string - AccessKeyID string - AccessKeySecret string - Profiling bool - Backup bool - Timeout time.Duration +var _ store.PrecomputedKeyStore = 
(*Store)(nil) + +type CredentialType string +type Config struct { + CredentialType CredentialType + Endpoint string + AccessKeyID string + AccessKeySecret string + Bucket string + Path string + Backup bool + Timeout time.Duration + Profiling bool } -type S3Store struct { - cfg S3Config +type Store struct { + cfg Config client *minio.Client - stats *Stats + stats *store.Stats } -func NewS3(cfg S3Config) (*S3Store, error) { +func NewS3(cfg Config) (*Store, error) { client, err := minio.New(cfg.Endpoint, &minio.Options{ Creds: creds(cfg), Secure: false, @@ -62,17 +63,17 @@ func NewS3(cfg S3Config) (*S3Store, error) { return nil, err } - return &S3Store{ + return &Store{ cfg: cfg, client: client, - stats: &Stats{ + stats: &store.Stats{ Entries: 0, Reads: 0, }, }, nil } -func (s *S3Store) Get(ctx context.Context, key []byte) ([]byte, error) { +func (s *Store) Get(ctx context.Context, key []byte) ([]byte, error) { result, err := s.client.GetObject(ctx, s.cfg.Bucket, path.Join(s.cfg.Path, hex.EncodeToString(key)), minio.GetObjectOptions{}) if err != nil { errResponse := minio.ToErrorResponse(err) @@ -94,7 +95,7 @@ func (s *S3Store) Get(ctx context.Context, key []byte) ([]byte, error) { return data, nil } -func (s *S3Store) Put(ctx context.Context, key []byte, value []byte) error { +func (s *Store) Put(ctx context.Context, key []byte, value []byte) error { _, err := s.client.PutObject(ctx, s.cfg.Bucket, path.Join(s.cfg.Path, hex.EncodeToString(key)), bytes.NewReader(value), int64(len(value)), minio.PutObjectOptions{}) if err != nil { return err @@ -107,7 +108,7 @@ func (s *S3Store) Put(ctx context.Context, key []byte, value []byte) error { return nil } -func (s *S3Store) Verify(key []byte, value []byte) error { +func (s *Store) Verify(key []byte, value []byte) error { h := crypto.Keccak256Hash(value) if !bytes.Equal(h[:], key) { return errors.New("key does not match value") @@ -116,16 +117,16 @@ func (s *S3Store) Verify(key []byte, value []byte) error { return nil } -func 
(s *S3Store) Stats() *Stats { +func (s *Store) Stats() *store.Stats { return s.stats } -func (s *S3Store) BackendType() BackendType { - return S3 +func (s *Store) BackendType() store.BackendType { + return store.S3BackendType } -func creds(cfg S3Config) *credentials.Credentials { - if cfg.S3CredentialType == S3CredentialIAM { +func creds(cfg Config) *credentials.Credentials { + if cfg.CredentialType == CredentialTypeIAM { return credentials.NewIAM("") } return credentials.NewStaticV4(cfg.AccessKeyID, cfg.AccessKeySecret, "") diff --git a/store/router.go b/store/router.go index ad2805cc..6ee35aa2 100644 --- a/store/router.go +++ b/store/router.go @@ -17,7 +17,7 @@ type IRouter interface { Get(ctx context.Context, key []byte, cm commitments.CommitmentMode) ([]byte, error) Put(ctx context.Context, cm commitments.CommitmentMode, key, value []byte) ([]byte, error) - GetEigenDAStore() KeyGeneratedStore + GetEigenDAStore() GeneratedKeyStore GetS3Store() PrecomputedKeyStore Caches() []PrecomputedKeyStore Fallbacks() []PrecomputedKeyStore @@ -26,7 +26,7 @@ type IRouter interface { // Router ... storage backend routing layer type Router struct { log log.Logger - eigenda KeyGeneratedStore + eigenda GeneratedKeyStore s3 PrecomputedKeyStore caches []PrecomputedKeyStore @@ -36,7 +36,7 @@ type Router struct { fallbackLock sync.RWMutex } -func NewRouter(eigenda KeyGeneratedStore, s3 PrecomputedKeyStore, l log.Logger, +func NewRouter(eigenda GeneratedKeyStore, s3 PrecomputedKeyStore, l log.Logger, caches []PrecomputedKeyStore, fallbacks []PrecomputedKeyStore) (IRouter, error) { return &Router{ log: l, @@ -252,7 +252,7 @@ func (r *Router) cacheEnabled() bool { } // GetEigenDAStore ... 
-func (r *Router) GetEigenDAStore() KeyGeneratedStore { +func (r *Router) GetEigenDAStore() GeneratedKeyStore { return r.eigenda } diff --git a/store/store.go b/store/store.go index c48c89d7..0b757896 100644 --- a/store/store.go +++ b/store/store.go @@ -9,10 +9,10 @@ import ( type BackendType uint8 const ( - EigenDA BackendType = iota - Memory - S3 - Redis + EigenDABackendType BackendType = iota + MemoryBackendType + S3BackendType + RedisBackendType Unknown ) @@ -24,13 +24,13 @@ var ( func (b BackendType) String() string { switch b { - case EigenDA: + case EigenDABackendType: return "EigenDA" - case Memory: + case MemoryBackendType: return "Memory" - case S3: + case S3BackendType: return "S3" - case Redis: + case RedisBackendType: return "Redis" case Unknown: fallthrough @@ -44,13 +44,13 @@ func StringToBackendType(s string) BackendType { switch lower { case "eigenda": - return EigenDA + return EigenDABackendType case "memory": - return Memory + return MemoryBackendType case "s3": - return S3 + return S3BackendType case "redis": - return Redis + return RedisBackendType case "unknown": fallthrough default: @@ -73,7 +73,7 @@ type Store interface { Verify(key []byte, value []byte) error } -type KeyGeneratedStore interface { +type GeneratedKeyStore interface { Store // Get retrieves the given key if it's present in the key-value data store. Get(ctx context.Context, key []byte) ([]byte, error) diff --git a/verify/cli.go b/verify/cli.go new file mode 100644 index 00000000..3c97c694 --- /dev/null +++ b/verify/cli.go @@ -0,0 +1,151 @@ +package verify + +import ( + "fmt" + "runtime" + + "github.com/Layr-Labs/eigenda-proxy/utils" + "github.com/Layr-Labs/eigenda/encoding/kzg" + "github.com/urfave/cli/v2" +) + +var ( + BytesPerSymbol = 31 + MaxCodingRatio = 8 + MaxSRSPoints = 1 << 28 // 2^28 + MaxAllowedBlobSize = uint64(MaxSRSPoints * BytesPerSymbol / MaxCodingRatio) +) + +// TODO: should this live in the resources pkg? 
+// So that if we ever change the SRS files there we can change this value +const srsOrder = 268435456 // 2^28 + +var ( + // cert verification flags + // TODO: we keep the eigenda prefix like eigenda client flags, because we + // plan to upstream this verification logic into the eigenda client + CertVerificationEnabledFlagName = withFlagPrefix("cert-verification-enabled") + EthRPCFlagName = withFlagPrefix("eth-rpc") + SvcManagerAddrFlagName = withFlagPrefix("svc-manager-addr") + EthConfirmationDepthFlagName = withFlagPrefix("eth-confirmation-depth") + + // kzg flags + G1PathFlagName = withFlagPrefix("g1-path") + G2TauFlagName = withFlagPrefix("g2-tau-path") + CachePathFlagName = withFlagPrefix("cache-path") + MaxBlobLengthFlagName = withFlagPrefix("max-blob-length") +) + +func withFlagPrefix(s string) string { + return "eigenda." + s +} + +func withEnvPrefix(envPrefix, s string) []string { + return []string{envPrefix + "_EIGENDA_" + s} +} + +// CLIFlags ... used for Verifier configuration +// category is used to group the flags in the help output (see https://cli.urfave.org/v2/examples/flags/#grouping) +func CLIFlags(envPrefix, category string) []cli.Flag { + return []cli.Flag{ + &cli.BoolFlag{ + Name: CertVerificationEnabledFlagName, + Usage: "Whether to verify certificates received from EigenDA disperser.", + EnvVars: withEnvPrefix(envPrefix, "CERT_VERIFICATION_ENABLED"), + // TODO: ideally we'd want this to be turned on by default when eigenda backend is used (memstore.enabled=false) + Value: false, + Category: category, + }, + &cli.StringFlag{ + Name: EthRPCFlagName, + Usage: "JSON RPC node endpoint for the Ethereum network used for finalizing DA blobs. See available list here: https://docs.eigenlayer.xyz/eigenda/networks/", + EnvVars: withEnvPrefix(envPrefix, "ETH_RPC"), + Category: category, + }, + &cli.StringFlag{ + Name: SvcManagerAddrFlagName, + Usage: "The deployed EigenDA service manager address.
The list can be found here: https://github.com/Layr-Labs/eigenlayer-middleware/?tab=readme-ov-file#current-mainnet-deployment", + EnvVars: withEnvPrefix(envPrefix, "SERVICE_MANAGER_ADDR"), + Category: category, + }, + &cli.Uint64Flag{ + Name: EthConfirmationDepthFlagName, + Usage: "The number of Ethereum blocks to wait before considering a submitted blob's DA batch submission confirmed. `0` means wait for inclusion only.", + EnvVars: withEnvPrefix(envPrefix, "ETH_CONFIRMATION_DEPTH"), + Value: 0, + Category: category, + }, + // kzg flags + &cli.StringFlag{ + Name: G1PathFlagName, + Usage: "Directory path to g1.point file.", + EnvVars: withEnvPrefix(envPrefix, "TARGET_KZG_G1_PATH"), + // TODO: should use absolute path wrt root directory to prevent future errors + // in case we move this file around + Value: "../resources/g1.point", + Category: category, + }, + &cli.StringFlag{ + Name: G2TauFlagName, + Usage: "Directory path to g2.point.powerOf2 file.", + EnvVars: withEnvPrefix(envPrefix, "TARGET_G2_TAU_PATH"), + Value: "../resources/g2.point.powerOf2", + Category: category, + }, + &cli.StringFlag{ + Name: CachePathFlagName, + Usage: "Directory path to SRS tables for caching.", + EnvVars: withEnvPrefix(envPrefix, "TARGET_CACHE_PATH"), + Value: "../resources/SRSTables/", + Category: category, + }, + // TODO: can we use a genericFlag for this, and automatically parse the string into a uint64? + &cli.StringFlag{ + Name: MaxBlobLengthFlagName, + Usage: "Maximum blob length to be written or read from EigenDA. Determines the number of SRS points loaded into memory for KZG commitments. Example units: '30MiB', '4Kb', '30MB'. 
Maximum size slightly exceeds 1GB.", + EnvVars: withEnvPrefix(envPrefix, "MAX_BLOB_LENGTH"), + Value: "16MiB", + Action: func(_ *cli.Context, maxBlobLengthStr string) error { + // parse the string to a uint64 and set the maxBlobLengthBytes var to be used by ReadConfig() + numBytes, err := utils.ParseBytesAmount(maxBlobLengthStr) + if err != nil { + return fmt.Errorf("failed to parse max blob length flag: %w", err) + } + if numBytes == 0 { + return fmt.Errorf("max blob length is 0") + } + if numBytes > MaxAllowedBlobSize { + return fmt.Errorf("excluding disperser constraints on max blob size, SRS points constrain the maxBlobLength configuration parameter to be less than %d bytes", MaxAllowedBlobSize) + } + MaxBlobLengthBytes = numBytes + return nil + }, + // we also use this flag for memstore. + // should we duplicate the flag? Or is there a better way to handle this? + Category: category, + }, + } +} + +// this var is set by the action in the MaxBlobLengthFlagName flag +// TODO: there's def a better way to deal with this... perhaps a generic flag that can parse the string into a uint64?
+var MaxBlobLengthBytes uint64 + +func ReadConfig(ctx *cli.Context) Config { + kzgCfg := &kzg.KzgConfig{ + G1Path: ctx.String(G1PathFlagName), + G2PowerOf2Path: ctx.String(G2TauFlagName), + CacheDir: ctx.String(CachePathFlagName), + SRSOrder: srsOrder, + SRSNumberToLoad: MaxBlobLengthBytes / 32, // # of fr.Elements + NumWorker: uint64(runtime.GOMAXPROCS(0)), // #nosec G115 + } + + return Config{ + KzgConfig: kzgCfg, + VerifyCerts: ctx.Bool(CertVerificationEnabledFlagName), + RPCURL: ctx.String(EthRPCFlagName), + SvcManagerAddr: ctx.String(SvcManagerAddrFlagName), + EthConfirmationDepth: uint64(ctx.Int64(EthConfirmationDepthFlagName)), // #nosec G115 + } +} diff --git a/verify/verifier.go b/verify/verifier.go index 77195a2f..5e374f0f 100644 --- a/verify/verifier.go +++ b/verify/verifier.go @@ -25,6 +25,7 @@ type Config struct { EthConfirmationDepth uint64 } +// TODO: right now verification and confirmation depth are tightly coupled. we should decouple them type Verifier struct { // kzgVerifier is needed to commit blobs to the memstore kzgVerifier *kzgverifier.Verifier