diff --git a/exp/services/ledgerexporter/main.go b/exp/services/ledgerexporter/main.go index ec168181f3..cf22cc50ee 100644 --- a/exp/services/ledgerexporter/main.go +++ b/exp/services/ledgerexporter/main.go @@ -10,6 +10,7 @@ import ( "strings" "time" + "github.com/aws/aws-sdk-go/service/s3" "github.com/stellar/go/historyarchive" "github.com/stellar/go/ingest/ledgerbackend" "github.com/stellar/go/network" @@ -30,6 +31,7 @@ func main() { continueFromLatestLedger := flag.Bool("continue", false, "start export from the last exported ledger (as indicated in the target's /latest path)") endingLedger := flag.Uint("end-ledger", 0, "ledger at which to stop the export (must be a closed ledger), 0 means no ending") writeLatestPath := flag.Bool("write-latest-path", true, "update the value of the /latest path on the target") + captiveCoreUseDb := flag.Bool("captive-core-use-db", true, "configure captive core to store database on disk in working directory rather than in memory") flag.Parse() logger.SetLevel(supportlog.InfoLevel) @@ -52,6 +54,7 @@ func main() { CheckpointFrequency: 64, Log: logger.WithField("subservice", "stellar-core"), Toml: captiveCoreToml, + UseDB: *captiveCoreUseDb, } core, err := ledgerbackend.NewCaptive(captiveConfig) logFatalIf(err, "Could not create captive core instance") @@ -59,7 +62,8 @@ func main() { target, err := historyarchive.ConnectBackend( *targetUrl, storage.ConnectOptions{ - Context: context.Background(), + Context: context.Background(), + S3WriteACL: s3.ObjectCannedACLBucketOwnerFullControl, }, ) logFatalIf(err, "Could not connect to target") @@ -68,18 +72,23 @@ func main() { // Build the appropriate range for the given backend state. 
startLedger := uint32(*startingLedger) endLedger := uint32(*endingLedger) - if startLedger < 2 { - logger.Fatalf("-start-ledger must be >= 2") - } - if endLedger != 0 && endLedger < startLedger { - logger.Fatalf("-end-ledger must be >= -start-ledger") - } + + logger.Infof("processing requested range of -start-ledger=%v, -end-ledger=%v", startLedger, endLedger) if *continueFromLatestLedger { if startLedger != 0 { logger.Fatalf("-start-ledger and -continue cannot both be set") } startLedger = readLatestLedger(target) + logger.Infof("continue flag was enabled, next ledger found was %v", startLedger) } + + if startLedger < 2 { + logger.Fatalf("-start-ledger must be >= 2") + } + if endLedger != 0 && endLedger < startLedger { + logger.Fatalf("-end-ledger must be >= -start-ledger") + } + var ledgerRange ledgerbackend.Range if endLedger == 0 { ledgerRange = ledgerbackend.UnboundedRange(startLedger) @@ -91,7 +100,7 @@ func main() { err = core.PrepareRange(context.Background(), ledgerRange) logFatalIf(err, "could not prepare range") - for nextLedger := startLedger; nextLedger <= endLedger; { + for nextLedger := startLedger; endLedger < 1 || nextLedger <= endLedger; { ledger, err := core.GetLedger(context.Background(), nextLedger) if err != nil { logger.WithError(err).Warnf("could not fetch ledger %v, retrying", nextLedger) diff --git a/services/horizon/docker/ledgerexporter/Dockerfile b/services/horizon/docker/ledgerexporter/Dockerfile index a51d273fea..233026404b 100644 --- a/services/horizon/docker/ledgerexporter/Dockerfile +++ b/services/horizon/docker/ledgerexporter/Dockerfile @@ -27,6 +27,7 @@ RUN apt-get update && apt-get install -y stellar-core=${STELLAR_CORE_VERSION} RUN apt-get clean ADD captive-core-pubnet.cfg / +ADD captive-core-testnet.cfg / ADD start / RUN ["chmod", "+x", "start"] diff --git a/services/horizon/docker/ledgerexporter/captive-core-testnet.cfg b/services/horizon/docker/ledgerexporter/captive-core-testnet.cfg new file mode 100644 index 
0000000000..0cd9b2f496 --- /dev/null +++ b/services/horizon/docker/ledgerexporter/captive-core-testnet.cfg @@ -0,0 +1,30 @@ +PEER_PORT=11725 +DATABASE = "sqlite3:///cc/stellar.db" + +UNSAFE_QUORUM=true +FAILURE_SAFETY=1 + +[[HOME_DOMAINS]] +HOME_DOMAIN="testnet.stellar.org" +QUALITY="HIGH" + +[[VALIDATORS]] +NAME="sdf_testnet_1" +HOME_DOMAIN="testnet.stellar.org" +PUBLIC_KEY="GDKXE2OZMJIPOSLNA6N6F2BVCI3O777I2OOC4BV7VOYUEHYX7RTRYA7Y" +ADDRESS="core-testnet1.stellar.org" +HISTORY="curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_001/{0} -o {1}" + +[[VALIDATORS]] +NAME="sdf_testnet_2" +HOME_DOMAIN="testnet.stellar.org" +PUBLIC_KEY="GCUCJTIYXSOXKBSNFGNFWW5MUQ54HKRPGJUTQFJ5RQXZXNOLNXYDHRAP" +ADDRESS="core-testnet2.stellar.org" +HISTORY="curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_002/{0} -o {1}" + +[[VALIDATORS]] +NAME="sdf_testnet_3" +HOME_DOMAIN="testnet.stellar.org" +PUBLIC_KEY="GC2V2EFSXN6SQTWVYA5EPJPBWWIMSD2XQNKUOHGEKB535AQE2I6IXV2Z" +ADDRESS="core-testnet3.stellar.org" +HISTORY="curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_003/{0} -o {1}" \ No newline at end of file diff --git a/services/horizon/docker/ledgerexporter/ledgerexporter.yml b/services/horizon/docker/ledgerexporter/ledgerexporter.yml new file mode 100644 index 0000000000..420216eef1 --- /dev/null +++ b/services/horizon/docker/ledgerexporter/ledgerexporter.yml @@ -0,0 +1,125 @@ +# this file contains the ledgerexporter deployment and its config artifacts. +# +# when applying the manifest on a cluster, make sure to include namespace destination, +# as the manifest does not specify namespace, otherwise it'll go in your current kubectl context. +# +# make sure to set the secrets values, substitute placeholders. 
+# +# $ kubectl apply -f ledgerexporter.yml -n horizon-dev +apiVersion: v1 +kind: ConfigMap +metadata: + annotations: + fluxcd.io/ignore: "true" + labels: + app: ledgerexporter + name: ledgerexporter-pubnet-env +data: + # when using core 'on disk', the earliest ledger to get streamed out after catchup to 2, is 3 + # whereas on in-memory it streams out 2, adjusted here, otherwise horizon ingest will abort + # and stop the process with an error that ledger 3 is not <= expected ledger of 2. + START: "0" + END: "0" + + # can only have CONTINUE or START set, not both. + CONTINUE: "true" + WRITE_LATEST_PATH: "true" + CAPTIVE_CORE_USE_DB: "true" + + # configure the network to export + HISTORY_ARCHIVE_URLS: "https://history.stellar.org/prd/core-live/core_live_001,https://history.stellar.org/prd/core-live/core_live_002,https://history.stellar.org/prd/core-live/core_live_003" + NETWORK_PASSPHRASE: "Public Global Stellar Network ; September 2015" + # can refer to canned cfg's for pubnet and testnet which are included on the image + # `/captive-core-pubnet.cfg` or `/captive-core-testnet.cfg`. + # If exporting a standalone network, then mount a volume to the pod container with your standalone core's .cfg, + # and set full path to that volume here + CAPTIVE_CORE_CONFIG: "/captive-core-pubnet.cfg" + + # example of testnet network config. 
+ # HISTORY_ARCHIVE_URLS: "https://history.stellar.org/prd/core-testnet/core_testnet_001,https://history.stellar.org/prd/core-testnet/core_testnet_002" + # NETWORK_PASSPHRASE: "Test SDF Network ; September 2015" + # CAPTIVE_CORE_CONFIG: "/captive-core-testnet.cfg" + + # provide the url for the external s3 bucket to be populated + # update the ledgerexporter-pubnet-secret to have correct aws key/secret for access to the bucket + ARCHIVE_TARGET: "s3://horizon-ledgermeta-prodnet-test" +--- +apiVersion: v1 +kind: Secret +metadata: + labels: + app: ledgerexporter + name: ledgerexporter-pubnet-secret +type: Opaque +data: + AWS_REGION: + AWS_ACCESS_KEY_ID: + AWS_SECRET_ACCESS_KEY: +--- +# running captive core with on-disk mode limits RAM to around 2G usage, but +# requires some dedicated disk storage space that has at least 3k IOPS for read/write. +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: ledgerexporter-pubnet-core-storage +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 500Gi + storageClassName: default + volumeMode: Filesystem +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + fluxcd.io/ignore: "true" + deployment.kubernetes.io/revision: "3" + labels: + app: ledgerexporter-pubnet + name: ledgerexporter-pubnet-deployment +spec: + selector: + matchLabels: + app: ledgerexporter-pubnet + replicas: 1 + template: + metadata: + annotations: + fluxcd.io/ignore: "true" + # if we expect to add metrics at some point to ledgerexporter + # this just needs to be set to true + prometheus.io/port: "6060" + prometheus.io/scrape: "false" + labels: + app: ledgerexporter-pubnet + spec: + containers: + - envFrom: + - secretRef: + name: ledgerexporter-pubnet-secret + - configMapRef: + name: ledgerexporter-pubnet-env + image: stellar/horizon-ledgerexporter:latest + imagePullPolicy: Always + name: ledgerexporter-pubnet + resources: + limits: + cpu: 3 + memory: 8Gi + requests: + cpu: 500m + memory: 2Gi + volumeMounts: + - 
mountPath: /cc + name: core-storage + dnsPolicy: ClusterFirst + volumes: + - name: core-storage + persistentVolumeClaim: + claimName: ledgerexporter-pubnet-core-storage + + + diff --git a/services/horizon/docker/ledgerexporter/start b/services/horizon/docker/ledgerexporter/start index 68df30ab6b..11d863effa 100644 --- a/services/horizon/docker/ledgerexporter/start +++ b/services/horizon/docker/ledgerexporter/start @@ -1,12 +1,19 @@ #! /usr/bin/env bash set -e -START="${START:=0}" +START="${START:=2}" END="${END:=0}" CONTINUE="${CONTINUE:=false}" # Writing to /latest is disabled by default to avoid race conditions between parallel container runs WRITE_LATEST_PATH="${WRITE_LATEST_PATH:=false}" +# config defaults to pubnet core, any other network requires setting all 3 of these in container env +NETWORK_PASSPHRASE="${NETWORK_PASSPHRASE:=Public Global Stellar Network ; September 2015}" +HISTORY_ARCHIVE_URLS="${HISTORY_ARCHIVE_URLS:=https://s3-eu-west-1.amazonaws.com/history.stellar.org/prd/core-live/core_live_001}" +CAPTIVE_CORE_CONFIG="${CAPTIVE_CORE_CONFIG:=/captive-core-pubnet.cfg}" + +CAPTIVE_CORE_USE_DB="${CAPTIVE_CORE_USE_DB:=true}" + if [ -z "$ARCHIVE_TARGET" ]; then echo "error: undefined ARCHIVE_TARGET env variable" exit 1 @@ -39,9 +46,10 @@ fi echo "START: $START END: $END" export TRACY_NO_INVARIANT_CHECK=1 -/ledgerexporter --target $ARCHIVE_TARGET \ - --captive-core-toml-path /captive-core-pubnet.cfg \ - --history-archive-urls 'https://history.stellar.org/prd/core-live/core_live_001' --network-passphrase 'Public Global Stellar Network ; September 2015' \ - --continue="$CONTINUE" --write-latest-path="$WRITE_LATEST_PATH" --start-ledger "$START" --end-ledger "$END" +/ledgerexporter --target "$ARCHIVE_TARGET" \ + --captive-core-toml-path "$CAPTIVE_CORE_CONFIG" \ + --history-archive-urls "$HISTORY_ARCHIVE_URLS" --network-passphrase "$NETWORK_PASSPHRASE" \ + --continue="$CONTINUE" --write-latest-path="$WRITE_LATEST_PATH" \ + --start-ledger "$START" --end-ledger 
"$END" --captive-core-use-db="$CAPTIVE_CORE_USE_DB" echo "OK" diff --git a/support/storage/main.go b/support/storage/main.go index 3da7351212..3da6be608b 100644 --- a/support/storage/main.go +++ b/support/storage/main.go @@ -31,6 +31,9 @@ type ConnectOptions struct { // Wrap the Storage after connection. For example, to add a caching or introspection layer. Wrap func(Storage) (Storage, error) + + // When putting file object to s3 bucket, specify the ACL for the object. + S3WriteACL string } func ConnectBackend(u string, opts ConnectOptions) (Storage, error) { @@ -60,6 +63,7 @@ func ConnectBackend(u string, opts ConnectOptions) (Storage, error) { opts.S3Region, opts.S3Endpoint, opts.UnsignedRequests, + opts.S3WriteACL, ) case "gcs": diff --git a/support/storage/s3.go b/support/storage/s3.go index 88e99fc4f6..ac7b13fa80 100644 --- a/support/storage/s3.go +++ b/support/storage/s3.go @@ -12,15 +12,17 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3iface" "github.com/stellar/go/support/errors" ) type S3Storage struct { ctx context.Context - svc *s3.S3 + svc s3iface.S3API bucket string prefix string unsignedRequests bool + writeACLrule string } func NewS3Storage( @@ -30,6 +32,7 @@ func NewS3Storage( region string, endpoint string, unsignedRequests bool, + writeACLrule string, ) (Storage, error) { log.WithFields(log.Fields{"bucket": bucket, "prefix": prefix, @@ -52,6 +55,7 @@ func NewS3Storage( bucket: bucket, prefix: prefix, unsignedRequests: unsignedRequests, + writeACLrule: writeACLrule, } return &backend, nil } @@ -139,6 +143,13 @@ func (b *S3Storage) Size(pth string) (int64, error) { } } +func (b *S3Storage) GetACLWriteRule() string { + if b.writeACLrule == "" { + return s3.ObjectCannedACLPublicRead + } + return b.writeACLrule +} + func (b *S3Storage) PutFile(pth string, in io.ReadCloser) error { var buf bytes.Buffer _, err := buf.ReadFrom(in) @@ 
-150,7 +161,7 @@ func (b *S3Storage) PutFile(pth string, in io.ReadCloser) error { params := &s3.PutObjectInput{ Bucket: aws.String(b.bucket), Key: aws.String(key), - ACL: aws.String(s3.ObjectCannedACLPublicRead), + ACL: aws.String(b.GetACLWriteRule()), Body: bytes.NewReader(buf.Bytes()), } req, _ := b.svc.PutObjectRequest(params) diff --git a/support/storage/s3_test.go b/support/storage/s3_test.go new file mode 100644 index 0000000000..4064af9539 --- /dev/null +++ b/support/storage/s3_test.go @@ -0,0 +1,52 @@ +// Copyright 2016 Stellar Development Foundation and contributors. Licensed +// under the Apache License, Version 2.0. See the COPYING file at the root +// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 + +package storage + +import ( + "context" + "testing" + + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3iface" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +type MockS3 struct { + mock.Mock + s3iface.S3API +} + +func TestWriteACLRuleOverride(t *testing.T) { + + mockS3 := &MockS3{} + s3Storage := S3Storage{ + ctx: context.Background(), + svc: mockS3, + bucket: "bucket", + prefix: "prefix", + unsignedRequests: false, + writeACLrule: s3.ObjectCannedACLBucketOwnerFullControl, + } + + aclRule := s3Storage.GetACLWriteRule() + assert.Equal(t, aclRule, s3.ObjectCannedACLBucketOwnerFullControl) +} + +func TestWriteACLRuleDefault(t *testing.T) { + + mockS3 := &MockS3{} + s3Storage := S3Storage{ + ctx: context.Background(), + svc: mockS3, + bucket: "bucket", + prefix: "prefix", + unsignedRequests: false, + writeACLrule: "", + } + + aclRule := s3Storage.GetACLWriteRule() + assert.Equal(t, aclRule, s3.ObjectCannedACLPublicRead) +}