diff --git a/.circleci/config.yml b/.circleci/config.yml index dd651a993c..a3c4a26022 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -229,7 +229,6 @@ commands: # Pulling the image helps with test running time command: | cd ~/go/src/github.com/stellar/go - docker pull stellar/quickstart:testing <<# parameters.enable-captive-core >>HORIZON_INTEGRATION_ENABLE_CAPTIVE_CORE=true<> go test -timeout 25m -v ./services/horizon/internal/integration/... #-----------------------------------------------------------------------------# @@ -433,6 +432,9 @@ jobs: command: | echo "export HORIZON_INTEGRATION_TESTS=true" >> $BASH_ENV echo "export HORIZON_BIN_DIR=~/go/src/github.com/stellar/go" >> $BASH_ENV + - run: + name: Pull latest Stellar Core image + command: docker pull stellar/stellar-core - install_golang - build_horizon # run the integration tests ... diff --git a/clients/stellarcore/client.go b/clients/stellarcore/client.go index 4c800fcd2c..7fdb62515a 100644 --- a/clients/stellarcore/client.go +++ b/clients/stellarcore/client.go @@ -8,6 +8,7 @@ import ( "net/http" "net/url" "path" + "strconv" "strings" "time" @@ -26,6 +27,31 @@ type Client struct { URL string } +// Upgrade upgrades the protocol version running on the stellar core instance +func (c *Client) Upgrade(ctx context.Context, version int) error { + queryParams := url.Values{} + queryParams.Add("mode", "set") + queryParams.Add("upgradetime", "1970-01-01T00:00:00Z") + queryParams.Add("protocolversion", strconv.Itoa(version)) + + req, err := c.simpleGet(ctx, "upgrades", queryParams) + if err != nil { + return errors.Wrap(err, "failed to create request") + } + + hresp, err := c.http().Do(req) + if err != nil { + return errors.Wrap(err, "http request errored") + } + defer hresp.Body.Close() + + if !(hresp.StatusCode >= 200 && hresp.StatusCode < 300) { + return errors.New("http request failed with non-200 status code") + } + + return nil +} + // Info calls the `info` command on the connected stellar core and returns the // provided response func (c *Client) Info(ctx context.Context) (resp *proto.InfoResponse, err error) { diff --git a/go.list b/go.list index 0f929e0e0e..3a9514db05 100644 --- a/go.list +++ b/go.list @@ -17,10 +17,6 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 github.com/client9/misspell v0.3.4 github.com/davecgh/go-spew v1.1.1 github.com/dgrijalva/jwt-go v3.2.0+incompatible -github.com/docker/distribution v2.7.1+incompatible -github.com/docker/docker v1.13.1 -github.com/docker/go-connections v0.4.0 -github.com/docker/go-units v0.4.0 github.com/eapache/go-resiliency v1.1.0 github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 github.com/eapache/queue v1.1.0 @@ -85,7 +81,6 @@ github.com/moul/http2curl v0.0.0-20161031194548-4e24498b31db github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223 github.com/onsi/ginkgo v1.7.0 github.com/onsi/gomega v1.4.3 -github.com/opencontainers/go-digest v1.0.0 github.com/opentracing/opentracing-go v1.1.0 github.com/openzipkin/zipkin-go v0.1.6 github.com/pierrec/lz4 v2.0.5+incompatible diff --git a/go.mod b/go.mod index 1723736ad7..1a8a6f6f4c 100644 --- a/go.mod +++ b/go.mod @@ -12,10 +12,6 @@ require ( github.com/asaskevich/govalidator v0.0.0-20180319081651-7d2e70ef918f github.com/aws/aws-sdk-go v1.25.25 github.com/dgrijalva/jwt-go v3.2.0+incompatible - github.com/docker/distribution v2.7.1+incompatible // indirect - github.com/docker/docker v1.13.1 - github.com/docker/go-connections v0.4.0 - github.com/docker/go-units v0.4.0 // indirect 
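A minimal usage sketch for the new Upgrade helper added to clients/stellarcore/client.go above; the localhost URL and the target protocol version are illustrative values, not taken from this change.

// Sketch: schedule an immediate protocol upgrade on a local core instance via
// the new stellarcore.Client.Upgrade helper. URL and version are illustrative.
package main

import (
	"context"
	"log"
	"time"

	"github.com/stellar/go/clients/stellarcore"
)

func main() {
	client := &stellarcore.Client{URL: "http://localhost:11626"}

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	// Upgrade hits core's /upgrades endpoint with mode=set, an epoch upgrade
	// time and the requested protocol version, and fails on a non-2xx response.
	if err := client.Upgrade(ctx, 15); err != nil {
		log.Fatalf("could not upgrade protocol: %v", err)
	}
}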
github.com/elazarl/go-bindata-assetfs v1.0.0 github.com/fatih/structs v1.0.0 // indirect github.com/gavv/monotime v0.0.0-20161010190848-47d58efa6955 // indirect @@ -51,7 +47,6 @@ require ( github.com/moul/http2curl v0.0.0-20161031194548-4e24498b31db // indirect github.com/onsi/ginkgo v1.7.0 github.com/onsi/gomega v1.4.3 - github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opentracing/opentracing-go v1.1.0 // indirect github.com/pkg/errors v0.8.1 github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 diff --git a/go.sum b/go.sum index 9f6bf6a9c5..1042c2acfd 100644 --- a/go.sum +++ b/go.sum @@ -31,14 +31,6 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= -github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v1.13.1 h1:IkZjBSIc8hBjLpqeAbeE5mca5mNgeatLHBy3GO78BWo= -github.com/docker/docker v1.13.1/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= -github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= @@ -156,8 +148,6 @@ github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= -github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= diff --git a/services/horizon/docker/docker-compose.integration-tests.yml b/services/horizon/docker/docker-compose.integration-tests.yml new file mode 100644 index 0000000000..c31ee2eaba --- /dev/null +++ b/services/horizon/docker/docker-compose.integration-tests.yml @@ -0,0 +1,25 @@ +version: '3' +services: + core-postgres: + image: postgres:9.6.17-alpine + restart: on-failure + environment: + - POSTGRES_PASSWORD=mysecretpassword + - POSTGRES_DB=stellar + ports: + - "5641:5641" + command: ["-p", "5641"] + core: + image: stellar/stellar-core + depends_on: + - core-postgres + restart: on-failure + ports: + - "11625:11625" + - "11626:11626" + # add extra 
port for history archive server + - "1570:1570" + command: /start standalone + volumes: + - ./stellar-core-integration-tests.cfg:/stellar-core.cfg + - ./core-start.sh:/start diff --git a/services/horizon/docker/docker-compose.standalone.yml b/services/horizon/docker/docker-compose.standalone.yml index 0ba4b6fe9b..169915ce06 100644 --- a/services/horizon/docker/docker-compose.standalone.yml +++ b/services/horizon/docker/docker-compose.standalone.yml @@ -14,10 +14,9 @@ services: environment: - HISTORY_ARCHIVE_URLS=http://host.docker.internal:1570 - NETWORK_PASSPHRASE=Standalone Network ; February 2017 - # this container will invoke a request to upgrade stellar core to protocol 13 + # this container will invoke a request to upgrade stellar core to protocol 15 (by default) core-upgrade: restart: on-failure image: curlimages/curl:7.69.1 - command: ["-v", "-f", "http://host.docker.internal:11626/upgrades?mode=set&upgradetime=1970-01-01T00:00:00Z&protocolversion=13"] + command: ["-v", "-f", "http://host.docker.internal:11626/upgrades?mode=set&upgradetime=1970-01-01T00:00:00Z&protocolversion=${PROTOCOL_VERSION:-15}"] network_mode: '${NETWORK_MODE:-bridge}' - diff --git a/services/horizon/docker/stellar-core-integration-tests.cfg b/services/horizon/docker/stellar-core-integration-tests.cfg new file mode 100644 index 0000000000..035ff26122 --- /dev/null +++ b/services/horizon/docker/stellar-core-integration-tests.cfg @@ -0,0 +1,28 @@ +# simple configuration for a standalone test "network" +# see stellar-core_example.cfg for a description of the configuration parameters + +RUN_STANDALONE=false +MANUAL_CLOSE=true + +NETWORK_PASSPHRASE="Standalone Network ; February 2017" + +PEER_PORT=11625 +HTTP_PORT=11626 +PUBLIC_HTTP_PORT=true + +NODE_SEED="SACJC372QBSSKJYTV5A7LWT4NXWHTQO6GHG4QDAVC2XDPX6CNNXFZ4JK" + +NODE_IS_VALIDATOR=true +UNSAFE_QUORUM=true +FAILURE_SAFETY=0 + +DATABASE="postgresql://user=postgres password=mysecretpassword host=core-postgres port=5641 dbname=stellar" + +[QUORUM_SET] +THRESHOLD_PERCENT=100 +VALIDATORS=["GD5KD2KEZJIGTC63IGW6UMUSMVUVG5IHG64HUTFWCHVZH2N2IBOQN7PS"] + +[HISTORY.vs] +get="cp history/vs/{0} {1}" +put="cp {0} history/vs/{1}" +mkdir="mkdir -p history/vs/{0}" \ No newline at end of file diff --git a/services/horizon/internal/integration/protocol14_state_verifier_test.go b/services/horizon/internal/integration/protocol14_state_verifier_test.go index b0c5cfa804..72c694175e 100644 --- a/services/horizon/internal/integration/protocol14_state_verifier_test.go +++ b/services/horizon/internal/integration/protocol14_state_verifier_test.go @@ -106,7 +106,7 @@ func TestProtocol14StateVerifier(t *testing.T) { // Reach the first checkpoint ledger // Core will push to history archives *after* checkpoint ledger - itest.CloseCoreLedgersUntilSequence(firstCheckpoint + 1) + err = itest.CloseCoreLedgersUntilSequence(firstCheckpoint + 1) assert.NoError(t, err) for !itest.LedgerIngested(firstCheckpoint) { time.Sleep(time.Second) @@ -118,16 +118,17 @@ func TestProtocol14StateVerifier(t *testing.T) { } // Trigger state rebuild to check if ingesting from history archive works - itest.RunHorizonCLICommand("expingest", "trigger-state-rebuild") + err = itest.Horizon().HistoryQ().UpdateExpIngestVersion(0) + assert.NoError(t, err) // Wait for the second checkpoint ledger and state rebuild // Core will push to history archives *after* checkpoint ledger - itest.CloseCoreLedgersUntilSequence(secondCheckpoint + 1) + err = itest.CloseCoreLedgersUntilSequence(secondCheckpoint + 1) assert.NoError(t, err) // 
Wait for the third checkpoint ledger and state verification trigger // Core will push to history archives *after* checkpoint ledger - itest.CloseCoreLedgersUntilSequence(thirdCheckpoint + 1) + err = itest.CloseCoreLedgersUntilSequence(thirdCheckpoint + 1) assert.NoError(t, err) for !itest.LedgerIngested(thirdCheckpoint) { time.Sleep(time.Second) diff --git a/services/horizon/internal/integration/protocol14_test.go b/services/horizon/internal/integration/protocol14_test.go index 96134b1a5c..6f2319b2e5 100644 --- a/services/horizon/internal/integration/protocol14_test.go +++ b/services/horizon/internal/integration/protocol14_test.go @@ -98,11 +98,11 @@ func TestHappyClaimableBalances(t *testing.T) { a, b, c := keys[0], keys[1], keys[2] accountA, accountB, accountC := accounts[0], accounts[1], accounts[2] - /* - * Each sub-test is completely self-contained: at the end of the test, we - * start with a clean slate for each account. This lets us check with - * equality for things like "number of operations," etc. - */ + // + // Each sub-test is completely self-contained: at the end of the test, we + // start with a clean slate for each account. This lets us check with + // equality for things like "number of operations," etc. + // // We start simple: native asset, single destination, no predicate. t.Run("Simple/Native", func(t *testing.T) { diff --git a/services/horizon/internal/test/integration/integration.go b/services/horizon/internal/test/integration/integration.go index 01198371dd..b91683baf4 100644 --- a/services/horizon/internal/test/integration/integration.go +++ b/services/horizon/internal/test/integration/integration.go @@ -2,80 +2,61 @@ package integration import ( - "archive/tar" - "bytes" "context" "fmt" - "io" "io/ioutil" "os" + "os/exec" "os/signal" + "path" + "path/filepath" "strconv" "sync" "syscall" "testing" "time" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/client" - "github.com/docker/go-connections/nat" "github.com/spf13/cobra" + "github.com/stretchr/testify/assert" sdk "github.com/stellar/go/clients/horizonclient" "github.com/stellar/go/clients/stellarcore" "github.com/stellar/go/keypair" proto "github.com/stellar/go/protocols/horizon" horizon "github.com/stellar/go/services/horizon/internal" + "github.com/stellar/go/services/horizon/internal/ledger" "github.com/stellar/go/support/db/dbtest" - "github.com/stellar/go/support/errors" "github.com/stellar/go/support/log" "github.com/stellar/go/txnbuild" "github.com/stellar/go/xdr" - "github.com/stretchr/testify/assert" ) const ( NetworkPassphrase = "Standalone Network ; February 2017" - stellarCorePostgresPassword = "integration-tests-password" + stellarCorePostgresPassword = "mysecretpassword" adminPort = 6060 + stellarCorePort = 11626 + stellarCorePostgresPort = 5641 + historyArchivePort = 1570 ) -var ( - stellarCorePort = mustPort("tcp", "11626") - stellarCorePostgresPort = mustPort("tcp", "5432") - historyArchivePort = mustPort("tcp", "1570") -) - -func mustPort(proto, port string) nat.Port { - p, err := nat.NewPort(proto, port) - panicIf(err) - return p -} - type Config struct { ProtocolVersion int32 SkipContainerCreation bool } type Test struct { - t *testing.T - config Config - cli client.APIClient - hclient *sdk.Client - cclient *stellarcore.Client - container container.ContainerCreateCreatedBody - app *horizon.App + t *testing.T + config Config + hclient *sdk.Client + cclient *stellarcore.Client + app 
*horizon.App } // NewTest starts a new environment for integration test at a given // protocol version and blocks until Horizon starts ingesting. // -// Warning: this requires: -// * Docker installed and all docker env variables set. -// * HORIZON_BIN_DIR env variable set to the directory with `horizon` binary to test. -// * Horizon binary must be built for GOOS=linux and GOARCH=amd64. +// Warning: this requires Docker Compose installed // // Skips the test if HORIZON_INTEGRATION_TESTS env variable is not set. func NewTest(t *testing.T, config Config) *Test { @@ -85,191 +66,107 @@ func NewTest(t *testing.T, config Config) *Test { i := &Test{t: t, config: config} - var err error - i.cli, err = client.NewEnvClient() - if err != nil { - t.Fatal(errors.Wrap(err, "error creating docker client")) - } - - image := "stellar/quickstart:testing" - skipCreation := os.Getenv("HORIZON_SKIP_CREATION") != "" - - if skipCreation { - t.Log("Trying to skip container creation...") - containers, _ := i.cli.ContainerList( - context.Background(), - types.ContainerListOptions{All: true, Quiet: true}) + composeDir := findDockerComposePath() + manualCloseYaml := path.Join(composeDir, "docker-compose.integration-tests.yml") - for _, container := range containers { - if container.Image == image { - i.container.ID = container.ID - break - } - } - - if i.container.ID != "" { - t.Logf("Found matching container: %s\n", i.container.ID) - } else { - t.Log("No matching container found.") - os.Unsetenv("HORIZON_SKIP_CREATION") - skipCreation = false - } + // Runs a docker-compose command applied to the above configs + runComposeCommand := func(args ...string) { + cmdline := append([]string{"-f", manualCloseYaml}, args...) + t.Log("Running", cmdline) + cmd := exec.Command("docker-compose", cmdline...) + _, innerErr := cmd.Output() + fatalIf(t, innerErr) } - if !skipCreation { - err = createTestContainer(i, image) - if err != nil { - t.Fatal(errors.Wrap(err, "error creating docker container")) - } - } + // Only run Stellar Core container and its dependencies + runComposeCommand("up", "--detach", "--quiet-pull", "--no-color", "core") - // At this point, any of the following actions failing will cause the dead - // container to stick around, failing any subsequent tests. Thus, we track a - // flag to determine whether or not we should do this. 
- doCleanup := true - cleanup := func() { - if doCleanup { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) - defer cancel() - i.cli.ContainerRemove( - ctx, i.container.ID, - types.ContainerRemoveOptions{Force: true}) - } + // FIXME: Only use horizon from quickstart container when testing captive core + if os.Getenv("HORIZON_INTEGRATION_ENABLE_CAPTIVE_CORE") != "" { + t.Skip("Testing with captive core isn't working yet.") } - defer cleanup() - - i.setupHorizonBinary() - t.Log("Starting container...") - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - err = i.cli.ContainerStart(ctx, i.container.ID, types.ContainerStartOptions{}) - if err != nil { - t.Fatal(errors.Wrap(err, "error starting docker container")) - } + i.cclient = &stellarcore.Client{URL: "http://localhost:" + strconv.Itoa(stellarCorePort)} + i.waitForCore() - ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() + i.startHorizon() + i.hclient = &sdk.Client{HorizonURL: "http://localhost:8000"} - containerInfo, err := i.cli.ContainerInspect(ctx, i.container.ID) - if err != nil { - i.t.Fatal(errors.Wrap(err, "error inspecting container")) - } - stellarCoreBinding := containerInfo.NetworkSettings.Ports[stellarCorePort][0] - coreURL := fmt.Sprintf("http://%s:%s", stellarCoreBinding.HostIP, stellarCoreBinding.HostPort) - // only use horizon from quickstart container when testing captive core - if os.Getenv("HORIZON_INTEGRATION_ENABLE_CAPTIVE_CORE") == "" { - i.startHorizon(containerInfo, coreURL) + // Register cleanup handlers (on panic and ctrl+c) so the containers are + // stopped even if ingestion or testing fails. + cleanup := func() { + if i.app != nil { + i.app.Close() + // Clear the ledger state otherwise the root response + // will contain ledger information from the previous test run + ledger.SetState(ledger.State{}) + } + runComposeCommand("down", "-v", "--remove-orphans") } + i.t.Cleanup(cleanup) - doCleanup = false - i.hclient = &sdk.Client{HorizonURL: "http://localhost:8000"} - i.cclient = &stellarcore.Client{URL: coreURL} - - // Register cleanup handlers (on panic and ctrl+c) so the container is - // removed even if ingestion or testing fails. - i.t.Cleanup(i.Close) c := make(chan os.Signal) signal.Notify(c, os.Interrupt, syscall.SIGTERM) go func() { <-c - i.Close() - os.Exit(0) + cleanup() + os.Exit(int(syscall.SIGTERM)) }() - i.waitForCore() - i.waitForIngestionAndUpgrade() + i.waitForHorizon() return i } -func (i *Test) setupHorizonBinary() { - // only use horizon from quickstart container when testing captive core - if os.Getenv("HORIZON_INTEGRATION_ENABLE_CAPTIVE_CORE") == "" { - return - } - - if os.Getenv("HORIZON_BIN_DIR") == "" { - i.t.Fatal("HORIZON_BIN_DIR env variable not set") - } - - horizonBinaryContents, err := ioutil.ReadFile(os.Getenv("HORIZON_BIN_DIR") + "/horizon") - if err != nil { - i.t.Fatal(errors.Wrap(err, "error reading horizon binary file")) - } - - // Create a tar archive with horizon binary (required by docker API). 
- var buf bytes.Buffer - tw := tar.NewWriter(&buf) - hdr := &tar.Header{ - Name: "stellar-horizon", - Mode: 0755, - Size: int64(len(horizonBinaryContents)), - } - if err = tw.WriteHeader(hdr); err != nil { - i.t.Fatal(errors.Wrap(err, "error writing tar header")) - } - if _, err = tw.Write(horizonBinaryContents); err != nil { - i.t.Fatal(errors.Wrap(err, "error writing tar contents")) - } - if err = tw.Close(); err != nil { - i.t.Fatal(errors.Wrap(err, "error closing tar archive")) - } - - i.t.Log("Copying custom horizon binary...") - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - err = i.cli.CopyToContainer(ctx, i.container.ID, "/usr/bin/", &buf, types.CopyToContainerOptions{}) - if err != nil { - i.t.Fatal(errors.Wrap(err, "error copying custom horizon binary")) - } -} - -func (i *Test) startHorizon(containerInfo types.ContainerJSON, coreURL string) { - stellarCorePostgres := containerInfo.NetworkSettings.Ports[stellarCorePostgresPort][0] - stellarCorePostgresURL := fmt.Sprintf( - "postgres://stellar:%s@%s:%s/core", - stellarCorePostgresPassword, - stellarCorePostgres.HostIP, - stellarCorePostgres.HostPort, - ) - - historyArchive := containerInfo.NetworkSettings.Ports[historyArchivePort][0] - +func (i *Test) startHorizon() { horizonPostgresURL := dbtest.Postgres(i.t).DSN config, configOpts := horizon.Flags() - cmd := &cobra.Command{ Use: "horizon", Short: "client-facing api server for the stellar network", - Long: "client-facing api server for the stellar network. It acts as the interface between Stellar Core and applications that want to access the Stellar network. It allows you to submit transactions to the network, check the status of accounts, subscribe to event streams and more.", + Long: `client-facing api server for the stellar network. It acts as the +interface between Stellar Core and applications that want to access the Stellar +network. It allows you to submit transactions to the network, check the status +of accounts, subscribe to event streams and more.`, Run: func(cmd *cobra.Command, args []string) { i.app = horizon.NewAppFromFlags(config, configOpts) }, } + + // Ideally, we'd be pulling host/port information from the Docker Compose + // YAML file itself rather than hardcoding it. 
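As a possible follow-up to the comment above about pulling host/port information from the compose file rather than hardcoding it, a rough sketch of such a helper follows. It assumes io/ioutil (already imported in this file) plus a YAML parser such as gopkg.in/yaml.v2; the type and function names are hypothetical and not part of this change.

// Hypothetical helper: read the published ports of each service from a
// docker-compose file (entries look like "11626:11626").
type composeFile struct {
	Services map[string]struct {
		Ports []string `yaml:"ports"`
	} `yaml:"services"`
}

func composePorts(file string) (map[string][]string, error) {
	raw, err := ioutil.ReadFile(file)
	if err != nil {
		return nil, err
	}
	var parsed composeFile
	if err := yaml.Unmarshal(raw, &parsed); err != nil {
		return nil, err
	}
	ports := make(map[string][]string, len(parsed.Services))
	for name, svc := range parsed.Services {
		ports[name] = svc.Ports
	}
	return ports, nil
}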
+ hostname := "localhost" cmd.SetArgs([]string{ "--stellar-core-url", - coreURL, + fmt.Sprintf("http://%s:%d", hostname, stellarCorePort), "--history-archive-urls", - fmt.Sprintf("http://%s:%s", historyArchive.HostIP, historyArchive.HostPort), + fmt.Sprintf("http://%s:%d", hostname, historyArchivePort), "--ingest", "--db-url", horizonPostgresURL, "--stellar-core-db-url", - stellarCorePostgresURL, + fmt.Sprintf( + "postgres://postgres:%s@%s:%d/stellar?sslmode=disable", + stellarCorePostgresPassword, + hostname, + stellarCorePostgresPort, + ), "--network-passphrase", NetworkPassphrase, "--apply-migrations", "--admin-port", - strconv.Itoa(adminPort), + strconv.Itoa(i.AdminPort()), }) - configOpts.Init(cmd) + var err error + if err = configOpts.Init(cmd); err != nil { + i.t.Fatalf("Cannot initialize params: %s", err) + } - if err := cmd.Execute(); err != nil { + if err = cmd.Execute(); err != nil { i.t.Fatalf("cannot initialize horizon: %s", err) } - if err := i.app.Ingestion().BuildGenesisState(); err != nil { + if err = i.app.Ingestion().BuildGenesisState(); err != nil { i.t.Fatalf("cannot build genesis state: %s", err) } @@ -283,44 +180,59 @@ func (i *Test) waitForCore() { ctx, cancel := context.WithTimeout(context.Background(), time.Second) _, err := i.cclient.Info(ctx) cancel() - if err == nil { - break + if err != nil { + i.t.Logf("could not obtain info response: %v", err) + time.Sleep(time.Second) + continue + } + break + } + + { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + err := i.cclient.Upgrade(ctx, int(i.config.ProtocolVersion)) + cancel() + if err != nil { + i.t.Fatalf("could not upgrade protocol: %v", err) } - time.Sleep(time.Second) } - // We need to wait for Core to be armed before closing the first ledger - // Otherwise, for some reason, the protocol version of the ledger stays at 0 - // TODO: instead of sleeping we should ensure Core's status (in GET /info) is "Armed" - // but, to do so, we should first expose it in Core's client. - time.Sleep(time.Second) if err := i.CloseCoreLedger(); err != nil { i.t.Fatalf("Failed to manually close the second ledger: %s", err) } - // Make sure that the Sleep above was successful - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - info, err := i.cclient.Info(ctx) - cancel() - if err != nil || !info.IsSynced() { - log.Fatal("failed to wait for Core to be synced") + { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + info, err := i.cclient.Info(ctx) + cancel() + if err != nil || !info.IsSynced() { + i.t.Fatal("failed to wait for Core to be synced") + } } } -func (i *Test) waitForIngestionAndUpgrade() { - for t := 30 * time.Second; t >= 0; t -= time.Second { +func (i *Test) waitForHorizon() { + for t := 30; t >= 0; t -= 1 { i.t.Log("Waiting for ingestion and protocol upgrade...") - root, _ := i.hclient.Root() - // We ignore errors here because it's likely connection error due to - // Horizon not running. We ensure that's is up and correct by checking - // the root response. - if root.IngestSequence > 0 && - root.HorizonSequence > 0 && - root.CurrentProtocolVersion == i.config.ProtocolVersion { - i.t.Log("Horizon ingesting and protocol version matches...") + root, err := i.hclient.Root() + if err != nil { + i.t.Logf("could not obtain root response %v", err) + time.Sleep(time.Second) + continue + } + + if root.HorizonSequence < 2 || + int(root.HorizonSequence) != int(root.IngestSequence) || + root.HorizonSequence < root.CoreSequence { + i.t.Logf("Horizon ingesting... 
%v", root) + time.Sleep(time.Second) + continue + } + + if root.CurrentProtocolVersion == i.config.ProtocolVersion { + i.t.Logf("Horizon protocol version matches... %v", root) return } - time.Sleep(time.Second) } i.t.Fatal("Horizon not ingesting...") @@ -331,6 +243,11 @@ func (i *Test) Client() *sdk.Client { return i.hclient } +// Horizon returns the horizon.App instance for the current integration test +func (i *Test) Horizon() *horizon.App { + return i.app +} + // LedgerIngested returns true if the ledger with a given sequence has been // ingested by Horizon. Panics in case of errors. func (i *Test) LedgerIngested(sequence uint32) bool { @@ -354,7 +271,7 @@ func (i *Test) LedgerClosed(sequence uint32) bool { // AdminPort returns Horizon admin port. func (i *Test) AdminPort() int { - return 6060 + return adminPort } // Metrics URL returns Horizon metrics URL. @@ -379,98 +296,6 @@ func (i *Test) CurrentTest() *testing.T { return i.t } -// Close stops and removes the docker container. -func (i *Test) Close() { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) - defer cancel() - - if i.app != nil { - i.app.Close() - } - - skipCreation := os.Getenv("HORIZON_SKIP_CREATION") != "" - if !skipCreation { - i.t.Logf("Removing container %s\n", i.container.ID) - i.cli.ContainerRemove( - ctx, i.container.ID, - types.ContainerRemoveOptions{Force: true}) - } else { - i.t.Logf("Stopping container %s\n", i.container.ID) - i.cli.ContainerStop(ctx, i.container.ID, nil) - } -} - -func createTestContainer(i *Test, image string) error { - t := i.CurrentTest() - t.Logf("Pulling %s...", image) - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) - defer cancel() - - // If your Internet (or docker.io) is down, integration tests should still try to run. 
- reader, err := i.cli.ImagePull(ctx, "docker.io/"+image, types.ImagePullOptions{}) - if err != nil { - t.Log(" error pulling docker image") - t.Log(" trying to find local image (might be out-dated)") - - args := filters.NewArgs() - args.Add("reference", image) - list, innerErr := i.cli.ImageList(ctx, types.ImageListOptions{Filters: args}) - if innerErr != nil || len(list) == 0 { - t.Fatal(errors.Wrap(err, "failed to find local image")) - } - t.Log(" using local", image) - } else { - defer reader.Close() - io.Copy(os.Stdout, reader) - } - - t.Log("Creating container...") - ctx, cancel = context.WithTimeout(context.Background(), 5*time.Minute) - defer cancel() - containerConfig := &container.Config{ - Image: image, - Cmd: []string{ - "--standalone", - "--protocol-version", strconv.FormatInt(int64(i.config.ProtocolVersion), 10), - "--enable-core-manual-close", - }, - } - hostConfig := &container.HostConfig{} - - if os.Getenv("HORIZON_INTEGRATION_ENABLE_CAPTIVE_CORE") != "" { - containerConfig.Env = append(containerConfig.Env, - "ENABLE_CAPTIVE_CORE_INGESTION=true", - "STELLAR_CORE_BINARY_PATH=/opt/stellar/core/bin/start", - "STELLAR_CORE_CONFIG_PATH=/opt/stellar/core/etc/stellar-core.cfg", - ) - containerConfig.ExposedPorts = nat.PortSet{"8000": struct{}{}, "6060": struct{}{}} - hostConfig.PortBindings = map[nat.Port][]nat.PortBinding{ - nat.Port("8000"): {{HostIP: "127.0.0.1", HostPort: "8000"}}, - nat.Port("6060"): {{HostIP: "127.0.0.1", HostPort: "6060"}}, - } - } else { - containerConfig.Env = append(containerConfig.Env, - "POSTGRES_PASSWORD="+stellarCorePostgresPassword, - ) - containerConfig.ExposedPorts = nat.PortSet{ - stellarCorePort: struct{}{}, - stellarCorePostgresPort: struct{}{}, - historyArchivePort: struct{}{}, - } - hostConfig.PublishAllPorts = true - } - - i.container, err = i.cli.ContainerCreate( - ctx, - containerConfig, - hostConfig, - nil, - "horizon-integration", - ) - - return err -} - /* Utility functions for easier test case creation. */ // Creates new accounts via the master account. @@ -698,7 +523,6 @@ func (i *Test) CloseCoreLedger() error { // pace ourselves time.Sleep(50 * time.Millisecond) } - return nil } func (i *Test) GetCurrentCoreLedgerSequence() (int, error) { @@ -774,23 +598,62 @@ func (i *Test) LogFailedTx(txResponse proto.Transaction, horizonResult error) { "Transaction doesn't have success code.") } -func (i *Test) RunHorizonCLICommand(cmd ...string) { - fullCmd := append([]string{"/stellar/horizon/bin/horizon"}, cmd...) - id, err := i.cli.ContainerExecCreate( - context.Background(), - i.container.ID, - types.ExecConfig{ - Cmd: fullCmd, - }, - ) - panicIf(err) - err = i.cli.ContainerExecStart(context.Background(), id.ID, types.ExecStartCheck{}) - panicIf(err) -} - // Cluttering code with if err != nil is absolute nonsense. func panicIf(err error) { if err != nil { panic(err) } } + +func fatalIf(t *testing.T, err error) { + if err != nil { + t.Fatalf("error: %s", err) + } +} + +// Performs a best-effort attempt to find the project's Docker Compose files. +func findDockerComposePath() string { + // Lets you check if a particular directory contains a file. 
+	directoryContainsFilename := func(dir string, filename string) bool {
+		files, innerErr := ioutil.ReadDir(dir)
+		panicIf(innerErr)
+
+		for _, file := range files {
+			if file.Name() == filename {
+				return true
+			}
+		}
+
+		return false
+	}
+
+	current, err := os.Getwd()
+	panicIf(err)
+
+	//
+	// We have a primary and backup attempt for finding the necessary docker
+	// files: via $GOPATH and via local directory traversal.
+	//
+
+	if gopath := os.Getenv("GOPATH"); gopath != "" {
+		monorepo := path.Join(gopath, "src", "github.com", "stellar", "go")
+		if _, err = os.Stat(monorepo); !os.IsNotExist(err) {
+			current = monorepo
+		}
+	}
+
+	// In either case, we try to walk up the tree until we find "go.mod",
+	// which we hope is the root directory of the project.
+	for !directoryContainsFilename(current, "go.mod") {
+		current, err = filepath.Abs(path.Join(current, ".."))
+
+		// FIXME: This only works on *nix-like systems.
+		if err != nil || filepath.Base(current)[0] == filepath.Separator {
+			fmt.Println("Failed to establish project root directory.")
+			panic(err)
+		}
+	}
+
+	// Directly jump down to the folder that should contain the configs
+	return path.Join(current, "services", "horizon", "docker")
+}
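Finally, a minimal sketch of a test driving the docker-compose based harness introduced in this diff. The test name, ledger sequence and assertions are illustrative; only NewTest, Config, Client, CloseCoreLedger and LedgerIngested come from the code above, and the suite still requires Docker Compose plus the HORIZON_INTEGRATION_TESTS environment variable, as in the CircleCI job.

// Sketch: smoke test against the docker-compose based integration harness.
package integration_test

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"

	"github.com/stellar/go/services/horizon/internal/test/integration"
)

func TestHarnessSmoke(t *testing.T) {
	// Starts core + postgres via docker-compose, runs Horizon in-process and
	// blocks until ingestion catches up (skipped when integration tests are
	// not enabled).
	itest := integration.NewTest(t, integration.Config{ProtocolVersion: 14})

	// MANUAL_CLOSE=true in the core config, so ledgers are closed on demand.
	assert.NoError(t, itest.CloseCoreLedger())
	for !itest.LedgerIngested(2) {
		time.Sleep(time.Second)
	}

	root, err := itest.Client().Root()
	assert.NoError(t, err)
	assert.Equal(t, int32(14), root.CurrentProtocolVersion)
}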