From 69fa7cf93359d7ae8f8277596604a1b50221404e Mon Sep 17 00:00:00 2001 From: Adam Stokes <51892+adam-stokes@users.noreply.github.com> Date: Wed, 21 Apr 2021 11:22:36 -0400 Subject: [PATCH] v2 refactor (#1008) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Move kibana into internals, update fleet test suite * migrate docker-compose related code to internal layout * move docker related code to internal layout * move git related code to internal layout * move common attributes into internal common file system layout * move elasticsearch specifics into its own filesystem layout * move installer based code to internal layout * move shell related code to internal layout * move sanitizer code to internal layout * move io related code to internal layout * move utils into internal layout * Update package integration querying/altering * move curl to internal layout * move helm to internal layout * move kubectl into internal layout * move state internal filesystem * cleanup config in stand-alone * remove unused files * Uniquify the stand-alone step for checking agent status (#993) There were 2 steps identical in both the stand-alone and fleet test suites. Running the stand-alone test suite was picking up the step from the fleet test suite and trying to reference the FleetTestSuite structure which did not hold any of the agent information (like the hostname) for the stand alone tests. This fixes it so that the standalone test step is being referenced in the correct test suite. 
* Remove the agent config file parameters for stand alone (#983) * Update helm/metricbeat tests to use new layout * Fix policy endpoint update * fix panic on helm init * Fix step reference as this being merged seperately * Update function call to correct standalone step * Fix merge conflict * update ProfileEnv query/set for KibanaVersion * More fixes to agent endpoint security checks * update backend feature to call out endpoint in step * use common.TimeoutFactor in docker checkprocess state * Update adding endpoint integration * enable features for fleet server * not necessary to enroll after install * wait for filebeat/metricbeat before restarts * clear out fts.CurrentToken during beforeScenario * attach system integration on deploy * enroll if rpm * dont store fleet policy * update kibana config for latest fleet server * Update e2e/_suites/fleet/fleet.go * Update e2e/_suites/fleet/fleet.go * Update e2e/_suites/fleet/fleet.go * Update .pre-commit-config.yaml * Update e2e/Makefile * rename apt -> deb for installer type * execute docker start/stop with timeout between * fixes fleet_server scenario * Utilize fleet server in all tests * Fix enrollment url for fleet server * Query elasticsearch logs for endpoint security event changes * Increase search result size for ES * Fix issue with fleet server restarting continuously * unpin kibana pr now that most major breakage is resolved * force unenroll * for new fleet bootstrap on re-enrollment * Fix unenrollment * Add timeout safeguard to elastic-agent execution In some cases such as attempting to re-enroll with a revoked token, the elastic-agent will retry indefinitely. 
This fix adds a safeguard utilizing 'timeout' command prepended to the elastic-agent command so that it will timeout after TimeoutFactor Signed-off-by: Adam Stokes <51892+adam-stokes@users.noreply.github.com> Co-authored-by: Manuel de la Peña (cherry picked from commit 5f59670924df6deaa8d37e59e507c155ba6db529) # Conflicts: # e2e/Makefile # e2e/_suites/fleet/ingest_manager_test.go # e2e/_suites/fleet/stand-alone.go # e2e/_suites/fleet/world.go --- cli/cmd/deploy.go | 6 +- cli/cmd/run.go | 6 +- cli/cmd/stop.go | 6 +- cli/cmd/sync.go | 12 +- cli/cmd/sync_test.go | 6 +- cli/config/config.go | 4 +- cli/config/config_test.go | 2 +- cli/services/kibana.go | 311 ------ cli/services/kibana_test.go | 43 - e2e/Makefile | 3 + .../agent_endpoint_integration.feature | 8 +- .../fleet/features/backend_processes.feature | 4 +- e2e/_suites/fleet/fleet.go | 997 +++++------------- e2e/_suites/fleet/fleet_server.go | 100 -- e2e/_suites/fleet/ingest_manager_test.go | 114 +- e2e/_suites/fleet/installers.go | 456 -------- e2e/_suites/fleet/integrations.go | 405 ------- e2e/_suites/fleet/services.go | 453 -------- e2e/_suites/fleet/stand-alone.go | 105 +- e2e/_suites/fleet/world.go | 13 +- e2e/_suites/helm/helm_charts_test.go | 77 +- e2e/_suites/metricbeat/metricbeat_test.go | 81 +- e2e/steps/befores.go | 6 +- e2e/steps/configurations.go | 6 +- e2e/steps/processes.go | 42 - e2e/steps/services.go | 65 -- internal/common/defaults.go | 43 + internal/common/retry.go | 35 + .../manager.go => internal/compose/compose.go | 32 +- {cli/shell => internal/curl}/curl.go | 2 +- {cli => internal}/docker/docker.go | 157 ++- {e2e => internal/elasticsearch}/assertions.go | 4 +- .../elasticsearch/client.go | 24 +- internal/git/git.go | 187 ++++ internal/git/git_test.go | 103 ++ {cli/services => internal/helm}/helm.go | 14 +- internal/installer/base.go | 151 +++ .../installer/base_test.go | 2 +- internal/installer/deb.go | 132 +++ internal/installer/docker.go | 183 ++++ internal/installer/elasticagent.go | 
145 +++ internal/installer/rpm.go | 136 +++ internal/installer/tar.go | 216 ++++ internal/io/io.go | 207 ++++ internal/io/io_test.go | 27 + internal/kibana/agents.go | 252 +++++ internal/kibana/client.go | 94 ++ internal/kibana/client_test.go | 24 + internal/kibana/fleet.go | 102 ++ internal/kibana/integrations.go | 313 ++++++ internal/kibana/policies.go | 149 +++ internal/kibana/server.go | 304 ++++++ internal/kibana/url_prefixes.go | 16 + {cli/services => internal/kubectl}/kubectl.go | 4 +- .../sanitizer}/sanitizer.go | 6 +- .../sanitizer}/sanitizer_test.go | 6 +- {cli => internal}/shell/shell.go | 0 {cli => internal}/shell/shell_test.go | 0 internal/state/state.go | 120 +++ internal/state/state_test.go | 70 ++ {e2e => internal/utils}/utils.go | 124 +-- {e2e => internal/utils}/utils_test.go | 2 +- 62 files changed, 3789 insertions(+), 2928 deletions(-) delete mode 100644 cli/services/kibana.go delete mode 100644 cli/services/kibana_test.go delete mode 100644 e2e/_suites/fleet/installers.go delete mode 100644 e2e/_suites/fleet/integrations.go delete mode 100644 e2e/_suites/fleet/services.go delete mode 100644 e2e/steps/processes.go delete mode 100644 e2e/steps/services.go create mode 100644 internal/common/defaults.go create mode 100644 internal/common/retry.go rename cli/services/manager.go => internal/compose/compose.go (87%) rename {cli/shell => internal/curl}/curl.go (99%) rename {cli => internal}/docker/docker.go (63%) rename {e2e => internal/elasticsearch}/assertions.go (97%) rename e2e/elasticsearch.go => internal/elasticsearch/client.go (93%) create mode 100644 internal/git/git.go create mode 100644 internal/git/git_test.go rename {cli/services => internal/helm}/helm.go (90%) create mode 100644 internal/installer/base.go rename e2e/_suites/fleet/services_test.go => internal/installer/base_test.go (99%) create mode 100644 internal/installer/deb.go create mode 100644 internal/installer/docker.go create mode 100644 internal/installer/elasticagent.go create 
mode 100644 internal/installer/rpm.go create mode 100644 internal/installer/tar.go create mode 100644 internal/io/io.go create mode 100644 internal/io/io_test.go create mode 100644 internal/kibana/agents.go create mode 100644 internal/kibana/client.go create mode 100644 internal/kibana/client_test.go create mode 100644 internal/kibana/fleet.go create mode 100644 internal/kibana/integrations.go create mode 100644 internal/kibana/policies.go create mode 100644 internal/kibana/server.go create mode 100644 internal/kibana/url_prefixes.go rename {cli/services => internal/kubectl}/kubectl.go (98%) rename {cli/services => internal/sanitizer}/sanitizer.go (88%) rename {cli/services => internal/sanitizer}/sanitizer_test.go (77%) rename {cli => internal}/shell/shell.go (100%) rename {cli => internal}/shell/shell_test.go (100%) create mode 100644 internal/state/state.go create mode 100644 internal/state/state_test.go rename {e2e => internal/utils}/utils.go (83%) rename {e2e => internal/utils}/utils_test.go (99%) diff --git a/cli/cmd/deploy.go b/cli/cmd/deploy.go index fa675c64e3..9522f1e1b4 100644 --- a/cli/cmd/deploy.go +++ b/cli/cmd/deploy.go @@ -8,7 +8,7 @@ import ( "context" "github.com/elastic/e2e-testing/cli/config" - "github.com/elastic/e2e-testing/cli/services" + "github.com/elastic/e2e-testing/internal/compose" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" @@ -63,7 +63,7 @@ func buildDeployServiceCommand(srv string) *cobra.Command { Short: `Deploys a ` + srv + ` service`, Long: `Deploys a ` + srv + ` service, adding it to a running profile, identified by its name`, Run: func(cmd *cobra.Command, args []string) { - serviceManager := services.NewServiceManager() + serviceManager := compose.NewServiceManager() env := map[string]string{} env = config.PutServiceEnvironment(env, srv, versionToRun) @@ -85,7 +85,7 @@ func buildUndeployServiceCommand(srv string) *cobra.Command { Short: `Undeploys a ` + srv + ` service`, Long: `Undeploys a ` + srv + ` service, 
removing it from a running profile, identified by its name`, Run: func(cmd *cobra.Command, args []string) { - serviceManager := services.NewServiceManager() + serviceManager := compose.NewServiceManager() env := map[string]string{} env = config.PutServiceEnvironment(env, srv, versionToRun) diff --git a/cli/cmd/run.go b/cli/cmd/run.go index 59e824f28c..e67b6d5229 100644 --- a/cli/cmd/run.go +++ b/cli/cmd/run.go @@ -9,7 +9,7 @@ import ( "strings" "github.com/elastic/e2e-testing/cli/config" - "github.com/elastic/e2e-testing/cli/services" + "github.com/elastic/e2e-testing/internal/compose" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" @@ -63,7 +63,7 @@ func buildRunServiceCommand(srv string) *cobra.Command { Short: `Runs a ` + srv + ` service`, Long: `Runs a ` + srv + ` service, spinning up a Docker container for it and exposing its internal configuration so that you are able to connect to it in an easy manner`, Run: func(cmd *cobra.Command, args []string) { - serviceManager := services.NewServiceManager() + serviceManager := compose.NewServiceManager() env := config.PutServiceEnvironment(map[string]string{}, srv, versionToRun) @@ -91,7 +91,7 @@ func buildRunProfileCommand(key string, profile config.Profile) *cobra.Command { Short: `Runs the ` + profile.Name + ` profile`, Long: `Runs the ` + profile.Name + ` profile, spinning up the Services that compound it`, Run: func(cmd *cobra.Command, args []string) { - serviceManager := services.NewServiceManager() + serviceManager := compose.NewServiceManager() env := map[string]string{ "profileVersion": versionToRun, diff --git a/cli/cmd/stop.go b/cli/cmd/stop.go index 35f3681cda..20c2deb323 100644 --- a/cli/cmd/stop.go +++ b/cli/cmd/stop.go @@ -8,7 +8,7 @@ import ( "context" "github.com/elastic/e2e-testing/cli/config" - "github.com/elastic/e2e-testing/cli/services" + "github.com/elastic/e2e-testing/internal/compose" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" @@ -55,7 +55,7 @@ func 
buildStopServiceCommand(srv string) *cobra.Command { Short: `Stops a ` + srv + ` service`, Long: `Stops a ` + srv + ` service, stoppping its Docker container`, Run: func(cmd *cobra.Command, args []string) { - serviceManager := services.NewServiceManager() + serviceManager := compose.NewServiceManager() err := serviceManager.StopCompose(context.Background(), false, []string{srv}) if err != nil { @@ -73,7 +73,7 @@ func buildStopProfileCommand(key string, profile config.Profile) *cobra.Command Short: `Stops the ` + profile.Name + ` profile`, Long: `Stops the ` + profile.Name + ` profile, stopping the Services that compound it`, Run: func(cmd *cobra.Command, args []string) { - serviceManager := services.NewServiceManager() + serviceManager := compose.NewServiceManager() err := serviceManager.StopCompose(context.Background(), true, []string{key}) if err != nil { diff --git a/cli/cmd/sync.go b/cli/cmd/sync.go index a423df0da7..aaf7c5ce77 100644 --- a/cli/cmd/sync.go +++ b/cli/cmd/sync.go @@ -13,9 +13,9 @@ import ( "strings" "github.com/elastic/e2e-testing/cli/config" - git "github.com/elastic/e2e-testing/cli/internal" - io "github.com/elastic/e2e-testing/cli/internal" - "github.com/elastic/e2e-testing/cli/services" + git "github.com/elastic/e2e-testing/internal/git" + io "github.com/elastic/e2e-testing/internal/io" + "github.com/elastic/e2e-testing/internal/sanitizer" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" @@ -197,7 +197,7 @@ func copyIntegrationsComposeFiles(beats git.Project, pattern string, target stri } type service interface{} -type compose struct { +type composeFile struct { Version string `yaml:"version"` Services map[string]service `yaml:"services"` } @@ -212,7 +212,7 @@ func sanitizeComposeFile(composeFilePath string, targetFilePath string) error { return err } - c := compose{} + c := composeFile{} err = yaml.Unmarshal(bytes, &c) if err != nil { log.WithFields(log.Fields{ @@ -276,7 +276,7 @@ func sanitizeConfigurationFile(serviceName string, 
configFilePath string) error // prepend modules header content = "metricbeat.modules:\n" + content - serviceSanitizer := services.GetConfigSanitizer(serviceName) + serviceSanitizer := sanitizer.GetConfigSanitizer(serviceName) content = serviceSanitizer.Sanitize(content) log.WithFields(log.Fields{ diff --git a/cli/cmd/sync_test.go b/cli/cmd/sync_test.go index e44eee27c0..4202d27e5b 100644 --- a/cli/cmd/sync_test.go +++ b/cli/cmd/sync_test.go @@ -6,7 +6,7 @@ import ( "testing" "github.com/Flaque/filet" - io "github.com/elastic/e2e-testing/cli/internal" + "github.com/elastic/e2e-testing/internal/io" "github.com/stretchr/testify/assert" "gopkg.in/yaml.v2" ) @@ -28,7 +28,7 @@ func TestSanitizeComposeFile_Multiple(t *testing.T) { bytes, err := io.ReadFile(target) assert.Nil(t, err) - c := compose{} + c := composeFile{} err = yaml.Unmarshal(bytes, &c) assert.Nil(t, err) @@ -73,7 +73,7 @@ func TestSanitizeComposeFile_Single(t *testing.T) { bytes, err := io.ReadFile(target) assert.Nil(t, err) - c := compose{} + c := composeFile{} err = yaml.Unmarshal(bytes, &c) assert.Nil(t, err) diff --git a/cli/config/config.go b/cli/config/config.go index 1e0400ab2d..33cb8b776c 100644 --- a/cli/config/config.go +++ b/cli/config/config.go @@ -11,8 +11,8 @@ import ( "path/filepath" "strings" - io "github.com/elastic/e2e-testing/cli/internal" - shell "github.com/elastic/e2e-testing/cli/shell" + io "github.com/elastic/e2e-testing/internal/io" + shell "github.com/elastic/e2e-testing/internal/shell" packr "github.com/gobuffalo/packr/v2" homedir "github.com/mitchellh/go-homedir" diff --git a/cli/config/config_test.go b/cli/config/config_test.go index a0ef09b858..ecb8db5e65 100644 --- a/cli/config/config_test.go +++ b/cli/config/config_test.go @@ -11,7 +11,7 @@ import ( "strings" "testing" - io "github.com/elastic/e2e-testing/cli/internal" + "github.com/elastic/e2e-testing/internal/io" "github.com/Flaque/filet" "github.com/sirupsen/logrus" diff --git a/cli/services/kibana.go 
b/cli/services/kibana.go deleted file mode 100644 index 8d91d6911a..0000000000 --- a/cli/services/kibana.go +++ /dev/null @@ -1,311 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package services - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "strings" - "time" - - backoff "github.com/cenkalti/backoff/v4" - curl "github.com/elastic/e2e-testing/cli/shell" - log "github.com/sirupsen/logrus" - "go.elastic.co/apm" -) - -// KibanaBaseURL All URLs running on localhost as Kibana is expected to be exposed there -const kibanaBaseURL = "http://localhost:5601" - -const endpointMetadataURL = "/api/endpoint/metadata" - -const ingestManagerAgentPoliciesURL = "/api/fleet/agent_policies" -const ingestManagerAgentPolicyURL = ingestManagerAgentPoliciesURL + "/%s" - -const ingestManagerIntegrationDeleteURL = "/api/fleet/package_policies/delete" -const ingestManagerIntegrationPoliciesURL = "/api/fleet/package_policies" -const ingestManagerIntegrationPolicyURL = ingestManagerIntegrationPoliciesURL + "/%s" - -const ingestManagerIntegrationsURL = "/api/fleet/epm/packages?experimental=true&category=" -const ingestManagerIntegrationURL = "/api/fleet/epm/packages/%s-%s" - -// KibanaClient manages calls to Kibana APIs -type KibanaClient struct { - baseURL string - url string -} - -// NewKibanaClient returns a kibana client -func NewKibanaClient() *KibanaClient { - return &KibanaClient{ - baseURL: kibanaBaseURL, - } -} - -func (k *KibanaClient) getURL() string { - return k.baseURL + k.url -} - -func (k *KibanaClient) withURL(path string) *KibanaClient { - if !strings.HasPrefix(path, "/") { - path = "/" + path - } - - k.url = path - - return k -} - -// AddIntegrationToPolicy sends a POST request to add an integration to a policy -func (k *KibanaClient) 
AddIntegrationToPolicy(policy interface{}) (string, error) { - var buf bytes.Buffer - if err := json.NewEncoder(&buf).Encode(&policy); err != nil { - return "", err - } - payload := buf.String() - k.withURL(ingestManagerIntegrationPoliciesURL) - - postReq := createDefaultHTTPRequest(k.getURL()) - postReq.Payload = payload - - body, err := curl.Post(postReq) - if err != nil { - log.WithFields(log.Fields{ - "body": body, - "error": err, - "url": k.getURL(), - "payload": payload, - "policy": policy, - }).Error("Could not add integration to configuration") - return "", err - } - - return body, err -} - -// DeleteIntegrationFromPolicy sends a POST request to delete an integration from policy -func (k *KibanaClient) DeleteIntegrationFromPolicy(packageConfigID string) (string, error) { - payload := `{"packagePolicyIds":["` + packageConfigID + `"]}` - - k.withURL(ingestManagerIntegrationDeleteURL) - - postReq := createDefaultHTTPRequest(k.getURL()) - postReq.Payload = payload - - body, err := curl.Post(postReq) - if err != nil { - log.WithFields(log.Fields{ - "body": body, - "error": err, - "url": k.getURL(), - "payload": payload, - }).Error("Could not delete integration from configuration") - return "", err - } - - return body, err -} - -// GetBaseURL retrieves the base URl where Kibana is listening -func (k *KibanaClient) GetBaseURL() string { - return k.baseURL -} - -// GetIntegration sends a GET request to fetch an integration by name and version -func (k *KibanaClient) GetIntegration(packageName string, version string) (string, error) { - k.withURL(fmt.Sprintf(ingestManagerIntegrationURL, packageName, version)) - - getReq := createDefaultHTTPRequest(k.getURL()) - - body, err := curl.Get(getReq) - if err != nil { - log.WithFields(log.Fields{ - "body": body, - "error": err, - "url": k.getURL(), - }).Error("Could not get the integration from Package Registry") - return "", err - } - - return body, err -} - -// GetIntegrationFromAgentPolicy sends a GET request to fetch an 
integration from a policy -func (k *KibanaClient) GetIntegrationFromAgentPolicy(agentPolicyID string) (string, error) { - k.withURL(fmt.Sprintf(ingestManagerAgentPolicyURL, agentPolicyID)) - - getReq := createDefaultHTTPRequest(k.getURL()) - - body, err := curl.Get(getReq) - if err != nil { - log.WithFields(log.Fields{ - "body": body, - "error": err, - "policyID": agentPolicyID, - "url": k.getURL(), - }).Error("Could not get integration packages from the policy") - return "", err - } - - return body, err -} - -// GetIntegrations sends a GET request to fetch latest version for all installed integrations -func (k *KibanaClient) GetIntegrations() (string, error) { - k.withURL(ingestManagerIntegrationsURL) - - getReq := createDefaultHTTPRequest(k.getURL()) - - body, err := curl.Get(getReq) - if err != nil { - log.WithFields(log.Fields{ - "body": body, - "error": err, - "url": k.getURL(), - }).Error("Could not get Integrations") - return "", err - } - - return body, err -} - -// GetMetadataFromSecurityApp sends a POST request to retrieve metadata from Security App -func (k *KibanaClient) GetMetadataFromSecurityApp() (string, error) { - k.withURL(endpointMetadataURL) - - postReq := createDefaultHTTPRequest(k.getURL()) - body, err := curl.Post(postReq) - if err != nil { - log.WithFields(log.Fields{ - "body": body, - "error": err, - "url": k.getURL(), - }).Error("Could not get endpoint metadata") - return "", err - } - - return body, err -} - -// InstallIntegrationAssets sends a POST request to Fleet installing the assets for an integration -func (k *KibanaClient) InstallIntegrationAssets(integration string, version string) (string, error) { - k.withURL(fmt.Sprintf(ingestManagerIntegrationURL, integration, version)) - - postReq := createDefaultHTTPRequest(k.getURL()) - - body, err := curl.Post(postReq) - if err != nil { - log.WithFields(log.Fields{ - "body": body, - "error": err, - "url": k.getURL(), - }).Error("Could not install assets for the integration") - return "", 
err - } - - return body, err -} - -// UpdateIntegrationPackageConfig sends a PUT request to Fleet updating integration -// configuration -func (k *KibanaClient) UpdateIntegrationPackageConfig(packageConfigID string, payload string) (string, error) { - k.withURL(fmt.Sprintf(ingestManagerIntegrationPolicyURL, packageConfigID)) - - putReq := createDefaultHTTPRequest(k.getURL()) - putReq.Payload = payload - - body, err := curl.Put(putReq) - if err != nil { - log.WithFields(log.Fields{ - "body": body, - "error": err, - "url": k.getURL(), - }).Error("Could not update integration configuration") - return "", err - } - - return body, err -} - -// WaitForKibana waits for kibana running in localhost:5601 to be healthy, returning false -// if kibana does not get healthy status in a defined number of minutes. -func (k *KibanaClient) WaitForKibana(ctx context.Context, maxTimeoutMinutes time.Duration) (bool, error) { - k.withURL("/status") - - var ( - initialInterval = 500 * time.Millisecond - randomizationFactor = 0.5 - multiplier = 2.0 - maxInterval = 5 * time.Second - maxElapsedTime = maxTimeoutMinutes - ) - - exp := backoff.NewExponentialBackOff() - exp.InitialInterval = initialInterval - exp.RandomizationFactor = randomizationFactor - exp.Multiplier = multiplier - exp.MaxInterval = maxInterval - exp.MaxElapsedTime = maxElapsedTime - - retryCount := 1 - - kibanaStatus := func() error { - span, _ := apm.StartSpanOptions(ctx, "Health", "kibana.health", apm.SpanOptions{ - Parent: apm.SpanFromContext(ctx).TraceContext(), - }) - defer span.End() - - r := curl.HTTPRequest{ - BasicAuthUser: "elastic", - BasicAuthPassword: "changeme", - URL: k.getURL(), - } - - _, err := curl.Get(r) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "retry": retryCount, - "statusEndpoint": r.URL, - "elapsedTime": exp.GetElapsedTime(), - }).Warn("The Kibana instance is not healthy yet") - - retryCount++ - - return err - } - - log.WithFields(log.Fields{ - "retries": retryCount, - 
"statusEndpoint": r.URL, - "elapsedTime": exp.GetElapsedTime(), - }).Info("The Kibana instance is healthy") - - return nil - } - - err := backoff.Retry(kibanaStatus, exp) - if err != nil { - return false, err - } - - return true, nil -} - -// createDefaultHTTPRequest Creates a default HTTP request, including the basic auth, -// JSON content type header, and a specific header that is required by Kibana -func createDefaultHTTPRequest(url string) curl.HTTPRequest { - return curl.HTTPRequest{ - BasicAuthUser: "elastic", - BasicAuthPassword: "changeme", - Headers: map[string]string{ - "Content-Type": "application/json", - "kbn-xsrf": "e2e-tests", - }, - URL: url, - } -} diff --git a/cli/services/kibana_test.go b/cli/services/kibana_test.go deleted file mode 100644 index 7d2bdf7e56..0000000000 --- a/cli/services/kibana_test.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package services - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestGetBaseURL(t *testing.T) { - client := NewKibanaClient() - assert.NotNil(t, client) - - assert.Equal(t, "http://localhost:5601", client.GetBaseURL()) -} - -func TestNewClient(t *testing.T) { - client := NewKibanaClient() - - assert.NotNil(t, client) - assert.Equal(t, "http://localhost:5601", client.getURL()) -} - -func TestNewKibanaClientWithPathStartingWithSlash(t *testing.T) { - client := NewKibanaClient().withURL("/with_slash") - assert.NotNil(t, client) - assert.Equal(t, "http://localhost:5601/with_slash", client.getURL()) -} - -func TestNewKibanaClientWithPathStartingWithoutSlash(t *testing.T) { - client := NewKibanaClient().withURL("without_slash") - assert.NotNil(t, client) - assert.Equal(t, "http://localhost:5601/without_slash", client.getURL()) -} - -func TestNewKibanaClientWithMultiplePathsKeepsLastOne(t *testing.T) { - client := NewKibanaClient().withURL("/with_slash").withURL("lastOne") - assert.NotNil(t, client) - assert.Equal(t, "http://localhost:5601/lastOne", client.getURL()) -} diff --git a/e2e/Makefile b/e2e/Makefile index 380e5b685a..2737618994 100644 --- a/e2e/Makefile +++ b/e2e/Makefile @@ -83,7 +83,10 @@ functional-test: install-godog ELASTIC_APM_ENVIRONMENT="${ELASTIC_APM_ENVIRONMENT}" \ ELASTIC_APM_SECRET_TOKEN="${APM_SECRET_TOKEN}" \ ELASTIC_APM_SERVER_URL="${APM_SERVER_URL}" \ +<<<<<<< HEAD KIBANA_VERSION=pr97021 \ +======= +>>>>>>> 5f596709... 
v2 refactor (#1008) godog --format=${FORMAT} ${TAGS_FLAG} ${TAGS_VALUE} .PHONY: lint diff --git a/e2e/_suites/fleet/features/agent_endpoint_integration.feature b/e2e/_suites/fleet/features/agent_endpoint_integration.feature index d74de99922..9dd1986863 100644 --- a/e2e/_suites/fleet/features/agent_endpoint_integration.feature +++ b/e2e/_suites/fleet/features/agent_endpoint_integration.feature @@ -22,7 +22,7 @@ Examples: Debian @endpoint-policy-check Scenario Outline: Deploying an Endpoint makes policies to appear in the Security App - When an Endpoint is successfully deployed with a "" Agent using "tar" installer + When an "Endpoint" is successfully deployed with a "" Agent using "tar" installer Then the policy response will be shown in the Security App @centos @@ -37,7 +37,7 @@ Examples: Debian @set-policy-and-check-changes Scenario Outline: Changing an Agent policy is reflected in the Security App - Given an Endpoint is successfully deployed with a "" Agent using "tar" installer + Given an "Endpoint" is successfully deployed with a "" Agent using "tar" installer When the policy is updated to have "malware" in "detect" mode Then the policy will reflect the change in the Security App @@ -53,7 +53,7 @@ Examples: Debian @deploy-endpoint-then-unenroll-agent Scenario Outline: Un-enrolling Elastic Agent stops Elastic Endpoint - Given an Endpoint is successfully deployed with a "" Agent using "tar" installer + Given an "Endpoint" is successfully deployed with a "" Agent using "tar" installer When the agent is un-enrolled Then the agent is listed in Fleet as "inactive" And the host name is not shown in the Administration view in the Security App @@ -70,7 +70,7 @@ Examples: Debian @deploy-endpoint-then-remove-it-from-policy Scenario Outline: Removing Endpoint from Agent policy stops the connected Endpoint - Given an Endpoint is successfully deployed with a "" Agent using "tar" installer + Given an "Endpoint" is successfully deployed with a "" Agent using "tar" installer 
When the "Endpoint Security" integration is "removed" in the policy Then the agent is listed in Fleet as "online" But the host name is not shown in the Administration view in the Security App diff --git a/e2e/_suites/fleet/features/backend_processes.feature b/e2e/_suites/fleet/features/backend_processes.feature index 5e61d0e9f3..35df13c32e 100644 --- a/e2e/_suites/fleet/features/backend_processes.feature +++ b/e2e/_suites/fleet/features/backend_processes.feature @@ -144,7 +144,7 @@ Examples: Debian @deploy-endpoint-then-unenroll-agent Scenario Outline: Un-enrolling Elastic Agent stops Elastic Endpoint - Given an Endpoint is successfully deployed with a "" Agent using "tar" installer + Given an "Endpoint" is successfully deployed with a "" Agent using "tar" installer When the agent is un-enrolled Then the "elastic-endpoint" process is in the "stopped" state on the host @@ -160,7 +160,7 @@ Examples: Debian @deploy-endpoint-then-remove-it-from-policy Scenario Outline: Removing Endpoint from Agent policy stops the connected Endpoint - Given an Endpoint is successfully deployed with a "" Agent using "tar" installer + Given an "Endpoint" is successfully deployed with a "" Agent using "tar" installer When the "Endpoint Security" integration is "removed" in the policy Then the "elastic-endpoint" process is in the "stopped" state on the host diff --git a/e2e/_suites/fleet/fleet.go b/e2e/_suites/fleet/fleet.go index eb9565f972..f632d9a21f 100644 --- a/e2e/_suites/fleet/fleet.go +++ b/e2e/_suites/fleet/fleet.go @@ -10,58 +10,53 @@ import ( "strings" "time" - "github.com/Jeffail/gabs/v2" "github.com/cenkalti/backoff/v4" "github.com/cucumber/godog" - "github.com/elastic/e2e-testing/cli/services" - curl "github.com/elastic/e2e-testing/cli/shell" - shell "github.com/elastic/e2e-testing/cli/shell" - "github.com/elastic/e2e-testing/e2e" - "github.com/elastic/e2e-testing/e2e/steps" + "github.com/elastic/e2e-testing/internal/common" + "github.com/elastic/e2e-testing/internal/compose" 
+ "github.com/elastic/e2e-testing/internal/docker" + "github.com/elastic/e2e-testing/internal/elasticsearch" + "github.com/elastic/e2e-testing/internal/installer" + "github.com/elastic/e2e-testing/internal/kibana" + "github.com/elastic/e2e-testing/internal/shell" + "github.com/elastic/e2e-testing/internal/utils" "github.com/google/uuid" "github.com/pkg/errors" log "github.com/sirupsen/logrus" ) -const fleetAgentsURL = kibanaBaseURL + "/api/fleet/agents" -const fleetAgentEventsURL = kibanaBaseURL + "/api/fleet/agents/%s/events" -const fleetAgentsUnEnrollURL = kibanaBaseURL + "/api/fleet/agents/%s/unenroll" -const fleetAgentUpgradeURL = kibanaBaseURL + "/api/fleet/agents/%s/upgrade" -const fleetEnrollmentTokenURL = kibanaBaseURL + "/api/fleet/enrollment-api-keys" -const fleetSetupURL = kibanaBaseURL + "/api/fleet/agents/setup" -const ingestManagerAgentPoliciesURL = kibanaBaseURL + "/api/fleet/agent_policies" -const ingestManagerDataStreamsURL = kibanaBaseURL + "/api/fleet/data_streams" - const actionADDED = "added" const actionREMOVED = "removed" // FleetTestSuite represents the scenarios for Fleet-mode type FleetTestSuite struct { - Image string // base image used to install the agent - InstallerType string - Installers map[string]ElasticAgentInstaller + // integrations Cleanup bool - ElasticAgentStopped bool // will be used to signal when the agent process can be called again in the tear-down stage - PolicyID string // will be used to manage tokens CurrentToken string // current enrollment token CurrentTokenID string // current enrollment tokenID + ElasticAgentStopped bool // will be used to signal when the agent process can be called again in the tear-down stage Hostname string // the hostname of the container + Image string // base image used to install the agent + InstallerType string + Installers map[string]installer.ElasticAgentInstaller + Integration kibana.IntegrationPackage // the installed integration + Policy kibana.Policy + FleetPolicy kibana.Policy + 
PolicyUpdatedAt string // the moment the policy was updated Version string // current elastic-agent version - // integrations - Integration IntegrationPackage // the installed integration - PolicyUpdatedAt string // the moment the policy was updated + kibanaClient *kibana.Client } // afterScenario destroys the state created by a scenario func (fts *FleetTestSuite) afterScenario() { - serviceManager := services.NewServiceManager() + serviceManager := compose.NewServiceManager() serviceName := fts.Image if log.IsLevelEnabled(log.DebugLevel) { - installer := fts.getInstaller() + agentInstaller := fts.getInstaller() - err := installer.PrintLogsFn(fts.Hostname) + err := agentInstaller.PrintLogsFn(fts.Hostname) if err != nil { log.WithFields(log.Fields{ "containerName": fts.Hostname, @@ -71,14 +66,14 @@ func (fts *FleetTestSuite) afterScenario() { // only call it when the elastic-agent is present if !fts.ElasticAgentStopped { - err := installer.UninstallFn() + err := agentInstaller.UninstallFn() if err != nil { log.Warnf("Could not uninstall the agent after the scenario: %v", err) } } } - err := fts.unenrollHostname(true) + err := fts.unenrollHostname() if err != nil { log.WithFields(log.Fields{ "err": err, @@ -86,13 +81,14 @@ func (fts *FleetTestSuite) afterScenario() { }).Warn("The agentIDs for the hostname could not be unenrolled") } + developerMode := shell.GetEnvBool("DEVELOPER_MODE") if !developerMode { - _ = serviceManager.RemoveServicesFromCompose(context.Background(), FleetProfileName, []string{serviceName + "-systemd"}, profileEnv) + _ = serviceManager.RemoveServicesFromCompose(context.Background(), common.FleetProfileName, []string{serviceName + "-systemd"}, common.ProfileEnv) } else { log.WithField("service", serviceName).Info("Because we are running in development mode, the service won't be stopped") } - err = fts.removeToken() + err = fts.kibanaClient.DeleteEnrollmentAPIKey(fts.CurrentTokenID) if err != nil { log.WithFields(log.Fields{ "err": err, @@ 
-100,17 +96,30 @@ func (fts *FleetTestSuite) afterScenario() { }).Warn("The enrollment token could not be deleted") } - err = deleteIntegrationFromPolicy(fts.Integration, fts.PolicyID) + // Cleanup all package policies + packagePolicies, err := fts.kibanaClient.ListPackagePolicies() if err != nil { log.WithFields(log.Fields{ - "err": err, - "packageConfigID": fts.Integration.packageConfigID, - "configurationID": fts.PolicyID, - }).Warn("The integration could not be deleted from the configuration") + "err": err, + "policy": fts.FleetPolicy, + }).Error("The package policies could not be found") + } + for _, pkgPolicy := range packagePolicies { + // Do not remove the fleet server package integration otherwise fleet server fails to bootstrap + if !strings.Contains(pkgPolicy.Name, "fleet_server") && pkgPolicy.PolicyID == fts.FleetPolicy.ID { + err = fts.kibanaClient.DeleteIntegrationFromPolicy(pkgPolicy) + if err != nil { + log.WithFields(log.Fields{ + "err": err, + "packagePolicy": pkgPolicy, + }).Error("The integration could not be deleted from the configuration") + } + } } // clean up fields fts.CurrentTokenID = "" + fts.CurrentToken = "" fts.Image = "" fts.Hostname = "" } @@ -120,23 +129,29 @@ func (fts *FleetTestSuite) beforeScenario() { fts.Cleanup = false fts.ElasticAgentStopped = false - fts.Version = agentVersion + fts.Version = common.AgentVersion - // create policy with system monitoring enabled - defaultPolicy, err := getAgentDefaultPolicy("is_default") + policy, err := fts.kibanaClient.GetDefaultPolicy(false) if err != nil { log.WithFields(log.Fields{ "err": err, }).Warn("The default policy could not be obtained") - return } + fts.Policy = policy + + fleetPolicy, err := fts.kibanaClient.GetDefaultPolicy(true) + if err != nil { + log.WithFields(log.Fields{ + "err": err, + }).Warn("The default fleet server policy could not be obtained") + } + fts.FleetPolicy = fleetPolicy - fts.PolicyID = defaultPolicy.Path("id").Data().(string) } func (fts *FleetTestSuite) 
contributeSteps(s *godog.ScenarioContext) { - s.Step(`^a "([^"]*)" agent is deployed to Fleet with "([^"]*)" installer$`, fts.anAgentIsDeployedToFleetWithInstaller) + s.Step(`^a "([^"]*)" agent is deployed to Fleet with "([^"]*)" installer$`, fts.anAgentIsDeployedToFleetWithInstallerInFleetMode) s.Step(`^a "([^"]*)" agent "([^"]*)" is deployed to Fleet with "([^"]*)" installer$`, fts.anStaleAgentIsDeployedToFleetWithInstaller) s.Step(`^agent is in version "([^"]*)"$`, fts.agentInVersion) s.Step(`^agent is upgraded to version "([^"]*)"$`, fts.anAgentIsUpgraded) @@ -157,7 +172,7 @@ func (fts *FleetTestSuite) contributeSteps(s *godog.ScenarioContext) { s.Step(`^the "([^"]*)" datasource is shown in the policy as added$`, fts.thePolicyShowsTheDatasourceAdded) s.Step(`^the host name is shown in the Administration view in the Security App as "([^"]*)"$`, fts.theHostNameIsShownInTheAdminViewInTheSecurityApp) s.Step(`^the host name is not shown in the Administration view in the Security App$`, fts.theHostNameIsNotShownInTheAdminViewInTheSecurityApp) - s.Step(`^an Endpoint is successfully deployed with a "([^"]*)" Agent using "([^"]*)" installer$`, fts.anEndpointIsSuccessfullyDeployedWithAgentAndInstalller) + s.Step(`^an "([^"]*)" is successfully deployed with a "([^"]*)" Agent using "([^"]*)" installer$`, fts.anIntegrationIsSuccessfullyDeployedWithAgentAndInstaller) s.Step(`^the policy response will be shown in the Security App$`, fts.thePolicyResponseWillBeShownInTheSecurityApp) s.Step(`^the policy is updated to have "([^"]*)" in "([^"]*)" mode$`, fts.thePolicyIsUpdatedToHaveMode) s.Step(`^the policy will reflect the change in the Security App$`, fts.thePolicyWillReflectTheChangeInTheSecurityApp) @@ -170,37 +185,37 @@ func (fts *FleetTestSuite) anStaleAgentIsDeployedToFleetWithInstaller(image, ver agentVersionBackup := fts.Version defer func() { fts.Version = agentVersionBackup }() - agentStaleVersion = shell.GetEnv("ELASTIC_AGENT_STALE_VERSION", agentStaleVersion) + 
common.AgentStaleVersion = shell.GetEnv("ELASTIC_AGENT_STALE_VERSION", common.AgentStaleVersion) // check if stale version is an alias - v, err := e2e.GetElasticArtifactVersion(agentStaleVersion) + v, err := utils.GetElasticArtifactVersion(common.AgentStaleVersion) if err != nil { log.WithFields(log.Fields{ "error": err, - "version": agentStaleVersion, + "version": common.AgentStaleVersion, }).Error("Failed to get stale version") return err } - agentStaleVersion = v + common.AgentStaleVersion = v useCISnapshots := shell.GetEnvBool("BEATS_USE_CI_SNAPSHOTS") - if useCISnapshots && !strings.HasSuffix(agentStaleVersion, "-SNAPSHOT") { - agentStaleVersion += "-SNAPSHOT" + if useCISnapshots && !strings.HasSuffix(common.AgentStaleVersion, "-SNAPSHOT") { + common.AgentStaleVersion += "-SNAPSHOT" } switch version { case "stale": - version = agentStaleVersion + version = common.AgentStaleVersion case "latest": - version = agentVersion + version = common.AgentVersion default: - version = agentStaleVersion + version = common.AgentStaleVersion } fts.Version = version // prepare installer for stale version if fts.Version != agentVersionBackup { - i := GetElasticAgentInstaller(image, installerType, fts.Version) + i := installer.GetElasticAgentInstaller(image, installerType, fts.Version) fts.Installers[fmt.Sprintf("%s-%s-%s", image, installerType, version)] = i } @@ -208,24 +223,24 @@ func (fts *FleetTestSuite) anStaleAgentIsDeployedToFleetWithInstaller(image, ver } func (fts *FleetTestSuite) installCerts() error { - installer := fts.getInstaller() - if installer.InstallCertsFn == nil { + agentInstaller := fts.getInstaller() + if agentInstaller.InstallCertsFn == nil { log.WithFields(log.Fields{ - "installer": installer, + "installer": agentInstaller, "version": fts.Version, - "agentVersion": agentVersion, - "agentStaleVersion": agentStaleVersion, + "agentVersion": common.AgentVersion, + "agentStaleVersion": common.AgentStaleVersion, }).Error("No installer found") return 
errors.New("no installer found") } - err := installer.InstallCertsFn() + err := agentInstaller.InstallCertsFn() if err != nil { log.WithFields(log.Fields{ - "agentVersion": agentVersion, - "agentStaleVersion": agentStaleVersion, + "agentVersion": common.AgentVersion, + "agentStaleVersion": common.AgentStaleVersion, "error": err, - "installer": installer, + "installer": agentInstaller, "version": fts.Version, }).Error("Could not install the certificates") return err @@ -237,45 +252,32 @@ func (fts *FleetTestSuite) installCerts() error { func (fts *FleetTestSuite) anAgentIsUpgraded(desiredVersion string) error { switch desiredVersion { case "stale": - desiredVersion = agentStaleVersion + desiredVersion = common.AgentStaleVersion case "latest": - desiredVersion = agentVersion + desiredVersion = common.AgentVersion default: - desiredVersion = agentVersion + desiredVersion = common.AgentVersion } - return fts.upgradeAgent(desiredVersion) + return fts.kibanaClient.UpgradeAgent(fts.Hostname, desiredVersion) } func (fts *FleetTestSuite) agentInVersion(version string) error { switch version { case "stale": - version = agentStaleVersion + version = common.AgentStaleVersion case "latest": - version = agentVersion + version = common.AgentVersion } agentInVersionFn := func() error { - agentID, err := getAgentID(fts.Hostname) - if err != nil { - return err - } - - r := createDefaultHTTPRequest(fleetAgentsURL + "/" + agentID) - body, err := curl.Get(r) + agent, err := fts.kibanaClient.GetAgentByHostname(fts.Hostname) if err != nil { - log.WithFields(log.Fields{ - "body": body, - "error": err, - "url": r.GetURL(), - }).Error("Could not get agent in Fleet") return err } - jsonResponse, err := gabs.ParseJSON([]byte(body)) - - retrievedVersion := jsonResponse.Path("item.local_metadata.elastic.agent.version").Data().(string) - if isSnapshot := jsonResponse.Path("item.local_metadata.elastic.agent.snapshot").Data().(bool); isSnapshot { + retrievedVersion := 
agent.LocalMetadata.Elastic.Agent.Version + if isSnapshot := agent.LocalMetadata.Elastic.Agent.Snapshot; isSnapshot { retrievedVersion += "-SNAPSHOT" } @@ -286,8 +288,8 @@ func (fts *FleetTestSuite) agentInVersion(version string) error { return nil } - maxTimeout := time.Duration(timeoutFactor) * time.Minute * 2 - exp := e2e.GetExponentialBackOff(maxTimeout) + maxTimeout := time.Duration(common.TimeoutFactor) * time.Minute * 2 + exp := common.GetExponentialBackOff(maxTimeout) return backoff.Retry(agentInVersionFn, exp) } @@ -307,40 +309,39 @@ func (fts *FleetTestSuite) anAgentIsDeployedToFleetWithInstallerAndFleetServer(i fts.Image = image fts.InstallerType = installerType - installer := fts.getInstaller() + agentInstaller := fts.getInstaller() - profile := installer.profile // name of the runtime dependencies compose file + profile := agentInstaller.Profile // name of the runtime dependencies compose file - serviceName := ElasticAgentServiceName // name of the service + serviceName := common.ElasticAgentServiceName // name of the service containerName := fmt.Sprintf("%s_%s_%s_%d", profile, fts.Image+"-systemd", serviceName, 1) // name of the container - uuid := uuid.New().String() - // enroll the agent with a new token - tokenJSONObject, err := createFleetToken("Test token for "+uuid, fts.PolicyID) + enrollmentKey, err := fts.kibanaClient.CreateEnrollmentAPIKey(fts.FleetPolicy) if err != nil { return err } - fts.CurrentToken = tokenJSONObject.Path("api_key").Data().(string) - fts.CurrentTokenID = tokenJSONObject.Path("id").Data().(string) + fts.CurrentToken = enrollmentKey.APIKey + fts.CurrentTokenID = enrollmentKey.ID + + var fleetConfig *kibana.FleetConfig + fleetConfig, err = deployAgentToFleet(agentInstaller, containerName, fts.CurrentToken, bootstrapFleetServer) - var fleetConfig *FleetConfig - fleetConfig, err = deployAgentToFleet(installer, containerName, fts.CurrentToken, bootstrapFleetServer) fts.Cleanup = true if err != nil { return err } // the 
installation process for TAR includes the enrollment - if installer.installerType != "tar" { - err = installer.EnrollFn(fleetConfig) + if agentInstaller.InstallerType != "tar" { + err = agentInstaller.EnrollFn(fleetConfig) if err != nil { return err } } // get container hostname once - hostname, err := steps.GetContainerHostname(containerName) + hostname, err := docker.GetContainerHostname(containerName) if err != nil { return err } @@ -349,39 +350,50 @@ func (fts *FleetTestSuite) anAgentIsDeployedToFleetWithInstallerAndFleetServer(i return err } -func (fts *FleetTestSuite) getInstaller() ElasticAgentInstaller { +func (fts *FleetTestSuite) getInstaller() installer.ElasticAgentInstaller { // check if the agent is already cached if i, exists := fts.Installers[fts.Image+"-"+fts.InstallerType+"-"+fts.Version]; exists { return i } - installer := GetElasticAgentInstaller(fts.Image, fts.InstallerType, fts.Version) + agentInstaller := installer.GetElasticAgentInstaller(fts.Image, fts.InstallerType, fts.Version) // cache the new installer - fts.Installers[fts.Image+"-"+fts.InstallerType+"-"+fts.Version] = installer + fts.Installers[fts.Image+"-"+fts.InstallerType+"-"+fts.Version] = agentInstaller - return installer + return agentInstaller } func (fts *FleetTestSuite) processStateChangedOnTheHost(process string, state string) error { - profile := FleetProfileName + profile := common.FleetProfileName - installer := fts.getInstaller() + agentInstaller := fts.getInstaller() - serviceName := installer.service // name of the service + serviceName := agentInstaller.Service // name of the service if state == "started" { - return systemctlRun(profile, installer.image, serviceName, "start") + return installer.SystemctlRun(profile, agentInstaller.Image, serviceName, "start") } else if state == "restarted" { - return systemctlRun(profile, installer.image, serviceName, "restart") + err := installer.SystemctlRun(profile, agentInstaller.Image, serviceName, "stop") + if err != nil { + 
return err + } + + utils.Sleep(time.Duration(common.TimeoutFactor) * 10 * time.Second) + + err = installer.SystemctlRun(profile, agentInstaller.Image, serviceName, "start") + if err != nil { + return err + } + return nil } else if state == "uninstalled" { - err := installer.UninstallFn() + err := agentInstaller.UninstallFn() if err != nil { return err } // signal that the elastic-agent was uninstalled - if process == ElasticAgentProcessName { + if process == common.ElasticAgentProcessName { fts.ElasticAgentStopped = true } @@ -395,7 +407,7 @@ func (fts *FleetTestSuite) processStateChangedOnTheHost(process string, state st "process": process, }).Trace("Stopping process on the service") - err := systemctlRun(profile, installer.image, serviceName, "stop") + err := installer.SystemctlRun(profile, agentInstaller.Image, serviceName, "stop") if err != nil { log.WithFields(log.Fields{ "action": state, @@ -411,20 +423,15 @@ func (fts *FleetTestSuite) processStateChangedOnTheHost(process string, state st // we are using the Docker client instead of docker-compose // because it does not support returning the output of a // command: it simply returns error level - containerName := fmt.Sprintf("%s_%s_%s_%d", profile, fts.Image+"-systemd", ElasticAgentServiceName, 1) + containerName := fmt.Sprintf("%s_%s_%s_%d", profile, fts.Image+"-systemd", common.ElasticAgentServiceName, 1) - return steps.CheckProcessStateOnTheHost(containerName, process, "stopped", timeoutFactor) + return docker.CheckProcessStateOnTheHost(containerName, process, "stopped", common.TimeoutFactor) } func (fts *FleetTestSuite) setup() error { log.Trace("Creating Fleet setup") - err := createFleetConfiguration() - if err != nil { - return err - } - - err = checkFleetConfiguration() + err := fts.kibanaClient.RecreateFleet() if err != nil { return err } @@ -436,16 +443,20 @@ func (fts *FleetTestSuite) theAgentIsListedInFleetWithStatus(desiredStatus strin return theAgentIsListedInFleetWithStatus(desiredStatus, 
fts.Hostname) } -func theAgentIsListedInFleetWithStatus(desiredStatus, hostname string) error { +func theAgentIsListedInFleetWithStatus(desiredStatus string, hostname string) error { log.Tracef("Checking if agent is listed in Fleet as %s", desiredStatus) - maxTimeout := time.Duration(timeoutFactor) * time.Minute * 2 + kibanaClient, err := kibana.NewClient() + if err != nil { + return err + } + maxTimeout := time.Duration(common.TimeoutFactor) * time.Minute * 2 retryCount := 1 - exp := e2e.GetExponentialBackOff(maxTimeout) + exp := common.GetExponentialBackOff(maxTimeout) agentOnlineFn := func() error { - agentID, err := getAgentID(hostname) + agentID, err := kibanaClient.GetAgentIDByHostname(hostname) if err != nil { retryCount++ return err @@ -467,7 +478,8 @@ func theAgentIsListedInFleetWithStatus(desiredStatus, hostname string) error { return fmt.Errorf("The agent is not present in Fleet in the '%s' status, but it should", desiredStatus) } - isAgentInStatus, err := isAgentInStatus(agentID, desiredStatus) + agentStatus, err := kibanaClient.GetAgentStatusByHostname(hostname) + isAgentInStatus := strings.EqualFold(agentStatus, desiredStatus) if err != nil || !isAgentInStatus { if err == nil { err = fmt.Errorf("The Agent is not in the %s status yet", desiredStatus) @@ -497,7 +509,7 @@ func theAgentIsListedInFleetWithStatus(desiredStatus, hostname string) error { return nil } - err := backoff.Retry(agentOnlineFn, exp) + err = backoff.Retry(agentOnlineFn, exp) if err != nil { return err } @@ -506,17 +518,17 @@ func theAgentIsListedInFleetWithStatus(desiredStatus, hostname string) error { } func (fts *FleetTestSuite) theFileSystemAgentFolderIsEmpty() error { - installer := fts.getInstaller() + agentInstaller := fts.getInstaller() - profile := installer.profile // name of the runtime dependencies compose file + profile := agentInstaller.Profile // name of the runtime dependencies compose file // name of the container for the service: // we are using the Docker client 
instead of docker-compose // because it does not support returning the output of a // command: it simply returns error level - containerName := fmt.Sprintf("%s_%s_%s_%d", profile, fts.Image+"-systemd", ElasticAgentServiceName, 1) + containerName := fmt.Sprintf("%s_%s_%s_%d", profile, fts.Image+"-systemd", common.ElasticAgentServiceName, 1) - content, err := installer.listElasticAgentWorkingDirContent(containerName) + content, err := agentInstaller.ListElasticAgentWorkingDirContent(containerName) if err != nil { return err } @@ -529,26 +541,29 @@ func (fts *FleetTestSuite) theFileSystemAgentFolderIsEmpty() error { } func (fts *FleetTestSuite) theHostIsRestarted() error { - serviceManager := services.NewServiceManager() - - installer := fts.getInstaller() + agentInstaller := fts.getInstaller() - profile := installer.profile // name of the runtime dependencies compose file - image := installer.image // image of the service - service := installer.service // name of the service + profile := agentInstaller.Profile // name of the runtime dependencies compose file + image := agentInstaller.Image // image of the service + service := agentInstaller.Service // name of the service - composes := []string{ - profile, // profile name - image, // service + containerName := fmt.Sprintf("%s_%s_%s_%d", profile, fts.Image+"-systemd", common.ElasticAgentServiceName, 1) + _, err := shell.Execute(context.Background(), ".", "docker", "stop", containerName) + if err != nil { + log.WithFields(log.Fields{ + "image": image, + "service": service, + }).Error("Could not stop the service") } - err := serviceManager.RunCommand(profile, composes, []string{"restart", service}, profileEnv) + utils.Sleep(time.Duration(common.TimeoutFactor) * 10 * time.Second) + + _, err = shell.Execute(context.Background(), ".", "docker", "start", containerName) if err != nil { log.WithFields(log.Fields{ "image": image, "service": service, - }).Error("Could not restart the service") - return err + }).Error("Could not 
start the service") } log.WithFields(log.Fields{ @@ -562,13 +577,13 @@ func (fts *FleetTestSuite) systemPackageDashboardsAreListedInFleet() error { log.Trace("Checking system Package dashboards in Fleet") dataStreamsCount := 0 - maxTimeout := time.Duration(timeoutFactor) * time.Minute + maxTimeout := time.Duration(common.TimeoutFactor) * time.Minute retryCount := 1 - exp := e2e.GetExponentialBackOff(maxTimeout) + exp := common.GetExponentialBackOff(maxTimeout) countDataStreamsFn := func() error { - dataStreams, err := getDataStreams() + dataStreams, err := fts.kibanaClient.GetDataStreams() if err != nil { log.WithFields(log.Fields{ "retry": retryCount, @@ -619,21 +634,24 @@ func (fts *FleetTestSuite) systemPackageDashboardsAreListedInFleet() error { } func (fts *FleetTestSuite) theAgentIsUnenrolled() error { - return fts.unenrollHostname(false) + return fts.unenrollHostname() } func (fts *FleetTestSuite) theAgentIsReenrolledOnTheHost() error { log.Trace("Re-enrolling the agent on the host with same token") - installer := fts.getInstaller() + agentInstaller := fts.getInstaller() - // a restart does not need to bootstrap the Fleet Server again - cfg, err := NewFleetConfig(fts.CurrentToken, false, false) + // a re-enroll does need to bootstrap the Fleet Server again + // during an unenroll the fleet server exits as there is no longer + // and agent id associated with the enrollment. 
When fleet server + // restarts it needs a new agent to associate with the boostrap + cfg, err := kibana.NewFleetConfig(fts.CurrentToken, true, false) if err != nil { return err } - err = installer.EnrollFn(cfg) + err = agentInstaller.EnrollFn(cfg) if err != nil { return err } @@ -647,7 +665,7 @@ func (fts *FleetTestSuite) theEnrollmentTokenIsRevoked() error { "tokenID": fts.CurrentTokenID, }).Trace("Revoking enrollment token") - err := fts.removeToken() + err := fts.kibanaClient.DeleteEnrollmentAPIKey(fts.CurrentTokenID) if err != nil { return err } @@ -662,60 +680,33 @@ func (fts *FleetTestSuite) theEnrollmentTokenIsRevoked() error { func (fts *FleetTestSuite) thePolicyShowsTheDatasourceAdded(packageName string) error { log.WithFields(log.Fields{ - "policyID": fts.PolicyID, + "policyID": fts.FleetPolicy.ID, "package": packageName, }).Trace("Checking if the policy shows the package added") maxTimeout := time.Minute retryCount := 1 - exp := e2e.GetExponentialBackOff(maxTimeout) - - integration, err := getIntegrationFromAgentPolicy(packageName, fts.PolicyID) - if err != nil { - return err - } - fts.Integration = integration + exp := common.GetExponentialBackOff(maxTimeout) configurationIsPresentFn := func() error { - defaultPolicy, err := getAgentDefaultPolicy("is_default") + packagePolicy, err := fts.kibanaClient.GetIntegrationFromAgentPolicy(packageName, fts.FleetPolicy) if err != nil { log.WithFields(log.Fields{ - "error": err, - "policyID": fts.PolicyID, - "retry": retryCount, - }).Warn("An error retrieving the policy happened") - + "packagePolicy": packagePolicy, + "policy": fts.FleetPolicy, + "retry": retryCount, + "error": err, + }).Warn("The integration was not found in the policy") retryCount++ - return err } - packagePolicies := defaultPolicy.Path("package_policies") - - for _, child := range packagePolicies.Children() { - id := child.Data().(string) - if id == fts.Integration.packageConfigID { - log.WithFields(log.Fields{ - "packageConfigID": 
fts.Integration.packageConfigID, - "policyID": fts.PolicyID, - }).Info("The integration was found in the policy") - return nil - } - } - - log.WithFields(log.Fields{ - "packageConfigID": fts.Integration.packageConfigID, - "policyID": fts.PolicyID, - "retry": retryCount, - }).Warn("The integration was not found in the policy") - retryCount++ - return err } - err = backoff.Retry(configurationIsPresentFn, exp) + err := backoff.Retry(configurationIsPresentFn, exp) if err != nil { return err } @@ -725,61 +716,74 @@ func (fts *FleetTestSuite) thePolicyShowsTheDatasourceAdded(packageName string) func (fts *FleetTestSuite) theIntegrationIsOperatedInThePolicy(packageName string, action string) error { log.WithFields(log.Fields{ - "action": action, - "policyID": fts.PolicyID, - "package": packageName, + "action": action, + "policy": fts.FleetPolicy, + "package": packageName, }).Trace("Doing an operation for a package on a policy") - if strings.ToLower(action) == actionADDED { - name, version, err := getIntegrationLatestVersion(packageName) - if err != nil { - return err - } + integration, err := fts.kibanaClient.GetIntegrationByPackageName(packageName) + if err != nil { + return err + } - fts.Integration, err = getIntegration(name, version) - if err != nil { - return err + if strings.ToLower(action) == actionADDED { + packageDataStream := kibana.PackageDataStream{ + Name: integration.Name, + Description: integration.Title, + Namespace: "default", + PolicyID: fts.FleetPolicy.ID, + Enabled: true, + Package: integration, + Inputs: []kibana.Input{}, } - integrationPolicyID, err := addIntegrationToPolicy(fts.Integration, fts.PolicyID) - if err != nil { - return err + if strings.EqualFold(integration.Name, "linux") { + packageDataStream.Inputs = []kibana.Input{ + { + Type: "linux/metrics", + Enabled: true, + Streams: []interface{}{ + map[string]interface{}{ + "id": "linux/metrics-linux.memory-" + uuid.New().String(), + "enabled": true, + "data_stream": map[string]interface{}{ + 
"dataset": "linux.memory", + "type": "metrics", + }, + }, + }, + Vars: map[string]kibana.Var{ + "period": { + Value: "1s", + Type: "string", + }, + }, + }, + } } - fts.Integration.packageConfigID = integrationPolicyID - return nil + return fts.kibanaClient.AddIntegrationToPolicy(packageDataStream) } else if strings.ToLower(action) == actionREMOVED { - integration, err := getIntegrationFromAgentPolicy(packageName, fts.PolicyID) + packageDataStream, err := fts.kibanaClient.GetIntegrationFromAgentPolicy(integration.Name, fts.FleetPolicy) if err != nil { return err } - fts.Integration = integration - - err = deleteIntegrationFromPolicy(fts.Integration, fts.PolicyID) - if err != nil { - log.WithFields(log.Fields{ - "err": err, - "packageConfigID": fts.Integration.packageConfigID, - "policyID": fts.PolicyID, - }).Error("The integration could not be deleted from the policy") - return err - } - return nil + return fts.kibanaClient.DeleteIntegrationFromPolicy(packageDataStream) } - return godog.ErrPending + return nil } func (fts *FleetTestSuite) theHostNameIsNotShownInTheAdminViewInTheSecurityApp() error { log.Trace("Checking if the hostname is not shown in the Administration view in the Security App") - maxTimeout := time.Duration(timeoutFactor) * time.Minute + maxTimeout := time.Duration(common.TimeoutFactor) * time.Minute retryCount := 1 - exp := e2e.GetExponentialBackOff(maxTimeout) + exp := common.GetExponentialBackOff(maxTimeout) agentListedInSecurityFn := func() error { - host, err := isAgentListedInSecurityApp(fts.Hostname) + host, err := fts.kibanaClient.IsAgentListedInSecurityApp(fts.Hostname) if err != nil { log.WithFields(log.Fields{ "elapsedTime": exp.GetElapsedTime(), @@ -794,19 +798,6 @@ func (fts *FleetTestSuite) theHostNameIsNotShownInTheAdminViewInTheSecurityApp() return err } - if host != nil { - log.WithFields(log.Fields{ - "elapsedTime": exp.GetElapsedTime(), - "host": host, - "hostname": fts.Hostname, - "retry": retryCount, - }).Warn("The host is 
still present in the Administration view in the Security App") - - retryCount++ - - return fmt.Errorf("The host %s is still present in the Administration view in the Security App", fts.Hostname) - } - log.WithFields(log.Fields{ "elapsedTime": exp.GetElapsedTime(), "hostname": fts.Hostname, @@ -826,13 +817,13 @@ func (fts *FleetTestSuite) theHostNameIsNotShownInTheAdminViewInTheSecurityApp() func (fts *FleetTestSuite) theHostNameIsShownInTheAdminViewInTheSecurityApp(status string) error { log.Trace("Checking if the hostname is shown in the Admin view in the Security App") - maxTimeout := time.Duration(timeoutFactor) * time.Minute + maxTimeout := time.Duration(common.TimeoutFactor) * time.Minute retryCount := 1 - exp := e2e.GetExponentialBackOff(maxTimeout) + exp := common.GetExponentialBackOff(maxTimeout) agentListedInSecurityFn := func() error { - matches, err := isAgentListedInSecurityAppWithStatus(fts.Hostname, status) + matches, err := fts.kibanaClient.IsAgentListedInSecurityAppWithStatus(fts.Hostname, status) if err != nil || !matches { log.WithFields(log.Fields{ "elapsedTime": exp.GetElapsedTime(), @@ -866,8 +857,8 @@ func (fts *FleetTestSuite) theHostNameIsShownInTheAdminViewInTheSecurityApp(stat return nil } -func (fts *FleetTestSuite) anEndpointIsSuccessfullyDeployedWithAgentAndInstalller(image string, installer string) error { - err := fts.anAgentIsDeployedToFleetWithInstaller(image, installer) +func (fts *FleetTestSuite) anIntegrationIsSuccessfullyDeployedWithAgentAndInstaller(integration string, image string, agentInstaller string) error { + err := fts.anAgentIsDeployedToFleetWithInstallerInFleetMode(image, agentInstaller) if err != nil { return err } @@ -877,23 +868,22 @@ func (fts *FleetTestSuite) anEndpointIsSuccessfullyDeployedWithAgentAndInstallle return err } - // we use integration's title - return fts.theIntegrationIsOperatedInThePolicy(elasticEnpointIntegrationTitle, actionADDED) + return fts.theIntegrationIsOperatedInThePolicy(integration, 
actionADDED) } func (fts *FleetTestSuite) thePolicyResponseWillBeShownInTheSecurityApp() error { - agentID, err := getAgentID(fts.Hostname) + agentID, err := fts.kibanaClient.GetAgentIDByHostname(fts.Hostname) if err != nil { return err } - maxTimeout := time.Duration(timeoutFactor) * time.Minute + maxTimeout := time.Duration(common.TimeoutFactor) * time.Minute retryCount := 1 - exp := e2e.GetExponentialBackOff(maxTimeout) + exp := common.GetExponentialBackOff(maxTimeout) getEventsFn := func() error { - listed, err := isPolicyResponseListedInSecurityApp(agentID) + listed, err := fts.kibanaClient.IsPolicyResponseListedInSecurityApp(agentID) if err != nil { log.WithFields(log.Fields{ "elapsedTime": exp.GetElapsedTime(), @@ -947,51 +937,52 @@ func (fts *FleetTestSuite) thePolicyIsUpdatedToHaveMode(name string, mode string return godog.ErrPending } - integration, err := getIntegrationFromAgentPolicy(elasticEnpointIntegrationTitle, fts.PolicyID) + packageDS, err := fts.kibanaClient.GetIntegrationFromAgentPolicy("endpoint", fts.FleetPolicy) + if err != nil { return err } - fts.Integration = integration + fts.Integration = packageDS.Package - integrationJSON := fts.Integration.json - - // prune fields not allowed in the API side - prunedFields := []string{ - "created_at", "created_by", "id", "revision", "updated_at", "updated_by", - } - for _, f := range prunedFields { - integrationJSON.Delete(f) + for _, item := range packageDS.Inputs { + if item.Type == "endpoint" { + item.Config.(map[string]interface{})["policy"].(map[string]interface{})["value"].(map[string]interface{})["windows"].(map[string]interface{})["malware"].(map[string]interface{})["mode"] = mode + item.Config.(map[string]interface{})["policy"].(map[string]interface{})["value"].(map[string]interface{})["mac"].(map[string]interface{})["malware"].(map[string]interface{})["mode"] = mode + } } + log.WithFields(log.Fields{ + "inputs": packageDS.Inputs, + }).Trace("Upgrading integration package config") - // wee 
only support Windows and Mac, not Linux - integrationJSON.SetP(mode, "inputs.0.config.policy.value.windows."+name+".mode") - integrationJSON.SetP(mode, "inputs.0.config.policy.value.mac."+name+".mode") - - response, err := updateIntegrationPackageConfig(fts.Integration.packageConfigID, integrationJSON.String()) + updatedAt, err := fts.kibanaClient.UpdateIntegrationPackagePolicy(packageDS) if err != nil { return err } // we use a string because we are not able to process what comes in the event, so we will do // an alphabetical order, as they share same layout but different millis and timezone format - updatedAt := response.Path("item.updated_at").Data().(string) fts.PolicyUpdatedAt = updatedAt return nil } func (fts *FleetTestSuite) thePolicyWillReflectTheChangeInTheSecurityApp() error { - agentID, err := getAgentID(fts.Hostname) + agentID, err := fts.kibanaClient.GetAgentIDByHostname(fts.Hostname) if err != nil { return err } - maxTimeout := time.Duration(timeoutFactor) * time.Minute * 2 + pkgPolicy, err := fts.kibanaClient.GetIntegrationFromAgentPolicy("endpoint", fts.FleetPolicy) + if err != nil { + return err + } + + maxTimeout := time.Duration(common.TimeoutFactor) * time.Minute * 2 retryCount := 1 - exp := e2e.GetExponentialBackOff(maxTimeout) + exp := common.GetExponentialBackOff(maxTimeout) getEventsFn := func() error { - err := getAgentEvents("endpoint-security", agentID, fts.Integration.packageConfigID, fts.PolicyUpdatedAt) + err := fts.kibanaClient.GetAgentEvents("endpoint-security", agentID, pkgPolicy.ID, fts.PolicyUpdatedAt) if err != nil { log.WithFields(log.Fields{ "elapsedTime": exp.GetElapsedTime(), @@ -1025,16 +1016,16 @@ func (fts *FleetTestSuite) theVersionOfThePackageIsInstalled(version string, pac "version": version, }).Trace("Checking if package version is installed") - name, version, err := getIntegrationLatestVersion(packageName) + integration, err := fts.kibanaClient.GetIntegrationByPackageName(packageName) if err != nil { return err } - 
installedIntegration, err := installIntegrationAssets(name, version) + _, err = fts.kibanaClient.InstallIntegrationAssets(integration) if err != nil { return err } - fts.Integration = installedIntegration + fts.Integration = integration return nil } @@ -1042,20 +1033,20 @@ func (fts *FleetTestSuite) theVersionOfThePackageIsInstalled(version string, pac func (fts *FleetTestSuite) anAttemptToEnrollANewAgentFails() error { log.Trace("Enrolling a new agent with an revoked token") - installer := fts.getInstaller() + agentInstaller := fts.getInstaller() - profile := installer.profile // name of the runtime dependencies compose file + profile := agentInstaller.Profile // name of the runtime dependencies compose file - containerName := fmt.Sprintf("%s_%s_%s_%d", profile, fts.Image+"-systemd", ElasticAgentServiceName, 2) // name of the new container + containerName := fmt.Sprintf("%s_%s_%s_%d", profile, fts.Image+"-systemd", common.ElasticAgentServiceName, 2) // name of the new container - fleetConfig, err := deployAgentToFleet(installer, containerName, fts.CurrentToken, false) + fleetConfig, err := deployAgentToFleet(agentInstaller, containerName, fts.CurrentToken, false) // the installation process for TAR includes the enrollment - if installer.installerType != "tar" { + if agentInstaller.InstallerType != "tar" { if err != nil { return err } - err = installer.EnrollFn(fleetConfig) + err = agentInstaller.EnrollFn(fleetConfig) if err == nil { err = fmt.Errorf("The agent was enrolled although the token was previously revoked") @@ -1087,50 +1078,22 @@ func (fts *FleetTestSuite) anAttemptToEnrollANewAgentFails() error { return err } -func (fts *FleetTestSuite) removeToken() error { - revokeTokenURL := fleetEnrollmentTokenURL + "/" + fts.CurrentTokenID - deleteReq := createDefaultHTTPRequest(revokeTokenURL) - - body, err := curl.Delete(deleteReq) - if err != nil { - log.WithFields(log.Fields{ - "tokenID": fts.CurrentTokenID, - "body": body, - "error": err, - "url": 
revokeTokenURL, - }).Error("Could not delete token") - return err - } - - log.WithFields(log.Fields{ - "tokenID": fts.CurrentTokenID, - }).Debug("The token was deleted") - - return nil -} - // unenrollHostname deletes the statuses for an existing agent, filtering by hostname -func (fts *FleetTestSuite) unenrollHostname(force bool) error { +func (fts *FleetTestSuite) unenrollHostname() error { log.Tracef("Un-enrolling all agentIDs for %s", fts.Hostname) - jsonParsed, err := getOnlineAgents(true) + agents, err := fts.kibanaClient.ListAgents() if err != nil { return err } - hosts := jsonParsed.Path("list").Children() - - for _, host := range hosts { - hostname := host.Path("local_metadata.host.hostname").Data().(string) - // a hostname has an agentID by status - if hostname == fts.Hostname { - agentID := host.Path("id").Data().(string) + for _, agent := range agents { + if agent.LocalMetadata.Host.HostName == fts.Hostname { log.WithFields(log.Fields{ "hostname": fts.Hostname, - "agentID": agentID, }).Debug("Un-enrolling agent in Fleet") - err := unenrollAgent(agentID, force) + err := fts.kibanaClient.UnEnrollAgent(agent.LocalMetadata.Host.HostName) if err != nil { return err } @@ -1140,30 +1103,6 @@ func (fts *FleetTestSuite) unenrollHostname(force bool) error { return nil } -func (fts *FleetTestSuite) upgradeAgent(version string) error { - agentID, err := getAgentID(fts.Hostname) - if err != nil { - return err - } - - upgradeReq := curl.HTTPRequest{ - BasicAuthUser: "elastic", - BasicAuthPassword: "changeme", - Headers: map[string]string{ - "Content-Type": "application/json", - "kbn-xsrf": "true", - }, - URL: fmt.Sprintf(fleetAgentUpgradeURL, agentID), - Payload: `{"version":"` + version + `", "force": true}`, - } - - if content, err := curl.Post(upgradeReq); err != nil { - return errors.Wrap(err, content) - } - - return nil -} - func (fts *FleetTestSuite) checkDataStream() error { query := map[string]interface{}{ "query": map[string]interface{}{ @@ -1228,145 
+1167,34 @@ func (fts *FleetTestSuite) checkDataStream() error { indexName := "metrics-linux.memory-default" - _, err := e2e.WaitForNumberOfHits(context.Background(), indexName, query, 1, time.Minute) + _, err := elasticsearch.WaitForNumberOfHits(context.Background(), indexName, query, 1, time.Minute) if err != nil { log.WithFields(log.Fields{ "error": err, - }).Warn(e2e.WaitForIndices()) + }).Warn(elasticsearch.WaitForIndices()) } return err } -// checkFleetConfiguration checks that Fleet configuration is not missing -// any requirements and is read. To achieve it, a GET request is executed -func checkFleetConfiguration() error { - getReq := curl.HTTPRequest{ - BasicAuthUser: "elastic", - BasicAuthPassword: "changeme", - Headers: map[string]string{ - "Content-Type": "application/json", - "kbn-xsrf": "e2e-tests", - }, - URL: fleetSetupURL, - } - - log.Trace("Ensuring Fleet setup was initialised") - responseBody, err := curl.Get(getReq) - if err != nil { - log.WithFields(log.Fields{ - "responseBody": responseBody, - }).Error("Could not check Kibana setup for Fleet") - return err - } - - if !strings.Contains(responseBody, `"isReady":true,"missing_requirements":[]`) { - err = fmt.Errorf("Kibana has not been initialised: %s", responseBody) - log.Error(err.Error()) - return err - } - - log.WithFields(log.Fields{ - "responseBody": responseBody, - }).Info("Kibana setup initialised") - - return nil -} - -// createFleetConfiguration sends a POST request to Fleet forcing the -// recreation of the configuration -func createFleetConfiguration() error { - postReq := createDefaultHTTPRequest(fleetSetupURL) - postReq.Payload = `{ - "forceRecreate": true - }` - - body, err := curl.Post(postReq) - if err != nil { - log.WithFields(log.Fields{ - "body": body, - "error": err, - "url": fleetSetupURL, - }).Error("Could not initialise Fleet setup") - return err - } - - log.WithFields(log.Fields{ - "responseBody": body, - }).Info("Fleet setup done") - - return nil -} - -// 
createDefaultHTTPRequest Creates a default HTTP request, including the basic auth, -// JSON content type header, and a specific header that is required by Kibana -func createDefaultHTTPRequest(url string) curl.HTTPRequest { - return curl.HTTPRequest{ - BasicAuthUser: "elastic", - BasicAuthPassword: "changeme", - Headers: map[string]string{ - "Content-Type": "application/json", - "kbn-xsrf": "e2e-tests", - }, - URL: url, - } -} - -// createFleetToken sends a POST request to Fleet creating a new token with a name -func createFleetToken(name string, policyID string) (*gabs.Container, error) { - postReq := createDefaultHTTPRequest(fleetEnrollmentTokenURL) - postReq.Payload = `{ - "policy_id": "` + policyID + `", - "name": "` + name + `" - }` - - body, err := curl.Post(postReq) - if err != nil { - log.WithFields(log.Fields{ - "body": body, - "error": err, - "url": fleetSetupURL, - }).Error("Could not create Fleet token") - return nil, err - } - - jsonParsed, err := gabs.ParseJSON([]byte(body)) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "responseBody": body, - }).Error("Could not parse response into JSON") - return nil, err - } - - tokenItem := jsonParsed.Path("item") - - log.WithFields(log.Fields{ - "tokenId": tokenItem.Path("id").Data().(string), - "apiKeyId": tokenItem.Path("api_key_id").Data().(string), - }).Debug("Fleet token created") - - return tokenItem, nil -} - -func deployAgentToFleet(installer ElasticAgentInstaller, containerName string, token string, bootstrapFleetServer bool) (*FleetConfig, error) { - profile := installer.profile // name of the runtime dependencies compose file - service := installer.service // name of the service - serviceTag := installer.tag // docker tag of the service +func deployAgentToFleet(agentInstaller installer.ElasticAgentInstaller, containerName string, token string, bootstrapFleetServer bool) (*kibana.FleetConfig, error) { + profile := agentInstaller.Profile // name of the runtime dependencies compose file 
+ service := agentInstaller.Service // name of the service + serviceTag := agentInstaller.Tag // docker tag of the service envVarsPrefix := strings.ReplaceAll(service, "-", "_") // let's start with Centos 7 - profileEnv[envVarsPrefix+"Tag"] = serviceTag + common.ProfileEnv[envVarsPrefix+"Tag"] = serviceTag // we are setting the container name because Centos service could be reused by any other test suite - profileEnv[envVarsPrefix+"ContainerName"] = containerName + common.ProfileEnv[envVarsPrefix+"ContainerName"] = containerName // define paths where the binary will be mounted - profileEnv[envVarsPrefix+"AgentBinarySrcPath"] = installer.binaryPath - profileEnv[envVarsPrefix+"AgentBinaryTargetPath"] = "/" + installer.name + common.ProfileEnv[envVarsPrefix+"AgentBinarySrcPath"] = agentInstaller.BinaryPath + common.ProfileEnv[envVarsPrefix+"AgentBinaryTargetPath"] = "/" + agentInstaller.Name - serviceManager := services.NewServiceManager() + serviceManager := compose.NewServiceManager() - err := serviceManager.AddServicesToCompose(context.Background(), profile, []string{service}, profileEnv) + err := serviceManager.AddServicesToCompose(context.Background(), profile, []string{service}, common.ProfileEnv) if err != nil { log.WithFields(log.Fields{ "service": service, @@ -1375,279 +1203,20 @@ func deployAgentToFleet(installer ElasticAgentInstaller, containerName string, t return nil, err } - err = installer.PreInstallFn() + err = agentInstaller.PreInstallFn() if err != nil { return nil, err } - cfg, cfgError := NewFleetConfig(token, bootstrapFleetServer, false) + cfg, cfgError := kibana.NewFleetConfig(token, bootstrapFleetServer, false) if cfgError != nil { return nil, cfgError } - err = installer.InstallFn(cfg) - if err != nil { - return nil, err - } - - return cfg, installer.PostInstallFn() -} - -// getAgentDefaultPolicy sends a GET request to Fleet for the existing default policy, using the -// "defaultPolicyFieldName" passed as parameter as field to be used to find 
the policy in list -// of fleet policies -func getAgentDefaultPolicy(defaultPolicyFieldName string) (*gabs.Container, error) { - r := createDefaultHTTPRequest(ingestManagerAgentPoliciesURL) - body, err := curl.Get(r) - if err != nil { - log.WithFields(log.Fields{ - "body": body, - "error": err, - "url": ingestManagerAgentPoliciesURL, - }).Error("Could not get Fleet's policies") - return nil, err - } - - jsonParsed, err := gabs.ParseJSON([]byte(body)) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "responseBody": body, - }).Error("Could not parse response into JSON") - return nil, err - } - - // data streams should contain array of elements - policies := jsonParsed.Path("items") - - log.WithFields(log.Fields{ - "count": len(policies.Children()), - }).Trace("Fleet policies retrieved") - - for _, policy := range policies.Children() { - if !policy.Exists(defaultPolicyFieldName) { - continue - } - - if policy.Path(defaultPolicyFieldName).Data().(bool) { - log.WithFields(log.Fields{ - "field": defaultPolicyFieldName, - "policy": policy, - }).Trace("Default Policy was found") - return policy, nil - } - } - - return nil, fmt.Errorf("Default policy was not found with '%s' field equals to 'true'", defaultPolicyFieldName) -} - -func getAgentEvents(applicationName string, agentID string, packagePolicyID string, updatedAt string) error { - url := fmt.Sprintf(fleetAgentEventsURL, agentID) - getReq := createDefaultHTTPRequest(url) - getReq.QueryString = "page=1&perPage=20" - - body, err := curl.Get(getReq) - if err != nil { - log.WithFields(log.Fields{ - "agentID": agentID, - "application": applicationName, - "body": body, - "error": err, - "packagePolicyID": packagePolicyID, - "url": url, - }).Error("Could not get agent events from Fleet") - return err - } - - jsonResponse, err := gabs.ParseJSON([]byte(body)) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "responseBody": body, - }).Error("Could not parse response into JSON") - return err - } - 
- listItems := jsonResponse.Path("list").Children() - for _, item := range listItems { - message := item.Path("message").Data().(string) - // we use a string because we are not able to process what comes in the event, so we will do - // an alphabetical order, as they share same layout but different millis and timezone format - timestamp := item.Path("timestamp").Data().(string) - - log.WithFields(log.Fields{ - "agentID": agentID, - "application": applicationName, - "event_at": timestamp, - "message": message, - "packagePolicyID": packagePolicyID, - "updated_at": updatedAt, - }).Trace("Event found") - - matches := (strings.Contains(message, applicationName) && - strings.Contains(message, "["+agentID+"]: State changed to") && - strings.Contains(message, "Protecting with policy {"+packagePolicyID+"}")) - - if matches && timestamp > updatedAt { - log.WithFields(log.Fields{ - "application": applicationName, - "event_at": timestamp, - "packagePolicyID": packagePolicyID, - "updated_at": updatedAt, - "message": message, - }).Info("Event after the update was found") - return nil - } - } - - return fmt.Errorf("No %s events where found for the agent in the %s policy", applicationName, packagePolicyID) -} - -// getAgentID sends a GET request to Fleet for a existing hostname -// This method will retrieve the only agent ID for a hostname in the online status -func getAgentID(agentHostname string) (string, error) { - log.Tracef("Retrieving agentID for %s", agentHostname) - - jsonParsed, err := getOnlineAgents(false) - if err != nil { - return "", err - } - - hosts := jsonParsed.Path("list").Children() - - for _, host := range hosts { - hostname := host.Path("local_metadata.host.hostname").Data().(string) - if hostname == agentHostname { - agentID := host.Path("id").Data().(string) - log.WithFields(log.Fields{ - "hostname": agentHostname, - "agentID": agentID, - }).Debug("Agent listed in Fleet with online status") - return agentID, nil - } - } - - return "", nil -} - -// 
getDataStreams sends a GET request to Fleet for the existing data-streams -// if called prior to any Agent being deployed it should return a list of -// zero data streams as: { "data_streams": [] }. If called after the Agent -// is running, it will return a list of (currently in 7.8) 20 streams -func getDataStreams() (*gabs.Container, error) { - r := createDefaultHTTPRequest(ingestManagerDataStreamsURL) - body, err := curl.Get(r) + err = agentInstaller.InstallFn(cfg) if err != nil { - log.WithFields(log.Fields{ - "body": body, - "error": err, - "url": ingestManagerDataStreamsURL, - }).Error("Could not get Fleet's data streams for the agent") return nil, err } - jsonParsed, err := gabs.ParseJSON([]byte(body)) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "responseBody": body, - }).Error("Could not parse response into JSON") - return nil, err - } - - // data streams should contain array of elements - dataStreams := jsonParsed.Path("data_streams") - - log.WithFields(log.Fields{ - "count": len(dataStreams.Children()), - }).Debug("Data Streams retrieved") - - return dataStreams, nil -} - -// getOnlineAgents sends a GET request to Fleet for the existing online agents -// Will return the JSON object representing the response of querying Fleet's Agents -// endpoint -func getOnlineAgents(showInactive bool) (*gabs.Container, error) { - r := createDefaultHTTPRequest(fleetAgentsURL) - // let's not URL encode the querystring, as it seems Kibana is not handling - // the request properly, returning an 400 Bad Request error with this message: - // [request query.page=1&perPage=20&showInactive=true]: definition for this key is missing - r.EncodeURL = false - r.QueryString = fmt.Sprintf("page=1&perPage=20&showInactive=%t", showInactive) - - body, err := curl.Get(r) - if err != nil { - log.WithFields(log.Fields{ - "body": body, - "error": err, - "url": r.GetURL(), - }).Error("Could not get Fleet's online agents") - return nil, err - } - - jsonResponse, err := 
gabs.ParseJSON([]byte(body)) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "responseBody": body, - }).Error("Could not parse response into JSON") - return nil, err - } - - return jsonResponse, nil -} - -// isAgentInStatus extracts the status for an agent, identified by its hostname -// It will query Fleet's agents endpoint -func isAgentInStatus(agentID string, desiredStatus string) (bool, error) { - r := createDefaultHTTPRequest(fleetAgentsURL + "/" + agentID) - body, err := curl.Get(r) - if err != nil { - log.WithFields(log.Fields{ - "body": body, - "error": err, - "url": r.GetURL(), - }).Error("Could not get agent in Fleet") - return false, err - } - - jsonResponse, err := gabs.ParseJSON([]byte(body)) - - log.WithFields(log.Fields{ - "agentID": agentID, - "desiredStatus": desiredStatus, - }).Info(jsonResponse) - - agentStatus := jsonResponse.Path("item.status").Data().(string) - - return (strings.ToLower(agentStatus) == strings.ToLower(desiredStatus)), nil -} - -func unenrollAgent(agentID string, force bool) error { - unEnrollURL := fmt.Sprintf(fleetAgentsUnEnrollURL, agentID) - postReq := createDefaultHTTPRequest(unEnrollURL) - - if force { - postReq.Payload = `{ - "force": true - }` - } - - body, err := curl.Post(postReq) - if err != nil { - log.WithFields(log.Fields{ - "agentID": agentID, - "body": body, - "error": err, - "url": unEnrollURL, - }).Error("Could unenroll agent") - return err - } - - log.WithFields(log.Fields{ - "agentID": agentID, - }).Debug("Fleet agent was unenrolled") - - return nil + return cfg, agentInstaller.PostInstallFn() } diff --git a/e2e/_suites/fleet/fleet_server.go b/e2e/_suites/fleet/fleet_server.go index b207474410..e634ade08e 100644 --- a/e2e/_suites/fleet/fleet_server.go +++ b/e2e/_suites/fleet/fleet_server.go @@ -4,107 +4,7 @@ package main -import ( - "fmt" - - log "github.com/sirupsen/logrus" -) - -// FleetConfig represents the configuration for Fleet Server when building the enrollment command -type 
FleetConfig struct { - EnrollmentToken string - ElasticsearchPort int - ElasticsearchURI string - ElasticsearchCredentials string - KibanaPort int - KibanaURI string - // server - BootstrapFleetServer bool - ServerPolicyID string -} - -// NewFleetConfig builds a new configuration for the fleet agent, defaulting ES credentials, URI and port. -// If the 'bootstrappFleetServer' flag is true, the it will create the config for the initial fleet server -// used to bootstrap Fleet Server -// If the 'fleetServerMode' flag is true, the it will create the config for an agent using an existing Fleet -// Server to connect to Fleet. It will also retrieve the default policy ID for fleet server -func NewFleetConfig(token string, bootstrapFleetServer bool, fleetServerMode bool) (*FleetConfig, error) { - cfg := &FleetConfig{ - BootstrapFleetServer: bootstrapFleetServer, - EnrollmentToken: token, - ElasticsearchCredentials: "elastic:changeme", - ElasticsearchPort: 9200, - ElasticsearchURI: "elasticsearch", - KibanaPort: 5601, - KibanaURI: "kibana", - } - - if fleetServerMode { - defaultFleetServerPolicy, err := getAgentDefaultPolicy("is_default_fleet_server") - if err != nil { - return nil, err - } - - cfg.ServerPolicyID = defaultFleetServerPolicy.Path("id").Data().(string) - - log.WithFields(log.Fields{ - "elasticsearch": cfg.ElasticsearchURI, - "elasticsearchPort": cfg.ElasticsearchPort, - "policyID": cfg.ServerPolicyID, - "token": cfg.EnrollmentToken, - }).Debug("Fleet Server config created") - } - - return cfg, nil -} - -func (cfg FleetConfig) flags() []string { - if cfg.BootstrapFleetServer { - // TO-DO: remove all code to calculate the fleet-server policy, because it's inferred by the fleet-server - return []string{ - "--force", - "--fleet-server-es", fmt.Sprintf("http://%s@%s:%d", cfg.ElasticsearchCredentials, cfg.ElasticsearchURI, cfg.ElasticsearchPort), - } - } - - /* - // agent using an already bootstrapped fleet-server - fleetServerHost := 
"https://hostname_of_the_bootstrapped_fleet_server:8220" - return []string{ - "-e", "-v", "--force", "--insecure", - // ensure the enrollment belongs to the default policy - "--enrollment-token=" + cfg.EnrollmentToken, - "--url", fleetServerHost, - } - */ - - baseFlags := []string{"-e", "-v", "--force", "--insecure", "--enrollment-token=" + cfg.EnrollmentToken} - - if cfg.ServerPolicyID != "" { - baseFlags = append(baseFlags, "--fleet-server-insecure-http", "--fleet-server", fmt.Sprintf("http://%s@%s:%d", cfg.ElasticsearchCredentials, cfg.ElasticsearchURI, cfg.ElasticsearchPort), "--fleet-server-host=http://0.0.0.0", "--fleet-server-policy", cfg.ServerPolicyID) - } - - return append(baseFlags, "--kibana-url", fmt.Sprintf("http://%s@%s:%d", cfg.ElasticsearchCredentials, cfg.KibanaURI, cfg.KibanaPort)) -} - func (fts *FleetTestSuite) anAgentIsDeployedToFleetWithInstallerInFleetMode(image string, installerType string) error { fts.ElasticAgentStopped = true return fts.anAgentIsDeployedToFleetWithInstallerAndFleetServer(image, installerType, true) } - -// bootstrapFleetServer runs a command for the elastic-agent -func bootstrapFleetServer(profile string, image string, service string, binary string, cfg *FleetConfig) error { - log.Debug("Bootstrapping Fleet Server") - - args := []string{ - "-f", "--fleet-server-insecure-http", - "--fleet-server", fmt.Sprintf("http://%s@%s:%d", cfg.ElasticsearchCredentials, cfg.ElasticsearchURI, cfg.ElasticsearchPort), - } - - err := runElasticAgentCommand(profile, image, service, binary, "install", args) - if err != nil { - return fmt.Errorf("Failed to install the agent with subcommand: %v", err) - } - - return nil -} diff --git a/e2e/_suites/fleet/ingest_manager_test.go b/e2e/_suites/fleet/ingest_manager_test.go index 347a11b380..c9f967dbb3 100644 --- a/e2e/_suites/fleet/ingest_manager_test.go +++ b/e2e/_suites/fleet/ingest_manager_test.go @@ -14,9 +14,13 @@ import ( "github.com/cucumber/godog" "github.com/cucumber/messages-go/v10" 
"github.com/elastic/e2e-testing/cli/config" - "github.com/elastic/e2e-testing/cli/services" - "github.com/elastic/e2e-testing/cli/shell" - "github.com/elastic/e2e-testing/e2e" + "github.com/elastic/e2e-testing/internal/common" + "github.com/elastic/e2e-testing/internal/compose" + "github.com/elastic/e2e-testing/internal/elasticsearch" + "github.com/elastic/e2e-testing/internal/installer" + "github.com/elastic/e2e-testing/internal/kibana" + "github.com/elastic/e2e-testing/internal/shell" + "github.com/elastic/e2e-testing/internal/utils" log "github.com/sirupsen/logrus" ) @@ -25,58 +29,86 @@ var imts IngestManagerTestSuite func setUpSuite() { config.Init() - kibanaClient = services.NewKibanaClient() - - developerMode = shell.GetEnvBool("DEVELOPER_MODE") + kibanaClient, err := kibana.NewClient() + if err != nil { + log.Error(err) + os.Exit(1) + } + developerMode := shell.GetEnvBool("DEVELOPER_MODE") if developerMode { log.Info("Running in Developer mode 💻: runtime dependencies between different test runs will be reused to speed up dev cycle") } // check if base version is an alias - v, err := e2e.GetElasticArtifactVersion(agentVersionBase) + v, err := utils.GetElasticArtifactVersion(common.AgentVersionBase) if err != nil { log.WithFields(log.Fields{ "error": err, - "version": agentVersionBase, + "version": common.AgentVersionBase, }).Fatal("Failed to get agent base version, aborting") } - agentVersionBase = v + common.AgentVersionBase = v - timeoutFactor = shell.GetEnvInteger("TIMEOUT_FACTOR", timeoutFactor) - agentVersion = shell.GetEnv("BEAT_VERSION", agentVersionBase) + common.TimeoutFactor = shell.GetEnvInteger("TIMEOUT_FACTOR", common.TimeoutFactor) + common.AgentVersion = shell.GetEnv("BEAT_VERSION", common.AgentVersionBase) + + common.AgentStaleVersion = shell.GetEnv("ELASTIC_AGENT_STALE_VERSION", common.AgentStaleVersion) + // check if stale version is an alias + v, err = utils.GetElasticArtifactVersion(common.AgentStaleVersion) + if err != nil { + 
log.WithFields(log.Fields{ + "error": err, + "version": common.AgentStaleVersion, + }).Fatal("Failed to get agent stale version, aborting") + } + common.AgentStaleVersion = v + + useCISnapshots := shell.GetEnvBool("BEATS_USE_CI_SNAPSHOTS") + if useCISnapshots && !strings.HasSuffix(common.AgentStaleVersion, "-SNAPSHOT") { + common.AgentStaleVersion += "-SNAPSHOT" + } // check if version is an alias - v, err = e2e.GetElasticArtifactVersion(agentVersion) + v, err = utils.GetElasticArtifactVersion(common.AgentVersion) if err != nil { log.WithFields(log.Fields{ "error": err, - "version": agentVersion, + "version": common.AgentVersion, }).Fatal("Failed to get agent version, aborting") } - agentVersion = v + common.AgentVersion = v - stackVersion = shell.GetEnv("STACK_VERSION", stackVersion) - v, err = e2e.GetElasticArtifactVersion(stackVersion) + common.StackVersion = shell.GetEnv("STACK_VERSION", common.StackVersion) + v, err = utils.GetElasticArtifactVersion(common.StackVersion) if err != nil { log.WithFields(log.Fields{ "error": err, - "version": stackVersion, + "version": common.StackVersion, }).Fatal("Failed to get stack version, aborting") } - stackVersion = v + common.StackVersion = v - kibanaVersion = shell.GetEnv("KIBANA_VERSION", "") - if kibanaVersion == "" { + common.KibanaVersion = shell.GetEnv("KIBANA_VERSION", "") + if common.KibanaVersion == "" { // we want to deploy a released version for Kibana // if not set, let's use stackVersion - kibanaVersion = stackVersion + common.KibanaVersion, err = utils.GetElasticArtifactVersion(common.StackVersion) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "version": common.KibanaVersion, + }).Fatal("Failed to get kibana version, aborting") + } } imts = IngestManagerTestSuite{ Fleet: &FleetTestSuite{ - Installers: map[string]ElasticAgentInstaller{}, // do not pre-initialise the map + kibanaClient: kibanaClient, + Installers: map[string]installer.ElasticAgentInstaller{}, // do not pre-initialise the map 
+ }, + StandAlone: &StandAloneTestSuite{ + kibanaClient: kibanaClient, }, - StandAlone: &StandAloneTestSuite{}, } } @@ -108,13 +140,14 @@ func InitializeIngestManagerTestScenario(ctx *godog.ScenarioContext) { } func InitializeIngestManagerTestSuite(ctx *godog.TestSuiteContext) { - serviceManager := services.NewServiceManager() + serviceManager := compose.NewServiceManager() ctx.BeforeSuite(func() { setUpSuite() log.Trace("Installing Fleet runtime dependencies") +<<<<<<< HEAD workDir, _ := os.Getwd() profileEnv = map[string]string{ "kibanaConfigPath": path.Join(workDir, "configurations", "kibana.config.yml"), @@ -124,20 +157,29 @@ func InitializeIngestManagerTestSuite(ctx *godog.TestSuiteContext) { profileEnv["kibanaDockerNamespace"] = "kibana" if strings.HasPrefix(kibanaVersion, "pr") { +======= + common.ProfileEnv = map[string]string{ + "kibanaVersion": common.KibanaVersion, + "stackVersion": common.StackVersion, + } + + common.ProfileEnv["kibanaDockerNamespace"] = "kibana" + if strings.HasPrefix(common.KibanaVersion, "pr") || utils.IsCommit(common.KibanaVersion) { +>>>>>>> 5f596709... 
v2 refactor (#1008) // because it comes from a PR - profileEnv["kibanaDockerNamespace"] = "observability-ci" + common.ProfileEnv["kibanaDockerNamespace"] = "observability-ci" } - profile := FleetProfileName - err := serviceManager.RunCompose(context.Background(), true, []string{profile}, profileEnv) + profile := common.FleetProfileName + err := serviceManager.RunCompose(context.Background(), true, []string{profile}, common.ProfileEnv) if err != nil { log.WithFields(log.Fields{ "profile": profile, }).Fatal("Could not run the runtime dependencies for the profile.") } - minutesToBeHealthy := time.Duration(timeoutFactor) * time.Minute - healthy, err := e2e.WaitForElasticsearch(context.Background(), minutesToBeHealthy) + minutesToBeHealthy := time.Duration(common.TimeoutFactor) * time.Minute + healthy, err := elasticsearch.WaitForElasticsearch(context.Background(), minutesToBeHealthy) if !healthy { log.WithFields(log.Fields{ "error": err, @@ -145,7 +187,14 @@ func InitializeIngestManagerTestSuite(ctx *godog.TestSuiteContext) { }).Fatal("The Elasticsearch cluster could not get the healthy status") } - healthyKibana, err := kibanaClient.WaitForKibana(context.Background(), minutesToBeHealthy) + kibanaClient, err := kibana.NewClient() + if err != nil { + log.WithFields(log.Fields{ + "error": err, + }).Fatal("Unable to create kibana client") + } + + healthyKibana, err := kibanaClient.WaitForReady(minutesToBeHealthy) if !healthyKibana { log.WithFields(log.Fields{ "error": err, @@ -159,9 +208,10 @@ func InitializeIngestManagerTestSuite(ctx *godog.TestSuiteContext) { }) ctx.AfterSuite(func() { + developerMode := shell.GetEnvBool("DEVELOPER_MODE") if !developerMode { log.Debug("Destroying Fleet runtime dependencies") - profile := FleetProfileName + profile := common.FleetProfileName err := serviceManager.StopCompose(context.Background(), true, []string{profile}) if err != nil { @@ -174,7 +224,7 @@ func InitializeIngestManagerTestSuite(ctx *godog.TestSuiteContext) { installers := 
imts.Fleet.Installers for k, v := range installers { - agentPath := v.binaryPath + agentPath := v.BinaryPath if _, err := os.Stat(agentPath); err == nil { err = os.Remove(agentPath) if err != nil { diff --git a/e2e/_suites/fleet/installers.go b/e2e/_suites/fleet/installers.go deleted file mode 100644 index 894553ff43..0000000000 --- a/e2e/_suites/fleet/installers.go +++ /dev/null @@ -1,456 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package main - -import ( - "context" - "fmt" - "strings" - - "github.com/elastic/e2e-testing/cli/docker" - "github.com/elastic/e2e-testing/e2e" - "github.com/elastic/e2e-testing/e2e/steps" - log "github.com/sirupsen/logrus" -) - -// InstallerPackage represents the operations that can be performed by an installer package type -type InstallerPackage interface { - Install(cfg *FleetConfig) error - InstallCerts() error - PrintLogs(containerName string) error - Postinstall() error - Preinstall() error - Uninstall() error -} - -// BasePackage holds references to basic state for all installers -type BasePackage struct { - binaryName string - commitFile string - image string - logFile string - profile string - service string -} - -// extractPackage depends on the underlying OS, so 'cmds' must contain the specific instructions for the OS -func (i *BasePackage) extractPackage(cmds []string) error { - err := steps.ExecCommandInService(i.profile, i.image, i.service, cmds, profileEnv, false) - if err != nil { - log.WithFields(log.Fields{ - "command": cmds, - "error": err, - "image": i.image, - "service": i.service, - }).Error("Could not extract agent package in the box") - - return err - } - - return nil -} - -// Postinstall executes operations after installing a DEB package -func (i *BasePackage) Postinstall() error { - err := 
systemctlRun(i.profile, i.image, i.service, "enable") - if err != nil { - return err - } - return systemctlRun(i.profile, i.image, i.service, "start") -} - -// PrintLogs prints logs for the agent -func (i *BasePackage) PrintLogs(containerName string) error { - err := i.resolveLogFile(containerName) - if err != nil { - return fmt.Errorf("Could not resolve log file: %v", err) - } - - cmd := []string{ - "cat", i.logFile, - } - - err = steps.ExecCommandInService(i.profile, i.image, i.service, cmd, profileEnv, false) - if err != nil { - return err - } - - return nil -} - -// resolveLogFile retrieves the full path of the log file in the underlying Docker container -// calculating the hash commit if necessary -func (i *BasePackage) resolveLogFile(containerName string) error { - if strings.Contains(i.logFile, "%s") { - hash, err := getElasticAgentHash(containerName, i.commitFile) - if err != nil { - log.WithFields(log.Fields{ - "containerName": containerName, - "error": err, - }).Error("Could not get agent hash in the container") - - return err - } - - i.logFile = fmt.Sprintf(i.logFile, hash) - } - - return nil -} - -// DEBPackage implements operations for a DEB installer -type DEBPackage struct { - BasePackage -} - -// NewDEBPackage creates an instance for the DEB installer -func NewDEBPackage(binaryName string, profile string, image string, service string, commitFile string, logFile string) *DEBPackage { - return &DEBPackage{ - BasePackage: BasePackage{ - binaryName: binaryName, - commitFile: commitFile, - image: image, - profile: profile, - service: service, - }, - } -} - -// Install installs a DEB package -func (i *DEBPackage) Install(cfg *FleetConfig) error { - return i.extractPackage([]string{"apt", "install", "/" + i.binaryName, "-y"}) -} - -// InstallCerts installs the certificates for a DEB package -func (i *DEBPackage) InstallCerts() error { - return installCertsForDebian(i.profile, i.image, i.service) -} -func installCertsForDebian(profile string, image string, 
service string) error { - if err := steps.ExecCommandInService(profile, image, service, []string{"apt-get", "update"}, profileEnv, false); err != nil { - return err - } - if err := steps.ExecCommandInService(profile, image, service, []string{"apt", "install", "ca-certificates", "-y"}, profileEnv, false); err != nil { - return err - } - if err := steps.ExecCommandInService(profile, image, service, []string{"update-ca-certificates", "-f"}, profileEnv, false); err != nil { - return err - } - return nil -} - -// Preinstall executes operations before installing a DEB package -func (i *DEBPackage) Preinstall() error { - log.Trace("No preinstall commands for DEB packages") - return nil -} - -// Uninstall uninstalls a DEB package -func (i *DEBPackage) Uninstall() error { - log.Trace("No uninstall commands for DEB packages") - return nil -} - -// DockerPackage implements operations for a DEB installer -type DockerPackage struct { - BasePackage - installerPath string - ubi8 bool - // optional fields - arch string - artifact string - originalVersion string - OS string - version string -} - -// NewDockerPackage creates an instance for the Docker installer -func NewDockerPackage(binaryName string, profile string, image string, service string, installerPath string, ubi8 bool, commitFile string, logFile string) *DockerPackage { - return &DockerPackage{ - BasePackage: BasePackage{ - binaryName: binaryName, - commitFile: commitFile, - image: image, - logFile: logFile, - profile: profile, - service: service, - }, - installerPath: installerPath, - ubi8: ubi8, - } -} - -// Install installs a Docker package -func (i *DockerPackage) Install(cfg *FleetConfig) error { - log.Trace("No install commands for Docker packages") - return nil -} - -// InstallCerts installs the certificates for a Docker package -func (i *DockerPackage) InstallCerts() error { - log.Trace("No install certs commands for Docker packages") - return nil -} - -// Preinstall executes operations before installing a Docker 
package -func (i *DockerPackage) Preinstall() error { - err := docker.LoadImage(i.installerPath) - if err != nil { - return err - } - - // we need to tag the loaded image because its tag relates to the target branch - return docker.TagImage( - "docker.elastic.co/beats/"+i.artifact+":"+agentVersionBase, - "docker.elastic.co/observability-ci/"+i.artifact+":"+i.originalVersion+"-amd64", - ) -} - -// Postinstall executes operations after installing a Docker package -func (i *DockerPackage) Postinstall() error { - log.Trace("No postinstall commands for Docker packages") - return nil -} - -// Uninstall uninstalls a Docker package -func (i *DockerPackage) Uninstall() error { - log.Trace("No uninstall commands for Docker packages") - return nil -} - -// WithArch sets the architecture -func (i *DockerPackage) WithArch(arch string) *DockerPackage { - i.arch = arch - return i -} - -// WithArtifact sets the artifact -func (i *DockerPackage) WithArtifact(artifact string) *DockerPackage { - i.artifact = artifact - return i -} - -// WithOS sets the OS -func (i *DockerPackage) WithOS(OS string) *DockerPackage { - i.OS = OS - return i -} - -// WithVersion sets the version -func (i *DockerPackage) WithVersion(version string) *DockerPackage { - i.version = e2e.CheckPRVersion(version, agentVersionBase) // sanitize version - i.originalVersion = version - return i -} - -// RPMPackage implements operations for a RPM installer -type RPMPackage struct { - BasePackage -} - -// NewRPMPackage creates an instance for the RPM installer -func NewRPMPackage(binaryName string, profile string, image string, service string, commitFile string, logFile string) *RPMPackage { - return &RPMPackage{ - BasePackage: BasePackage{ - binaryName: binaryName, - commitFile: commitFile, - image: image, - logFile: logFile, - profile: profile, - service: service, - }, - } -} - -// Install installs a RPM package -func (i *RPMPackage) Install(cfg *FleetConfig) error { - return i.extractPackage([]string{"yum", 
"localinstall", "/" + i.binaryName, "-y"}) -} - -// InstallCerts installs the certificates for a RPM package -func (i *RPMPackage) InstallCerts() error { - return installCertsForCentos(i.profile, i.image, i.service) -} -func installCertsForCentos(profile string, image string, service string) error { - if err := steps.ExecCommandInService(profile, image, service, []string{"yum", "check-update"}, profileEnv, false); err != nil { - return err - } - if err := steps.ExecCommandInService(profile, image, service, []string{"yum", "install", "ca-certificates", "-y"}, profileEnv, false); err != nil { - return err - } - if err := steps.ExecCommandInService(profile, image, service, []string{"update-ca-trust", "force-enable"}, profileEnv, false); err != nil { - return err - } - if err := steps.ExecCommandInService(profile, image, service, []string{"update-ca-trust", "extract"}, profileEnv, false); err != nil { - return err - } - return nil -} - -// Preinstall executes operations before installing a RPM package -func (i *RPMPackage) Preinstall() error { - log.Trace("No preinstall commands for RPM packages") - return nil -} - -// Uninstall uninstalls a RPM package -func (i *RPMPackage) Uninstall() error { - log.Trace("No uninstall commands for RPM packages") - return nil -} - -// TARPackage implements operations for a RPM installer -type TARPackage struct { - BasePackage - // optional fields - arch string - artifact string - OS string - OSFlavour string // at this moment, centos or debian - version string -} - -// NewTARPackage creates an instance for the RPM installer -func NewTARPackage(binaryName string, profile string, image string, service string, commitFile string, logFile string) *TARPackage { - return &TARPackage{ - BasePackage: BasePackage{ - binaryName: binaryName, - commitFile: commitFile, - image: image, - logFile: logFile, - profile: profile, - service: service, - }, - } -} - -// Install installs a TAR package -func (i *TARPackage) Install(cfg *FleetConfig) error { - 
// install the elastic-agent to /usr/bin/elastic-agent using command - binary := fmt.Sprintf("/elastic-agent/%s", i.artifact) - - args := cfg.flags() - - err := runElasticAgentCommand(i.profile, i.image, i.service, binary, "install", args) - if err != nil { - return fmt.Errorf("Failed to install the agent with subcommand: %v", err) - } - - return nil -} - -// InstallCerts installs the certificates for a TAR package, using the right OS package manager -func (i *TARPackage) InstallCerts() error { - if i.OSFlavour == "centos" { - return installCertsForCentos(i.profile, i.image, i.service) - } else if i.OSFlavour == "debian" { - return installCertsForDebian(i.profile, i.image, i.service) - } - - log.WithFields(log.Fields{ - "arch": i.arch, - "OS": i.OS, - "OSFlavour": i.OSFlavour, - }).Debug("Installation of certificates was skipped because of unknown OS flavour") - - return nil -} - -// Postinstall executes operations after installing a TAR package -func (i *TARPackage) Postinstall() error { - log.Trace("No postinstall commands for TAR installer") - return nil -} - -// Preinstall executes operations before installing a TAR package -func (i *TARPackage) Preinstall() error { - err := i.extractPackage([]string{"tar", "-xvf", "/" + i.binaryName}) - if err != nil { - return err - } - - // simplify layout - cmds := [][]string{ - {"rm", "-fr", "/elastic-agent"}, - {"mv", fmt.Sprintf("/%s-%s-%s-%s", i.artifact, i.version, i.OS, i.arch), "/elastic-agent"}, - } - for _, cmd := range cmds { - err = steps.ExecCommandInService(i.profile, i.image, i.service, cmd, profileEnv, false) - if err != nil { - log.WithFields(log.Fields{ - "command": cmd, - "error": err, - "image": i.image, - "service": i.service, - "version": i.version, - }).Error("Could not extract agent package in the box") - - return err - } - } - - return nil -} - -// Uninstall uninstalls a TAR package -func (i *TARPackage) Uninstall() error { - args := []string{"-f"} - - return runElasticAgentCommand(i.profile, 
i.image, i.service, ElasticAgentProcessName, "uninstall", args) -} - -// WithArch sets the architecture -func (i *TARPackage) WithArch(arch string) *TARPackage { - i.arch = arch - return i -} - -// WithArtifact sets the artifact -func (i *TARPackage) WithArtifact(artifact string) *TARPackage { - i.artifact = artifact - return i -} - -// WithOS sets the OS -func (i *TARPackage) WithOS(OS string) *TARPackage { - i.OS = OS - return i -} - -// WithOSFlavour sets the OS flavour, at this moment centos or debian -func (i *TARPackage) WithOSFlavour(OSFlavour string) *TARPackage { - i.OSFlavour = OSFlavour - return i -} - -// WithVersion sets the version -func (i *TARPackage) WithVersion(version string) *TARPackage { - i.version = version - return i -} - -// getElasticAgentHash uses Elastic Agent's home dir to read the file with agent's build hash -// it will return the first six characters of the hash (short hash) -func getElasticAgentHash(containerName string, commitFile string) (string, error) { - cmd := []string{ - "cat", commitFile, - } - - fullHash, err := docker.ExecCommandIntoContainer(context.Background(), containerName, "root", cmd) - if err != nil { - return "", err - } - - runes := []rune(fullHash) - shortHash := string(runes[0:6]) - - log.WithFields(log.Fields{ - "commitFile": commitFile, - "containerName": containerName, - "hash": fullHash, - "shortHash": shortHash, - }).Debug("Agent build hash found") - - return shortHash, nil -} diff --git a/e2e/_suites/fleet/integrations.go b/e2e/_suites/fleet/integrations.go deleted file mode 100644 index e8627fbc9b..0000000000 --- a/e2e/_suites/fleet/integrations.go +++ /dev/null @@ -1,405 +0,0 @@ -package main - -import ( - "fmt" - "github.com/google/uuid" - "strings" - - "github.com/Jeffail/gabs/v2" - log "github.com/sirupsen/logrus" -) - -// title for the Elastic Endpoint integration in the package registry. 
-// This value could change depending on the version of the package registry -// We are using the title because the feature files have to be super readable -// and the title is more readable than the name -const elasticEnpointIntegrationTitle = "Endpoint Security" - -// IntegrationPackage used to share information about a integration -type IntegrationPackage struct { - packageConfigID string `json:"packageConfigId"` - Name string `json:"name"` - Title string `json:"title"` - Version string `json:"version"` - json *gabs.Container // json representation of the integration -} - -// Policy is a policy -type Policy struct { - ID string `json:"id,omitempty"` - Name string `json:"name"` - Description string `json:"description"` - Namespace string `json:"namespace"` - Enabled bool `json:"enabled"` - AgentPolicyID string `json:"policy_id"` - OutputID string `json:"output_id"` - Inputs []Input `json:"inputs"` - Package IntegrationPackage `json:"package"` -} - -// Input is a policy input -type Input struct { - Type string `json:"type"` - Enabled bool `json:"enabled"` - Streams []interface{} `json:"streams"` - Vars map[string]Var `json:"vars,omitempty"` -} - -// Var is an input var -type Var struct { - Value interface{} `json:"value"` - Type string `json:"type"` -} - -// addIntegrationToPolicy sends a POST request to Fleet adding an integration to a configuration -func addIntegrationToPolicy(integrationPackage IntegrationPackage, policyID string) (string, error) { - - policy := Policy{ - AgentPolicyID: policyID, - Name: integrationPackage.Name + "-test-name", - Description: integrationPackage.Title + "-test-description", - Namespace: "default", - Enabled: true, - Package: integrationPackage, - Inputs: []Input{}, - } - - if policy.Package.Name == "linux" { - policy.Inputs = []Input{ - { - Type: "linux/metrics", - Enabled: true, - Streams: []interface{}{ - map[string]interface{}{ - "id": "linux/metrics-linux.memory-" + uuid.New().String(), - "enabled": true, - "data_stream": 
map[string]interface{}{ - "dataset": "linux.memory", - "type": "metrics", - }, - }, - }, - Vars: map[string]Var{ - "period": { - Value: "1s", - Type: "string", - }, - }, - }, - } - } - - body, err := kibanaClient.AddIntegrationToPolicy(policy) - if err != nil { - return "", err - } - - jsonParsed, err := gabs.ParseJSON([]byte(body)) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "responseBody": body, - }).Error("Could not parse response into JSON") - return "", err - } - - integrationConfigurationID := jsonParsed.Path("item.id").Data().(string) - - log.WithFields(log.Fields{ - "policyID": policyID, - "integrationConfigurationID": integrationConfigurationID, - "integration": integrationPackage.Name, - "version": integrationPackage.Version, - }).Info("Integration added to the configuration") - - return integrationConfigurationID, nil -} - -// deleteIntegrationFromPolicy sends a POST request to Fleet deleting an integration from a configuration -func deleteIntegrationFromPolicy(integrationPackage IntegrationPackage, policyID string) error { - _, err := kibanaClient.DeleteIntegrationFromPolicy(integrationPackage.packageConfigID) - if err != nil { - return err - } - - log.WithFields(log.Fields{ - "policyID": policyID, - "integration": integrationPackage.Name, - "packageConfigId": integrationPackage.packageConfigID, - "version": integrationPackage.Version, - }).Info("Integration deleted from the configuration") - - return nil -} - -// getIntegration returns metadata from an integration from Fleet, without the package ID -func getIntegration(packageName string, version string) (IntegrationPackage, error) { - body, err := kibanaClient.GetIntegration(packageName, version) - if err != nil { - return IntegrationPackage{}, err - } - - jsonParsed, err := gabs.ParseJSON([]byte(body)) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "responseBody": body, - }).Error("Could not parse get response into JSON") - return IntegrationPackage{}, err - } - 
- response := jsonParsed.Path("response") - integrationPackage := IntegrationPackage{ - Name: response.Path("name").Data().(string), - Title: response.Path("title").Data().(string), - Version: response.Path("latestVersion").Data().(string), - } - - return integrationPackage, nil -} - -// getIntegrationFromAgentPolicy inspects the integrations added to an agent policy, returning the -// a struct representing the package, including the packageID for the integration in the policy -func getIntegrationFromAgentPolicy(packageName string, agentPolicyID string) (IntegrationPackage, error) { - body, err := kibanaClient.GetIntegrationFromAgentPolicy(agentPolicyID) - if err != nil { - return IntegrationPackage{}, err - } - - jsonParsed, err := gabs.ParseJSON([]byte(body)) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "responseBody": body, - }).Error("Could not parse response into JSON") - return IntegrationPackage{}, err - } - - packagePolicies := jsonParsed.Path("item.package_policies").Children() - for _, packagePolicy := range packagePolicies { - title := packagePolicy.Path("package.title").Data().(string) - if title == packageName { - integrationPackage := IntegrationPackage{ - packageConfigID: packagePolicy.Path("id").Data().(string), - Name: packagePolicy.Path("package.name").Data().(string), - Title: title, - Version: packagePolicy.Path("package.version").Data().(string), - json: packagePolicy, - } - - log.WithFields(log.Fields{ - "package": integrationPackage, - "policyID": agentPolicyID, - }).Debug("Package policy found in the configuration") - - return integrationPackage, nil - } - } - - return IntegrationPackage{}, fmt.Errorf("%s package policy not found in the configuration", packageName) -} - -// getIntegrationLatestVersion sends a GET request to Fleet for the existing integrations -// checking if the desired integration exists in the package registry. 
If so, it will -// return name and version (latest) of the integration -func getIntegrationLatestVersion(integrationName string) (string, string, error) { - body, err := kibanaClient.GetIntegrations() - if err != nil { - return "", "", err - } - - jsonParsed, err := gabs.ParseJSON([]byte(body)) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "responseBody": body, - }).Error("Could not parse response into JSON") - return "", "", err - } - - // data streams should contain array of elements - integrations := jsonParsed.Path("response").Children() - - log.WithFields(log.Fields{ - "count": len(integrations), - }).Trace("Integrations retrieved") - - for _, integration := range integrations { - title := integration.Path("title").Data().(string) - if strings.ToLower(title) == strings.ToLower(integrationName) { - name := integration.Path("name").Data().(string) - version := integration.Path("version").Data().(string) - log.WithFields(log.Fields{ - "name": name, - "title": title, - "version": version, - }).Debug("Integration in latest version found") - return name, version, nil - } - } - - return "", "", fmt.Errorf("The %s integration was not found", integrationName) -} - -// getMetadataFromSecurityApp sends a POST request to Endpoint retrieving the metadata that -// is listed in the Security App -func getMetadataFromSecurityApp() (*gabs.Container, error) { - body, err := kibanaClient.GetMetadataFromSecurityApp() - if err != nil { - return nil, err - } - - jsonParsed, err := gabs.ParseJSON([]byte(body)) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "responseBody": body, - }).Error("Could not parse response into JSON") - return nil, err - } - - hosts := jsonParsed.Path("hosts") - - log.WithFields(log.Fields{ - "hosts": hosts, - }).Trace("Hosts in the Security App") - - return hosts, nil -} - -// installIntegration sends a POST request to Fleet installing the assets for an integration -func installIntegrationAssets(integration string, 
version string) (IntegrationPackage, error) { - body, err := kibanaClient.InstallIntegrationAssets(integration, version) - if err != nil { - return IntegrationPackage{}, err - } - - log.WithFields(log.Fields{ - "integration": integration, - "version": version, - }).Info("Assets for the integration where installed") - - jsonParsed, err := gabs.ParseJSON([]byte(body)) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "responseBody": body, - }).Error("Could not parse install response into JSON") - return IntegrationPackage{}, err - } - response := jsonParsed.Path("response").Index(0) - - packageConfigID := response.Path("id").Data().(string) - - // get the integration again in the case it's already installed - integrationPackage, err := getIntegration(integration, version) - if err != nil { - return IntegrationPackage{}, err - } - - integrationPackage.packageConfigID = packageConfigID - - return integrationPackage, nil -} - -// isAgentListedInSecurityApp retrieves the hosts from Endpoint to check if a hostname -// is listed in the Security App. For that, we will inspect the metadata, and will iterate -// through the hosts, until we get the proper hostname. -func isAgentListedInSecurityApp(hostName string) (*gabs.Container, error) { - hosts, err := getMetadataFromSecurityApp() - if err != nil { - return nil, err - } - - for _, host := range hosts.Children() { - metadataHostname := host.Path("metadata.host.hostname").Data().(string) - if metadataHostname == hostName { - log.WithFields(log.Fields{ - "hostname": hostName, - }).Debug("Hostname for the agent listed in the Security App") - - return host, nil - } - } - - return nil, nil -} - -// isAgentListedInSecurityAppWithStatus inspects the metadata field for a hostname, obtained from -// the security App. 
We will check if the status matches the desired status, returning an error -// if the agent is not present in the Security App -func isAgentListedInSecurityAppWithStatus(hostName string, desiredStatus string) (bool, error) { - host, err := isAgentListedInSecurityApp(hostName) - if err != nil { - log.WithFields(log.Fields{ - "hostname": hostName, - "error": err, - }).Error("There was an error getting the agent in the Administration view in the Security app") - return false, err - } - - if host == nil { - return false, fmt.Errorf("The host %s is not listed in the Administration view in the Security App", hostName) - } - - hostStatus := host.Path("host_status").Data().(string) - log.WithFields(log.Fields{ - "desiredStatus": desiredStatus, - "hostname": hostName, - "status": hostStatus, - }).Debug("Hostname for the agent listed with desired status in the Administration view in the Security App") - - return (hostStatus == desiredStatus), nil -} - -// isPolicyResponseListedInSecurityApp sends a POST request to Endpoint to check if a hostname -// is listed in the Security App. For that, we will inspect the metadata, and will iterate -// through the hosts, until we get the policy status, finally checking for the success -// status. 
-func isPolicyResponseListedInSecurityApp(agentID string) (bool, error) { - hosts, err := getMetadataFromSecurityApp() - if err != nil { - return false, err - } - - for _, host := range hosts.Children() { - metadataAgentID := host.Path("metadata.elastic.agent.id").Data().(string) - name := host.Path("metadata.Endpoint.policy.applied.name").Data().(string) - status := host.Path("metadata.Endpoint.policy.applied.status").Data().(string) - if metadataAgentID == agentID { - log.WithFields(log.Fields{ - "agentID": agentID, - "name": name, - "status": status, - }).Debug("Policy response for the agent listed in the Security App") - - return (status == "success"), nil - } - } - - return false, nil -} - -// updateIntegrationPackageConfig sends a PUT request to Fleet updating integration -// configuration -func updateIntegrationPackageConfig(packageConfigID string, payload string) (*gabs.Container, error) { - body, err := kibanaClient.UpdateIntegrationPackageConfig(packageConfigID, payload) - if err != nil { - return nil, err - } - - jsonParsed, err := gabs.ParseJSON([]byte(body)) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "responseBody": body, - }).Error("Could not parse response into JSON") - return nil, err - } - - log.WithFields(log.Fields{ - "policyID": packageConfigID, - }).Debug("Configuration for the integration was updated") - - return jsonParsed, nil -} diff --git a/e2e/_suites/fleet/services.go b/e2e/_suites/fleet/services.go deleted file mode 100644 index a0ada0e9e5..0000000000 --- a/e2e/_suites/fleet/services.go +++ /dev/null @@ -1,453 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package main - -import ( - "context" - "strings" - - "github.com/elastic/e2e-testing/cli/docker" - "github.com/elastic/e2e-testing/e2e" - "github.com/elastic/e2e-testing/e2e/steps" - log "github.com/sirupsen/logrus" -) - -// ElasticAgentInstaller represents how to install an agent, depending of the box type -type ElasticAgentInstaller struct { - artifactArch string // architecture of the artifact - artifactExtension string // extension of the artifact - artifactName string // name of the artifact - artifactOS string // OS of the artifact - artifactVersion string // version of the artifact - binaryPath string // the local path where the agent for the binary is located - EnrollFn func(cfg *FleetConfig) error - image string // docker image - installerType string - InstallFn func(cfg *FleetConfig) error - InstallCertsFn func() error - name string // the name for the binary - processName string // name of the elastic-agent process - profile string // parent docker-compose file - PostInstallFn func() error - PreInstallFn func() error - PrintLogsFn func(containerName string) error - service string // name of the service - tag string // docker tag - UninstallFn func() error - workingDir string // location of the application -} - -// listElasticAgentWorkingDirContent list Elastic Agent's working dir content -func (i *ElasticAgentInstaller) listElasticAgentWorkingDirContent(containerName string) (string, error) { - cmd := []string{ - "ls", "-l", i.workingDir, - } - - content, err := docker.ExecCommandIntoContainer(context.Background(), containerName, "root", cmd) - if err != nil { - return "", err - } - - log.WithFields(log.Fields{ - "workingDir": i.workingDir, - "containerName": containerName, - "content": content, - }).Debug("Agent working dir content") - - return content, nil -} - -// runElasticAgentCommand runs a command for the elastic-agent -func runElasticAgentCommand(profile string, image string, service string, process string, command string, arguments []string) 
error { - return runElasticAgentCommandWithEnv(profile, image, service, process, command, arguments, map[string]string{}) -} - -// runElasticAgentCommandWithEnv runs a command with env for the elastic-agent -func runElasticAgentCommandWithEnv(profile string, image string, service string, process string, command string, arguments []string, env map[string]string) error { - cmds := []string{ - process, command, - } - cmds = append(cmds, arguments...) - - // append passed env to profile env - for k, v := range env { - profileEnv[k] = v - } - - err := steps.ExecCommandInService(profile, image, service, cmds, profileEnv, false) - if err != nil { - log.WithFields(log.Fields{ - "command": cmds, - "profile": profile, - "service": service, - "error": err, - }).Error("Could not run agent command in the box") - - return err - } - - return nil -} - -// downloadAgentBinary it downloads the binary and stores the location of the downloaded file -// into the installer struct, to be used else where -// If the environment variable ELASTIC_AGENT_DOWNLOAD_URL exists, then the artifact to be downloaded will -// be defined by that value -// Else if the environment variable BEATS_LOCAL_PATH is set, then the artifact -// to be used will be defined by the local snapshot produced by the local build. -// Else, if the environment variable BEATS_USE_CI_SNAPSHOTS is set, then the artifact -// to be downloaded will be defined by the latest snapshot produced by the Beats CI. 
-func downloadAgentBinary(artifactName string, artifact string, version string) (string, error) { - imagePath, err := e2e.FetchBeatsBinary(artifactName, artifact, version, agentVersionBase, timeoutFactor, true) - if err != nil { - return "", err - } - - return imagePath, nil -} - -// GetElasticAgentInstaller returns an installer from a docker image -func GetElasticAgentInstaller(image string, installerType string, version string) ElasticAgentInstaller { - log.WithFields(log.Fields{ - "image": image, - "installer": installerType, - }).Debug("Configuring installer for the agent") - - var installer ElasticAgentInstaller - var err error - if "centos" == image && "tar" == installerType { - installer, err = newTarInstaller("centos", "latest", version) - } else if "centos" == image && "systemd" == installerType { - installer, err = newCentosInstaller("centos", "latest", version) - } else if "debian" == image && "tar" == installerType { - installer, err = newTarInstaller("debian", "stretch", version) - } else if "debian" == image && "systemd" == installerType { - installer, err = newDebianInstaller("debian", "stretch", version) - } else if "docker" == image && "default" == installerType { - installer, err = newDockerInstaller(false, version) - } else if "docker" == image && "ubi8" == installerType { - installer, err = newDockerInstaller(true, version) - } else { - log.WithFields(log.Fields{ - "image": image, - "installer": installerType, - }).Fatal("Sorry, we currently do not support this installer") - return ElasticAgentInstaller{} - } - - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "image": image, - "installer": installerType, - }).Fatal("Sorry, we could not download the installer") - } - return installer -} - -func isSystemdBased(image string) bool { - return strings.HasSuffix(image, "-systemd") -} - -// newCentosInstaller returns an instance of the Centos installer for a specific version -func newCentosInstaller(image string, tag string, version 
string) (ElasticAgentInstaller, error) { - image = image + "-systemd" // we want to consume systemd boxes - service := image - profile := FleetProfileName - - // extract the agent in the box, as it's mounted as a volume - artifact := "elastic-agent" - os := "linux" - arch := "x86_64" - extension := "rpm" - - binaryName := e2e.BuildArtifactName(artifact, version, agentVersionBase, os, arch, extension, false) - binaryPath, err := downloadAgentBinary(binaryName, artifact, version) - if err != nil { - log.WithFields(log.Fields{ - "artifact": artifact, - "version": version, - "os": os, - "arch": arch, - "extension": extension, - "error": err, - }).Error("Could not download the binary for the agent") - return ElasticAgentInstaller{}, err - } - - enrollFn := func(cfg *FleetConfig) error { - return runElasticAgentCommand(profile, image, service, ElasticAgentProcessName, "enroll", cfg.flags()) - } - - workingDir := "/var/lib/elastic-agent" - binDir := workingDir + "/data/elastic-agent-%s/" - - commitFile := "/etc/elastic-agent/.elastic-agent.active.commit" - - logsDir := binDir + "logs/" - logFileName := "elastic-agent-json.log" - logFile := logsDir + "/" + logFileName - - installerPackage := NewRPMPackage(binaryName, profile, image, service, commitFile, logFile) - - return ElasticAgentInstaller{ - artifactArch: arch, - artifactExtension: extension, - artifactName: artifact, - artifactOS: os, - artifactVersion: version, - binaryPath: binaryPath, - EnrollFn: enrollFn, - image: image, - InstallFn: installerPackage.Install, - InstallCertsFn: installerPackage.InstallCerts, - installerType: "rpm", - name: binaryName, - PostInstallFn: installerPackage.Postinstall, - PreInstallFn: installerPackage.Preinstall, - PrintLogsFn: installerPackage.PrintLogs, - processName: ElasticAgentProcessName, - profile: profile, - service: service, - tag: tag, - UninstallFn: installerPackage.Uninstall, - workingDir: workingDir, - }, nil -} - -// newDebianInstaller returns an instance of the Debian 
installer for a specific version -func newDebianInstaller(image string, tag string, version string) (ElasticAgentInstaller, error) { - image = image + "-systemd" // we want to consume systemd boxes - service := image - profile := FleetProfileName - - // extract the agent in the box, as it's mounted as a volume - artifact := "elastic-agent" - os := "linux" - arch := "amd64" - extension := "deb" - - binaryName := e2e.BuildArtifactName(artifact, version, agentVersionBase, os, arch, extension, false) - binaryPath, err := downloadAgentBinary(binaryName, artifact, version) - if err != nil { - log.WithFields(log.Fields{ - "artifact": artifact, - "version": version, - "os": os, - "arch": arch, - "extension": extension, - "error": err, - }).Error("Could not download the binary for the agent") - return ElasticAgentInstaller{}, err - } - - enrollFn := func(cfg *FleetConfig) error { - return runElasticAgentCommand(profile, image, service, ElasticAgentProcessName, "enroll", cfg.flags()) - } - - workingDir := "/var/lib/elastic-agent" - binDir := workingDir + "/data/elastic-agent-%s/" - - commitFile := "/etc/elastic-agent/.elastic-agent.active.commit" - - logsDir := binDir + "logs/" - logFileName := "elastic-agent-json.log" - logFile := logsDir + "/" + logFileName - - installerPackage := NewDEBPackage(binaryName, profile, image, service, commitFile, logFile) - - return ElasticAgentInstaller{ - artifactArch: arch, - artifactExtension: extension, - artifactName: artifact, - artifactOS: os, - artifactVersion: version, - binaryPath: binaryPath, - EnrollFn: enrollFn, - image: image, - InstallFn: installerPackage.Install, - InstallCertsFn: installerPackage.InstallCerts, - installerType: "deb", - name: binaryName, - PostInstallFn: installerPackage.Postinstall, - PreInstallFn: installerPackage.Preinstall, - PrintLogsFn: installerPackage.PrintLogs, - processName: ElasticAgentProcessName, - profile: profile, - service: service, - tag: tag, - UninstallFn: installerPackage.Uninstall, - 
workingDir: workingDir, - }, nil -} - -// newDockerInstaller returns an instance of the Docker installer -func newDockerInstaller(ubi8 bool, version string) (ElasticAgentInstaller, error) { - image := "elastic-agent" - service := image - profile := FleetProfileName - - // extract the agent in the box, as it's mounted as a volume - artifact := "elastic-agent" - - artifactName := artifact - if ubi8 { - artifactName = "elastic-agent-ubi8" - image = "elastic-agent-ubi8" - } - - os := "linux" - arch := "amd64" - extension := "tar.gz" - - binaryName := e2e.BuildArtifactName(artifactName, version, agentVersionBase, os, arch, extension, true) - binaryPath, err := downloadAgentBinary(binaryName, artifact, version) - if err != nil { - log.WithFields(log.Fields{ - "artifact": artifact, - "version": version, - "os": os, - "arch": arch, - "extension": extension, - "error": err, - }).Error("Could not download the binary for the agent") - return ElasticAgentInstaller{}, err - } - - homeDir := "/usr/share/elastic-agent" - workingDir := homeDir - binDir := homeDir + "/data/elastic-agent-%s/" - - commitFile := homeDir + ".elastic-agent.active.commit" - - logsDir := binDir + "logs/" - logFileName := "elastic-agent-json.log" - logFile := logsDir + "/" + logFileName - - enrollFn := func(cfg *FleetConfig) error { - return nil - } - - installerPackage := NewDockerPackage(binaryName, profile, artifactName, service, binaryPath, ubi8, commitFile, logFile). - WithArch(arch). - WithArtifact(artifactName). - WithOS(os). 
- WithVersion(version) - - return ElasticAgentInstaller{ - artifactArch: arch, - artifactExtension: extension, - artifactName: artifact, - artifactOS: os, - artifactVersion: version, - binaryPath: binaryPath, - EnrollFn: enrollFn, - image: image, - InstallFn: installerPackage.Install, - InstallCertsFn: installerPackage.InstallCerts, - installerType: "docker", - name: binaryName, - PostInstallFn: installerPackage.Postinstall, - PreInstallFn: installerPackage.Preinstall, - PrintLogsFn: installerPackage.PrintLogs, - processName: ElasticAgentProcessName, - profile: profile, - service: service, - tag: version, - UninstallFn: installerPackage.Uninstall, - workingDir: workingDir, - }, nil -} - -// newTarInstaller returns an instance of the Debian installer for a specific version -func newTarInstaller(image string, tag string, version string) (ElasticAgentInstaller, error) { - dockerImage := image + "-systemd" // we want to consume systemd boxes - service := dockerImage - profile := FleetProfileName - - // extract the agent in the box, as it's mounted as a volume - artifact := "elastic-agent" - os := "linux" - arch := "x86_64" - extension := "tar.gz" - - binaryName := e2e.BuildArtifactName(artifact, version, agentVersionBase, os, arch, extension, false) - binaryPath, err := downloadAgentBinary(binaryName, artifact, version) - if err != nil { - log.WithFields(log.Fields{ - "artifact": artifact, - "version": version, - "os": os, - "arch": arch, - "extension": extension, - "error": err, - }).Error("Could not download the binary for the agent") - return ElasticAgentInstaller{}, err - } - - workingDir := "/opt/Elastic/Agent" - - commitFile := "/elastic-agent/.elastic-agent.active.commit" - - logsDir := workingDir + "/data/elastic-agent-%s/logs/" - logFileName := "elastic-agent-json.log" - logFile := logsDir + "/" + logFileName - - enrollFn := func(cfg *FleetConfig) error { - return runElasticAgentCommand(profile, dockerImage, service, ElasticAgentProcessName, "enroll", 
cfg.flags()) - } - - // - installerPackage := NewTARPackage(binaryName, profile, dockerImage, service, commitFile, logFile). - WithArch(arch). - WithArtifact(artifact). - WithOS(os). - WithOSFlavour(image). - WithVersion(e2e.CheckPRVersion(version, agentVersionBase)) // sanitize version - - return ElasticAgentInstaller{ - artifactArch: arch, - artifactExtension: extension, - artifactName: artifact, - artifactOS: os, - artifactVersion: version, - binaryPath: binaryPath, - EnrollFn: enrollFn, - image: dockerImage, - InstallFn: installerPackage.Install, - InstallCertsFn: installerPackage.InstallCerts, - installerType: "tar", - name: binaryName, - PostInstallFn: installerPackage.Postinstall, - PreInstallFn: installerPackage.Preinstall, - PrintLogsFn: installerPackage.PrintLogs, - processName: ElasticAgentProcessName, - profile: profile, - service: service, - tag: tag, - UninstallFn: installerPackage.Uninstall, - workingDir: workingDir, - }, nil -} - -func systemctlRun(profile string, image string, service string, command string) error { - cmd := []string{"systemctl", command, ElasticAgentProcessName} - err := steps.ExecCommandInService(profile, image, service, cmd, profileEnv, false) - if err != nil { - log.WithFields(log.Fields{ - "command": cmd, - "error": err, - "service": service, - }).Errorf("Could not %s the service", command) - - return err - } - - log.WithFields(log.Fields{ - "command": cmd, - "service": service, - }).Trace("Systemctl executed") - return nil -} diff --git a/e2e/_suites/fleet/stand-alone.go b/e2e/_suites/fleet/stand-alone.go index 7eda9defdf..dd1fa46dc1 100644 --- a/e2e/_suites/fleet/stand-alone.go +++ b/e2e/_suites/fleet/stand-alone.go @@ -11,12 +11,17 @@ import ( "strings" "time" + "github.com/cenkalti/backoff/v4" "github.com/cucumber/godog" - "github.com/elastic/e2e-testing/cli/docker" - "github.com/elastic/e2e-testing/cli/services" - shell "github.com/elastic/e2e-testing/cli/shell" - "github.com/elastic/e2e-testing/e2e" - 
"github.com/elastic/e2e-testing/e2e/steps" + "github.com/elastic/e2e-testing/internal/common" + "github.com/elastic/e2e-testing/internal/compose" + "github.com/elastic/e2e-testing/internal/docker" + "github.com/elastic/e2e-testing/internal/elasticsearch" + "github.com/elastic/e2e-testing/internal/installer" + "github.com/elastic/e2e-testing/internal/kibana" + "github.com/elastic/e2e-testing/internal/shell" + "github.com/elastic/e2e-testing/internal/utils" + "github.com/pkg/errors" log "github.com/sirupsen/logrus" ) @@ -29,19 +34,21 @@ type StandAloneTestSuite struct { // date controls for queries AgentStoppedDate time.Time RuntimeDependenciesStartDate time.Time + kibanaClient *kibana.Client } // afterScenario destroys the state created by a scenario func (sats *StandAloneTestSuite) afterScenario() { - serviceManager := services.NewServiceManager() - serviceName := ElasticAgentServiceName + serviceManager := compose.NewServiceManager() + serviceName := common.ElasticAgentServiceName if log.IsLevelEnabled(log.DebugLevel) { _ = sats.getContainerLogs() } + developerMode := shell.GetEnvBool("DEVELOPER_MODE") if !developerMode { - _ = serviceManager.RemoveServicesFromCompose(context.Background(), FleetProfileName, []string{serviceName}, profileEnv) + _ = serviceManager.RemoveServicesFromCompose(context.Background(), common.FleetProfileName, []string{serviceName}, common.ProfileEnv) } else { log.WithField("service", serviceName).Info("Because we are running in development mode, the service won't be stopped") } @@ -68,11 +75,33 @@ func (sats *StandAloneTestSuite) contributeSteps(s *godog.ScenarioContext) { s.Step(`^there is new data in the index from agent$`, sats.thereIsNewDataInTheIndexFromAgent) s.Step(`^the "([^"]*)" docker container is stopped$`, sats.theDockerContainerIsStopped) s.Step(`^there is no new data in the index after agent shuts down$`, sats.thereIsNoNewDataInTheIndexAfterAgentShutsDown) - s.Step(`^the stand-alone agent is listed in Fleet as "([^"]*)"$`, 
sats.theAgentIsListedInFleetWithStatus) + s.Step(`^the stand-alone agent is listed in Fleet as "([^"]*)"$`, sats.theStandaloneAgentIsListedInFleetWithStatus) } -func (sats *StandAloneTestSuite) theAgentIsListedInFleetWithStatus(desiredStatus string) error { - return theAgentIsListedInFleetWithStatus(desiredStatus, sats.Hostname) +func (sats *StandAloneTestSuite) theStandaloneAgentIsListedInFleetWithStatus(desiredStatus string) error { + waitForAgents := func() error { + agents, err := sats.kibanaClient.ListAgents() + if err != nil { + return err + } + + if len(agents) == 0 { + return errors.New("No agents found") + } + + agentZero := agents[0] + hostname := agentZero.LocalMetadata.Host.HostName + + return theAgentIsListedInFleetWithStatus(desiredStatus, hostname) + } + maxTimeout := time.Duration(common.TimeoutFactor) * time.Minute * 2 + exp := common.GetExponentialBackOff(maxTimeout) + + err := backoff.Retry(waitForAgents, exp) + if err != nil { + return err + } + return nil } func (sats *StandAloneTestSuite) aStandaloneAgentIsDeployedWithFleetServerMode(image string) error { @@ -87,7 +116,7 @@ func (sats *StandAloneTestSuite) startAgent(image string, env map[string]string) log.Trace("Deploying an agent to Fleet") - dockerImageTag := agentVersion + dockerImageTag := common.AgentVersion useCISnapshots := shell.GetEnvBool("BEATS_USE_CI_SNAPSHOTS") beatsLocalPath := shell.GetEnv("BEATS_LOCAL_PATH", "") @@ -95,48 +124,62 @@ func (sats *StandAloneTestSuite) startAgent(image string, env map[string]string) // load the docker images that were already: // a. downloaded from the GCP bucket // b. 
fetched from the local beats binaries - dockerInstaller := GetElasticAgentInstaller("docker", image, agentVersion) + dockerInstaller := installer.GetElasticAgentInstaller("docker", image, common.AgentVersion) dockerInstaller.PreInstallFn() dockerImageTag += "-amd64" } +<<<<<<< HEAD configurationFilePath, err := steps.FetchBeatConfiguration(true, "elastic-agent", "elastic-agent.docker.yml") if err != nil { return err } serviceManager := services.NewServiceManager() +======= + serviceManager := compose.NewServiceManager() +>>>>>>> 5f596709... v2 refactor (#1008) - profileEnv["elasticAgentDockerImageSuffix"] = "" + common.ProfileEnv["elasticAgentDockerImageSuffix"] = "" if image != "default" { - profileEnv["elasticAgentDockerImageSuffix"] = "-" + image + common.ProfileEnv["elasticAgentDockerImageSuffix"] = "-" + image } - profileEnv["elasticAgentDockerNamespace"] = e2e.GetDockerNamespaceEnvVar("beats") + common.ProfileEnv["elasticAgentDockerNamespace"] = utils.GetDockerNamespaceEnvVar("beats") - containerName := fmt.Sprintf("%s_%s_%d", FleetProfileName, ElasticAgentServiceName, 1) + containerName := fmt.Sprintf("%s_%s_%d", common.FleetProfileName, common.ElasticAgentServiceName, 1) +<<<<<<< HEAD sats.AgentConfigFilePath = configurationFilePath profileEnv["elasticAgentContainerName"] = containerName profileEnv["elasticAgentConfigFile"] = sats.AgentConfigFilePath profileEnv["elasticAgentPlatform"] = "linux/amd64" profileEnv["elasticAgentTag"] = dockerImageTag +======= + common.ProfileEnv["elasticAgentContainerName"] = containerName + common.ProfileEnv["elasticAgentPlatform"] = "linux/amd64" + common.ProfileEnv["elasticAgentTag"] = dockerImageTag +>>>>>>> 5f596709... 
v2 refactor (#1008) for k, v := range env { - profileEnv[k] = v + common.ProfileEnv[k] = v } +<<<<<<< HEAD err = serviceManager.AddServicesToCompose(context.Background(), FleetProfileName, []string{ElasticAgentServiceName}, profileEnv) +======= + err := serviceManager.AddServicesToCompose(context.Background(), common.FleetProfileName, []string{common.ElasticAgentServiceName}, common.ProfileEnv) +>>>>>>> 5f596709... v2 refactor (#1008) if err != nil { log.Error("Could not deploy the elastic-agent") return err } // get container hostname once - hostname, err := steps.GetContainerHostname(containerName) + hostname, err := docker.GetContainerHostname(containerName) if err != nil { return err } @@ -154,16 +197,16 @@ func (sats *StandAloneTestSuite) startAgent(image string, env map[string]string) } func (sats *StandAloneTestSuite) getContainerLogs() error { - serviceManager := services.NewServiceManager() + serviceManager := compose.NewServiceManager() - profile := FleetProfileName - serviceName := ElasticAgentServiceName + profile := common.FleetProfileName + serviceName := common.ElasticAgentServiceName composes := []string{ profile, // profile name serviceName, // agent service } - err := serviceManager.RunCommand(profile, composes, []string{"logs", serviceName}, profileEnv) + err := serviceManager.RunCommand(profile, composes, []string{"logs", serviceName}, common.ProfileEnv) if err != nil { log.WithFields(log.Fields{ "error": err, @@ -210,7 +253,7 @@ func (sats *StandAloneTestSuite) installTestTools(containerName string) error { } func (sats *StandAloneTestSuite) thereIsNewDataInTheIndexFromAgent() error { - maxTimeout := time.Duration(timeoutFactor) * time.Minute * 2 + maxTimeout := time.Duration(common.TimeoutFactor) * time.Minute * 2 minimumHitsCount := 50 result, err := searchAgentData(sats.Hostname, sats.RuntimeDependenciesStartDate, minimumHitsCount, maxTimeout) @@ -220,13 +263,13 @@ func (sats *StandAloneTestSuite) thereIsNewDataInTheIndexFromAgent() error { 
log.Tracef("Search result: %v", result) - return e2e.AssertHitsArePresent(result) + return elasticsearch.AssertHitsArePresent(result) } func (sats *StandAloneTestSuite) theDockerContainerIsStopped(serviceName string) error { - serviceManager := services.NewServiceManager() + serviceManager := compose.NewServiceManager() - err := serviceManager.RemoveServicesFromCompose(context.Background(), FleetProfileName, []string{serviceName}, profileEnv) + err := serviceManager.RemoveServicesFromCompose(context.Background(), common.FleetProfileName, []string{serviceName}, common.ProfileEnv) if err != nil { return err } @@ -251,10 +294,10 @@ func (sats *StandAloneTestSuite) thereIsNoNewDataInTheIndexAfterAgentShutsDown() return nil } - return e2e.AssertHitsAreNotPresent(result) + return elasticsearch.AssertHitsAreNotPresent(result) } -func searchAgentData(hostname string, startDate time.Time, minimumHitsCount int, maxTimeout time.Duration) (e2e.SearchResult, error) { +func searchAgentData(hostname string, startDate time.Time, minimumHitsCount int, maxTimeout time.Duration) (elasticsearch.SearchResult, error) { timezone := "America/New_York" esQuery := map[string]interface{}{ @@ -331,11 +374,11 @@ func searchAgentData(hostname string, startDate time.Time, minimumHitsCount int, indexName := "logs-elastic_agent-default" - result, err := e2e.WaitForNumberOfHits(context.Background(), indexName, esQuery, minimumHitsCount, maxTimeout) + result, err := elasticsearch.WaitForNumberOfHits(context.Background(), indexName, esQuery, minimumHitsCount, maxTimeout) if err != nil { log.WithFields(log.Fields{ "error": err, - }).Warn(e2e.WaitForIndices()) + }).Warn(elasticsearch.WaitForIndices()) } return result, err diff --git a/e2e/_suites/fleet/world.go b/e2e/_suites/fleet/world.go index 375ac61662..7cd2fe5a62 100644 --- a/e2e/_suites/fleet/world.go +++ b/e2e/_suites/fleet/world.go @@ -7,10 +7,11 @@ package main import ( "fmt" - "github.com/elastic/e2e-testing/cli/services" - 
"github.com/elastic/e2e-testing/e2e/steps" + "github.com/elastic/e2e-testing/internal/common" + "github.com/elastic/e2e-testing/internal/docker" ) +<<<<<<< HEAD // developerMode tears down the backend services (ES, Kibana, Package Registry) // after a test suite. This is the desired behavior, but when developing, we maybe want to keep // them running to speed up the development cycle. @@ -57,6 +58,8 @@ const kibanaBaseURL = "http://localhost:5601" var kibanaClient *services.KibanaClient +======= +>>>>>>> 5f596709... v2 refactor (#1008) // IngestManagerTestSuite represents a test suite, holding references to the pieces needed to run the tests type IngestManagerTestSuite struct { Fleet *FleetTestSuite @@ -64,13 +67,13 @@ type IngestManagerTestSuite struct { } func (imts *IngestManagerTestSuite) processStateOnTheHost(process string, state string) error { - profile := FleetProfileName - serviceName := ElasticAgentServiceName + profile := common.FleetProfileName + serviceName := common.ElasticAgentServiceName containerName := fmt.Sprintf("%s_%s_%s_%d", profile, imts.Fleet.Image+"-systemd", serviceName, 1) if imts.StandAlone.Hostname != "" { containerName = fmt.Sprintf("%s_%s_%d", profile, serviceName, 1) } - return steps.CheckProcessStateOnTheHost(containerName, process, state, timeoutFactor) + return docker.CheckProcessStateOnTheHost(containerName, process, state, common.TimeoutFactor) } diff --git a/e2e/_suites/helm/helm_charts_test.go b/e2e/_suites/helm/helm_charts_test.go index 53eacd3fb8..e1f5dcdea7 100644 --- a/e2e/_suites/helm/helm_charts_test.go +++ b/e2e/_suites/helm/helm_charts_test.go @@ -13,11 +13,13 @@ import ( "github.com/Jeffail/gabs/v2" "github.com/cenkalti/backoff/v4" "github.com/elastic/e2e-testing/cli/config" - "github.com/elastic/e2e-testing/cli/services" - k8s "github.com/elastic/e2e-testing/cli/services" - shell "github.com/elastic/e2e-testing/cli/shell" - "github.com/elastic/e2e-testing/e2e" "github.com/elastic/e2e-testing/e2e/steps" + 
"github.com/elastic/e2e-testing/internal/common" + "github.com/elastic/e2e-testing/internal/compose" + "github.com/elastic/e2e-testing/internal/helm" + "github.com/elastic/e2e-testing/internal/kubectl" + "github.com/elastic/e2e-testing/internal/shell" + "github.com/elastic/e2e-testing/internal/utils" "go.elastic.co/apm" "github.com/cucumber/godog" @@ -33,14 +35,14 @@ var developerMode = false var elasticAPMActive = false -var helm k8s.HelmManager +var helmManager helm.Manager // timeoutFactor a multiplier for the max timeout when doing backoff retries. // It can be overriden by TIMEOUT_FACTOR env var var timeoutFactor = 2 //nolint:unused -var kubectl k8s.Kubectl +var kubectlClient kubectl.Kubectl // helmVersion represents the default version used for Helm var helmVersion = "3.x" @@ -81,7 +83,7 @@ func setupSuite() { timeoutFactor = shell.GetEnvInteger("TIMEOUT_FACTOR", timeoutFactor) stackVersion = shell.GetEnv("STACK_VERSION", stackVersion) - v, err := e2e.GetElasticArtifactVersion(stackVersion) + v, err := utils.GetElasticArtifactVersion(stackVersion) if err != nil { log.WithFields(log.Fields{ "error": err, @@ -90,11 +92,12 @@ func setupSuite() { } stackVersion = v - h, err := k8s.HelmFactory(helmVersion) + h, err := helm.Factory(helmVersion) if err != nil { log.Fatalf("Helm could not be initialised: %v", err) } - helm = h + + helmManager = h testSuite = HelmChartTestSuite{ ClusterName: "helm-charts-test-suite", @@ -132,7 +135,7 @@ func (ts *HelmChartTestSuite) aClusterIsRunning() error { } func (ts *HelmChartTestSuite) addElasticRepo(ctx context.Context) error { - err := helm.AddRepo(ctx, "elastic", "https://helm.elastic.co") + err := helmManager.AddRepo(ctx, "elastic", "https://helm.elastic.co") if err != nil { log.WithField("error", err).Error("Could not add Elastic Helm repo") } @@ -143,7 +146,7 @@ func (ts *HelmChartTestSuite) aResourceContainsTheKey(resource string, key strin lowerResource := strings.ToLower(resource) escapedKey := strings.ReplaceAll(key, 
".", `\.`) - output, err := kubectl.Run(ts.currentContext, "get", lowerResource, ts.getResourceName(resource), "-o", `jsonpath="{.data['`+escapedKey+`']}"`) + output, err := kubectlClient.Run(ts.currentContext, "get", lowerResource, ts.getResourceName(resource), "-o", `jsonpath="{.data['`+escapedKey+`']}"`) if err != nil { return err } @@ -162,7 +165,7 @@ func (ts *HelmChartTestSuite) aResourceContainsTheKey(resource string, key strin func (ts *HelmChartTestSuite) aResourceManagesRBAC(resource string) error { lowerResource := strings.ToLower(resource) - output, err := kubectl.Run(ts.currentContext, "get", lowerResource, ts.getResourceName(resource), "-o", `jsonpath="'{.metadata.labels.chart}'"`) + output, err := kubectlClient.Run(ts.currentContext, "get", lowerResource, ts.getResourceName(resource), "-o", `jsonpath="'{.metadata.labels.chart}'"`) if err != nil { return err } @@ -179,18 +182,18 @@ func (ts *HelmChartTestSuite) aResourceManagesRBAC(resource string) error { } func (ts *HelmChartTestSuite) aResourceWillExposePods(resourceType string) error { - selector, err := kubectl.GetResourceSelector(ts.currentContext, "deployment", ts.Name+"-"+ts.Name) + selector, err := kubectlClient.GetResourceSelector(ts.currentContext, "deployment", ts.Name+"-"+ts.Name) if err != nil { return err } maxTimeout := time.Duration(timeoutFactor) * time.Minute - exp := e2e.GetExponentialBackOff(maxTimeout) + exp := common.GetExponentialBackOff(maxTimeout) retryCount := 1 checkEndpointsFn := func() error { - output, err := kubectl.GetStringResourcesBySelector(ts.currentContext, "endpoints", selector) + output, err := kubectlClient.GetStringResourcesBySelector(ts.currentContext, "endpoints", selector) if err != nil { log.WithFields(log.Fields{ "elapsedTime": exp.GetElapsedTime(), @@ -254,7 +257,7 @@ func (ts *HelmChartTestSuite) aResourceWillExposePods(resourceType string) error } func (ts *HelmChartTestSuite) aResourceWillManagePods(resourceType string) error { - selector, err := 
kubectl.GetResourceSelector(ts.currentContext, "deployment", ts.Name+"-"+ts.Name) + selector, err := kubectlClient.GetResourceSelector(ts.currentContext, "deployment", ts.Name+"-"+ts.Name) if err != nil { return err } @@ -273,7 +276,7 @@ func (ts *HelmChartTestSuite) aResourceWillManagePods(resourceType string) error } func (ts *HelmChartTestSuite) checkResources(resourceType, selector string, min int) ([]interface{}, error) { - resources, err := kubectl.GetResourcesBySelector(ts.currentContext, resourceType, selector) + resources, err := kubectlClient.GetResourcesBySelector(ts.currentContext, resourceType, selector) if err != nil { return nil, err } @@ -315,7 +318,7 @@ func (ts *HelmChartTestSuite) createCluster(ctx context.Context, k8sVersion stri } func (ts *HelmChartTestSuite) deleteChart() { - err := helm.DeleteChart(ts.currentContext, ts.Name) + err := helmManager.DeleteChart(ts.currentContext, ts.Name) if err != nil { log.WithFields(log.Fields{ "chart": ts.Name, @@ -364,23 +367,23 @@ func (ts *HelmChartTestSuite) getPodName() string { // getResourceName returns the name of the service, in lowercase, based on the k8s resource func (ts *HelmChartTestSuite) getResourceName(resource string) string { - if resource == k8s.ResourceTypes.ClusterRole { + if resource == kubectl.ResourceTypes.ClusterRole { return strings.ToLower(ts.Name + "-" + ts.Name + "-cluster-role") - } else if resource == k8s.ResourceTypes.ClusterRoleBinding { + } else if resource == kubectl.ResourceTypes.ClusterRoleBinding { return strings.ToLower(ts.Name + "-" + ts.Name + "-cluster-role-binding") - } else if resource == k8s.ResourceTypes.ConfigMap { + } else if resource == kubectl.ResourceTypes.ConfigMap { if ts.Name == "filebeat" || ts.Name == "metricbeat" { return strings.ToLower(ts.Name + "-" + ts.Name + "-daemonset-config") } return strings.ToLower(ts.Name + "-" + ts.Name + "-config") - } else if resource == k8s.ResourceTypes.Daemonset { + } else if resource == 
kubectl.ResourceTypes.Daemonset { return strings.ToLower(ts.Name + "-" + ts.Name) - } else if resource == k8s.ResourceTypes.Deployment { + } else if resource == kubectl.ResourceTypes.Deployment { if ts.Name == "metricbeat" { return strings.ToLower(ts.Name + "-" + ts.Name + "-metrics") } return strings.ToLower(ts.Name + "-" + ts.Name) - } else if resource == k8s.ResourceTypes.ServiceAccount { + } else if resource == kubectl.ResourceTypes.ServiceAccount { return strings.ToLower(ts.Name + "-" + ts.Name) } @@ -400,7 +403,7 @@ func (ts *HelmChartTestSuite) install(ctx context.Context, chart string) error { defer span.End() // Rancher Local Path Provisioner and local-path storage class for Elasticsearch volumes - _, err := kubectl.Run(ctx, "apply", "-f", "https://raw.githubusercontent.com/rancher/local-path-provisioner/master/deploy/local-path-storage.yaml") + _, err := kubectlClient.Run(ctx, "apply", "-f", "https://raw.githubusercontent.com/rancher/local-path-provisioner/master/deploy/local-path-storage.yaml") if err != nil { log.Errorf("Could not apply Rancher Local Path Provisioner: %v", err) return err @@ -409,13 +412,13 @@ func (ts *HelmChartTestSuite) install(ctx context.Context, chart string) error { "chart": ts.Name, }).Info("Rancher Local Path Provisioner and local-path storage class for Elasticsearch volumes installed") - maxTimeout := timeoutFactor * 100 + maxTimeout := common.TimeoutFactor * 100 log.Debug("Applying workaround to use Rancher's local-path storage class for Elasticsearch volumes") flags = []string{"--wait", fmt.Sprintf("--timeout=%ds", maxTimeout), "--values", "https://raw.githubusercontent.com/elastic/helm-charts/master/elasticsearch/examples/kubernetes-kind/values.yaml"} } - return helm.InstallChart(ctx, ts.Name, elasticChart, ts.Version, flags) + return helmManager.InstallChart(ctx, ts.Name, elasticChart, ts.Version, flags) } func (ts *HelmChartTestSuite) installRuntimeDependencies(ctx context.Context, dependencies ...string) error { @@ 
-435,7 +438,7 @@ func (ts *HelmChartTestSuite) installRuntimeDependencies(ctx context.Context, de } func (ts *HelmChartTestSuite) podsManagedByDaemonSet() error { - output, err := kubectl.Run(ts.currentContext, "get", "daemonset", "--namespace=default", "-l", "app="+ts.Name+"-"+ts.Name, "-o", "jsonpath='{.items[0].metadata.labels.chart}'") + output, err := kubectlClient.Run(ts.currentContext, "get", "daemonset", "--namespace=default", "-l", "app="+ts.Name+"-"+ts.Name, "-o", "jsonpath='{.items[0].metadata.labels.chart}'") if err != nil { return err } @@ -452,7 +455,7 @@ func (ts *HelmChartTestSuite) podsManagedByDaemonSet() error { } func (ts *HelmChartTestSuite) resourceConstraintsAreApplied(constraint string) error { - output, err := kubectl.Run(ts.currentContext, "get", "pods", "-l", "app="+ts.getPodName(), "-o", "jsonpath='{.items[0].spec.containers[0].resources."+constraint+"}'") + output, err := kubectlClient.Run(ts.currentContext, "get", "pods", "-l", "app="+ts.getPodName(), "-o", "jsonpath='{.items[0].spec.containers[0].resources."+constraint+"}'") if err != nil { return err } @@ -472,7 +475,7 @@ func (ts *HelmChartTestSuite) resourceConstraintsAreApplied(constraint string) e func (ts *HelmChartTestSuite) resourceWillManageAdditionalPodsForMetricsets(resource string) error { lowerResource := strings.ToLower(resource) - output, err := kubectl.Run(ts.currentContext, "get", lowerResource, ts.getResourceName(resource), "-o", "jsonpath='{.metadata.labels.chart}'") + output, err := kubectlClient.Run(ts.currentContext, "get", lowerResource, ts.getResourceName(resource), "-o", "jsonpath='{.metadata.labels.chart}'") if err != nil { return err } @@ -489,7 +492,7 @@ func (ts *HelmChartTestSuite) resourceWillManageAdditionalPodsForMetricsets(reso } func (ts *HelmChartTestSuite) strategyCanBeUsedDuringUpdates(strategy string) error { - return ts.strategyCanBeUsedForResourceDuringUpdates(strategy, k8s.ResourceTypes.Daemonset) + return 
ts.strategyCanBeUsedForResourceDuringUpdates(strategy, kubectl.ResourceTypes.Daemonset) } func (ts *HelmChartTestSuite) strategyCanBeUsedForResourceDuringUpdates(strategy string, resource string) error { @@ -497,11 +500,11 @@ func (ts *HelmChartTestSuite) strategyCanBeUsedForResourceDuringUpdates(strategy strategyKey := "strategy" name := ts.getResourceName(resource) - if resource == k8s.ResourceTypes.Daemonset { + if resource == kubectl.ResourceTypes.Daemonset { strategyKey = "updateStrategy" } - output, err := kubectl.Run(ts.currentContext, "get", lowerResource, name, "-o", `go-template={{.spec.`+strategyKey+`.type}}`) + output, err := kubectlClient.Run(ts.currentContext, "get", lowerResource, name, "-o", `go-template={{.spec.`+strategyKey+`.type}}`) if err != nil { return err } @@ -526,7 +529,7 @@ func (ts *HelmChartTestSuite) volumeMountedWithSubpath(name string, mountPath st getMountValues := func(key string) ([]string, error) { // build the arguments for capturing the volume mounts - output, err := kubectl.Run(ts.currentContext, "get", "pods", "-l", "app="+ts.getPodName(), "-o", `jsonpath="{.items[0].spec.containers[0].volumeMounts[*]['`+key+`']}"`) + output, err := kubectlClient.Run(ts.currentContext, "get", "pods", "-l", "app="+ts.getPodName(), "-o", `jsonpath="{.items[0].spec.containers[0].volumeMounts[*]['`+key+`']}"`) if err != nil { return []string{}, err } @@ -591,7 +594,7 @@ func (ts *HelmChartTestSuite) volumeMountedWithSubpath(name string, mountPath st func (ts *HelmChartTestSuite) willRetrieveSpecificMetrics(chartName string) error { kubeStateMetrics := "kube-state-metrics" - output, err := kubectl.Run(ts.currentContext, "get", "deployment", ts.Name+"-"+kubeStateMetrics, "-o", "jsonpath='{.metadata.name}'") + output, err := kubectlClient.Run(ts.currentContext, "get", "deployment", ts.Name+"-"+kubeStateMetrics, "-o", "jsonpath='{.metadata.name}'") if err != nil { return err } @@ -673,7 +676,7 @@ func InitializeHelmChartTestSuite(ctx 
*godog.TestSuiteContext) { defer suiteParentSpan.End() if elasticAPMActive { - serviceManager := services.NewServiceManager() + serviceManager := compose.NewServiceManager() env := map[string]string{ "stackVersion": stackVersion, @@ -727,7 +730,7 @@ func InitializeHelmChartTestSuite(ctx *godog.TestSuiteContext) { } if elasticAPMActive { - serviceManager := services.NewServiceManager() + serviceManager := compose.NewServiceManager() err := serviceManager.StopCompose(suiteContext, true, []string{"helm"}) if err != nil { log.WithFields(log.Fields{ diff --git a/e2e/_suites/metricbeat/metricbeat_test.go b/e2e/_suites/metricbeat/metricbeat_test.go index 34395ce26b..2a14879c65 100644 --- a/e2e/_suites/metricbeat/metricbeat_test.go +++ b/e2e/_suites/metricbeat/metricbeat_test.go @@ -15,11 +15,13 @@ import ( "github.com/cucumber/godog" messages "github.com/cucumber/messages-go/v10" "github.com/elastic/e2e-testing/cli/config" - "github.com/elastic/e2e-testing/cli/docker" - "github.com/elastic/e2e-testing/cli/services" - "github.com/elastic/e2e-testing/cli/shell" - "github.com/elastic/e2e-testing/e2e" "github.com/elastic/e2e-testing/e2e/steps" + "github.com/elastic/e2e-testing/internal/common" + "github.com/elastic/e2e-testing/internal/compose" + "github.com/elastic/e2e-testing/internal/docker" + "github.com/elastic/e2e-testing/internal/elasticsearch" + "github.com/elastic/e2e-testing/internal/shell" + "github.com/elastic/e2e-testing/internal/utils" log "github.com/sirupsen/logrus" "go.elastic.co/apm" ) @@ -38,11 +40,7 @@ var metricbeatVersionBase = "7.x-SNAPSHOT" // It can be overriden by BEAT_VERSION env var var metricbeatVersion = metricbeatVersionBase -// timeoutFactor a multiplier for the max timeout when doing backoff retries. 
-// It can be overriden by TIMEOUT_FACTOR env var -var timeoutFactor = 3 - -var serviceManager services.ServiceManager +var serviceManager compose.ServiceManager // stackVersion is the version of the stack to use // It can be overriden by STACK_VERSION env var @@ -69,7 +67,7 @@ func setupSuite() { } // check if base version is an alias - v, err := e2e.GetElasticArtifactVersion(metricbeatVersionBase) + v, err := utils.GetElasticArtifactVersion(metricbeatVersionBase) if err != nil { log.WithFields(log.Fields{ "error": err, @@ -79,10 +77,9 @@ func setupSuite() { metricbeatVersionBase = v metricbeatVersion = shell.GetEnv("BEAT_VERSION", metricbeatVersionBase) - timeoutFactor = shell.GetEnvInteger("TIMEOUT_FACTOR", timeoutFactor) stackVersion = shell.GetEnv("STACK_VERSION", stackVersion) - v, err = e2e.GetElasticArtifactVersion(stackVersion) + v, err = utils.GetElasticArtifactVersion(stackVersion) if err != nil { log.WithFields(log.Fields{ "error": err, @@ -91,10 +88,10 @@ func setupSuite() { } stackVersion = v - serviceManager = services.NewServiceManager() + serviceManager = compose.NewServiceManager() testSuite = MetricbeatTestSuite{ - Query: e2e.ElasticsearchQuery{}, + Query: elasticsearch.Query{}, } } @@ -102,14 +99,14 @@ func setupSuite() { // the service to be monitored //nolint:unused type MetricbeatTestSuite struct { - cleanUpTmpFiles bool // if it's needed to clean up temporary files - configurationFile string // the name of the configuration file to be used in this test suite - ServiceName string // the service to be monitored by metricbeat - ServiceType string // the type of the service to be monitored by metricbeat - ServiceVariant string // the variant of the service to be monitored by metricbeat - ServiceVersion string // the version of the service to be monitored by metricbeat - Query e2e.ElasticsearchQuery // the specs for the ES query - Version string // the metricbeat version for the test + cleanUpTmpFiles bool // if it's needed to clean up temporary 
files + configurationFile string // the name of the configuration file to be used in this test suite + ServiceName string // the service to be monitored by metricbeat + ServiceType string // the type of the service to be monitored by metricbeat + ServiceVariant string // the variant of the service to be monitored by metricbeat + ServiceVersion string // the version of the service to be monitored by metricbeat + Query elasticsearch.Query // the specs for the ES query + Version string // the metricbeat version for the test // instrumentation currentContext context.Context } @@ -141,7 +138,7 @@ func (mts *MetricbeatTestSuite) setIndexName() { index = fmt.Sprintf("metricbeat-%s", mVersion) } - index += "-" + e2e.RandomString(8) + index += "-" + utils.RandomString(8) mts.Query.IndexName = strings.ToLower(index) } @@ -156,10 +153,10 @@ func (mts *MetricbeatTestSuite) CleanUp() error { testSuite.currentContext = apm.ContextWithSpan(context.Background(), span) defer span.End() - serviceManager := services.NewServiceManager() + serviceManager := compose.NewServiceManager() fn := func(ctx context.Context) { - err := e2e.DeleteIndex(ctx, mts.getIndexName()) + err := elasticsearch.DeleteIndex(ctx, mts.getIndexName()) if err != nil { log.WithFields(log.Fields{ "profile": "metricbeat", @@ -262,7 +259,7 @@ func InitializeMetricbeatTestSuite(ctx *godog.TestSuiteContext) { suiteContext = apm.ContextWithSpan(suiteContext, suiteParentSpan) defer suiteParentSpan.End() - serviceManager := services.NewServiceManager() + serviceManager := compose.NewServiceManager() env := map[string]string{ "stackVersion": stackVersion, @@ -275,8 +272,8 @@ func InitializeMetricbeatTestSuite(ctx *godog.TestSuiteContext) { }).Fatal("Could not run the profile.") } - minutesToBeHealthy := time.Duration(timeoutFactor) * time.Minute - healthy, err := e2e.WaitForElasticsearch(suiteContext, minutesToBeHealthy) + minutesToBeHealthy := time.Duration(common.TimeoutFactor) * time.Minute + healthy, err := 
elasticsearch.WaitForElasticsearch(suiteContext, minutesToBeHealthy) if !healthy { log.WithFields(log.Fields{ "error": err, @@ -307,7 +304,7 @@ func InitializeMetricbeatTestSuite(ctx *godog.TestSuiteContext) { defer suiteParentSpan.End() if !developerMode { - serviceManager := services.NewServiceManager() + serviceManager := compose.NewServiceManager() err := serviceManager.StopCompose(suiteContext, true, []string{"metricbeat"}) if err != nil { log.WithFields(log.Fields{ @@ -383,7 +380,7 @@ func (mts *MetricbeatTestSuite) installedUsingConfiguration(configuration string mts.Version = metricbeatVersion mts.setIndexName() - metricbeatVersion = e2e.CheckPRVersion(metricbeatVersion, metricbeatVersionBase) + metricbeatVersion = utils.CheckPRVersion(metricbeatVersion, metricbeatVersionBase) configurationFilePath, err := steps.FetchBeatConfiguration(false, "metricbeat", configuration+".yml") if err != nil { @@ -409,9 +406,9 @@ func (mts *MetricbeatTestSuite) runMetricbeatService() error { useCISnapshots := shell.GetEnvBool("BEATS_USE_CI_SNAPSHOTS") beatsLocalPath := shell.GetEnv("BEATS_LOCAL_PATH", "") if useCISnapshots || beatsLocalPath != "" { - artifactName := e2e.BuildArtifactName("metricbeat", mts.Version, metricbeatVersionBase, "linux", "amd64", "tar.gz", true) + artifactName := utils.BuildArtifactName("metricbeat", mts.Version, metricbeatVersionBase, "linux", "amd64", "tar.gz", true) - imagePath, err := e2e.FetchBeatsBinary(artifactName, "metricbeat", mts.Version, metricbeatVersionBase, timeoutFactor, true) + imagePath, err := utils.FetchBeatsBinary(artifactName, "metricbeat", mts.Version, metricbeatVersionBase, common.TimeoutFactor, true) if err != nil { return err } @@ -433,16 +430,16 @@ func (mts *MetricbeatTestSuite) runMetricbeatService() error { } // this is needed because, in general, the target service (apache, mysql, redis) does not have a healthcheck - waitForService := time.Duration(timeoutFactor) * 10 * time.Second + waitForService := 
time.Duration(common.TimeoutFactor) * 10 * time.Second if mts.ServiceName == "ceph" { // see https://github.com/elastic/beats/blob/ef6274d0d1e36308a333cbed69846a1bd63528ae/metricbeat/module/ceph/mgr_osd_tree/mgr_osd_tree_integration_test.go#L35 // Ceph service needs more time to start up waitForService = waitForService * 4 } - e2e.Sleep(waitForService) + utils.Sleep(waitForService) - serviceManager := services.NewServiceManager() + serviceManager := compose.NewServiceManager() logLevel := log.GetLevel().String() if log.GetLevel() == log.TraceLevel { @@ -460,7 +457,7 @@ func (mts *MetricbeatTestSuite) runMetricbeatService() error { "serviceName": mts.ServiceName, } - env["metricbeatDockerNamespace"] = e2e.GetDockerNamespaceEnvVar("beats") + env["metricbeatDockerNamespace"] = utils.GetDockerNamespaceEnvVar("beats") env["metricbeatPlatform"] = "linux/amd64" err := serviceManager.AddServicesToCompose(testSuite.currentContext, "metricbeat", []string{"metricbeat"}, env) @@ -579,14 +576,14 @@ func (mts *MetricbeatTestSuite) thereAreEventsInTheIndex() error { } minimumHitsCount := 5 - maxTimeout := time.Duration(timeoutFactor) * time.Minute + maxTimeout := time.Duration(common.TimeoutFactor) * time.Minute - result, err := e2e.WaitForNumberOfHits(mts.currentContext, mts.getIndexName(), esQuery, minimumHitsCount, maxTimeout) + result, err := elasticsearch.WaitForNumberOfHits(mts.currentContext, mts.getIndexName(), esQuery, minimumHitsCount, maxTimeout) if err != nil { return err } - err = e2e.AssertHitsArePresent(result) + err = elasticsearch.AssertHitsArePresent(result) if err != nil { log.WithFields(log.Fields{ "eventModule": mts.Query.EventModule, @@ -615,12 +612,12 @@ func (mts *MetricbeatTestSuite) thereAreNoErrorsInTheIndex() error { } minimumHitsCount := 5 - maxTimeout := time.Duration(timeoutFactor) * time.Minute + maxTimeout := time.Duration(common.TimeoutFactor) * time.Minute - result, err := e2e.WaitForNumberOfHits(mts.currentContext, mts.getIndexName(), esQuery, 
minimumHitsCount, maxTimeout) + result, err := elasticsearch.WaitForNumberOfHits(mts.currentContext, mts.getIndexName(), esQuery, minimumHitsCount, maxTimeout) if err != nil { return err } - return e2e.AssertHitsDoNotContainErrors(result, mts.Query) + return elasticsearch.AssertHitsDoNotContainErrors(result, mts.Query) } diff --git a/e2e/steps/befores.go b/e2e/steps/befores.go index 0584efad52..d4cb3a2e3b 100644 --- a/e2e/steps/befores.go +++ b/e2e/steps/befores.go @@ -8,14 +8,14 @@ import ( "context" "strings" - "github.com/elastic/e2e-testing/cli/services" - "github.com/elastic/e2e-testing/cli/shell" + "github.com/elastic/e2e-testing/internal/compose" + "github.com/elastic/e2e-testing/internal/shell" log "github.com/sirupsen/logrus" ) // AddAPMServicesForInstrumentation adds a Kibana and APM Server instances to the running project func AddAPMServicesForInstrumentation(ctx context.Context, profile string, stackVersion string, needsKibana bool, env map[string]string) { - serviceManager := services.NewServiceManager() + serviceManager := compose.NewServiceManager() apmServerURL := shell.GetEnv("APM_SERVER_URL", "") if strings.HasPrefix(apmServerURL, "http://localhost") { diff --git a/e2e/steps/configurations.go b/e2e/steps/configurations.go index 545e01318c..7e1cbfc9ec 100644 --- a/e2e/steps/configurations.go +++ b/e2e/steps/configurations.go @@ -7,8 +7,8 @@ package steps import ( "path" - "github.com/elastic/e2e-testing/cli/shell" - "github.com/elastic/e2e-testing/e2e" + "github.com/elastic/e2e-testing/internal/shell" + "github.com/elastic/e2e-testing/internal/utils" log "github.com/sirupsen/logrus" ) @@ -44,7 +44,7 @@ func FetchBeatConfiguration(xpack bool, beat string, configFileName string) (str configurationFileURL += "/" + beat + "/" + configFileName - configurationFilePath, err := e2e.DownloadFile(configurationFileURL) + configurationFilePath, err := utils.DownloadFile(configurationFileURL) if err != nil { return "", err } diff --git a/e2e/steps/processes.go 
b/e2e/steps/processes.go deleted file mode 100644 index f9721d7634..0000000000 --- a/e2e/steps/processes.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package steps - -import ( - "time" - - "github.com/elastic/e2e-testing/e2e" - log "github.com/sirupsen/logrus" -) - -// CheckProcessStateOnTheHost checks if a process is in the desired state in a container -// name of the container for the service: -// we are using the Docker client instead of docker-compose -// because it does not support returning the output of a -// command: it simply returns error level -func CheckProcessStateOnTheHost(containerName string, process string, state string, timeoutFactor int) error { - timeout := time.Duration(timeoutFactor) * time.Minute - - err := e2e.WaitForProcess(containerName, process, state, timeout) - if err != nil { - if state == "started" { - log.WithFields(log.Fields{ - "container ": containerName, - "error": err, - "timeout": timeout, - }).Error("The process was not found but should be present") - } else { - log.WithFields(log.Fields{ - "container": containerName, - "error": err, - "timeout": timeout, - }).Error("The process was found but shouldn't be present") - } - - return err - } - - return nil -} diff --git a/e2e/steps/services.go b/e2e/steps/services.go deleted file mode 100644 index b015a99c09..0000000000 --- a/e2e/steps/services.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package steps - -import ( - "context" - - "github.com/elastic/e2e-testing/cli/docker" - "github.com/elastic/e2e-testing/cli/services" - log "github.com/sirupsen/logrus" -) - -// ExecCommandInService executes a command in a service from a profile -func ExecCommandInService(profile string, image string, serviceName string, cmds []string, env map[string]string, detach bool) error { - serviceManager := services.NewServiceManager() - - composes := []string{ - profile, // profile name - image, // image for the service - } - composeArgs := []string{"exec", "-T"} - if detach { - composeArgs = append(composeArgs, "-d") - } - composeArgs = append(composeArgs, serviceName) - composeArgs = append(composeArgs, cmds...) - - err := serviceManager.RunCommand(profile, composes, composeArgs, env) - if err != nil { - log.WithFields(log.Fields{ - "command": cmds, - "error": err, - "service": serviceName, - }).Error("Could not execute command in service container") - - return err - } - - return nil -} - -// GetContainerHostname we need the container name because we use the Docker Client instead of Docker Compose -func GetContainerHostname(containerName string) (string, error) { - log.WithFields(log.Fields{ - "containerName": containerName, - }).Trace("Retrieving container name from the Docker client") - - hostname, err := docker.ExecCommandIntoContainer(context.Background(), containerName, "root", []string{"cat", "/etc/hostname"}) - if err != nil { - log.WithFields(log.Fields{ - "containerName": containerName, - "error": err, - }).Error("Could not retrieve container name from the Docker client") - return "", err - } - - log.WithFields(log.Fields{ - "containerName": containerName, - "hostname": hostname, - }).Info("Hostname retrieved from the Docker client") - - return hostname, nil -} diff --git a/internal/common/defaults.go b/internal/common/defaults.go new file mode 100644 index 0000000000..dc98c83f59 --- /dev/null +++ b/internal/common/defaults.go @@ -0,0 +1,43 @@ +// Copyright 
Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package common + +// ElasticAgentProcessName the name of the process for the Elastic Agent +const ElasticAgentProcessName = "elastic-agent" + +// ElasticAgentServiceName the name of the service for the Elastic Agent +const ElasticAgentServiceName = "elastic-agent" + +// ElasticEndpointIntegrationTitle title for the Elastic Endpoint integration in the package registry. +// This value could change depending on the version of the package registry +// We are using the title because the feature files have to be super readable +// and the title is more readable than the name +const ElasticEndpointIntegrationTitle = "Endpoint Security" + +// FleetProfileName the name of the profile to run the runtime, backend services +const FleetProfileName = "fleet" + +// AgentVersionBase is the base version of the agent to use +var AgentVersionBase = "8.0.0-SNAPSHOT" + +// AgentVersion is the version of the agent to use +// It can be overridden by BEAT_VERSION env var +var AgentVersion = AgentVersionBase + +// AgentStaleVersion is the version of the agent to use as a base during upgrade +// It can be overridden by ELASTIC_AGENT_STALE_VERSION env var. Using latest GA as a default. 
+var AgentStaleVersion = "7.11-SNAPSHOT" + +// StackVersion is the version of the stack to use +// It can be overridden by STACK_VERSION env var +var StackVersion = AgentVersionBase + +// KibanaVersion is the version of kibana to use +// It can be overridden by KIBANA_VERSION +var KibanaVersion = AgentVersionBase + +// ProfileEnv is the environment to be applied to any execution +// affecting the runtime dependencies (or profile) +var ProfileEnv map[string]string diff --git a/internal/common/retry.go b/internal/common/retry.go new file mode 100644 index 0000000000..fa38cb95a1 --- /dev/null +++ b/internal/common/retry.go @@ -0,0 +1,35 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package common + +import ( + "time" + + backoff "github.com/cenkalti/backoff/v4" +) + +// TimeoutFactor a multiplier for the max timeout when doing backoff retries. 
+// It can be overridden by TIMEOUT_FACTOR env var +var TimeoutFactor = 3 + +// GetExponentialBackOff returns a preconfigured exponential backoff instance +func GetExponentialBackOff(elapsedTime time.Duration) *backoff.ExponentialBackOff { + var ( + initialInterval = 500 * time.Millisecond + randomizationFactor = 0.5 + multiplier = 2.0 + maxInterval = 5 * time.Second + maxElapsedTime = elapsedTime + ) + + exp := backoff.NewExponentialBackOff() + exp.InitialInterval = initialInterval + exp.RandomizationFactor = randomizationFactor + exp.Multiplier = multiplier + exp.MaxInterval = maxInterval + exp.MaxElapsedTime = maxElapsedTime + + return exp +} diff --git a/cli/services/manager.go b/internal/compose/compose.go similarity index 87% rename from cli/services/manager.go rename to internal/compose/compose.go index b67bcbe935..cc6374282a 100644 --- a/cli/services/manager.go +++ b/internal/compose/compose.go @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. 
-package services +package compose import ( "context" @@ -10,7 +10,7 @@ import ( "path/filepath" "github.com/elastic/e2e-testing/cli/config" - state "github.com/elastic/e2e-testing/cli/internal" + state "github.com/elastic/e2e-testing/internal/state" "go.elastic.co/apm" log "github.com/sirupsen/logrus" @@ -20,6 +20,7 @@ import ( // ServiceManager manages lifecycle of a service type ServiceManager interface { AddServicesToCompose(ctx context.Context, profile string, composeNames []string, env map[string]string) error + ExecCommandInService(profile string, image string, serviceName string, cmds []string, env map[string]string, detach bool) error RemoveServicesFromCompose(ctx context.Context, profile string, composeNames []string, env map[string]string) error RunCommand(profile string, composeNames []string, composeArgs []string, env map[string]string) error RunCompose(ctx context.Context, isProfile bool, composeNames []string, env map[string]string) error @@ -63,6 +64,33 @@ func (sm *DockerServiceManager) AddServicesToCompose(ctx context.Context, profil return nil } +// ExecCommandInService executes a command in a service from a profile +func (sm *DockerServiceManager) ExecCommandInService(profile string, image string, serviceName string, cmds []string, env map[string]string, detach bool) error { + composes := []string{ + profile, // profile name + image, // image for the service + } + composeArgs := []string{"exec", "-T"} + if detach { + composeArgs = append(composeArgs, "-d") + } + composeArgs = append(composeArgs, serviceName) + composeArgs = append(composeArgs, cmds...) 
+ + err := sm.RunCommand(profile, composes, composeArgs, env) + if err != nil { + log.WithFields(log.Fields{ + "command": cmds, + "error": err, + "service": serviceName, + }).Error("Could not execute command in service container") + + return err + } + + return nil +} + // RemoveServicesFromCompose removes services from a running docker compose func (sm *DockerServiceManager) RemoveServicesFromCompose(ctx context.Context, profile string, composeNames []string, env map[string]string) error { span, _ := apm.StartSpanOptions(ctx, "Remove services from Docker Compose", "docker-compose.services.remove", apm.SpanOptions{ diff --git a/cli/shell/curl.go b/internal/curl/curl.go similarity index 99% rename from cli/shell/curl.go rename to internal/curl/curl.go index 3da97c3135..47d1af4a77 100644 --- a/cli/shell/curl.go +++ b/internal/curl/curl.go @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. 
-package shell +package curl import ( "bytes" diff --git a/cli/docker/docker.go b/internal/docker/docker.go similarity index 63% rename from cli/docker/docker.go rename to internal/docker/docker.go index 8f7236b778..f626e4c91d 100644 --- a/cli/docker/docker.go +++ b/internal/docker/docker.go @@ -8,6 +8,7 @@ import ( "bytes" "compress/gzip" "context" + "fmt" "os" "path/filepath" "strings" @@ -17,6 +18,7 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/client" + "github.com/elastic/e2e-testing/internal/common" log "github.com/sirupsen/logrus" ) @@ -25,6 +27,36 @@ var instance *client.Client // OPNetworkName name of the network used by the tool const OPNetworkName = "elastic-dev-network" +// CheckProcessStateOnTheHost checks if a process is in the desired state in a container +// name of the container for the service: +// we are using the Docker client instead of docker-compose +// because it does not support returning the output of a +// command: it simply returns error level +func CheckProcessStateOnTheHost(containerName string, process string, state string, timeoutFactor int) error { + timeout := time.Duration(common.TimeoutFactor) * time.Minute + + err := WaitForProcess(containerName, process, state, timeout) + if err != nil { + if state == "started" { + log.WithFields(log.Fields{ + "container ": containerName, + "error": err, + "timeout": timeout, + }).Error("The process was not found but should be present") + } else { + log.WithFields(log.Fields{ + "container": containerName, + "error": err, + "timeout": timeout, + }).Error("The process was found but shouldn't be present") + } + + return err + } + + return nil +} + // ExecCommandIntoContainer executes a command, as a user, into a container func ExecCommandIntoContainer(ctx context.Context, containerName string, user string, cmd []string) (string, error) { return ExecCommandIntoContainerWithEnv(ctx, containerName, user, cmd, []string{}) @@ 
-135,6 +167,29 @@ func ExecCommandIntoContainerWithEnv(ctx context.Context, containerName string, return output, nil } +// GetContainerHostname we need the container name because we use the Docker Client instead of Docker Compose +func GetContainerHostname(containerName string) (string, error) { + log.WithFields(log.Fields{ + "containerName": containerName, + }).Trace("Retrieving container name from the Docker client") + + hostname, err := ExecCommandIntoContainer(context.Background(), containerName, "root", []string{"cat", "/etc/hostname"}) + if err != nil { + log.WithFields(log.Fields{ + "containerName": containerName, + "error": err, + }).Error("Could not retrieve container name from the Docker client") + return "", err + } + + log.WithFields(log.Fields{ + "containerName": containerName, + "hostname": hostname, + }).Info("Hostname retrieved from the Docker client") + + return hostname, nil +} + // InspectContainer returns the JSON representation of the inspection of a // Docker container, identified by its name func InspectContainer(name string) (*types.ContainerJSON, error) { @@ -226,21 +281,8 @@ func TagImage(src string, target string) error { dockerClient := getDockerClient() maxTimeout := 15 * time.Second + exp := common.GetExponentialBackOff(maxTimeout) retryCount := 0 - var ( - initialInterval = 500 * time.Millisecond - randomizationFactor = 0.5 - multiplier = 2.0 - maxInterval = 5 * time.Second - maxElapsedTime = maxTimeout - ) - - exp := backoff.NewExponentialBackOff() - exp.InitialInterval = initialInterval - exp.RandomizationFactor = randomizationFactor - exp.Multiplier = multiplier - exp.MaxInterval = maxInterval - exp.MaxElapsedTime = maxElapsedTime tagImageFn := func() error { retryCount++ @@ -290,6 +332,93 @@ func RemoveDevNetwork() error { return nil } +// WaitForProcess polls a container executing "ps" command until the process is in the desired state (present or not), +// or a timeout happens +func WaitForProcess(containerName string, process 
string, desiredState string, maxTimeout time.Duration) error { + exp := common.GetExponentialBackOff(maxTimeout) + + mustBePresent := false + if desiredState == "started" { + mustBePresent = true + } + retryCount := 1 + + processStatus := func() error { + log.WithFields(log.Fields{ + "desiredState": desiredState, + "process": process, + }).Trace("Checking process desired state on the container") + + output, err := ExecCommandIntoContainer(context.Background(), containerName, "root", []string{"pgrep", "-n", "-l", process}) + if err != nil { + log.WithFields(log.Fields{ + "desiredState": desiredState, + "elapsedTime": exp.GetElapsedTime(), + "error": err, + "container": containerName, + "mustBePresent": mustBePresent, + "process": process, + "retry": retryCount, + }).Warn("Could not execute 'pgrep -n -l' in the container") + + retryCount++ + + return err + } + + outputContainsProcess := strings.Contains(output, process) + + // both true or both false + if mustBePresent == outputContainsProcess { + log.WithFields(log.Fields{ + "desiredState": desiredState, + "container": containerName, + "mustBePresent": mustBePresent, + "process": process, + }).Infof("Process desired state checked") + + return nil + } + + if mustBePresent { + err = fmt.Errorf("%s process is not running in the container yet", process) + log.WithFields(log.Fields{ + "desiredState": desiredState, + "elapsedTime": exp.GetElapsedTime(), + "error": err, + "container": containerName, + "process": process, + "retry": retryCount, + }).Warn(err.Error()) + + retryCount++ + + return err + } + + err = fmt.Errorf("%s process is still running in the container", process) + log.WithFields(log.Fields{ + "elapsedTime": exp.GetElapsedTime(), + "error": err, + "container": containerName, + "process": process, + "state": desiredState, + "retry": retryCount, + }).Warn(err.Error()) + + retryCount++ + + return err + } + + err := backoff.Retry(processStatus, exp) + if err != nil { + return err + } + + return nil +} + func 
getDockerClient() *client.Client { if instance != nil { return instance diff --git a/e2e/assertions.go b/internal/elasticsearch/assertions.go similarity index 97% rename from e2e/assertions.go rename to internal/elasticsearch/assertions.go index 794fcca114..b4a07796dd 100644 --- a/e2e/assertions.go +++ b/internal/elasticsearch/assertions.go @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. -package e2e +package elasticsearch import ( "fmt" @@ -31,7 +31,7 @@ func AssertHitsAreNotPresent(hits map[string]interface{}) error { // AssertHitsDoNotContainErrors returns an error if any of the returned entries contains // an "error.message" field in the "_source" document -func AssertHitsDoNotContainErrors(hits map[string]interface{}, q ElasticsearchQuery) error { +func AssertHitsDoNotContainErrors(hits map[string]interface{}, q Query) error { errors := []interface{}{} iterableHits := hits["hits"].(map[string]interface{})["hits"].([]interface{}) diff --git a/e2e/elasticsearch.go b/internal/elasticsearch/client.go similarity index 93% rename from e2e/elasticsearch.go rename to internal/elasticsearch/client.go index 07eedda924..c8b32e48f8 100644 --- a/e2e/elasticsearch.go +++ b/internal/elasticsearch/client.go @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. 
-package e2e +package elasticsearch import ( "bytes" @@ -13,18 +13,19 @@ import ( "time" backoff "github.com/cenkalti/backoff/v4" - "github.com/elastic/e2e-testing/cli/shell" - curl "github.com/elastic/e2e-testing/cli/shell" + "github.com/elastic/e2e-testing/internal/common" + curl "github.com/elastic/e2e-testing/internal/curl" + "github.com/elastic/e2e-testing/internal/shell" es "github.com/elastic/go-elasticsearch/v8" log "github.com/sirupsen/logrus" "go.elastic.co/apm" "go.elastic.co/apm/module/apmelasticsearch" ) -// ElasticsearchQuery a very reduced representation of an elasticsearch query, where +// Query a very reduced representation of an elasticsearch query, where // we want to simply override the event.module and service.version fields //nolint:unused -type ElasticsearchQuery struct { +type Query struct { EventModule string IndexName string ServiceVersion string @@ -114,8 +115,8 @@ func getElasticsearchClientFromHostPort(ctx context.Context, host string, port i return esClient, nil } -//nolint:unused -func search(ctx context.Context, indexName string, query map[string]interface{}) (SearchResult, error) { +// Search provide search interface to ES +func Search(ctx context.Context, indexName string, query map[string]interface{}) (SearchResult, error) { span, _ := apm.StartSpanOptions(ctx, "Search", "elasticsearch.search", apm.SpanOptions{ Parent: apm.SpanFromContext(ctx).TraceContext(), }) @@ -147,6 +148,7 @@ func search(ctx context.Context, indexName string, query map[string]interface{}) esClient.Search.WithBody(&buf), esClient.Search.WithTrackTotalHits(true), esClient.Search.WithPretty(), + esClient.Search.WithSize(10000), ) if err != nil { log.WithFields(log.Fields{ @@ -200,7 +202,7 @@ func WaitForElasticsearch(ctx context.Context, maxTimeoutMinutes time.Duration) // WaitForElasticsearchFromHostPort waits for an elasticsearch running in a host:port to be healthy, returning false // if elasticsearch does not get healthy status in a defined number of 
minutes. func WaitForElasticsearchFromHostPort(ctx context.Context, host string, port int, maxTimeoutMinutes time.Duration) (bool, error) { - exp := GetExponentialBackOff(maxTimeoutMinutes) + exp := common.GetExponentialBackOff(maxTimeoutMinutes) retryCount := 1 @@ -249,7 +251,7 @@ func WaitForElasticsearchFromHostPort(ctx context.Context, host string, port int // WaitForIndices waits for the elasticsearch indices to return the list of indices. func WaitForIndices() (string, error) { - exp := GetExponentialBackOff(60 * time.Second) + exp := common.GetExponentialBackOff(60 * time.Second) retryCount := 1 body := "" @@ -292,13 +294,13 @@ func WaitForIndices() (string, error) { // WaitForNumberOfHits waits for an elasticsearch query to return more than a number of hits, // returning false if the query does not reach that number in a defined number of time. func WaitForNumberOfHits(ctx context.Context, indexName string, query map[string]interface{}, desiredHits int, maxTimeout time.Duration) (SearchResult, error) { - exp := GetExponentialBackOff(maxTimeout) + exp := common.GetExponentialBackOff(maxTimeout) retryCount := 1 result := SearchResult{} numberOfHits := func() error { - hits, err := search(ctx, indexName, query) + hits, err := Search(ctx, indexName, query) if err != nil { log.WithFields(log.Fields{ "desiredHits": desiredHits, diff --git a/internal/git/git.go b/internal/git/git.go new file mode 100644 index 0000000000..ee2663bd82 --- /dev/null +++ b/internal/git/git.go @@ -0,0 +1,187 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package git + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/lann/builder" + log "github.com/sirupsen/logrus" + + git "gopkg.in/src-d/go-git.v4" + "gopkg.in/src-d/go-git.v4/plumbing" + ssh "gopkg.in/src-d/go-git.v4/plumbing/transport/ssh" +) + +// GitProtocol the git protocol string representation +const GitProtocol = "git@" + +// Project represents a git project +type Project struct { + BaseWorkspace string + Branch string + Domain string + Name string + Protocol string + User string +} + +// GetURL Returns the URL of a Project +func (d *Project) GetURL() string { + if d.Protocol == GitProtocol { + return d.Protocol + d.Domain + ":" + d.User + "/" + d.Name + } + + return "https://" + d.Domain + "/" + d.User + "/" + d.Name +} + +// GetWorkspace Returns the workspace of a Project +func (d *Project) GetWorkspace() string { + return filepath.Join(d.BaseWorkspace, d.Name) +} + +type projectBuilder builder.Builder + +func (b projectBuilder) Build() Project { + return builder.GetStruct(b).(Project) +} + +func (b projectBuilder) WithBaseWorkspace(baseWorkspace string) projectBuilder { + return builder.Set(b, "BaseWorkspace", baseWorkspace).(projectBuilder) +} + +func (b projectBuilder) WithDomain(domain string) projectBuilder { + return builder.Set(b, "Domain", domain).(projectBuilder) +} + +func (b projectBuilder) WithGitProtocol() projectBuilder { + return builder.Set(b, "Protocol", GitProtocol).(projectBuilder) +} + +func (b projectBuilder) WithName(name string) projectBuilder { + return builder.Set(b, "Name", name).(projectBuilder) +} + +func (b projectBuilder) WithRemote(remote string) projectBuilder { + coordinates := strings.Split(remote, ":") + if len(coordinates) == 1 { + return b.withUser(coordinates[0]).withBranch("master") + } else if len(coordinates) != 2 { + return b + } + + return b.withUser(coordinates[0]).withBranch(coordinates[1]) +} + +func (b projectBuilder) withBranch(branch string) projectBuilder { + return 
builder.Set(b, "Branch", branch).(projectBuilder) +} + +func (b projectBuilder) withUser(user string) projectBuilder { + return builder.Set(b, "User", user).(projectBuilder) +} + +// ProjectBuilder builder for git projects +var ProjectBuilder = builder.Register(projectBuilder{}, Project{}).(projectBuilder) + +// Clone allows cloning an array of repositories simultaneously +func Clone(repositories ...Project) { + repositoriesChannel := make(chan Project, len(repositories)) + for i := range repositories { + repositoriesChannel <- repositories[i] + } + close(repositoriesChannel) + + workers := 5 + if len(repositoriesChannel) < workers { + workers = len(repositoriesChannel) + } + + errorChannel := make(chan error, 1) + resultChannel := make(chan bool, len(repositories)) + + for i := 0; i < workers; i++ { + // Consume work from repositoriesChannel. Loop will end when no more work. + for repository := range repositoriesChannel { + go cloneGithubRepository(repository, resultChannel, errorChannel) + } + } + + // Collect results from workers + + for i := 0; i < len(repositories); i++ { + select { + case <-resultChannel: + log.WithFields(log.Fields{ + "url": repositories[i].GetURL(), + }).Info("Git clone succeed") + case err := <-errorChannel: + if err != nil { + log.WithFields(log.Fields{ + "url": repositories[i].GetURL(), + "error": err, + }).Warn("Git clone errored") + } + } + } +} + +func cloneGithubRepository( + githubRepo Project, resultChannel chan bool, errorChannel chan error) { + + gitRepositoryDir := githubRepo.GetWorkspace() + + if _, err := os.Stat(gitRepositoryDir); os.IsExist(err) { + select { + case errorChannel <- err: + // will break parent goroutine out of loop + default: + // don't care, first error wins + } + return + } + + githubRepositoryURL := githubRepo.GetURL() + + log.WithFields(log.Fields{ + "url": githubRepositoryURL, + "directory": gitRepositoryDir, + }).Info("Cloning project. 
This process could take long depending on its size") + + cloneOptions := &git.CloneOptions{ + URL: githubRepositoryURL, + Progress: os.Stdout, + ReferenceName: plumbing.ReferenceName(fmt.Sprintf("refs/heads/%s", githubRepo.Branch)), + SingleBranch: true, + } + + if githubRepo.Protocol == GitProtocol { + auth, err1 := ssh.NewSSHAgentAuth("git") + if err1 != nil { + log.WithFields(log.Fields{ + "error": err1, + }).Fatal("Cloning using keys from SSH agent failed") + } + + cloneOptions.Auth = auth + } + + _, err := git.PlainClone(gitRepositoryDir, false, cloneOptions) + + if err != nil { + select { + case errorChannel <- err: + // will break parent goroutine out of loop + default: + // don't care, first error wins + } + return + } + + resultChannel <- true +} diff --git a/internal/git/git_test.go b/internal/git/git_test.go new file mode 100644 index 0000000000..11aa84f0ee --- /dev/null +++ b/internal/git/git_test.go @@ -0,0 +1,103 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package git + +import ( + "path" + "testing" + + "github.com/Flaque/filet" + "github.com/elastic/e2e-testing/internal/io" + "github.com/stretchr/testify/assert" +) + +const repoBranch = "master" +const repoDomain = "github.com" +const repoName = "Hello-World" +const repoRemote = "octocat" + +func TestBuild(t *testing.T) { + var repo = ProjectBuilder. + WithBaseWorkspace("."). + WithDomain(repoDomain). + WithRemote(repoRemote). + WithName(repoName). 
+ Build() + + assert.Equal(t, ".", repo.BaseWorkspace) + assert.Equal(t, repoDomain, repo.Domain) + assert.Equal(t, "", repo.Protocol) + assert.Equal(t, repoBranch, repo.Branch) + assert.Equal(t, repoRemote, repo.User) + assert.Equal(t, "https://"+repoDomain+"/"+repoRemote+"/"+repoName, repo.GetURL()) +} + +func TestBuildWithGitProtocol(t *testing.T) { + var repo = ProjectBuilder. + WithBaseWorkspace("."). + WithGitProtocol(). + WithDomain(repoDomain). + WithRemote(repoRemote). + WithName(repoName). + Build() + + assert.Equal(t, ".", repo.BaseWorkspace) + assert.Equal(t, repoDomain, repo.Domain) + assert.Equal(t, "git@", repo.Protocol) + assert.Equal(t, repoBranch, repo.Branch) + assert.Equal(t, repoRemote, repo.User) + assert.Equal(t, "git@"+repoDomain+":"+repoRemote+"/"+repoName, repo.GetURL()) +} + +func TestBuildWithWrongRemote(t *testing.T) { + var repo = ProjectBuilder. + WithBaseWorkspace("."). + WithDomain(repoDomain). + WithRemote(repoRemote). + WithName(repoName). + Build() + + assert.Equal(t, repoBranch, repo.Branch) + assert.Equal(t, repoRemote, repo.User) +} + +func TestBuildWithWellFormedRemote(t *testing.T) { + var repo = ProjectBuilder. + WithBaseWorkspace("."). + WithDomain(repoDomain). + WithRemote(repoRemote + ":foo"). + WithName(repoName). + Build() + + assert.Equal(t, "foo", repo.Branch) + assert.Equal(t, repoRemote, repo.User) +} + +func TestClone(t *testing.T) { + defer filet.CleanUp(t) + gitDir := createGitDir(t) + + var repo = ProjectBuilder. + WithBaseWorkspace(gitDir). + WithDomain(repoDomain). + WithRemote(repoRemote + ":" + repoBranch). + WithName(repoName). 
+ Build() + + Clone(repo) + + e, _ := io.Exists(path.Join(gitDir, repoName)) + assert.True(t, e) +} + +func createGitDir(t *testing.T) string { + tmpDir := filet.TmpDir(t, "") + gitDir := path.Join(tmpDir, "git") + + err := io.MkdirAll(gitDir) + assert.Nil(t, err) + + return gitDir +} diff --git a/cli/services/helm.go b/internal/helm/helm.go similarity index 90% rename from cli/services/helm.go rename to internal/helm/helm.go index 7edbf621c0..403bd0f399 100644 --- a/cli/services/helm.go +++ b/internal/helm/helm.go @@ -2,34 +2,34 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. -package services +package helm import ( "context" "errors" "strings" - "github.com/elastic/e2e-testing/cli/shell" + "github.com/elastic/e2e-testing/internal/shell" log "github.com/sirupsen/logrus" "go.elastic.co/apm" ) -// HelmManager defines the operations for Helm +// Manager defines the operations for Helm -type HelmManager interface { +type Manager interface { AddRepo(ctx context.Context, repo string, URL string) error DeleteChart(ctx context.Context, chart string) error InstallChart(ctx context.Context, name string, chart string, version string, flags []string) error } -// HelmFactory returns oone of the Helm supported versions, or an error -func HelmFactory(version string) (HelmManager, error) { +// Factory returns one of the Helm supported versions, or an error +func Factory(version string) (Manager, error) { if strings.HasPrefix(version, "3.") { helm := &helm3X{} helm.Version = version return helm, nil } - var helm HelmManager + var helm Manager return helm, errors.New("Sorry, we don't support Helm v" + version + " version") } diff --git a/internal/installer/base.go b/internal/installer/base.go new file mode 100644 index 0000000000..1178c46909 --- /dev/null +++ b/internal/installer/base.go @@ -0,0 +1,151 @@ +// Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package installer + +import ( + "context" + "fmt" + "strings" + + "github.com/elastic/e2e-testing/internal/common" + "github.com/elastic/e2e-testing/internal/compose" + "github.com/elastic/e2e-testing/internal/docker" + "github.com/elastic/e2e-testing/internal/kibana" + log "github.com/sirupsen/logrus" +) + +// Package represents the operations that can be performed by an installer package type +type Package interface { + Install(containerName string, token string) error + InstallCerts(cfg *kibana.FleetConfig) error + PrintLogs(containerName string) error + Postinstall() error + Preinstall() error + Uninstall() error +} + +// BasePackage holds references to basic state for all installers +type BasePackage struct { + binaryName string + commitFile string + image string + logFile string + profile string + service string +} + +// extractPackage depends on the underlying OS, so 'cmds' must contain the specific instructions for the OS +func (i *BasePackage) extractPackage(cmds []string) error { + sm := compose.NewServiceManager() + err := sm.ExecCommandInService(i.profile, i.image, i.service, cmds, common.ProfileEnv, false) + if err != nil { + log.WithFields(log.Fields{ + "command": cmds, + "error": err, + "image": i.image, + "service": i.service, + }).Error("Could not extract agent package in the box") + + return err + } + + return nil +} + +// Postinstall executes operations after installing a DEB package +func (i *BasePackage) Postinstall() error { + err := SystemctlRun(i.profile, i.image, i.service, "enable") + if err != nil { + return err + } + return SystemctlRun(i.profile, i.image, i.service, "start") +} + +// PrintLogs prints logs for the agent +func (i *BasePackage) PrintLogs(containerName string) error { + err := i.resolveLogFile(containerName) + if err != 
nil { + return fmt.Errorf("Could not resolve log file: %v", err) + } + + cmd := []string{ + "cat", i.logFile, + } + + sm := compose.NewServiceManager() + err = sm.ExecCommandInService(i.profile, i.image, i.service, cmd, common.ProfileEnv, false) + if err != nil { + return err + } + + return nil +} + +// resolveLogFile retrieves the full path of the log file in the underlying Docker container +// calculating the hash commit if necessary +func (i *BasePackage) resolveLogFile(containerName string) error { + if strings.Contains(i.logFile, "%s") { + hash, err := getElasticAgentHash(containerName, i.commitFile) + if err != nil { + log.WithFields(log.Fields{ + "containerName": containerName, + "error": err, + }).Error("Could not get agent hash in the container") + + return err + } + + i.logFile = fmt.Sprintf(i.logFile, hash) + } + + return nil +} + +// getElasticAgentHash uses Elastic Agent's home dir to read the file with agent's build hash +// it will return the first six characters of the hash (short hash) +func getElasticAgentHash(containerName string, commitFile string) (string, error) { + cmd := []string{ + "cat", commitFile, + } + + fullHash, err := docker.ExecCommandIntoContainer(context.Background(), containerName, "root", cmd) + if err != nil { + return "", err + } + + runes := []rune(fullHash) + shortHash := string(runes[0:6]) + + log.WithFields(log.Fields{ + "commitFile": commitFile, + "containerName": containerName, + "hash": fullHash, + "shortHash": shortHash, + }).Debug("Agent build hash found") + + return shortHash, nil +} + +// SystemctlRun runs systemctl in profile or service +func SystemctlRun(profile string, image string, service string, command string) error { + cmd := []string{"systemctl", command, common.ElasticAgentProcessName} + sm := compose.NewServiceManager() + err := sm.ExecCommandInService(profile, image, service, cmd, common.ProfileEnv, false) + if err != nil { + log.WithFields(log.Fields{ + "command": cmd, + "error": err, + "service": 
service, + }).Errorf("Could not %s the service", command) + + return err + } + + log.WithFields(log.Fields{ + "command": cmd, + "service": service, + }).Trace("Systemctl executed") + return nil +} diff --git a/e2e/_suites/fleet/services_test.go b/internal/installer/base_test.go similarity index 99% rename from e2e/_suites/fleet/services_test.go rename to internal/installer/base_test.go index 12e4d87f43..b5f0f27593 100644 --- a/e2e/_suites/fleet/services_test.go +++ b/internal/installer/base_test.go @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. -package main +package installer import ( "os" diff --git a/internal/installer/deb.go b/internal/installer/deb.go new file mode 100644 index 0000000000..5ec4c07c2b --- /dev/null +++ b/internal/installer/deb.go @@ -0,0 +1,132 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package installer + +import ( + "github.com/elastic/e2e-testing/internal/common" + "github.com/elastic/e2e-testing/internal/compose" + "github.com/elastic/e2e-testing/internal/kibana" + "github.com/elastic/e2e-testing/internal/utils" + log "github.com/sirupsen/logrus" +) + +// DEBPackage implements operations for a DEB installer +type DEBPackage struct { + BasePackage +} + +// NewDEBPackage creates an instance for the DEB installer +func NewDEBPackage(binaryName string, profile string, image string, service string, commitFile string, logFile string) *DEBPackage { + return &DEBPackage{ + BasePackage: BasePackage{ + binaryName: binaryName, + commitFile: commitFile, + image: image, + profile: profile, + service: service, + }, + } +} + +// Install installs a DEB package +func (i *DEBPackage) Install(cfg *kibana.FleetConfig) error { + return i.extractPackage([]string{"apt", "install", "/" + i.binaryName, "-y"}) +} + +// InstallCerts installs the certificates for a DEB package +func (i *DEBPackage) InstallCerts() error { + return installCertsForDebian(i.profile, i.image, i.service) +} +func installCertsForDebian(profile string, image string, service string) error { + sm := compose.NewServiceManager() + if err := sm.ExecCommandInService(profile, image, service, []string{"apt-get", "update"}, common.ProfileEnv, false); err != nil { + return err + } + if err := sm.ExecCommandInService(profile, image, service, []string{"apt", "install", "ca-certificates", "-y"}, common.ProfileEnv, false); err != nil { + return err + } + if err := sm.ExecCommandInService(profile, image, service, []string{"update-ca-certificates", "-f"}, common.ProfileEnv, false); err != nil { + return err + } + return nil +} + +// Preinstall executes operations before installing a DEB package +func (i *DEBPackage) Preinstall() error { + log.Trace("No preinstall commands for DEB packages") + return nil +} + +// Uninstall uninstalls a DEB package +func (i *DEBPackage) Uninstall() error { + log.Trace("No 
uninstall commands for DEB packages") + return nil +} + +// newDebianInstaller returns an instance of the Debian installer for a specific version +func newDebianInstaller(image string, tag string, version string) (ElasticAgentInstaller, error) { + image = image + "-systemd" // we want to consume systemd boxes + service := image + profile := common.FleetProfileName + + // extract the agent in the box, as it's mounted as a volume + artifact := "elastic-agent" + os := "linux" + arch := "amd64" + extension := "deb" + + binaryName := utils.BuildArtifactName(artifact, version, common.AgentVersionBase, os, arch, extension, false) + binaryPath, err := downloadAgentBinary(binaryName, artifact, version) + if err != nil { + log.WithFields(log.Fields{ + "artifact": artifact, + "version": version, + "os": os, + "arch": arch, + "extension": extension, + "error": err, + }).Error("Could not download the binary for the agent") + return ElasticAgentInstaller{}, err + } + + enrollFn := func(cfg *kibana.FleetConfig) error { + return runElasticAgentCommandEnv(profile, image, service, common.ElasticAgentProcessName, "enroll", cfg.Flags(), map[string]string{}) + } + + workingDir := "/var/lib/elastic-agent" + binDir := workingDir + "/data/elastic-agent-%s/" + + commitFile := "/etc/elastic-agent/.elastic-agent.active.commit" + + logsDir := binDir + "logs/" + logFileName := "elastic-agent-json.log" + logFile := logsDir + "/" + logFileName + + installerPackage := NewDEBPackage(binaryName, profile, image, service, commitFile, logFile) + + return ElasticAgentInstaller{ + artifactArch: arch, + artifactExtension: extension, + artifactName: artifact, + artifactOS: os, + artifactVersion: version, + BinaryPath: binaryPath, + EnrollFn: enrollFn, + Image: image, + InstallFn: installerPackage.Install, + InstallCertsFn: installerPackage.InstallCerts, + InstallerType: "deb", + Name: binaryName, + PostInstallFn: installerPackage.Postinstall, + PreInstallFn: installerPackage.Preinstall, + PrintLogsFn: 
installerPackage.PrintLogs, + processName: common.ElasticAgentProcessName, + Profile: profile, + Service: service, + Tag: tag, + UninstallFn: installerPackage.Uninstall, + workingDir: workingDir, + }, nil +} diff --git a/internal/installer/docker.go b/internal/installer/docker.go new file mode 100644 index 0000000000..1877dde9e6 --- /dev/null +++ b/internal/installer/docker.go @@ -0,0 +1,183 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package installer + +import ( + "github.com/elastic/e2e-testing/internal/common" + "github.com/elastic/e2e-testing/internal/docker" + "github.com/elastic/e2e-testing/internal/kibana" + "github.com/elastic/e2e-testing/internal/utils" + log "github.com/sirupsen/logrus" +) + +// DockerPackage implements operations for a DEB installer +type DockerPackage struct { + BasePackage + installerPath string + ubi8 bool + // optional fields + arch string + artifact string + originalVersion string + OS string + version string +} + +// NewDockerPackage creates an instance for the Docker installer +func NewDockerPackage(binaryName string, profile string, image string, service string, installerPath string, ubi8 bool, commitFile string, logFile string) *DockerPackage { + return &DockerPackage{ + BasePackage: BasePackage{ + binaryName: binaryName, + commitFile: commitFile, + image: image, + logFile: logFile, + profile: profile, + service: service, + }, + installerPath: installerPath, + ubi8: ubi8, + } +} + +// Install installs a Docker package +func (i *DockerPackage) Install(cfg *kibana.FleetConfig) error { + log.Trace("No install commands for Docker packages") + return nil +} + +// InstallCerts installs the certificates for a Docker package +func (i *DockerPackage) InstallCerts() error { + log.Trace("No install certs commands for Docker packages") + 
return nil +} + +// Preinstall executes operations before installing a Docker package +func (i *DockerPackage) Preinstall() error { + err := docker.LoadImage(i.installerPath) + if err != nil { + return err + } + + // we need to tag the loaded image because its tag relates to the target branch + return docker.TagImage( + "docker.elastic.co/beats/"+i.artifact+":"+common.AgentVersionBase, + "docker.elastic.co/observability-ci/"+i.artifact+":"+i.originalVersion+"-amd64", + ) +} + +// Postinstall executes operations after installing a Docker package +func (i *DockerPackage) Postinstall() error { + log.Trace("No postinstall commands for Docker packages") + return nil +} + +// Uninstall uninstalls a Docker package +func (i *DockerPackage) Uninstall() error { + log.Trace("No uninstall commands for Docker packages") + return nil +} + +// WithArch sets the architecture +func (i *DockerPackage) WithArch(arch string) *DockerPackage { + i.arch = arch + return i +} + +// WithArtifact sets the artifact +func (i *DockerPackage) WithArtifact(artifact string) *DockerPackage { + i.artifact = artifact + return i +} + +// WithOS sets the OS +func (i *DockerPackage) WithOS(OS string) *DockerPackage { + i.OS = OS + return i +} + +// WithVersion sets the version +func (i *DockerPackage) WithVersion(version string) *DockerPackage { + i.version = utils.CheckPRVersion(version, common.AgentVersionBase) // sanitize version + i.originalVersion = version + return i +} + +// newDockerInstaller returns an instance of the Docker installer +func newDockerInstaller(ubi8 bool, version string) (ElasticAgentInstaller, error) { + image := "elastic-agent" + service := image + profile := common.FleetProfileName + + // extract the agent in the box, as it's mounted as a volume + artifact := "elastic-agent" + + artifactName := artifact + if ubi8 { + artifactName = "elastic-agent-ubi8" + image = "elastic-agent-ubi8" + } + + os := "linux" + arch := "amd64" + extension := "tar.gz" + + binaryName := 
 utils.BuildArtifactName(artifactName, version, common.AgentVersionBase, os, arch, extension, true) + binaryPath, err := downloadAgentBinary(binaryName, artifact, version) + if err != nil { + log.WithFields(log.Fields{ + "artifact": artifact, + "version": version, + "os": os, + "arch": arch, + "extension": extension, + "error": err, + }).Error("Could not download the binary for the agent") + return ElasticAgentInstaller{}, err + } + + homeDir := "/usr/share/elastic-agent" + workingDir := homeDir + binDir := homeDir + "/data/elastic-agent-%s/" + + commitFile := homeDir + "/.elastic-agent.active.commit" + + logsDir := binDir + "logs/" + logFileName := "elastic-agent-json.log" + logFile := logsDir + "/" + logFileName + + enrollFn := func(cfg *kibana.FleetConfig) error { + return nil + } + + installerPackage := NewDockerPackage(binaryName, profile, artifactName, service, binaryPath, ubi8, commitFile, logFile). + WithArch(arch). + WithArtifact(artifactName). + WithOS(os). + WithVersion(version) + + return ElasticAgentInstaller{ + artifactArch: arch, + artifactExtension: extension, + artifactName: artifact, + artifactOS: os, + artifactVersion: version, + BinaryPath: binaryPath, + EnrollFn: enrollFn, + Image: image, + InstallFn: installerPackage.Install, + InstallCertsFn: installerPackage.InstallCerts, + InstallerType: "docker", + Name: binaryName, + PostInstallFn: installerPackage.Postinstall, + PreInstallFn: installerPackage.Preinstall, + PrintLogsFn: installerPackage.PrintLogs, + processName: common.ElasticAgentProcessName, + Profile: profile, + Service: service, + Tag: version, + UninstallFn: installerPackage.Uninstall, + workingDir: workingDir, + }, nil +} diff --git a/internal/installer/elasticagent.go b/internal/installer/elasticagent.go new file mode 100644 index 0000000000..8399089158 --- /dev/null +++ b/internal/installer/elasticagent.go @@ -0,0 +1,145 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package installer + +import ( + "context" + "fmt" + + "github.com/elastic/e2e-testing/internal/common" + "github.com/elastic/e2e-testing/internal/compose" + "github.com/elastic/e2e-testing/internal/docker" + "github.com/elastic/e2e-testing/internal/kibana" + "github.com/elastic/e2e-testing/internal/utils" + log "github.com/sirupsen/logrus" +) + +// ElasticAgentInstaller represents how to install an agent, depending of the box type +type ElasticAgentInstaller struct { + artifactArch string // architecture of the artifact + artifactExtension string // extension of the artifact + artifactName string // name of the artifact + artifactOS string // OS of the artifact + artifactVersion string // version of the artifact + BinaryPath string // the local path where the agent for the binary is located + EnrollFn func(cfg *kibana.FleetConfig) error + Image string // docker image + InstallerType string + InstallFn func(cfg *kibana.FleetConfig) error + InstallCertsFn func() error + Name string // the name for the binary + processName string // name of the elastic-agent process + Profile string // parent docker-compose file + PostInstallFn func() error + PreInstallFn func() error + PrintLogsFn func(containerName string) error + Service string // name of the service + Tag string // docker tag + UninstallFn func() error + workingDir string // location of the application +} + +// ListElasticAgentWorkingDirContent list Elastic Agent's working dir content +func (i *ElasticAgentInstaller) ListElasticAgentWorkingDirContent(containerName string) (string, error) { + cmd := []string{ + "ls", "-l", i.workingDir, + } + + content, err := docker.ExecCommandIntoContainer(context.Background(), containerName, "root", cmd) + if err != nil { + return "", err + } + + log.WithFields(log.Fields{ + "workingDir": i.workingDir, + 
"containerName": containerName, + "content": content, + }).Debug("Agent working dir content") + + return content, nil +} + +// runElasticAgentCommandEnv runs a command for the elastic-agent +func runElasticAgentCommandEnv(profile string, image string, service string, process string, command string, arguments []string, env map[string]string) error { + cmds := []string{ + "timeout", fmt.Sprintf("%dm", common.TimeoutFactor), process, command, + } + cmds = append(cmds, arguments...) + + for k, v := range env { + common.ProfileEnv[k] = v + } + + sm := compose.NewServiceManager() + err := sm.ExecCommandInService(profile, image, service, cmds, common.ProfileEnv, false) + if err != nil { + log.WithFields(log.Fields{ + "command": cmds, + "profile": profile, + "service": service, + "error": err, + }).Error("Could not run agent command in the box") + + return err + } + + return nil +} + +// downloadAgentBinary it downloads the binary and stores the location of the downloaded file +// into the installer struct, to be used else where +// If the environment variable ELASTIC_AGENT_DOWNLOAD_URL exists, then the artifact to be downloaded will +// be defined by that value +// Else if the environment variable BEATS_LOCAL_PATH is set, then the artifact +// to be used will be defined by the local snapshot produced by the local build. +// Else, if the environment variable BEATS_USE_CI_SNAPSHOTS is set, then the artifact +// to be downloaded will be defined by the latest snapshot produced by the Beats CI. 
+func downloadAgentBinary(artifactName string, artifact string, version string) (string, error) { + imagePath, err := utils.FetchBeatsBinary(artifactName, artifact, version, common.AgentVersionBase, common.TimeoutFactor, true) + if err != nil { + return "", err + } + + return imagePath, nil +} + +// GetElasticAgentInstaller returns an installer from a docker image +func GetElasticAgentInstaller(image string, installerType string, version string) ElasticAgentInstaller { + log.WithFields(log.Fields{ + "image": image, + "installer": installerType, + }).Debug("Configuring installer for the agent") + + var installer ElasticAgentInstaller + var err error + if "centos" == image && "tar" == installerType { + installer, err = newTarInstaller("centos", "latest", version) + } else if "centos" == image && "systemd" == installerType { + installer, err = newCentosInstaller("centos", "latest", version) + } else if "debian" == image && "tar" == installerType { + installer, err = newTarInstaller("debian", "stretch", version) + } else if "debian" == image && "systemd" == installerType { + installer, err = newDebianInstaller("debian", "stretch", version) + } else if "docker" == image && "default" == installerType { + installer, err = newDockerInstaller(false, version) + } else if "docker" == image && "ubi8" == installerType { + installer, err = newDockerInstaller(true, version) + } else { + log.WithFields(log.Fields{ + "image": image, + "installer": installerType, + }).Fatal("Sorry, we currently do not support this installer") + return ElasticAgentInstaller{} + } + + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "image": image, + "installer": installerType, + }).Fatal("Sorry, we could not download the installer") + } + return installer +} diff --git a/internal/installer/rpm.go b/internal/installer/rpm.go new file mode 100644 index 0000000000..acc7c554fb --- /dev/null +++ b/internal/installer/rpm.go @@ -0,0 +1,136 @@ +// Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package installer + +import ( + "github.com/elastic/e2e-testing/internal/common" + "github.com/elastic/e2e-testing/internal/compose" + "github.com/elastic/e2e-testing/internal/kibana" + "github.com/elastic/e2e-testing/internal/utils" + log "github.com/sirupsen/logrus" +) + +// RPMPackage implements operations for a RPM installer +type RPMPackage struct { + BasePackage +} + +// NewRPMPackage creates an instance for the RPM installer +func NewRPMPackage(binaryName string, profile string, image string, service string, commitFile string, logFile string) *RPMPackage { + return &RPMPackage{ + BasePackage: BasePackage{ + binaryName: binaryName, + commitFile: commitFile, + image: image, + logFile: logFile, + profile: profile, + service: service, + }, + } +} + +// Install installs a RPM package +func (i *RPMPackage) Install(cfg *kibana.FleetConfig) error { + return i.extractPackage([]string{"yum", "localinstall", "/" + i.binaryName, "-y"}) +} + +// InstallCerts installs the certificates for a RPM package +func (i *RPMPackage) InstallCerts() error { + return installCertsForCentos(i.profile, i.image, i.service) +} +func installCertsForCentos(profile string, image string, service string) error { + sm := compose.NewServiceManager() + if err := sm.ExecCommandInService(profile, image, service, []string{"yum", "check-update"}, common.ProfileEnv, false); err != nil { + return err + } + if err := sm.ExecCommandInService(profile, image, service, []string{"yum", "install", "ca-certificates", "-y"}, common.ProfileEnv, false); err != nil { + return err + } + if err := sm.ExecCommandInService(profile, image, service, []string{"update-ca-trust", "force-enable"}, common.ProfileEnv, false); err != nil { + return err + } + if err := sm.ExecCommandInService(profile, image, service, 
[]string{"update-ca-trust", "extract"}, common.ProfileEnv, false); err != nil { + return err + } + return nil +} + +// Preinstall executes operations before installing a RPM package +func (i *RPMPackage) Preinstall() error { + log.Trace("No preinstall commands for RPM packages") + return nil +} + +// Uninstall uninstalls a RPM package +func (i *RPMPackage) Uninstall() error { + log.Trace("No uninstall commands for RPM packages") + return nil +} + +// newCentosInstaller returns an instance of the Centos installer for a specific version +func newCentosInstaller(image string, tag string, version string) (ElasticAgentInstaller, error) { + image = image + "-systemd" // we want to consume systemd boxes + service := image + profile := common.FleetProfileName + + // extract the agent in the box, as it's mounted as a volume + artifact := "elastic-agent" + os := "linux" + arch := "x86_64" + extension := "rpm" + + binaryName := utils.BuildArtifactName(artifact, version, common.AgentVersionBase, os, arch, extension, false) + binaryPath, err := downloadAgentBinary(binaryName, artifact, version) + if err != nil { + log.WithFields(log.Fields{ + "artifact": artifact, + "version": version, + "os": os, + "arch": arch, + "extension": extension, + "error": err, + }).Error("Could not download the binary for the agent") + return ElasticAgentInstaller{}, err + } + + enrollFn := func(cfg *kibana.FleetConfig) error { + return runElasticAgentCommandEnv(profile, image, service, common.ElasticAgentProcessName, "enroll", cfg.Flags(), map[string]string{}) + } + + workingDir := "/var/lib/elastic-agent" + binDir := workingDir + "/data/elastic-agent-%s/" + + commitFile := "/etc/elastic-agent/.elastic-agent.active.commit" + + logsDir := binDir + "logs/" + logFileName := "elastic-agent-json.log" + logFile := logsDir + "/" + logFileName + + installerPackage := NewRPMPackage(binaryName, profile, image, service, commitFile, logFile) + + return ElasticAgentInstaller{ + artifactArch: arch, + 
artifactExtension: extension, + artifactName: artifact, + artifactOS: os, + artifactVersion: version, + BinaryPath: binaryPath, + EnrollFn: enrollFn, + Image: image, + InstallFn: installerPackage.Install, + InstallCertsFn: installerPackage.InstallCerts, + InstallerType: "rpm", + Name: binaryName, + PostInstallFn: installerPackage.Postinstall, + PreInstallFn: installerPackage.Preinstall, + PrintLogsFn: installerPackage.PrintLogs, + processName: common.ElasticAgentProcessName, + Profile: profile, + Service: service, + Tag: tag, + UninstallFn: installerPackage.Uninstall, + workingDir: workingDir, + }, nil +} diff --git a/internal/installer/tar.go b/internal/installer/tar.go new file mode 100644 index 0000000000..26d0288165 --- /dev/null +++ b/internal/installer/tar.go @@ -0,0 +1,216 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package installer + +import ( + "fmt" + + "github.com/elastic/e2e-testing/internal/common" + "github.com/elastic/e2e-testing/internal/compose" + "github.com/elastic/e2e-testing/internal/kibana" + "github.com/elastic/e2e-testing/internal/utils" + log "github.com/sirupsen/logrus" +) + +// TARPackage implements operations for a TAR installer +type TARPackage struct { + BasePackage + // optional fields + arch string + artifact string + OS string + OSFlavour string // at this moment, centos or debian + version string +} + +// NewTARPackage creates an instance for the TAR installer +func NewTARPackage(binaryName string, profile string, image string, service string, commitFile string, logFile string) *TARPackage { + return &TARPackage{ + BasePackage: BasePackage{ + binaryName: binaryName, + commitFile: commitFile, + image: image, + logFile: logFile, + profile: profile, + service: service, + }, + } +} + +// Install installs a TAR package +func (i *TARPackage) Install(cfg *kibana.FleetConfig) error { + // install the elastic-agent to /usr/bin/elastic-agent using command + binary := fmt.Sprintf("/elastic-agent/%s", i.artifact) + args := cfg.Flags() + + err := runElasticAgentCommandEnv(i.profile, i.image, i.service, binary, "install", args, map[string]string{}) + if err != nil { + return fmt.Errorf("Failed to install the agent with subcommand: %v", err) + } + + return nil +} + +// InstallCerts installs the certificates for a TAR package, using the right OS package manager +func (i *TARPackage) InstallCerts() error { + if i.OSFlavour == "centos" { + return installCertsForCentos(i.profile, i.image, i.service) + } else if i.OSFlavour == "debian" { + return installCertsForDebian(i.profile, i.image, i.service) + } + + log.WithFields(log.Fields{ + "arch": i.arch, + "OS": i.OS, + "OSFlavour": i.OSFlavour, + }).Debug("Installation of certificates was skipped because of unknown OS flavour") + + return nil +} + +// Postinstall executes operations after installing a TAR package +func 
(i *TARPackage) Postinstall() error { + log.Trace("No postinstall commands for TAR installer") + return nil +} + +// Preinstall executes operations before installing a TAR package +func (i *TARPackage) Preinstall() error { + err := i.extractPackage([]string{"tar", "-xvf", "/" + i.binaryName}) + if err != nil { + return err + } + + // simplify layout + cmds := [][]string{ + {"rm", "-fr", "/elastic-agent"}, + {"mv", fmt.Sprintf("/%s-%s-%s-%s", i.artifact, i.version, i.OS, i.arch), "/elastic-agent"}, + } + for _, cmd := range cmds { + sm := compose.NewServiceManager() + err = sm.ExecCommandInService(i.profile, i.image, i.service, cmd, common.ProfileEnv, false) + if err != nil { + log.WithFields(log.Fields{ + "command": cmd, + "error": err, + "image": i.image, + "service": i.service, + "version": i.version, + }).Error("Could not extract agent package in the box") + + return err + } + } + + return nil +} + +// Uninstall uninstalls a TAR package +func (i *TARPackage) Uninstall() error { + args := []string{"-f"} + + return runElasticAgentCommandEnv(i.profile, i.image, i.service, common.ElasticAgentProcessName, "uninstall", args, map[string]string{}) +} + +// WithArch sets the architecture +func (i *TARPackage) WithArch(arch string) *TARPackage { + i.arch = arch + return i +} + +// WithArtifact sets the artifact +func (i *TARPackage) WithArtifact(artifact string) *TARPackage { + i.artifact = artifact + return i +} + +// WithOS sets the OS +func (i *TARPackage) WithOS(OS string) *TARPackage { + i.OS = OS + return i +} + +// WithOSFlavour sets the OS flavour, at this moment centos or debian +func (i *TARPackage) WithOSFlavour(OSFlavour string) *TARPackage { + i.OSFlavour = OSFlavour + return i +} + +// WithVersion sets the version +func (i *TARPackage) WithVersion(version string) *TARPackage { + i.version = version + return i +} + +// newTarInstaller returns an instance of the Debian installer for a specific version +func newTarInstaller(image string, tag string, version 
string) (ElasticAgentInstaller, error) { + dockerImage := image + "-systemd" // we want to consume systemd boxes + service := dockerImage + profile := common.FleetProfileName + + // extract the agent in the box, as it's mounted as a volume + artifact := "elastic-agent" + os := "linux" + arch := "x86_64" + extension := "tar.gz" + + binaryName := utils.BuildArtifactName(artifact, version, common.AgentVersionBase, os, arch, extension, false) + binaryPath, err := downloadAgentBinary(binaryName, artifact, version) + if err != nil { + log.WithFields(log.Fields{ + "artifact": artifact, + "version": version, + "os": os, + "arch": arch, + "extension": extension, + "error": err, + }).Error("Could not download the binary for the agent") + return ElasticAgentInstaller{}, err + } + + workingDir := "/opt/Elastic/Agent" + + commitFile := "/elastic-agent/.elastic-agent.active.commit" + + logsDir := workingDir + "/data/elastic-agent-%s/logs/" + logFileName := "elastic-agent-json.log" + logFile := logsDir + "/" + logFileName + + enrollFn := func(cfg *kibana.FleetConfig) error { + return runElasticAgentCommandEnv(profile, dockerImage, service, common.ElasticAgentProcessName, "enroll", cfg.Flags(), map[string]string{}) + } + + // + installerPackage := NewTARPackage(binaryName, profile, dockerImage, service, commitFile, logFile). + WithArch(arch). + WithArtifact(artifact). + WithOS(os). + WithOSFlavour(image). 
+ WithVersion(utils.CheckPRVersion(version, common.AgentVersionBase)) // sanitize version + + return ElasticAgentInstaller{ + artifactArch: arch, + artifactExtension: extension, + artifactName: artifact, + artifactOS: os, + artifactVersion: version, + BinaryPath: binaryPath, + EnrollFn: enrollFn, + Image: dockerImage, + InstallFn: installerPackage.Install, + InstallCertsFn: installerPackage.InstallCerts, + InstallerType: "tar", + Name: binaryName, + PostInstallFn: installerPackage.Postinstall, + PreInstallFn: installerPackage.Preinstall, + PrintLogsFn: installerPackage.PrintLogs, + processName: common.ElasticAgentProcessName, + Profile: profile, + Service: service, + Tag: tag, + UninstallFn: installerPackage.Uninstall, + workingDir: workingDir, + }, nil +} diff --git a/internal/io/io.go b/internal/io/io.go new file mode 100644 index 0000000000..ce8fb4854c --- /dev/null +++ b/internal/io/io.go @@ -0,0 +1,207 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package io + +import ( + "errors" + "io" + "io/ioutil" + "os" + "path/filepath" + + log "github.com/sirupsen/logrus" +) + +// CopyDir recursively copies a directory tree, attempting to preserve permissions. +// Source directory must exist, destination directory will be overridden if it +// exists. Symlinks are ignored and skipped. 
+func CopyDir(src string, dst string) error { + src = filepath.Clean(src) + dst = filepath.Clean(dst) + + si, err := os.Stat(src) + if err != nil { + return err + } + if !si.IsDir() { + return errors.New("source is not a directory") + } + + _, err = os.Stat(dst) + if err != nil && !os.IsNotExist(err) { + return err + } + // always override + + err = MkdirAll(dst) + if err != nil { + return err + } + + entries, err := ioutil.ReadDir(src) + if err != nil { + return err + } + + for _, entry := range entries { + srcPath := filepath.Join(src, entry.Name()) + dstPath := filepath.Join(dst, entry.Name()) + + if entry.IsDir() { + err = CopyDir(srcPath, dstPath) + if err != nil { + return err + } + } else { + // Skip symlinks. + if entry.Mode()&os.ModeSymlink != 0 { + continue + } + + err = CopyFile(srcPath, dstPath, 10000) + if err != nil { + return err + } + } + } + + return nil +} + +// CopyFile copies a file from a source to a destiny, always overridding +// the destination file +// Optimising the copy of files in Go: +// https://opensource.com/article/18/6/copying-files-go +func CopyFile(src string, dst string, bufferSize int64) error { + sourceFileStat, err := os.Stat(src) + if err != nil { + return err + } + + if !sourceFileStat.Mode().IsRegular() { + return errors.New(src + " is not a regular file") + } + + source, err := os.Open(src) + if err != nil { + return err + } + defer source.Close() + + // always override + + err = MkdirAll(filepath.Dir(dst)) + if err != nil { + return err + } + + destination, err := os.Create(dst) + if err != nil { + return err + } + defer destination.Close() + + buf := make([]byte, bufferSize) + for { + n, err := source.Read(buf) + if err != nil && err != io.EOF { + return err + } + if n == 0 { + break + } + + if _, err := destination.Write(buf[:n]); err != nil { + return err + } + } + + return err +} + +// Exists checks if a path exists in the file system +func Exists(path string) (bool, error) { + _, err := os.Stat(path) + if err == nil 
{ + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return true, err +} + +// MkdirAll creates all directories for a directory path +func MkdirAll(path string) error { + if _, err := os.Stat(path); os.IsNotExist(err) { + err = os.MkdirAll(path, 0755) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "path": path, + }).Fatal("Directory cannot be created") + + return err + } + } + + return nil +} + +// FindFiles finds files recursively using a Glob pattern for the matching +func FindFiles(pattern string) []string { + matches, err := filepath.Glob(pattern) + + if err != nil { + log.WithFields(log.Fields{ + "pattern": pattern, + }).Warn("pattern is not a Glob") + + return []string{} + } + + return matches +} + +// ReadDir lists the contents of a directory +func ReadDir(path string) ([]os.FileInfo, error) { + files, err := ioutil.ReadDir(path) + if err != nil { + log.WithFields(log.Fields{ + "path": path, + }).Warn("Could not read file system") + return []os.FileInfo{}, err + } + + return files, nil +} + +// ReadFile returns the byte array representing a file +func ReadFile(path string) ([]byte, error) { + bytes, err := ioutil.ReadFile(path) + if err != nil { + log.WithFields(log.Fields{ + "path": path, + }).Warn("Could not read file") + return []byte{}, err + } + + return bytes, nil +} + +// WriteFile writes bytes into target +func WriteFile(bytes []byte, target string) error { + err := ioutil.WriteFile(target, bytes, 0755) + if err != nil { + log.WithFields(log.Fields{ + "target": target, + "error": err, + }).Error("Cannot write file") + + return err + } + + return nil +} diff --git a/internal/io/io_test.go b/internal/io/io_test.go new file mode 100644 index 0000000000..812b94d7fa --- /dev/null +++ b/internal/io/io_test.go @@ -0,0 +1,27 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package io + +import ( + "path" + "testing" + + "github.com/Flaque/filet" + "github.com/stretchr/testify/assert" +) + +func TestMkdirAll(t *testing.T) { + defer filet.CleanUp(t) + + tmpDir := filet.TmpDir(t, "") + + dir := path.Join(tmpDir, ".op", "compose", "services") + + err := MkdirAll(dir) + assert.Nil(t, err) + + e, _ := Exists(dir) + assert.True(t, e) +} diff --git a/internal/kibana/agents.go b/internal/kibana/agents.go new file mode 100644 index 0000000000..bad9c86cc6 --- /dev/null +++ b/internal/kibana/agents.go @@ -0,0 +1,252 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package kibana + +import ( + "context" + "encoding/json" + "fmt" + "strings" + + "github.com/elastic/e2e-testing/internal/elasticsearch" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" +) + +// Agent represents an Elastic Agent enrolled with fleet. 
+type Agent struct { + ID string `json:"id"` + PolicyID string `json:"policy_id"` + PolicyRevision int `json:"policy_revision,omitempty"` + LocalMetadata struct { + Host struct { + Name string `json:"name"` + HostName string `json:"hostname"` + } `json:"host"` + Elastic struct { + Agent struct { + Version string `json:"version"` + Snapshot bool `json:"snapshot"` + } `json:"agent"` + } `json:"elastic"` + } `json:"local_metadata"` + Status string `json:"status"` +} + +// GetAgentByHostname get an agent by the local_metadata.host.name property +func (c *Client) GetAgentByHostname(hostname string) (Agent, error) { + agents, err := c.ListAgents() + if err != nil { + return Agent{}, err + } + + for _, agent := range agents { + agentHostname := agent.LocalMetadata.Host.Name + // a hostname has an agentID by status + if agentHostname == hostname { + log.WithFields(log.Fields{ + "agent": agent, + }).Trace("Agent found") + return agent, nil + } + } + + return Agent{}, nil +} + +// GetAgentIDByHostname gets agent id by hostname +func (c *Client) GetAgentIDByHostname(hostname string) (string, error) { + agent, err := c.GetAgentByHostname(hostname) + if err != nil { + return "", err + } + log.WithFields(log.Fields{ + "agentId": agent.ID, + }).Trace("Agent Id found") + return agent.ID, nil +} + +// GetAgentStatusByHostname gets agent status by hostname +func (c *Client) GetAgentStatusByHostname(hostname string) (string, error) { + agentID, err := c.GetAgentIDByHostname(hostname) + if err != nil { + return "", err + } + + statusCode, respBody, err := c.get(fmt.Sprintf("%s/agents/%s", FleetAPI, agentID)) + if err != nil { + log.WithFields(log.Fields{ + "body": respBody, + "error": err, + "statusCode": statusCode, + }).Error("Could not get agent response") + return "", err + } + + var resp struct { + Item Agent `json:"item"` + } + + if err := json.Unmarshal(respBody, &resp); err != nil { + return "", errors.Wrap(err, "could not convert list agents (response) to JSON") + } + + 
log.WithFields(log.Fields{ + "agentStatus": resp.Item.Status, + }).Trace("Agent Status found") + return resp.Item.Status, nil +} + +// GetAgentEvents get events of agent +func (c *Client) GetAgentEvents(applicationName string, agentID string, packagePolicyID string, updatedAt string) error { + query := map[string]interface{}{ + "query": map[string]interface{}{ + "bool": map[string]interface{}{ + "filter": []interface{}{ + map[string]interface{}{ + "bool": map[string]interface{}{ + "should": []interface{}{ + map[string]interface{}{ + "match": map[string]interface{}{ + "elastic_agent.id": agentID, + }, + }, + }, + "minimum_should_match": 1, + }, + }, + map[string]interface{}{ + "bool": map[string]interface{}{ + "should": []interface{}{ + map[string]interface{}{ + "match": map[string]interface{}{ + "data_stream.dataset": "elastic_agent", + }, + }, + }, + "minimum_should_match": 1, + }, + }, + }, + }, + }, + } + + indexName := "logs-elastic_agent-default" + + searchResult, err := elasticsearch.Search(context.Background(), indexName, query) + if err != nil { + log.WithFields(log.Fields{ + "agentID": agentID, + "application": applicationName, + "result": searchResult, + "error": err, + "packagePolicyID": packagePolicyID, + }).Error("Could not get agent events from Fleet") + return err + } + + results := searchResult["hits"].(map[string]interface{})["hits"].([]interface{}) + + for _, result := range results { + if message, ok := result.(map[string]interface{})["_source"].(map[string]interface{})["message"].(string); ok { + timestamp := result.(map[string]interface{})["_source"].(map[string]interface{})["@timestamp"].(string) + log.WithFields(log.Fields{ + "agentID": agentID, + "application": applicationName, + "event_at": timestamp, + "message": message, + "packagePolicyID": packagePolicyID, + "updated_at": updatedAt, + }).Trace("Event found") + matches := (strings.Contains(message, applicationName) && + strings.Contains(message, "["+agentID+"]: State changed to") && + 
strings.Contains(message, "Protecting with policy {"+packagePolicyID+"}")) + + if matches && timestamp > updatedAt { + log.WithFields(log.Fields{ + "application": applicationName, + "event_at": timestamp, + "packagePolicyID": packagePolicyID, + "updated_at": updatedAt, + "message": message, + }).Info("Event after the update was found") + return nil + } + + } + } + + return fmt.Errorf("No %s events where found for the agent in the %s policy", applicationName, packagePolicyID) +} + +// ListAgents returns the list of agents enrolled with Fleet. +func (c *Client) ListAgents() ([]Agent, error) { + statusCode, respBody, err := c.get(fmt.Sprintf("%s/agents", FleetAPI)) + + if err != nil { + log.WithFields(log.Fields{ + "body": respBody, + "error": err, + }).Error("Could not get Fleet's online agents") + return nil, err + } + + if statusCode != 200 { + log.WithFields(log.Fields{ + "body": respBody, + "error": err, + "statusCode": statusCode, + }).Error("Could not get Fleet's online agents") + + return nil, err + } + + var resp struct { + List []Agent `json:"list"` + } + + if err := json.Unmarshal(respBody, &resp); err != nil { + return nil, errors.Wrap(err, "could not convert list agents (response) to JSON") + } + + return resp.List, nil + +} + +// UnEnrollAgent unenrolls agent from fleet +func (c *Client) UnEnrollAgent(hostname string) error { + agentID, err := c.GetAgentIDByHostname(hostname) + if err != nil { + return err + } + reqBody := `{"revoke": true}` + statusCode, respBody, _ := c.post(fmt.Sprintf("%s/agents/%s/unenroll", FleetAPI, agentID), []byte(reqBody)) + if statusCode != 200 { + return fmt.Errorf("could not unenroll agent; API status code = %d, response body = %s", statusCode, respBody) + } + return nil +} + +// UpgradeAgent upgrades an agent from to version +func (c *Client) UpgradeAgent(hostname string, version string) error { + agentID, err := c.GetAgentIDByHostname(hostname) + if err != nil { + return err + } + reqBody := `{"version":"` + version + `", 
"force": true}` + statusCode, respBody, err := c.post(fmt.Sprintf("%s/agents/%s/upgrade", FleetAPI, agentID), []byte(reqBody)) + if statusCode != 200 { + log.WithFields(log.Fields{ + "body": respBody, + "error": err, + "statusCode": statusCode, + }).Error("Could not upgrade agent") + + return err + } + return nil + +} diff --git a/internal/kibana/client.go b/internal/kibana/client.go new file mode 100644 index 0000000000..b357c5a7a4 --- /dev/null +++ b/internal/kibana/client.go @@ -0,0 +1,94 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package kibana + +import ( + "bytes" + "io/ioutil" + "net/http" + "net/url" + + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" +) + +// Client is responsible for exporting dashboards from Kibana. +type Client struct { + host string + username string + password string +} + +// NewClient creates a new instance of the client. 
+func NewClient() (*Client, error) { + host := BaseURL + username := "elastic" + password := "changeme" + + return &Client{ + host: host, + username: username, + password: password, + }, nil +} + +func (c *Client) get(resourcePath string) (int, []byte, error) { + return c.sendRequest(http.MethodGet, resourcePath, nil) +} + +func (c *Client) post(resourcePath string, body []byte) (int, []byte, error) { + return c.sendRequest(http.MethodPost, resourcePath, body) +} + +func (c *Client) put(resourcePath string, body []byte) (int, []byte, error) { + return c.sendRequest(http.MethodPut, resourcePath, body) +} + +func (c *Client) delete(resourcePath string) (int, []byte, error) { + return c.sendRequest(http.MethodDelete, resourcePath, nil) +} + +func (c *Client) sendRequest(method, resourcePath string, body []byte) (int, []byte, error) { + reqBody := bytes.NewReader(body) + base, err := url.Parse(c.host) + if err != nil { + return 0, nil, errors.Wrapf(err, "could not create base URL from host: %v", c.host) + } + + rel, err := url.Parse(resourcePath) + if err != nil { + return 0, nil, errors.Wrapf(err, "could not create relative URL from resource path: %v", resourcePath) + } + + u := base.ResolveReference(rel) + + log.WithFields(log.Fields{ + "method": method, + "url": u, + }).Trace("Kibana API Query") + + req, err := http.NewRequest(method, u.String(), reqBody) + if err != nil { + return 0, nil, errors.Wrapf(err, "could not create %v request to Kibana API resource: %s", method, resourcePath) + } + + req.SetBasicAuth(c.username, c.password) + req.Header.Add("content-type", "application/json") + req.Header.Add("kbn-xsrf", "e2e-tests") + + client := http.Client{} + resp, err := client.Do(req) + if err != nil { + return 0, nil, errors.Wrap(err, "could not send request to Kibana API") + } + + defer resp.Body.Close() + body, err = ioutil.ReadAll(resp.Body) + if err != nil { + return resp.StatusCode, nil, errors.Wrap(err, "could not read response body") + } + + return 
resp.StatusCode, body, nil +} diff --git a/internal/kibana/client_test.go b/internal/kibana/client_test.go new file mode 100644 index 0000000000..129dc931aa --- /dev/null +++ b/internal/kibana/client_test.go @@ -0,0 +1,24 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package kibana + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestGetBaseURL(t *testing.T) { + client, _ := NewClient() + assert.NotNil(t, client) + + assert.Equal(t, "http://localhost:5601", client.host) +} + +func TestNewClient(t *testing.T) { + client, _ := NewClient() + + assert.NotNil(t, client) +} diff --git a/internal/kibana/fleet.go b/internal/kibana/fleet.go new file mode 100644 index 0000000000..8ea654f6f2 --- /dev/null +++ b/internal/kibana/fleet.go @@ -0,0 +1,102 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package kibana + +import ( + "fmt" + + "github.com/elastic/e2e-testing/internal/common" + log "github.com/sirupsen/logrus" +) + +// FleetConfig represents the configuration for Fleet Server when building the enrollment command +type FleetConfig struct { + EnrollmentToken string + ElasticsearchPort int + ElasticsearchURI string + ElasticsearchCredentials string + KibanaPort int + KibanaURI string + FleetServerPort int + FleetServerURI string + // server + BootstrapFleetServer bool + ServerPolicyID string +} + +// NewFleetConfig builds a new configuration for the fleet agent, defaulting ES credentials, URI and port. 
+// If the 'bootstrapFleetServer' flag is true, then it will create the config for the initial fleet server
+// used to bootstrap Fleet Server
+// If the 'fleetServerMode' flag is true, then it will create the config for an agent using an existing Fleet
+// Server to connect to Fleet. It will also retrieve the default policy ID for fleet server
+func NewFleetConfig(token string, bootstrapFleetServer bool, fleetServerMode bool) (*FleetConfig, error) {
+	cfg := &FleetConfig{
+		BootstrapFleetServer:     bootstrapFleetServer,
+		EnrollmentToken:          token,
+		ElasticsearchCredentials: "elastic:changeme",
+		ElasticsearchPort:        9200,
+		ElasticsearchURI:         "elasticsearch",
+		KibanaPort:               5601,
+		KibanaURI:                "kibana",
+		FleetServerPort:          8220,
+		FleetServerURI:           "localhost",
+	}
+
+	client, err := NewClient()
+	if err != nil {
+		return cfg, err
+	}
+
+	if fleetServerMode {
+		defaultFleetServerPolicy, err := client.GetDefaultPolicy(true)
+		if err != nil {
+			return nil, err
+		}
+
+		cfg.ServerPolicyID = defaultFleetServerPolicy.ID
+
+		log.WithFields(log.Fields{
+			"elasticsearch":     cfg.ElasticsearchURI,
+			"elasticsearchPort": cfg.ElasticsearchPort,
+			"policyID":          cfg.ServerPolicyID,
+			"token":             cfg.EnrollmentToken,
+		}).Debug("Fleet Server config created")
+	}
+
+	return cfg, nil
+}
+
+// Flags bootstrap flags for fleet server
+func (cfg FleetConfig) Flags() []string {
+	if cfg.BootstrapFleetServer {
+		// TO-DO: remove all code to calculate the fleet-server policy, because it's inferred by the fleet-server
+		return []string{
+			"--force",
+			"--fleet-server-es", fmt.Sprintf("http://%s@%s:%d", cfg.ElasticsearchCredentials, cfg.ElasticsearchURI, cfg.ElasticsearchPort),
+		}
+	}
+
+	/*
+		// agent using an already bootstrapped fleet-server
+		fleetServerHost := "https://hostname_of_the_bootstrapped_fleet_server:8220"
+		return []string{
+			"-e", "-v", "--force", "--insecure",
+			// ensure the enrollment belongs to the default policy
+			"--enrollment-token=" + cfg.EnrollmentToken,
+			"--url", fleetServerHost,
+		
} + */ + + baseFlags := []string{"-e", "-v", "--force", "--insecure", "--enrollment-token=" + cfg.EnrollmentToken} + if common.AgentVersionBase == "8.0.0-SNAPSHOT" { + return append(baseFlags, "--url", fmt.Sprintf("http://%s@%s:%d", cfg.ElasticsearchCredentials, cfg.FleetServerURI, cfg.FleetServerPort)) + } + + if cfg.ServerPolicyID != "" { + baseFlags = append(baseFlags, "--fleet-server-insecure-http", "--fleet-server", fmt.Sprintf("http://%s@%s:%d", cfg.ElasticsearchCredentials, cfg.ElasticsearchURI, cfg.ElasticsearchPort), "--fleet-server-host=http://0.0.0.0", "--fleet-server-policy", cfg.ServerPolicyID) + } + + return append(baseFlags, "--kibana-url", fmt.Sprintf("http://%s@%s:%d", cfg.ElasticsearchCredentials, cfg.KibanaURI, cfg.KibanaPort)) +} diff --git a/internal/kibana/integrations.go b/internal/kibana/integrations.go new file mode 100644 index 0000000000..2c63dea9a4 --- /dev/null +++ b/internal/kibana/integrations.go @@ -0,0 +1,313 @@ +package kibana + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/Jeffail/gabs/v2" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" +) + +// IntegrationPackage used to share information about a integration +type IntegrationPackage struct { + ID string `json:"id,omitempty"` + Name string `json:"name"` + Title string `json:"title"` + Version string `json:"version"` +} + +// AddIntegrationToPolicy adds an integration to policy +func (c *Client) AddIntegrationToPolicy(packageDS PackageDataStream) error { + reqBody, err := json.Marshal(packageDS) + if err != nil { + return errors.Wrap(err, "could not convert policy-package (request) to JSON") + } + + statusCode, respBody, err := c.post(fmt.Sprintf("%s/package_policies", FleetAPI), reqBody) + if err != nil { + return errors.Wrap(err, "could not add package to policy") + } + + if statusCode != 200 { + return fmt.Errorf("could not add package to policy; API status code = %d; response body = %s", statusCode, respBody) + } + return nil +} + +// 
DeleteIntegrationFromPolicy adds an integration to policy +func (c *Client) DeleteIntegrationFromPolicy(packageDS PackageDataStream) error { + reqBody := `{"packagePolicyIds":["` + packageDS.ID + `"]}` + statusCode, respBody, err := c.post(fmt.Sprintf("%s/package_policies/delete", FleetAPI), []byte(reqBody)) + if err != nil { + return errors.Wrap(err, "could not delete integration from policy") + } + + if statusCode != 200 { + return fmt.Errorf("could not delete integration from policy; API status code = %d; response body = %s", statusCode, respBody) + } + return nil +} + +// GetIntegrations returns all available integrations +func (c *Client) GetIntegrations() ([]IntegrationPackage, error) { + statusCode, respBody, err := c.get(fmt.Sprintf("%s/epm/packages", FleetAPI)) + + if err != nil { + log.WithFields(log.Fields{ + "body": respBody, + "error": err, + }).Error("Could not get Integration package") + return []IntegrationPackage{}, err + } + + if statusCode != 200 { + log.WithFields(log.Fields{ + "body": respBody, + "error": err, + "statusCode": statusCode, + }).Error("Could not get Fleet's installed integrations") + + return nil, err + } + + jsonParsed, err := gabs.ParseJSON([]byte(respBody)) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "responseBody": jsonParsed, + }).Error("Could not parse get response into JSON") + return []IntegrationPackage{}, err + } + + var resp struct { + Packages []IntegrationPackage `json:"response"` + } + + if err := json.Unmarshal(respBody, &resp); err != nil { + return []IntegrationPackage{}, errors.Wrap(err, "Unable to convert integration package to JSON") + } + + return resp.Packages, nil + +} + +// GetIntegrationByPackageName returns metadata from an integration from Fleet +func (c *Client) GetIntegrationByPackageName(packageName string) (IntegrationPackage, error) { + integrationPackages, err := c.GetIntegrations() + if err != nil { + log.WithFields(log.Fields{ + "error": err, + }).Error("Could not get 
Integration packages list") + return IntegrationPackage{}, err + } + + for _, pkg := range integrationPackages { + if strings.EqualFold(pkg.Name, packageName) || strings.EqualFold(pkg.Title, packageName) { + return pkg, nil + } + } + + return IntegrationPackage{}, errors.New("Unable to find package") +} + +// GetIntegrationFromAgentPolicy get package policy from agent policy +func (c *Client) GetIntegrationFromAgentPolicy(packageName string, policy Policy) (PackageDataStream, error) { + packagePolicies, err := c.ListPackagePolicies() + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "policy": policy, + }).Trace("An error retrieving the package policies") + return PackageDataStream{}, err + } + + for _, child := range packagePolicies { + if policy.ID == child.PolicyID && (strings.EqualFold(packageName, child.Name) || strings.EqualFold(packageName, child.Package.Title)) { + return child, nil + } + } + + return PackageDataStream{}, errors.New("Unable to find package in policy") +} + +// SecurityEndpoint endpoint metadata +type SecurityEndpoint struct { + Metadata struct { + Status string `json:"host_status"` + Host struct { + Hostname string `json:"hostname"` + Name string `json:"name"` + } `json:"host"` + Elastic struct { + Agent struct { + ID string `json:"id"` + Version string `json:"version"` + } `json:"agent"` + } `json:"elastic"` + Endpoint struct { + Policy struct { + Applied struct { + Name string `json:"name"` + Status string `json:"status"` + } `json:"applied"` + } `json:"policy"` + } `json:"Endpoint"` + } `json:"metadata"` +} + +// GetMetadataFromSecurityApp sends a POST request to retrieve metadata from Security App +func (c *Client) GetMetadataFromSecurityApp() ([]SecurityEndpoint, error) { + reqBody := `{}` + statusCode, respBody, err := c.post(fmt.Sprintf("%s/metadata", EndpointAPI), []byte(reqBody)) + if err != nil { + return []SecurityEndpoint{}, errors.Wrap(err, "could not get endpoint metadata") + } + + jsonParsed, _ := 
gabs.ParseJSON([]byte(respBody)) + log.WithFields(log.Fields{ + "responseBody": jsonParsed, + }).Trace("Endpoint Metadata Response") + + if statusCode != 200 { + return []SecurityEndpoint{}, fmt.Errorf("could not get endpoint metadata; API status code = %d; response body = %s", statusCode, respBody) + } + + var resp struct { + Hosts []SecurityEndpoint `json:"hosts"` + } + + if err := json.Unmarshal(respBody, &resp); err != nil { + return []SecurityEndpoint{}, errors.Wrap(err, "Unable to convert metadata from security app to JSON") + } + + return resp.Hosts, nil +} + +// InstallIntegrationAssets sends a POST request to Fleet installing the assets for an integration +func (c *Client) InstallIntegrationAssets(integration IntegrationPackage) (string, error) { + reqBody := `{}` + statusCode, respBody, err := c.post(fmt.Sprintf("%s/epm/packages/%s-%s", FleetAPI, integration.Name, integration.Version), []byte(reqBody)) + if err != nil { + return "", errors.Wrap(err, "could not install integration assets") + } + + if statusCode != 200 { + return "", fmt.Errorf("could not install integration assets; API status code = %d; response body = %s", statusCode, respBody) + } + + var resp struct { + Response struct { + ID string `json:"id"` + } `json:"response"` + } + + if err := json.Unmarshal(respBody, &resp); err != nil { + return "", errors.Wrap(err, "Unable to convert install integration assets to JSON") + } + + return resp.Response.ID, nil +} + +// IsAgentListedInSecurityApp retrieves the hosts from Endpoint to check if a hostname +// is listed in the Security App. For that, we will inspect the metadata, and will iterate +// through the hosts, until we get the proper hostname. 
+func (c *Client) IsAgentListedInSecurityApp(hostName string) (SecurityEndpoint, error) { + hosts, err := c.GetMetadataFromSecurityApp() + if err != nil { + return SecurityEndpoint{}, err + } + + for _, host := range hosts { + metadataHostname := host.Metadata.Host.Hostname + if metadataHostname == hostName { + log.WithFields(log.Fields{ + "hostname": hostName, + }).Debug("Hostname for the agent listed in the Security App") + + return host, nil + } + } + + return SecurityEndpoint{}, nil +} + +// IsAgentListedInSecurityAppWithStatus inspects the metadata field for a hostname, obtained from +// the security App. We will check if the status matches the desired status, returning an error +// if the agent is not present in the Security App +func (c *Client) IsAgentListedInSecurityAppWithStatus(hostName string, desiredStatus string) (bool, error) { + host, err := c.IsAgentListedInSecurityApp(hostName) + if err != nil { + log.WithFields(log.Fields{ + "hostname": hostName, + "error": err, + }).Error("There was an error getting the agent in the Administration view in the Security app") + return false, err + } + + hostStatus := host.Metadata.Status + log.WithFields(log.Fields{ + "desiredStatus": desiredStatus, + "hostname": hostName, + "status": hostStatus, + }).Debug("Hostname for the agent listed with desired status in the Administration view in the Security App") + + return (hostStatus == desiredStatus), nil +} + +// IsPolicyResponseListedInSecurityApp sends a POST request to Endpoint to check if a hostname +// is listed in the Security App. For that, we will inspect the metadata, and will iterate +// through the hosts, until we get the policy status, finally checking for the success +// status. 
+func (c *Client) IsPolicyResponseListedInSecurityApp(agentID string) (bool, error) { + hosts, err := c.GetMetadataFromSecurityApp() + if err != nil { + return false, err + } + + for _, host := range hosts { + metadataAgentID := host.Metadata.Elastic.Agent.ID + name := host.Metadata.Endpoint.Policy.Applied.Name + status := host.Metadata.Endpoint.Policy.Applied.Status + if metadataAgentID == agentID { + log.WithFields(log.Fields{ + "agentID": agentID, + "name": name, + "status": status, + }).Debug("Policy response for the agent listed in the Security App") + + return (status == "success"), nil + } + } + + return false, nil +} + +// UpdateIntegrationPackagePolicy sends a PUT request to Fleet updating integration +// configuration +func (c *Client) UpdateIntegrationPackagePolicy(packageDS PackageDataStream) (string, error) { + // empty the ID as it won't be recoganized in the PUT body + id := packageDS.ID + packageDS.ID = "" + reqBody, _ := json.Marshal(packageDS) + statusCode, respBody, err := c.put(fmt.Sprintf("%s/package_policies/%s", FleetAPI, id), reqBody) + if err != nil { + return "", errors.Wrap(err, "could not update integration package") + } + + if statusCode != 200 { + return "", fmt.Errorf("could not update package ; API status code = %d; response body = %s", statusCode, respBody) + } + var resp struct { + Item struct { + UpdatedAt string `json:"updated_at"` + } `json:"item"` + } + + if err := json.Unmarshal(respBody, &resp); err != nil { + return "", errors.Wrap(err, "Unable to convert install updated package policy to JSON") + } + + return resp.Item.UpdatedAt, nil +} diff --git a/internal/kibana/policies.go b/internal/kibana/policies.go new file mode 100644 index 0000000000..7e7402b416 --- /dev/null +++ b/internal/kibana/policies.go @@ -0,0 +1,149 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package kibana + +import ( + "encoding/json" + "fmt" + + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" +) + +// Policy represents an Ingest Manager policy. +type Policy struct { + ID string `json:"id,omitempty"` + Name string `json:"name"` + Description string `json:"description"` + Namespace string `json:"namespace"` + IsDefault bool `json:"is_default"` + IsManaged bool `json:"is_managed"` + IsDefaultFleetServer bool `json:"is_default_fleet_server"` + AgentsCount int `json:"agents"` // Number of agents connected to Policy + Status string `json:"status"` +} + +// GetDefaultPolicy gets the default policy or optionally the default fleet policy +func (c *Client) GetDefaultPolicy(fleetServer bool) (Policy, error) { + policies, err := c.ListPolicies() + if err != nil { + return Policy{}, err + } + + for _, policy := range policies { + if fleetServer && policy.IsDefaultFleetServer { + log.WithField("policy", policy).Trace("Returning Default Fleet Server Policy") + return policy, nil + } else if !fleetServer && policy.IsDefault { + log.WithField("policy", policy).Trace("Returning Default Agent Policy") + return policy, nil + } + } + return Policy{}, errors.New("Could not obtain default policy") +} + +// ListPolicies returns the list of policies +func (c *Client) ListPolicies() ([]Policy, error) { + statusCode, respBody, err := c.get(fmt.Sprintf("%s/agent_policies", FleetAPI)) + + if err != nil { + log.WithFields(log.Fields{ + "body": respBody, + "error": err, + }).Error("Could not get Fleet's policies") + return nil, err + } + + if statusCode != 200 { + log.WithFields(log.Fields{ + "error": err, + "statusCode": statusCode, + }).Error("Could not get Fleet's policies") + + return nil, err + } + + var resp struct { + Items []Policy `json:"items"` + } + if err := json.Unmarshal(respBody, &resp); err != nil { + return nil, errors.Wrap(err, "Unable to 
convert list of policies to JSON") + } + + return resp.Items, nil +} + +// Var represents a single variable at the package or +// data stream level, encapsulating the data type of the +// variable and it's value. +type Var struct { + Value interface{} `json:"value"` + Type string `json:"type"` +} + +// Vars is a collection of variables either at the package or +// data stream level. +type Vars map[string]Var + +// DataStream represents a data stream within a package. +type DataStream struct { + Type string `json:"type"` + Dataset string `json:"dataset"` +} + +// Input represents a package-level input. +type Input struct { + Type string `json:"type"` + Enabled bool `json:"enabled"` + Streams []interface{} `json:"streams"` + Vars Vars `json:"vars,omitempty"` + Config interface{} `json:"config,omitempty"` +} + +// PackageDataStream represents a request to add a single package's single data stream to a +// Policy in Ingest Manager. +type PackageDataStream struct { + ID string `json:"id,omitempty"` + Name string `json:"name"` + Description string `json:"description"` + Namespace string `json:"namespace"` + PolicyID string `json:"policy_id"` + Enabled bool `json:"enabled"` + OutputID string `json:"output_id"` + Inputs []Input `json:"inputs"` + Package IntegrationPackage `json:"package"` +} + +// ListPackagePolicies return list of package policies +func (c *Client) ListPackagePolicies() ([]PackageDataStream, error) { + statusCode, respBody, err := c.get(fmt.Sprintf("%s/package_policies", FleetAPI)) + + if err != nil { + log.WithFields(log.Fields{ + "body": respBody, + "error": err, + }).Error("Could not get Fleet's package policies") + return nil, err + } + + if statusCode != 200 { + log.WithFields(log.Fields{ + "error": err, + "statusCode": statusCode, + }).Error("Could not get Fleet's package policies") + + return nil, err + } + + var resp struct { + Items []PackageDataStream `json:"items"` + } + + if err := json.Unmarshal(respBody, &resp); err != nil { + return nil, 
errors.Wrap(err, "Unable to convert list of package policies to JSON") + } + + return resp.Items, nil +} diff --git a/internal/kibana/server.go b/internal/kibana/server.go new file mode 100644 index 0000000000..cd37d7ae92 --- /dev/null +++ b/internal/kibana/server.go @@ -0,0 +1,304 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package kibana + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "github.com/Jeffail/gabs/v2" + "github.com/cenkalti/backoff/v4" + "github.com/elastic/e2e-testing/internal/common" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + "go.elastic.co/apm" +) + +// EnrollmentAPIKey struct for holding enrollment response +type EnrollmentAPIKey struct { + Active bool `json:"active"` + APIKey string `json:"api_key"` + APIKeyID string `json:"api_key_id"` + ID string `json:"id"` + Name string `json:"name"` + PolicyID string `json:"policy_id"` +} + +// CreateEnrollmentAPIKey creates an enrollment api key +func (c *Client) CreateEnrollmentAPIKey(policy Policy) (EnrollmentAPIKey, error) { + + reqBody := `{"policy_id": "` + policy.ID + `"}` + statusCode, respBody, _ := c.post(fmt.Sprintf("%s/enrollment-api-keys", FleetAPI), []byte(reqBody)) + if statusCode != 200 { + jsonParsed, err := gabs.ParseJSON([]byte(respBody)) + log.WithFields(log.Fields{ + "body": jsonParsed, + "reqBody": reqBody, + "error": err, + "statusCode": statusCode, + }).Error("Could not create enrollment api key") + + return EnrollmentAPIKey{}, err + } + + var resp struct { + Enrollment EnrollmentAPIKey `json:"item"` + } + + if err := json.Unmarshal(respBody, &resp); err != nil { + return EnrollmentAPIKey{}, errors.Wrap(err, "Unable to convert enrollment response to JSON") + } + + return resp.Enrollment, nil +} + +// DeleteEnrollmentAPIKey deletes the enrollment api key 
+func (c *Client) DeleteEnrollmentAPIKey(enrollmentID string) error { + statusCode, respBody, err := c.delete(fmt.Sprintf("%s/enrollment-api-keys/%s", FleetAPI, enrollmentID)) + + if err != nil { + log.WithFields(log.Fields{ + "body": respBody, + "error": err, + }).Error("Could not delete enrollment key") + return err + } + + if statusCode != 200 { + log.WithFields(log.Fields{ + "body": respBody, + "error": err, + "statusCode": statusCode, + }).Error("Could not delete enrollment key") + + return err + } + return nil +} + +// GetDataStreams get data streams from deployed agents +func (c *Client) GetDataStreams() (*gabs.Container, error) { + statusCode, respBody, err := c.get(fmt.Sprintf("%s/data_streams", FleetAPI)) + + if err != nil { + log.WithFields(log.Fields{ + "body": respBody, + "error": err, + }).Error("Could not get Fleet data streams") + return &gabs.Container{}, err + } + + if statusCode != 200 { + log.WithFields(log.Fields{ + "body": respBody, + "error": err, + "statusCode": statusCode, + }).Error("Could not get Fleet data streams api") + + return &gabs.Container{}, err + } + + jsonParsed, err := gabs.ParseJSON([]byte(respBody)) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "responseBody": jsonParsed, + }).Error("Could not parse response into JSON") + return nil, err + } + + // data streams should contain array of elements + dataStreams := jsonParsed.Path("data_streams") + + log.WithFields(log.Fields{ + "count": len(dataStreams.Children()), + }).Debug("Data Streams retrieved") + + return dataStreams, nil +} + +// ListEnrollmentAPIKeys list the enrollment api keys +func (c *Client) ListEnrollmentAPIKeys() ([]EnrollmentAPIKey, error) { + statusCode, respBody, err := c.get(fmt.Sprintf("%s/enrollment-api-keys", FleetAPI)) + + if err != nil { + log.WithFields(log.Fields{ + "body": respBody, + "error": err, + }).Error("Could not get Integration package") + return []EnrollmentAPIKey{}, err + } + + if statusCode != 200 { + 
log.WithFields(log.Fields{ + "body": respBody, + "error": err, + "statusCode": statusCode, + }).Error("Could not get enrollment apis") + + return []EnrollmentAPIKey{}, err + } + + var resp struct { + List []EnrollmentAPIKey `json:"list"` + } + + if err := json.Unmarshal(respBody, &resp); err != nil { + return nil, errors.Wrap(err, "Unable to convert list of enrollment apis to JSON") + } + + return resp.List, nil + +} + +// RecreateFleet this will force recreate the fleet configuration +func (c *Client) RecreateFleet() error { + waitForFleet := func() error { + reqBody := `{ "forceRecreate": true }` + statusCode, respBody, err := c.post(fmt.Sprintf("%s/setup", FleetAPI), []byte(reqBody)) + if err != nil { + log.WithFields(log.Fields{ + "body": respBody, + "error": err, + "statusCode": statusCode, + }).Error("Could not initialise Fleet setup") + return err + } + + jsonResponse, err := gabs.ParseJSON([]byte(respBody)) + if err != nil { + log.WithFields(log.Fields{ + "body": jsonResponse, + "error": err, + "statusCode": statusCode, + }).Error("Could not parse JSON response") + return err + } + + if statusCode != 200 { + log.WithFields(log.Fields{ + "statusCode": statusCode, + "body": jsonResponse, + }).Warn("Fleet not ready") + return errors.New("Fleet not ready") + } + + log.WithFields(log.Fields{ + "body": jsonResponse, + "statusCode": statusCode, + }).Info("Fleet setup done") + return nil + } + maxTimeout := time.Duration(common.TimeoutFactor) * time.Minute * 2 + exp := common.GetExponentialBackOff(maxTimeout) + + err := backoff.Retry(waitForFleet, exp) + if err != nil { + return err + } + return nil +} + +// WaitForFleet waits for fleet server to be ready +func (c *Client) WaitForFleet() error { + waitForFleet := func() error { + statusCode, respBody, err := c.get(fmt.Sprintf("%s/agents/setup", FleetAPI)) + if err != nil { + log.WithFields(log.Fields{ + "body": respBody, + "error": err, + "statusCode": statusCode, + }).Error("Could not verify Fleet is setup and 
ready") + return err + } + if statusCode != 200 { + log.WithFields(log.Fields{ + "statusCode": statusCode, + }).Warn("Fleet not ready") + return err + } + + jsonResponse, err := gabs.ParseJSON([]byte(respBody)) + if err != nil { + log.WithFields(log.Fields{ + "body": jsonResponse, + "error": err, + "statusCode": statusCode, + }).Error("Could not parse JSON response") + return err + } + + isReady := jsonResponse.Path("isReady").Data().(bool) + if !isReady { + log.WithFields(log.Fields{ + "body": jsonResponse, + "error": err, + "statusCode": statusCode, + }).Error("Kibana has not been initialized") + return errors.New("Kibana has not been initialized") + } + log.Info("Kibana setup initialized") + return nil + } + maxTimeout := time.Duration(common.TimeoutFactor) * time.Minute * 2 + exp := common.GetExponentialBackOff(maxTimeout) + + err := backoff.Retry(waitForFleet, exp) + if err != nil { + return err + } + return nil + +} + +// WaitForReady waits for Kibana to be healthy and accept connections +func (c *Client) WaitForReady(maxTimeoutMinutes time.Duration) (bool, error) { + maxTimeout := time.Duration(common.TimeoutFactor) * time.Minute * 2 + exp := common.GetExponentialBackOff(maxTimeout) + + ctx := context.Background() + + retryCount := 1 + + kibanaStatus := func() error { + span, _ := apm.StartSpanOptions(ctx, "Health", "kibana.health", apm.SpanOptions{ + Parent: apm.SpanFromContext(ctx).TraceContext(), + }) + defer span.End() + + statusCode, respBody, err := c.get("status") + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "statusCode": statusCode, + "respBody": respBody, + "retry": retryCount, + "statusEndpoint": fmt.Sprintf("%s/status", BaseURL), + "elapsedTime": exp.GetElapsedTime(), + }).Warn("The Kibana instance is not healthy yet") + + retryCount++ + + return err + } + + log.WithFields(log.Fields{ + "retries": retryCount, + "statusEndpoint": fmt.Sprintf("%s/status", BaseURL), + "elapsedTime": exp.GetElapsedTime(), + }).Info("The Kibana 
instance is healthy") + + return nil + } + + err := backoff.Retry(kibanaStatus, exp) + if err != nil { + return false, err + } + + return true, nil +} diff --git a/internal/kibana/url_prefixes.go b/internal/kibana/url_prefixes.go new file mode 100644 index 0000000000..62524f1768 --- /dev/null +++ b/internal/kibana/url_prefixes.go @@ -0,0 +1,16 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package kibana + +const ( + // BaseURL Kibana host address + BaseURL = "http://localhost:5601" + + // FleetAPI is the prefix for all Kibana Fleet API resources. + FleetAPI = "/api/fleet" + + // EndpointAPI is the endpoint API + EndpointAPI = "/api/endpoint" +) diff --git a/cli/services/kubectl.go b/internal/kubectl/kubectl.go similarity index 98% rename from cli/services/kubectl.go rename to internal/kubectl/kubectl.go index 04c1b576cb..df40e655a4 100644 --- a/cli/services/kubectl.go +++ b/internal/kubectl/kubectl.go @@ -2,14 +2,14 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. -package services +package kubectl import ( "context" "encoding/json" "strings" - "github.com/elastic/e2e-testing/cli/shell" + "github.com/elastic/e2e-testing/internal/shell" "go.elastic.co/apm" ) diff --git a/cli/services/sanitizer.go b/internal/sanitizer/sanitizer.go similarity index 88% rename from cli/services/sanitizer.go rename to internal/sanitizer/sanitizer.go index b50d08ffbb..f5a9064c23 100644 --- a/cli/services/sanitizer.go +++ b/internal/sanitizer/sanitizer.go @@ -1,4 +1,8 @@ -package services +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package sanitizer import ( "strings" diff --git a/cli/services/sanitizer_test.go b/internal/sanitizer/sanitizer_test.go similarity index 77% rename from cli/services/sanitizer_test.go rename to internal/sanitizer/sanitizer_test.go index 19c0f52951..24b02e3014 100644 --- a/cli/services/sanitizer_test.go +++ b/internal/sanitizer/sanitizer_test.go @@ -1,4 +1,8 @@ -package services +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package sanitizer import ( "testing" diff --git a/cli/shell/shell.go b/internal/shell/shell.go similarity index 100% rename from cli/shell/shell.go rename to internal/shell/shell.go diff --git a/cli/shell/shell_test.go b/internal/shell/shell_test.go similarity index 100% rename from cli/shell/shell_test.go rename to internal/shell/shell_test.go diff --git a/internal/state/state.go b/internal/state/state.go new file mode 100644 index 0000000000..7924233cb4 --- /dev/null +++ b/internal/state/state.go @@ -0,0 +1,120 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package state + +import ( + "os" + "path/filepath" + "strings" + + "github.com/elastic/e2e-testing/internal/io" + log "github.com/sirupsen/logrus" + + "gopkg.in/yaml.v2" +) + +// stateRun represents a Run +type stateRun struct { + ID string // ID of the run + Profile stateService // profile of the run (Optional) + Env map[string]string // environment for the run + Services []stateService // services in the run +} + +// stateService represents a service in a Run +type stateService struct { + Name string +} + +// Recover recovers the state for a run +func Recover(id string, workdir string) map[string]string { + run := stateRun{ + Env: map[string]string{}, + } + + stateFile := filepath.Join(workdir, id+".run") + bytes, err := io.ReadFile(stateFile) //nolint + if err != nil { + return run.Env + } + + err = yaml.Unmarshal(bytes, &run) + if err != nil { + log.WithFields(log.Fields{ + "stateFile": stateFile, + }).Error("Could not unmarshal state") + } + + return run.Env +} + +// Destroy destroys the state for a run +func Destroy(id string, workdir string) { + stateFile := filepath.Join(workdir, id+".run") + err := os.Remove(stateFile) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "stateFile": stateFile, + }).Warn("Could not destroy state") + + return + } + + log.WithFields(log.Fields{ + "stateFile": stateFile, + }).Trace("State destroyed") +} + +// Update updates the state of en execution, using ID as the file name for the run. +// The state file will be located under 'workdir', which by default will be the tool's +// workspace. 
+func Update(id string, workdir string, composeFilePaths []string, env map[string]string) { + stateFile := filepath.Join(workdir, id+".run") + + log.WithFields(log.Fields{ + "dir": workdir, + "stateFile": stateFile, + }).Trace("Updating state") + + run := stateRun{ + ID: id, + Env: env, + Services: []stateService{}, + } + + if strings.HasSuffix(id, "-profile") { + run.Profile = stateService{ + Name: filepath.Base(filepath.Dir(composeFilePaths[0])), + } + } + + for i, f := range composeFilePaths { + if i > 0 { + run.Services = append(run.Services, stateService{ + Name: filepath.Base(filepath.Dir(f)), + }) + } + } + + bytes, err := yaml.Marshal(&run) + if err != nil { + log.WithFields(log.Fields{ + "stateFile": stateFile, + }).Error("Could not marshal state") + } + + err = io.WriteFile(bytes, stateFile) //nolint + if err != nil { + log.WithFields(log.Fields{ + "stateFile": stateFile, + }).Error("Could not create state file") + } + + log.WithFields(log.Fields{ + "dir": workdir, + "stateFile": stateFile, + }).Trace("State updated") +} diff --git a/internal/state/state_test.go b/internal/state/state_test.go new file mode 100644 index 0000000000..11d7874236 --- /dev/null +++ b/internal/state/state_test.go @@ -0,0 +1,70 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package state + +import ( + "path/filepath" + "testing" + + "github.com/Flaque/filet" + "github.com/elastic/e2e-testing/internal/io" + "github.com/stretchr/testify/assert" +) + +func TestRecover(t *testing.T) { + defer filet.CleanUp(t) + + tmpDir := filet.TmpDir(t, "") + + workspace := filepath.Join(tmpDir, ".op") + + ID := "myprofile-profile" + composeFiles := []string{ + filepath.Join(workspace, "compose/services/a/1.yml"), + filepath.Join(workspace, "compose/services/b/2.yml"), + filepath.Join(workspace, "compose/services/c/3.yml"), + filepath.Join(workspace, "compose/services/d/4.yml"), + } + initialEnv := map[string]string{ + "foo": "bar", + } + + _ = io.MkdirAll(workspace) + + Update(ID, workspace, composeFiles, initialEnv) + + runFile := filepath.Join(workspace, ID+".run") + e, _ := io.Exists(runFile) + assert.True(t, e) + + env := Recover(ID, workspace) + + value, e := env["foo"] + assert.True(t, e) + assert.Equal(t, "bar", value) +} + +func TestUpdateCreatesStateFile(t *testing.T) { + defer filet.CleanUp(t) + + tmpDir := filet.TmpDir(t, "") + + workspace := filepath.Join(tmpDir, ".op") + + ID := "myprofile-profile" + composeFiles := []string{ + filepath.Join(workspace, "compose/services/a/1.yml"), + filepath.Join(workspace, "compose/services/b/2.yml"), + filepath.Join(workspace, "compose/services/c/3.yml"), + filepath.Join(workspace, "compose/services/d/4.yml"), + } + runFile := filepath.Join(workspace, ID+".run") + _ = io.MkdirAll(runFile) + + Update(ID, workspace, composeFiles, map[string]string{}) + + e, _ := io.Exists(runFile) + assert.True(t, e) +} diff --git a/e2e/utils.go b/internal/utils/utils.go similarity index 83% rename from e2e/utils.go rename to internal/utils/utils.go index 1d67b7ca32..3c2a877930 100644 --- a/e2e/utils.go +++ b/internal/utils/utils.go @@ -2,10 +2,9 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. 
-package e2e +package utils import ( - "context" "fmt" "io" "io/ioutil" @@ -19,9 +18,9 @@ import ( "github.com/Jeffail/gabs/v2" backoff "github.com/cenkalti/backoff/v4" - "github.com/elastic/e2e-testing/cli/docker" - "github.com/elastic/e2e-testing/cli/shell" - curl "github.com/elastic/e2e-testing/cli/shell" + "github.com/elastic/e2e-testing/internal/common" + curl "github.com/elastic/e2e-testing/internal/curl" + "github.com/elastic/e2e-testing/internal/shell" log "github.com/sirupsen/logrus" ) @@ -170,32 +169,12 @@ func getGCPBucketCoordinates(fileName string, artifact string, version string, f return bucket, prefix, object } -// GetExponentialBackOff returns a preconfigured exponential backoff instance -func GetExponentialBackOff(elapsedTime time.Duration) *backoff.ExponentialBackOff { - var ( - initialInterval = 500 * time.Millisecond - randomizationFactor = 0.5 - multiplier = 2.0 - maxInterval = 5 * time.Second - maxElapsedTime = elapsedTime - ) - - exp := backoff.NewExponentialBackOff() - exp.InitialInterval = initialInterval - exp.RandomizationFactor = randomizationFactor - exp.Multiplier = multiplier - exp.MaxInterval = maxInterval - exp.MaxElapsedTime = maxElapsedTime - - return exp -} - // GetElasticArtifactVersion returns the current version: // 1. Elastic's artifact repository, building the JSON path query based // If the version is a PR, then it will return the version without checking the artifacts API // i.e. GetElasticArtifactVersion("$VERSION") func GetElasticArtifactVersion(version string) (string, error) { - exp := GetExponentialBackOff(time.Minute) + exp := common.GetExponentialBackOff(time.Minute) retryCount := 1 @@ -264,7 +243,7 @@ func GetElasticArtifactVersion(version string) (string, error) { // i.e. GetElasticArtifactURL("elastic-agent-$VERSION-x86_64.rpm", "elastic-agent","$VERSION") // i.e. 
GetElasticArtifactURL("elastic-agent-$VERSION-linux-amd64.tar.gz", "elastic-agent","$VERSION") func GetElasticArtifactURL(artifactName string, artifact string, version string) (string, error) { - exp := GetExponentialBackOff(time.Minute) + exp := common.GetExponentialBackOff(time.Minute) retryCount := 1 @@ -328,7 +307,7 @@ func GetElasticArtifactURL(artifactName string, artifact string, version string) // GetObjectURLFromBucket extracts the media URL for the desired artifact from the // Google Cloud Storage bucket used by the CI to push snapshots func GetObjectURLFromBucket(bucket string, prefix string, object string, maxtimeout time.Duration) (string, error) { - exp := GetExponentialBackOff(maxtimeout) + exp := common.GetExponentialBackOff(maxtimeout) retryCount := 1 @@ -452,7 +431,7 @@ func DownloadFile(url string) (string, error) { filepath := tempFile.Name() - exp := GetExponentialBackOff(3) + exp := common.GetExponentialBackOff(3) retryCount := 1 var fileReader io.ReadCloser @@ -583,90 +562,3 @@ func GetDockerNamespaceEnvVar(fallback string) string { } return fallback } - -// WaitForProcess polls a container executing "ps" command until the process is in the desired state (present or not), -// or a timeout happens -func WaitForProcess(containerName string, process string, desiredState string, maxTimeout time.Duration) error { - exp := GetExponentialBackOff(maxTimeout) - - mustBePresent := false - if desiredState == "started" { - mustBePresent = true - } - retryCount := 1 - - processStatus := func() error { - log.WithFields(log.Fields{ - "desiredState": desiredState, - "process": process, - }).Trace("Checking process desired state on the container") - - output, err := docker.ExecCommandIntoContainer(context.Background(), containerName, "root", []string{"pgrep", "-n", "-l", "-f", process}) - if err != nil { - log.WithFields(log.Fields{ - "desiredState": desiredState, - "elapsedTime": exp.GetElapsedTime(), - "error": err, - "container": containerName, - 
"mustBePresent": mustBePresent, - "process": process, - "retry": retryCount, - }).Warn("Could not execute 'pgrep -n -l -f' in the container") - - retryCount++ - - return err - } - - outputContainsProcess := strings.Contains(output, process) - - // both true or both false - if mustBePresent == outputContainsProcess { - log.WithFields(log.Fields{ - "desiredState": desiredState, - "container": containerName, - "mustBePresent": mustBePresent, - "process": process, - }).Infof("Process desired state checked") - - return nil - } - - if mustBePresent { - err = fmt.Errorf("%s process is not running in the container yet", process) - log.WithFields(log.Fields{ - "desiredState": desiredState, - "elapsedTime": exp.GetElapsedTime(), - "error": err, - "container": containerName, - "process": process, - "retry": retryCount, - }).Warn(err.Error()) - - retryCount++ - - return err - } - - err = fmt.Errorf("%s process is still running in the container", process) - log.WithFields(log.Fields{ - "elapsedTime": exp.GetElapsedTime(), - "error": err, - "container": containerName, - "process": process, - "state": desiredState, - "retry": retryCount, - }).Warn(err.Error()) - - retryCount++ - - return err - } - - err := backoff.Retry(processStatus, exp) - if err != nil { - return err - } - - return nil -} diff --git a/e2e/utils_test.go b/internal/utils/utils_test.go similarity index 99% rename from e2e/utils_test.go rename to internal/utils/utils_test.go index 0d1d154c43..9516ef75d5 100644 --- a/e2e/utils_test.go +++ b/internal/utils/utils_test.go @@ -1,4 +1,4 @@ -package e2e +package utils import ( "io/ioutil"