-
Notifications
You must be signed in to change notification settings - Fork 42
feat: support flavours in services, especially in the elastic-agent #1162
Changes from 13 commits
7a46fca
41db6b0
6879808
590b31e
463ff9c
9c9768d
fd1542b
0fe7826
1922901
64ac3b1
79ad38d
c34a4e0
50054f7
c47bf82
8dd9fd9
423d28a
eaa88c3
e23947f
5b011e0
18a8114
ebd5ea9
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
This file was deleted.
This file was deleted.
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -15,9 +15,7 @@ import ( | |
"github.com/cenkalti/backoff/v4" | ||
"github.com/cucumber/godog" | ||
"github.com/elastic/e2e-testing/internal/common" | ||
"github.com/elastic/e2e-testing/internal/compose" | ||
"github.com/elastic/e2e-testing/internal/deploy" | ||
"github.com/elastic/e2e-testing/internal/docker" | ||
"github.com/elastic/e2e-testing/internal/elasticsearch" | ||
"github.com/elastic/e2e-testing/internal/installer" | ||
"github.com/elastic/e2e-testing/internal/kibana" | ||
|
@@ -90,7 +88,14 @@ func (fts *FleetTestSuite) afterScenario() { | |
|
||
developerMode := shell.GetEnvBool("DEVELOPER_MODE") | ||
if !developerMode { | ||
_ = fts.deployer.Remove([]string{common.FleetProfileName, serviceName}, common.ProfileEnv) | ||
agentInstaller := fts.getInstaller() | ||
|
||
_ = fts.deployer.Remove( | ||
[]deploy.ServiceRequest{ | ||
deploy.NewServiceRequest(common.FleetProfileName), | ||
deploy.NewServiceRequest(serviceName).WithFlavour(agentInstaller.Image), | ||
}, | ||
common.ProfileEnv) | ||
} else { | ||
log.WithField("service", serviceName).Info("Because we are running in development mode, the service won't be stopped") | ||
} | ||
|
@@ -131,6 +136,7 @@ func (fts *FleetTestSuite) beforeScenario() { | |
} | ||
|
||
func (fts *FleetTestSuite) contributeSteps(s *godog.ScenarioContext) { | ||
s.Step(`^a "([^"]*)" agent is deployed to Fleet$`, fts.anAgentIsDeployedToFleet) | ||
s.Step(`^a "([^"]*)" agent is deployed to Fleet with "([^"]*)" installer$`, fts.anAgentIsDeployedToFleetWithInstaller) | ||
s.Step(`^a "([^"]*)" agent "([^"]*)" is deployed to Fleet with "([^"]*)" installer$`, fts.anStaleAgentIsDeployedToFleetWithInstaller) | ||
s.Step(`^agent is in version "([^"]*)"$`, fts.agentInVersion) | ||
|
@@ -306,7 +312,18 @@ func (fts *FleetTestSuite) agentInVersion(version string) error { | |
return backoff.Retry(agentInVersionFn, exp) | ||
} | ||
|
||
// supported installers: tar, systemd | ||
// this step infers the installer type from the underlying OS image | ||
// supported images: centos and debian | ||
func (fts *FleetTestSuite) anAgentIsDeployedToFleet(image string) error { | ||
installerType := "rpm" | ||
if image == "debian" { | ||
installerType = "deb" | ||
} | ||
|
||
return fts.anAgentIsDeployedToFleetWithInstallerAndFleetServer(image, installerType) | ||
} | ||
|
||
// supported installers: tar, rpm, deb | ||
func (fts *FleetTestSuite) anAgentIsDeployedToFleetWithInstaller(image string, installerType string) error { | ||
return fts.anAgentIsDeployedToFleetWithInstallerAndFleetServer(image, installerType) | ||
} | ||
|
@@ -348,7 +365,7 @@ func (fts *FleetTestSuite) anAgentIsDeployedToFleetWithInstallerAndFleetServer(i | |
} | ||
|
||
// get container hostname once | ||
hostname, err := docker.GetContainerHostname(containerName) | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. In my installer rework code, we don't need to query the hostname, as that is done by inspecting the service, further abstracting out connection information. We want to be able to just perform actions on a "service" and not have to be so explicit about how to access it, as that is handled by the deployment and installer abstractions. |
||
hostname, err := deploy.GetContainerHostname(containerName) | ||
if err != nil { | ||
return err | ||
} | ||
|
@@ -361,7 +378,7 @@ func (fts *FleetTestSuite) anAgentIsDeployedToFleetWithInstallerAndFleetServer(i | |
// we are using the Docker client instead of docker-compose because it does not support | ||
// returning the output of a command: it simply returns error level | ||
func (fts *FleetTestSuite) getContainerName(i installer.ElasticAgentInstaller, index int) string { | ||
return fmt.Sprintf("%s_%s_%s_%d", i.Profile, i.Image, common.ElasticAgentServiceName, index) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Same thing here, in the other branch we use an |
||
return fmt.Sprintf("%s_%s_%d", i.Profile, common.ElasticAgentServiceName, index) | ||
} | ||
|
||
// getServiceName returns the current service name, the one defined at the docker compose | ||
|
@@ -391,17 +408,20 @@ func (fts *FleetTestSuite) processStateChangedOnTheHost(process string, state st | |
|
||
serviceName := agentInstaller.Service // name of the service | ||
|
||
profileService := deploy.NewServiceRequest(profile) | ||
imageService := deploy.NewServiceRequest(common.ElasticAgentServiceName).WithFlavour(agentInstaller.Image) | ||
|
||
if state == "started" { | ||
return installer.SystemctlRun(profile, agentInstaller.Image, serviceName, "start") | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. In my branch these are actually abstracted out to the installer service as |
||
return installer.SystemctlRun(profileService, imageService, serviceName, "start") | ||
} else if state == "restarted" { | ||
err := installer.SystemctlRun(profile, agentInstaller.Image, serviceName, "stop") | ||
err := installer.SystemctlRun(profileService, imageService, serviceName, "stop") | ||
if err != nil { | ||
return err | ||
} | ||
|
||
utils.Sleep(time.Duration(utils.TimeoutFactor) * 10 * time.Second) | ||
|
||
err = installer.SystemctlRun(profile, agentInstaller.Image, serviceName, "start") | ||
err = installer.SystemctlRun(profileService, imageService, serviceName, "start") | ||
if err != nil { | ||
return err | ||
} | ||
|
@@ -427,7 +447,7 @@ func (fts *FleetTestSuite) processStateChangedOnTheHost(process string, state st | |
"process": process, | ||
}).Trace("Stopping process on the service") | ||
|
||
err := installer.SystemctlRun(profile, agentInstaller.Image, serviceName, "stop") | ||
err := installer.SystemctlRun(profileService, imageService, serviceName, "stop") | ||
if err != nil { | ||
log.WithFields(log.Fields{ | ||
"action": state, | ||
|
@@ -1136,7 +1156,10 @@ func deployAgentToFleet(agentInstaller installer.ElasticAgentInstaller, deployer | |
// we are setting the container name because Centos service could be reused by any other test suite | ||
common.ProfileEnv[envVarsPrefix+"ContainerName"] = containerName | ||
|
||
services := []string{profile, service} | ||
services := []deploy.ServiceRequest{ | ||
deploy.NewServiceRequest(profile), | ||
deploy.NewServiceRequest(common.ElasticAgentServiceName).WithFlavour(agentInstaller.Image), | ||
} | ||
err := deployer.Add(services, common.ProfileEnv) | ||
if err != nil { | ||
log.WithFields(log.Fields{ | ||
|
@@ -1150,7 +1173,7 @@ func deployAgentToFleet(agentInstaller installer.ElasticAgentInstaller, deployer | |
targetFile := "/" | ||
|
||
// copy downloaded agent to the root dir of the container | ||
err = docker.CopyFileToContainer(context.Background(), containerName, agentInstaller.BinaryPath, targetFile, isTar) | ||
err = deploy.CopyFileToContainer(context.Background(), containerName, agentInstaller.BinaryPath, targetFile, isTar) | ||
if err != nil { | ||
return nil, err | ||
} | ||
|
@@ -1217,16 +1240,17 @@ func inputs(integration string) []kibana.Input { | |
} | ||
|
||
func (fts *FleetTestSuite) getContainerLogs() error { | ||
serviceManager := compose.NewServiceManager() | ||
serviceManager := deploy.NewServiceManager() | ||
|
||
profile := common.FleetProfileName | ||
agentInstaller := fts.getInstaller() | ||
profile := deploy.NewServiceRequest(common.FleetProfileName) | ||
serviceName := common.ElasticAgentServiceName | ||
|
||
composes := []string{ | ||
profile, // profile name | ||
serviceName, // agent service | ||
services := []deploy.ServiceRequest{ | ||
profile, // profile name | ||
deploy.NewServiceRequest(serviceName).WithFlavour(agentInstaller.Image), // agent service | ||
} | ||
err := serviceManager.RunCommand(profile, composes, []string{"logs", serviceName}, common.ProfileEnv) | ||
err := serviceManager.RunCommand(profile, services, []string{"logs", serviceName}, common.ProfileEnv) | ||
if err != nil { | ||
log.WithFields(log.Fields{ | ||
"error": err, | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Rather than calling
deploy.NewServiceManager
we could initialize it like we do in the fleet tests; that way you just use deployer.Add
instead and it'll do the right thing no matter the provider (docker or k8s). There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
If we do that we will be introducing a bug in the CLI side, as we have coupled the Bootstrap method with the Fleet profile. Maybe we can fix that in a separate PR.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Well, indeed the bug is there if we decide to migrate the metricbeat test suite 😄