This repository has been archived by the owner on Sep 17, 2024. It is now read-only.

feat: add first scenario for Fleet Server #900

Merged
30 commits merged on Apr 19, 2021

Commits (30)
b1931c0
chore: capture Fleet's default policy in a stronger manner
mdelapenya Mar 15, 2021
6f54550
chore: support passing the field for is_default policy
mdelapenya Mar 15, 2021
ad0d17f
chore: remove inferred type for array
mdelapenya Mar 15, 2021
e52c8b6
chore: enable fleet server in kibana config
mdelapenya Mar 15, 2021
e8bee02
chore: create fleet config struct
mdelapenya Mar 15, 2021
bb62c4f
chore: refactor enroll command logic to use the new struct
mdelapenya Mar 15, 2021
d8e57ce
chore: check if the fleet-server field exists when retrieving the policy
mdelapenya Mar 15, 2021
6189bff
chore: refactor install to support fleet-server
mdelapenya Mar 16, 2021
d9da8b0
feat: add first scenario for fleet server
mdelapenya Mar 16, 2021
830eb12
chore: add fleet server branch to the CI
mdelapenya Mar 16, 2021
eaa68f2
Merge branch 'master' into 438-fleet-server-scenarios
mdelapenya Mar 18, 2021
bc9e1ff
chore: set Then clause for the scenario
mdelapenya Mar 18, 2021
4ae1019
chore: remove step
mdelapenya Mar 18, 2021
721aa21
fix: define fallback when checking agent status
mdelapenya Mar 22, 2021
f5a9f46
chore: simplify creation of Fleet configs
mdelapenya Mar 24, 2021
eb8f013
fix: forgot to rename variable
mdelapenya Mar 24, 2021
e9edde9
Merge branch 'master' into 438-fleet-server-scenarios
mdelapenya Mar 26, 2021
356750f
WIP
mdelapenya Mar 29, 2021
f2244ad
Merge branch 'master' into 438-fleet-server-scenarios
mdelapenya Mar 29, 2021
7358928
chore: rename scenario
mdelapenya Mar 30, 2021
bcd69ee
Merge branch 'master' into 438-fleet-server-scenarios
mdelapenya Apr 6, 2021
173004f
fix: wrong merge conflicts resolution
mdelapenya Apr 6, 2021
cb57360
chore: support passing environment when running a command in a container
mdelapenya Apr 8, 2021
6865e98
chore: run elastic agent commands passing an env
mdelapenya Apr 8, 2021
6d60a6d
WIP
mdelapenya Apr 8, 2021
04c7fc8
Merge branch 'master' into 438-fleet-server-scenarios
mdelapenya Apr 15, 2021
e5ed65c
chore: separate bootstrapping an agent from connecting to a fleet ser…
mdelapenya Apr 15, 2021
09e4325
fix: use proper fleet-server flags
mdelapenya Apr 15, 2021
491eb17
Merge branch 'master' into 438-fleet-server-scenarios
adam-stokes Apr 16, 2021
a557f88
Merge branch 'master' into 438-fleet-server-scenarios
adam-stokes Apr 19, 2021
3 changes: 3 additions & 0 deletions .ci/.e2e-tests.yaml
@@ -17,6 +17,9 @@ SUITES:
- name: "Fleet"
pullRequestFilter: " && ~debian"
tags: "fleet_mode_agent"
- name: "Fleet Server"
pullRequestFilter: " && ~debian"
tags: "fleet_server"
- name: "Endpoint Integration"
pullRequestFilter: " && ~debian"
tags: "agent_endpoint_integration"
@@ -15,5 +15,6 @@ xpack.fleet.enabled: true
xpack.fleet.registryUrl: http://package-registry:8080
xpack.fleet.agents.enabled: true
xpack.fleet.agents.elasticsearch.host: http://elasticsearch:9200
xpack.fleet.agents.fleetServerEnabled: true
xpack.fleet.agents.kibana.host: http://kibana:5601

Review comment:
You might need xpack.fleet.agents.fleet_server.hosts: [http://?:5601] instead here with the most recent builds. Probably worth rerunning.

xpack.fleet.agents.tlsCheckDisabled: true
12 changes: 12 additions & 0 deletions cli/docker/docker.go
@@ -27,6 +27,11 @@ const OPNetworkName = "elastic-dev-network"

// ExecCommandIntoContainer executes a command, as a user, into a container
func ExecCommandIntoContainer(ctx context.Context, containerName string, user string, cmd []string) (string, error) {
return ExecCommandIntoContainerWithEnv(ctx, containerName, user, cmd, []string{})
}

// ExecCommandIntoContainerWithEnv executes a command, as a user, with env, into a container
func ExecCommandIntoContainerWithEnv(ctx context.Context, containerName string, user string, cmd []string, env []string) (string, error) {
dockerClient := getDockerClient()

detach := false
@@ -36,6 +41,7 @@ func ExecCommandIntoContainer(ctx context.Context, containerName string, user st
"container": containerName,
"command": cmd,
"detach": detach,
"env": env,
"tty": tty,
}).Trace("Creating command to be executed in container")

@@ -48,12 +54,14 @@ func ExecCommandIntoContainer(ctx context.Context, containerName string, user st
AttachStdout: true,
Detach: detach,
Cmd: cmd,
Env: env,
})

if err != nil {
log.WithFields(log.Fields{
"container": containerName,
"command": cmd,
"env": env,
"error": err,
"detach": detach,
"tty": tty,
@@ -65,6 +73,7 @@ func ExecCommandIntoContainer(ctx context.Context, containerName string, user st
"container": containerName,
"command": cmd,
"detach": detach,
"env": env,
"tty": tty,
}).Trace("Command to be executed in container created")

@@ -77,6 +86,7 @@ func ExecCommandIntoContainer(ctx context.Context, containerName string, user st
"container": containerName,
"command": cmd,
"detach": detach,
"env": env,
"error": err,
"tty": tty,
}).Error("Could not execute command in container")
@@ -91,6 +101,7 @@ func ExecCommandIntoContainer(ctx context.Context, containerName string, user st
"container": containerName,
"command": cmd,
"detach": detach,
"env": env,
"error": err,
"tty": tty,
}).Error("Could not parse command output from container")
@@ -102,6 +113,7 @@ func ExecCommandIntoContainer(ctx context.Context, containerName string, user st
"container": containerName,
"command": cmd,
"detach": detach,
"env": env,
"tty": tty,
}).Trace("Command sucessfully executed in container")

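As a usage note: the new ExecCommandIntoContainerWithEnv wrapper keeps the original ExecCommandIntoContainer behaviour while letting callers forward environment variables into the exec session. A minimal sketch of a caller is below; the import path, container name, user and environment variable are illustrative assumptions, not values taken from this PR.

package main

import (
	"context"

	"github.com/elastic/e2e-testing/cli/docker" // assumed module path for cli/docker/docker.go
	log "github.com/sirupsen/logrus"
)

func main() {
	ctx := context.Background()

	// Entries in env are forwarded to the exec session inside the container.
	// The variable name and container name are examples only.
	env := []string{"FLEET_SERVER_ENABLE=1"}

	output, err := docker.ExecCommandIntoContainerWithEnv(
		ctx, "fleet_elastic-agent_1", "root", []string{"elastic-agent", "status"}, env)
	if err != nil {
		log.WithError(err).Fatal("Could not execute command in container")
	}

	log.Info(output)
}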
19 changes: 19 additions & 0 deletions e2e/_suites/fleet/features/fleet_server.feature
@@ -0,0 +1,19 @@
@fleet_server
Feature: Fleet Server
Scenarios for Fleet Server, where Elasticsearch and Kibana instances are already provisioned,
so that the Agent is able to communicate with them

@start-fleet-server
Scenario Outline: Deploying an <os> Elastic Agent that starts Fleet Server
When a "<os>" agent is deployed to Fleet with "tar" installer in fleet-server mode
Then the agent is listed in Fleet as "online"

@centos
Examples: Centos
| os |
| centos |

@debian
Examples: Debian
| os |
| debian |
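The new step in this feature ("... is deployed to Fleet with ... installer in fleet-server mode") is registered in contributeSteps in the fleet.go diff below, but its body is not part of the visible hunks. A plausible sketch, assuming it simply delegates to the shared helper with bootstrapFleetServer set to true, mirroring how anAgentIsDeployedToFleetWithInstaller passes false:

// Sketch only: the real implementation is not shown in this diff.
func (fts *FleetTestSuite) anAgentIsDeployedToFleetWithInstallerInFleetMode(image string, installerType string) error {
	return fts.anAgentIsDeployedToFleetWithInstallerAndFleetServer(image, installerType, true)
}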
86 changes: 62 additions & 24 deletions e2e/_suites/fleet/fleet.go
@@ -123,7 +123,7 @@ func (fts *FleetTestSuite) beforeScenario() {
fts.Version = agentVersion

// create policy with system monitoring enabled
defaultPolicy, err := getAgentDefaultPolicy()
defaultPolicy, err := getAgentDefaultPolicy("is_default")
if err != nil {
log.WithFields(log.Fields{
"err": err,
@@ -161,6 +161,9 @@ func (fts *FleetTestSuite) contributeSteps(s *godog.ScenarioContext) {
s.Step(`^the policy response will be shown in the Security App$`, fts.thePolicyResponseWillBeShownInTheSecurityApp)
s.Step(`^the policy is updated to have "([^"]*)" in "([^"]*)" mode$`, fts.thePolicyIsUpdatedToHaveMode)
s.Step(`^the policy will reflect the change in the Security App$`, fts.thePolicyWillReflectTheChangeInTheSecurityApp)

// fleet server steps
s.Step(`^a "([^"]*)" agent is deployed to Fleet with "([^"]*)" installer in fleet-server mode$`, fts.anAgentIsDeployedToFleetWithInstallerInFleetMode)
}

func (fts *FleetTestSuite) anStaleAgentIsDeployedToFleetWithInstaller(image, version, installerType string) error {
@@ -291,10 +294,15 @@ func (fts *FleetTestSuite) agentInVersion(version string) error {

// supported installers: tar, systemd
func (fts *FleetTestSuite) anAgentIsDeployedToFleetWithInstaller(image string, installerType string) error {
return fts.anAgentIsDeployedToFleetWithInstallerAndFleetServer(image, installerType, false)
}

func (fts *FleetTestSuite) anAgentIsDeployedToFleetWithInstallerAndFleetServer(image string, installerType string, bootstrapFleetServer bool) error {
log.WithFields(log.Fields{
"image": image,
"installer": installerType,
}).Trace("Deploying an agent to Fleet with base image")
"bootstrapFleetServer": bootstrapFleetServer,
"image": image,
"installer": installerType,
}).Trace("Deploying an agent to Fleet with base image and fleet server")

fts.Image = image
fts.InstallerType = installerType
@@ -316,15 +324,16 @@ func (fts *FleetTestSuite) anAgentIsDeployedToFleetWithInstaller(image string, i
fts.CurrentToken = tokenJSONObject.Path("api_key").Data().(string)
fts.CurrentTokenID = tokenJSONObject.Path("id").Data().(string)

err = deployAgentToFleet(installer, containerName, fts.CurrentToken)
var fleetConfig *FleetConfig
fleetConfig, err = deployAgentToFleet(installer, containerName, fts.CurrentToken, bootstrapFleetServer)
fts.Cleanup = true
if err != nil {
return err
}

// the installation process for TAR includes the enrollment
if installer.installerType != "tar" {
err = installer.EnrollFn(fts.CurrentToken)
err = installer.EnrollFn(fleetConfig)
if err != nil {
return err
}
@@ -452,10 +461,10 @@ func theAgentIsListedInFleetWithStatus(desiredStatus, hostname string) error {
"status": desiredStatus,
}).Info("The Agent is not present in Fleet, as expected")
return nil
} else if desiredStatus == "online" {
retryCount++
return fmt.Errorf("The agent is not present in Fleet, but it should")
}

retryCount++
return fmt.Errorf("The agent is not present in Fleet in the '%s' status, but it should", desiredStatus)
}

isAgentInStatus, err := isAgentInStatus(agentID, desiredStatus)
@@ -618,7 +627,13 @@ func (fts *FleetTestSuite) theAgentIsReenrolledOnTheHost() error {

installer := fts.getInstaller()

err := installer.EnrollFn(fts.CurrentToken)
// a restart does not need to bootstrap the Fleet Server again
cfg, err := NewFleetConfig(fts.CurrentToken, false, false)
if err != nil {
return err
}

err = installer.EnrollFn(cfg)
if err != nil {
return err
}
@@ -663,7 +678,7 @@ func (fts *FleetTestSuite) thePolicyShowsTheDatasourceAdded(packageName string)
fts.Integration = integration

configurationIsPresentFn := func() error {
defaultPolicy, err := getAgentDefaultPolicy()
defaultPolicy, err := getAgentDefaultPolicy("is_default")
if err != nil {
log.WithFields(log.Fields{
"error": err,
@@ -1033,14 +1048,14 @@ func (fts *FleetTestSuite) anAttemptToEnrollANewAgentFails() error {

containerName := fmt.Sprintf("%s_%s_%s_%d", profile, fts.Image+"-systemd", ElasticAgentServiceName, 2) // name of the new container

err := deployAgentToFleet(installer, containerName, fts.CurrentToken)
fleetConfig, err := deployAgentToFleet(installer, containerName, fts.CurrentToken, false)
// the installation process for TAR includes the enrollment
if installer.installerType != "tar" {
if err != nil {
return err
}

err = installer.EnrollFn(fts.CurrentToken)
err = installer.EnrollFn(fleetConfig)
if err == nil {
err = fmt.Errorf("The agent was enrolled although the token was previously revoked")

@@ -1334,7 +1349,7 @@ func createFleetToken(name string, policyID string) (*gabs.Container, error) {
return tokenItem, nil
}

func deployAgentToFleet(installer ElasticAgentInstaller, containerName string, token string) error {
func deployAgentToFleet(installer ElasticAgentInstaller, containerName string, token string, bootstrapFleetServer bool) (*FleetConfig, error) {
profile := installer.profile // name of the runtime dependencies compose file
service := installer.service // name of the service
serviceTag := installer.tag // docker tag of the service
@@ -1357,24 +1372,31 @@ func deployAgentToFleet(installer ElasticAgentInstaller, containerName string, t
"service": service,
"tag": serviceTag,
}).Error("Could not run the target box")
return err
return nil, err
}

err = installer.PreInstallFn()
if err != nil {
return err
return nil, err
}

cfg, cfgError := NewFleetConfig(token, bootstrapFleetServer, false)
if cfgError != nil {
return nil, cfgError
}

err = installer.InstallFn(containerName, token)
err = installer.InstallFn(cfg)
if err != nil {
return err
return nil, err
}

return installer.PostInstallFn()
return cfg, installer.PostInstallFn()
}

// getAgentDefaultPolicy sends a GET request to Fleet for the existing default policy
func getAgentDefaultPolicy() (*gabs.Container, error) {
// getAgentDefaultPolicy sends a GET request to Fleet for the existing default policy, using the
// "defaultPolicyFieldName" passed as parameter as field to be used to find the policy in list
// of fleet policies
func getAgentDefaultPolicy(defaultPolicyFieldName string) (*gabs.Container, error) {
r := createDefaultHTTPRequest(ingestManagerAgentPoliciesURL)
body, err := curl.Get(r)
if err != nil {
@@ -1402,10 +1424,21 @@ func getAgentDefaultPolicy() (*gabs.Container, error) {
"count": len(policies.Children()),
}).Trace("Fleet policies retrieved")

// TODO: perform a strong check to capture default policy
defaultPolicy := policies.Index(0)
for _, policy := range policies.Children() {
if !policy.Exists(defaultPolicyFieldName) {
continue
}

if policy.Path(defaultPolicyFieldName).Data().(bool) {
log.WithFields(log.Fields{
"field": defaultPolicyFieldName,
"policy": policy,
}).Trace("Default Policy was found")
return policy, nil
}
}

return defaultPolicy, nil
return nil, fmt.Errorf("Default policy was not found with '%s' field equals to 'true'", defaultPolicyFieldName)
}

func getAgentEvents(applicationName string, agentID string, packagePolicyID string, updatedAt string) error {
@@ -1581,6 +1614,11 @@ func isAgentInStatus(agentID string, desiredStatus string) (bool, error) {

jsonResponse, err := gabs.ParseJSON([]byte(body))

log.WithFields(log.Fields{
"agentID": agentID,
"desiredStatus": desiredStatus,
}).Info(jsonResponse)

agentStatus := jsonResponse.Path("item.status").Data().(string)

return (strings.ToLower(agentStatus) == strings.ToLower(desiredStatus)), nil
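Throughout the fleet.go diff, NewFleetConfig and the FleetConfig struct are used but defined outside the visible hunks. A rough sketch of the shape implied by the call sites, with guessed field names and guessed semantics for the third argument (the re-enrollment comment above suggests it relates to bootstrapping or reconnecting to Fleet Server):

// Guessed shape only: inferred from NewFleetConfig(token, bootstrapFleetServer, false)
// and from EnrollFn/InstallFn now taking a *FleetConfig instead of a raw token.
type FleetConfig struct {
	EnrollmentToken      string // token used when enrolling the agent into Fleet
	BootstrapFleetServer bool   // true when the agent should start its own Fleet Server
	SkipFleetServer      bool   // guessed: skip wiring the agent to an existing Fleet Server
}

func NewFleetConfig(token string, bootstrapFleetServer bool, skipFleetServer bool) (*FleetConfig, error) {
	return &FleetConfig{
		EnrollmentToken:      token,
		BootstrapFleetServer: bootstrapFleetServer,
		SkipFleetServer:      skipFleetServer,
	}, nil
}

The real config presumably also carries connection details (Kibana and Fleet Server host/port) consumed by EnrollFn and InstallFn; those are deliberately left out of this sketch.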