diff --git a/x-pack/elastic-agent/CHANGELOG.asciidoc b/x-pack/elastic-agent/CHANGELOG.asciidoc index 8f5efd70aa38..8843292ceb3b 100644 --- a/x-pack/elastic-agent/CHANGELOG.asciidoc +++ b/x-pack/elastic-agent/CHANGELOG.asciidoc @@ -36,6 +36,7 @@ - Windows agent doesn't uninstall with a lowercase `c:` drive in the path {pull}23998[23998] - Fix reloading of log level for services {pull}[24055]24055 - Fix: Successfully installed and enrolled agent running standalone{pull}[24128]24128 +- Remove installed services on agent uninstall {pull}24151[24151] ==== New features diff --git a/x-pack/elastic-agent/pkg/agent/application/application.go b/x-pack/elastic-agent/pkg/agent/application/application.go index c88cca8fc3c6..a5431c352859 100644 --- a/x-pack/elastic-agent/pkg/agent/application/application.go +++ b/x-pack/elastic-agent/pkg/agent/application/application.go @@ -13,6 +13,7 @@ import ( "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/status" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/info" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/paths" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/upgrade" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/configuration" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/warn" @@ -70,7 +71,7 @@ func createApplication( return nil, err } - if IsStandalone(cfg.Fleet) { + if configuration.IsStandalone(cfg.Fleet) { log.Info("Agent is managed locally") return newLocal(ctx, log, pathConfigFile, rawConfig, reexec, statusCtrl, uc, agentInfo) } @@ -82,7 +83,7 @@ func createApplication( return nil, err } - if IsFleetServerBootstrap(cfg.Fleet) { + if configuration.IsFleetServerBootstrap(cfg.Fleet) { log.Info("Agent is in Fleet Server bootstrap mode") return newFleetServerBootstrap(ctx, log, pathConfigFile, rawConfig, statusCtrl, agentInfo) } @@ -91,18 +92,8 @@ func createApplication( return newManaged(ctx, log, store, cfg, 
rawConfig, reexec, statusCtrl, agentInfo) } -// IsStandalone decides based on missing of fleet.enabled: true or fleet.{access_token,kibana} will place Elastic Agent into standalone mode. -func IsStandalone(cfg *configuration.FleetAgentConfig) bool { - return cfg == nil || !cfg.Enabled -} - -// IsFleetServerBootstrap decides if Elastic Agent is started in bootstrap mode. -func IsFleetServerBootstrap(cfg *configuration.FleetAgentConfig) bool { - return cfg != nil && cfg.Server != nil && cfg.Server.Bootstrap -} - func mergeFleetConfig(rawConfig *config.Config) (storage.Store, *configuration.Configuration, error) { - path := info.AgentConfigFile() + path := paths.AgentConfigFile() store := storage.NewDiskStore(path) reader, err := store.Load() if err != nil { diff --git a/x-pack/elastic-agent/pkg/agent/application/config_test.go b/x-pack/elastic-agent/pkg/agent/application/config_test.go index 824691295fec..1a0287e25024 100644 --- a/x-pack/elastic-agent/pkg/agent/application/config_test.go +++ b/x-pack/elastic-agent/pkg/agent/application/config_test.go @@ -30,7 +30,7 @@ func testMgmtMode(t *testing.T) { err := c.Unpack(&m) require.NoError(t, err) assert.Equal(t, false, m.Fleet.Enabled) - assert.Equal(t, true, IsStandalone(m.Fleet)) + assert.Equal(t, true, configuration.IsStandalone(m.Fleet)) }) @@ -40,7 +40,7 @@ func testMgmtMode(t *testing.T) { err := c.Unpack(&m) require.NoError(t, err) assert.Equal(t, true, m.Fleet.Enabled) - assert.Equal(t, false, IsStandalone(m.Fleet)) + assert.Equal(t, false, configuration.IsStandalone(m.Fleet)) }) } diff --git a/x-pack/elastic-agent/pkg/agent/application/emitter.go b/x-pack/elastic-agent/pkg/agent/application/emitter.go index 0b575db52f76..89310ddeb9d2 100644 --- a/x-pack/elastic-agent/pkg/agent/application/emitter.go +++ b/x-pack/elastic-agent/pkg/agent/application/emitter.go @@ -6,7 +6,6 @@ package application import ( "context" - "fmt" "strings" "sync" @@ -123,7 +122,7 @@ func (e *emitterController) update() error { ast := 
rawAst.Clone() inputs, ok := transpiler.Lookup(ast, "inputs") if ok { - renderedInputs, err := renderInputs(inputs, varsArray) + renderedInputs, err := transpiler.RenderInputs(inputs, varsArray) if err != nil { return err } @@ -191,90 +190,3 @@ func readfiles(files []string, emitter emitterFunc) error { return emitter(c) } - -func renderInputs(inputs transpiler.Node, varsArray []*transpiler.Vars) (transpiler.Node, error) { - l, ok := inputs.Value().(*transpiler.List) - if !ok { - return nil, fmt.Errorf("inputs must be an array") - } - nodes := []*transpiler.Dict{} - nodesMap := map[string]*transpiler.Dict{} - for _, vars := range varsArray { - for _, node := range l.Value().([]transpiler.Node) { - dict, ok := node.Clone().(*transpiler.Dict) - if !ok { - continue - } - n, err := dict.Apply(vars) - if err == transpiler.ErrNoMatch { - // has a variable that didn't exist, so we ignore it - continue - } - if err != nil { - // another error that needs to be reported - return nil, err - } - if n == nil { - // condition removed it - continue - } - dict = n.(*transpiler.Dict) - hash := string(dict.Hash()) - _, exists := nodesMap[hash] - if !exists { - nodesMap[hash] = dict - nodes = append(nodes, dict) - } - } - } - nInputs := []transpiler.Node{} - for _, node := range nodes { - nInputs = append(nInputs, promoteProcessors(node)) - } - return transpiler.NewList(nInputs), nil -} - -func promoteProcessors(dict *transpiler.Dict) *transpiler.Dict { - p := dict.Processors() - if p == nil { - return dict - } - var currentList *transpiler.List - current, ok := dict.Find("processors") - if ok { - currentList, ok = current.Value().(*transpiler.List) - if !ok { - return dict - } - } - ast, _ := transpiler.NewAST(map[string]interface{}{ - "processors": p, - }) - procs, _ := transpiler.Lookup(ast, "processors") - nodes := nodesFromList(procs.Value().(*transpiler.List)) - if ok && currentList != nil { - nodes = append(nodes, nodesFromList(currentList)...) 
- } - dictNodes := dict.Value().([]transpiler.Node) - set := false - for i, node := range dictNodes { - switch n := node.(type) { - case *transpiler.Key: - if n.Name() == "processors" { - dictNodes[i] = transpiler.NewKey("processors", transpiler.NewList(nodes)) - set = true - } - } - if set { - break - } - } - if !set { - dictNodes = append(dictNodes, transpiler.NewKey("processors", transpiler.NewList(nodes))) - } - return transpiler.NewDict(dictNodes) -} - -func nodesFromList(list *transpiler.List) []transpiler.Node { - return list.Value().([]transpiler.Node) -} diff --git a/x-pack/elastic-agent/pkg/agent/application/emitter_test.go b/x-pack/elastic-agent/pkg/agent/application/emitter_test.go index d65ba9f074d2..7c1975fef642 100644 --- a/x-pack/elastic-agent/pkg/agent/application/emitter_test.go +++ b/x-pack/elastic-agent/pkg/agent/application/emitter_test.go @@ -3,754 +3,3 @@ // you may not use this file except in compliance with the Elastic License. package application - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "github.com/stretchr/testify/assert" - - "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/transpiler" -) - -func TestRenderInputs(t *testing.T) { - testcases := map[string]struct { - input transpiler.Node - expected transpiler.Node - varsArray []*transpiler.Vars - err bool - }{ - "inputs not list": { - input: transpiler.NewKey("inputs", transpiler.NewStrVal("not list")), - err: true, - varsArray: []*transpiler.Vars{ - mustMakeVars(map[string]interface{}{}), - }, - }, - "bad variable error": { - input: transpiler.NewKey("inputs", transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("key", transpiler.NewStrVal("${var1.name|'missing ending quote}")), - }), - })), - err: true, - varsArray: []*transpiler.Vars{ - mustMakeVars(map[string]interface{}{ - "var1": map[string]interface{}{ - "name": "value1", - }, - }), - }, - }, - "basic single var": { - input: 
transpiler.NewKey("inputs", transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("key", transpiler.NewStrVal("${var1.name}")), - }), - })), - expected: transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("key", transpiler.NewStrVal("value1")), - }), - }), - varsArray: []*transpiler.Vars{ - mustMakeVars(map[string]interface{}{ - "var1": map[string]interface{}{ - "name": "value1", - }, - }), - }, - }, - "duplicate result is removed": { - input: transpiler.NewKey("inputs", transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("key", transpiler.NewStrVal("${var1.name}")), - }), - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("key", transpiler.NewStrVal("${var1.diff}")), - }), - })), - expected: transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("key", transpiler.NewStrVal("value1")), - }), - }), - varsArray: []*transpiler.Vars{ - mustMakeVars(map[string]interface{}{ - "var1": map[string]interface{}{ - "name": "value1", - "diff": "value1", - }, - }), - }, - }, - "missing var removes input": { - input: transpiler.NewKey("inputs", transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("key", transpiler.NewStrVal("${var1.name}")), - }), - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("key", transpiler.NewStrVal("${var1.missing|var1.diff}")), - }), - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("key", transpiler.NewStrVal("${var1.removed}")), - }), - })), - expected: transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("key", transpiler.NewStrVal("value1")), - }), - }), - varsArray: []*transpiler.Vars{ - mustMakeVars(map[string]interface{}{ - "var1": map[string]interface{}{ - "name": "value1", - "diff": "value1", - }, - }), - }, - }, - "duplicate var result but unique 
input not removed": { - input: transpiler.NewKey("inputs", transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("key", transpiler.NewStrVal("${var1.name}")), - transpiler.NewKey("unique", transpiler.NewStrVal("0")), - }), - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("key", transpiler.NewStrVal("${var1.diff}")), - transpiler.NewKey("unique", transpiler.NewStrVal("1")), - }), - })), - expected: transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("key", transpiler.NewStrVal("value1")), - transpiler.NewKey("unique", transpiler.NewStrVal("0")), - }), - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("key", transpiler.NewStrVal("value1")), - transpiler.NewKey("unique", transpiler.NewStrVal("1")), - }), - }), - varsArray: []*transpiler.Vars{ - mustMakeVars(map[string]interface{}{ - "var1": map[string]interface{}{ - "name": "value1", - "diff": "value1", - }, - }), - }, - }, - "duplicates across vars array handled": { - input: transpiler.NewKey("inputs", transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("key", transpiler.NewStrVal("${var1.name}")), - }), - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("key", transpiler.NewStrVal("${var1.diff}")), - }), - })), - expected: transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("key", transpiler.NewStrVal("value1")), - }), - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("key", transpiler.NewStrVal("value2")), - }), - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("key", transpiler.NewStrVal("value3")), - }), - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("key", transpiler.NewStrVal("value4")), - }), - }), - varsArray: []*transpiler.Vars{ - mustMakeVars(map[string]interface{}{ - "var1": map[string]interface{}{ - "name": "value1", - "diff": "value1", - }, - }), - 
mustMakeVars(map[string]interface{}{ - "var1": map[string]interface{}{ - "name": "value1", - "diff": "value2", - }, - }), - mustMakeVars(map[string]interface{}{ - "var1": map[string]interface{}{ - "name": "value1", - "diff": "value3", - }, - }), - mustMakeVars(map[string]interface{}{ - "var1": map[string]interface{}{ - "name": "value1", - "diff": "value2", - }, - }), - mustMakeVars(map[string]interface{}{ - "var1": map[string]interface{}{ - "name": "value1", - "diff": "value4", - }, - }), - }, - }, - "nested in streams": { - input: transpiler.NewKey("inputs", transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("type", transpiler.NewStrVal("logfile")), - transpiler.NewKey("streams", transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("paths", transpiler.NewList([]transpiler.Node{ - transpiler.NewStrVal("/var/log/${var1.name}.log"), - })), - }), - })), - }), - })), - expected: transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("type", transpiler.NewStrVal("logfile")), - transpiler.NewKey("streams", transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("paths", transpiler.NewList([]transpiler.Node{ - transpiler.NewStrVal("/var/log/value1.log"), - })), - }), - })), - }), - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("type", transpiler.NewStrVal("logfile")), - transpiler.NewKey("streams", transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("paths", transpiler.NewList([]transpiler.Node{ - transpiler.NewStrVal("/var/log/value2.log"), - })), - }), - })), - }), - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("type", transpiler.NewStrVal("logfile")), - transpiler.NewKey("streams", transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("paths", transpiler.NewList([]transpiler.Node{ - 
transpiler.NewStrVal("/var/log/value3.log"), - })), - }), - })), - }), - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("type", transpiler.NewStrVal("logfile")), - transpiler.NewKey("streams", transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("paths", transpiler.NewList([]transpiler.Node{ - transpiler.NewStrVal("/var/log/value4.log"), - })), - }), - })), - }), - }), - varsArray: []*transpiler.Vars{ - mustMakeVars(map[string]interface{}{ - "var1": map[string]interface{}{ - "name": "value1", - }, - }), - mustMakeVars(map[string]interface{}{ - "var1": map[string]interface{}{ - "name": "value2", - }, - }), - mustMakeVars(map[string]interface{}{ - "var1": map[string]interface{}{ - "name": "value2", - }, - }), - mustMakeVars(map[string]interface{}{ - "var1": map[string]interface{}{ - "name": "value3", - }, - }), - mustMakeVars(map[string]interface{}{ - "var1": map[string]interface{}{ - "name": "value4", - }, - }), - mustMakeVars(map[string]interface{}{ - "var1": map[string]interface{}{ - "missing": "other", - }, - }), - }, - }, - "inputs with processors": { - input: transpiler.NewKey("inputs", transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("type", transpiler.NewStrVal("logfile")), - transpiler.NewKey("streams", transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("paths", transpiler.NewList([]transpiler.Node{ - transpiler.NewStrVal("/var/log/${var1.name}.log"), - })), - }), - })), - transpiler.NewKey("processors", transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("add_fields", transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("fields", transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("user", transpiler.NewStrVal("user1")), - })), - transpiler.NewKey("to", transpiler.NewStrVal("user")), - })), - }), - })), - }), - })), - expected: 
transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("type", transpiler.NewStrVal("logfile")), - transpiler.NewKey("streams", transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("paths", transpiler.NewList([]transpiler.Node{ - transpiler.NewStrVal("/var/log/value1.log"), - })), - }), - })), - transpiler.NewKey("processors", transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("add_fields", transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("fields", transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("user", transpiler.NewStrVal("user1")), - })), - transpiler.NewKey("to", transpiler.NewStrVal("user")), - })), - }), - })), - }), - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("type", transpiler.NewStrVal("logfile")), - transpiler.NewKey("streams", transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("paths", transpiler.NewList([]transpiler.Node{ - transpiler.NewStrVal("/var/log/value2.log"), - })), - }), - })), - transpiler.NewKey("processors", transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("add_fields", transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("fields", transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("user", transpiler.NewStrVal("user1")), - })), - transpiler.NewKey("to", transpiler.NewStrVal("user")), - })), - }), - })), - }), - }), - varsArray: []*transpiler.Vars{ - mustMakeVars(map[string]interface{}{ - "var1": map[string]interface{}{ - "name": "value1", - }, - }), - mustMakeVars(map[string]interface{}{ - "var1": map[string]interface{}{ - "name": "value2", - }, - }), - }, - }, - "vars with processors": { - input: transpiler.NewKey("inputs", transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("type", transpiler.NewStrVal("logfile")), - 
transpiler.NewKey("streams", transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("paths", transpiler.NewList([]transpiler.Node{ - transpiler.NewStrVal("/var/log/${var1.name}.log"), - })), - }), - })), - transpiler.NewKey("processors", transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("add_fields", transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("fields", transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("user", transpiler.NewStrVal("user1")), - })), - transpiler.NewKey("to", transpiler.NewStrVal("user")), - })), - }), - })), - }), - })), - expected: transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("type", transpiler.NewStrVal("logfile")), - transpiler.NewKey("streams", transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("paths", transpiler.NewList([]transpiler.Node{ - transpiler.NewStrVal("/var/log/value1.log"), - })), - }), - })), - transpiler.NewKey("processors", transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("add_fields", transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("fields", transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("custom", transpiler.NewStrVal("value1")), - })), - transpiler.NewKey("to", transpiler.NewStrVal("dynamic")), - })), - }), - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("add_fields", transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("fields", transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("user", transpiler.NewStrVal("user1")), - })), - transpiler.NewKey("to", transpiler.NewStrVal("user")), - })), - }), - })), - }), - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("type", transpiler.NewStrVal("logfile")), - transpiler.NewKey("streams", transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - 
transpiler.NewKey("paths", transpiler.NewList([]transpiler.Node{ - transpiler.NewStrVal("/var/log/value2.log"), - })), - }), - })), - transpiler.NewKey("processors", transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("add_fields", transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("fields", transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("custom", transpiler.NewStrVal("value2")), - })), - transpiler.NewKey("to", transpiler.NewStrVal("dynamic")), - })), - }), - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("add_fields", transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("fields", transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("user", transpiler.NewStrVal("user1")), - })), - transpiler.NewKey("to", transpiler.NewStrVal("user")), - })), - }), - })), - }), - }), - varsArray: []*transpiler.Vars{ - mustMakeVarsP(map[string]interface{}{ - "var1": map[string]interface{}{ - "name": "value1", - }, - }, - "var1", - []map[string]interface{}{ - { - "add_fields": map[string]interface{}{ - "fields": map[string]interface{}{ - "custom": "value1", - }, - "to": "dynamic", - }, - }, - }), - mustMakeVarsP(map[string]interface{}{ - "var1": map[string]interface{}{ - "name": "value2", - }, - }, - "var1", - []map[string]interface{}{ - { - "add_fields": map[string]interface{}{ - "fields": map[string]interface{}{ - "custom": "value2", - }, - "to": "dynamic", - }, - }, - }), - }, - }, - "inputs without processors and vars with processors": { - input: transpiler.NewKey("inputs", transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("type", transpiler.NewStrVal("logfile")), - transpiler.NewKey("streams", transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("paths", transpiler.NewList([]transpiler.Node{ - transpiler.NewStrVal("/var/log/${var1.name}.log"), - })), - }), - })), - }), - })), - expected: 
transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("type", transpiler.NewStrVal("logfile")), - transpiler.NewKey("streams", transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("paths", transpiler.NewList([]transpiler.Node{ - transpiler.NewStrVal("/var/log/value1.log"), - })), - }), - })), - transpiler.NewKey("processors", transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("add_fields", transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("fields", transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("custom", transpiler.NewStrVal("value1")), - })), - transpiler.NewKey("to", transpiler.NewStrVal("dynamic")), - })), - }), - })), - }), - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("type", transpiler.NewStrVal("logfile")), - transpiler.NewKey("streams", transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("paths", transpiler.NewList([]transpiler.Node{ - transpiler.NewStrVal("/var/log/value2.log"), - })), - }), - })), - transpiler.NewKey("processors", transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("add_fields", transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("fields", transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("custom", transpiler.NewStrVal("value2")), - })), - transpiler.NewKey("to", transpiler.NewStrVal("dynamic")), - })), - }), - })), - }), - }), - varsArray: []*transpiler.Vars{ - mustMakeVarsP(map[string]interface{}{ - "var1": map[string]interface{}{ - "name": "value1", - }, - }, - "var1", - []map[string]interface{}{ - { - "add_fields": map[string]interface{}{ - "fields": map[string]interface{}{ - "custom": "value1", - }, - "to": "dynamic", - }, - }, - }), - mustMakeVarsP(map[string]interface{}{ - "var1": map[string]interface{}{ - "name": "value2", - }, - }, - "var1", - 
[]map[string]interface{}{ - { - "add_fields": map[string]interface{}{ - "fields": map[string]interface{}{ - "custom": "value2", - }, - "to": "dynamic", - }, - }, - }), - }, - }, - "processors incorrectly a map": { - input: transpiler.NewKey("inputs", transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("type", transpiler.NewStrVal("logfile")), - transpiler.NewKey("streams", transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("paths", transpiler.NewList([]transpiler.Node{ - transpiler.NewStrVal("/var/log/${var1.name}.log"), - })), - }), - })), - transpiler.NewKey("processors", transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("add_fields", transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("invalid", transpiler.NewStrVal("value")), - })), - })), - }), - })), - expected: transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("type", transpiler.NewStrVal("logfile")), - transpiler.NewKey("streams", transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("paths", transpiler.NewList([]transpiler.Node{ - transpiler.NewStrVal("/var/log/value1.log"), - })), - }), - })), - transpiler.NewKey("processors", transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("add_fields", transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("invalid", transpiler.NewStrVal("value")), - })), - })), - }), - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("type", transpiler.NewStrVal("logfile")), - transpiler.NewKey("streams", transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("paths", transpiler.NewList([]transpiler.Node{ - transpiler.NewStrVal("/var/log/value2.log"), - })), - }), - })), - transpiler.NewKey("processors", transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("add_fields", transpiler.NewDict([]transpiler.Node{ - 
transpiler.NewKey("invalid", transpiler.NewStrVal("value")), - })), - })), - }), - }), - varsArray: []*transpiler.Vars{ - mustMakeVarsP(map[string]interface{}{ - "var1": map[string]interface{}{ - "name": "value1", - }, - }, - "var1", - []map[string]interface{}{ - { - "add_fields": map[string]interface{}{ - "fields": map[string]interface{}{ - "custom": "value1", - }, - "to": "dynamic", - }, - }, - }), - mustMakeVarsP(map[string]interface{}{ - "var1": map[string]interface{}{ - "name": "value2", - }, - }, - "var1", - []map[string]interface{}{ - { - "add_fields": map[string]interface{}{ - "fields": map[string]interface{}{ - "custom": "value2", - }, - "to": "dynamic", - }, - }, - }), - }, - }, - "same var result with different processors": { - input: transpiler.NewKey("inputs", transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("type", transpiler.NewStrVal("logfile")), - transpiler.NewKey("streams", transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("paths", transpiler.NewList([]transpiler.Node{ - transpiler.NewStrVal("/var/log/${var1.name}.log"), - })), - }), - })), - }), - })), - expected: transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("type", transpiler.NewStrVal("logfile")), - transpiler.NewKey("streams", transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("paths", transpiler.NewList([]transpiler.Node{ - transpiler.NewStrVal("/var/log/value1.log"), - })), - }), - })), - transpiler.NewKey("processors", transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("add_fields", transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("fields", transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("custom", transpiler.NewStrVal("value1")), - })), - transpiler.NewKey("to", transpiler.NewStrVal("dynamic")), - })), - }), - })), - }), - }), - 
varsArray: []*transpiler.Vars{ - mustMakeVarsP(map[string]interface{}{ - "var1": map[string]interface{}{ - "name": "value1", - }, - }, - "var1", - []map[string]interface{}{ - { - "add_fields": map[string]interface{}{ - "fields": map[string]interface{}{ - "custom": "value1", - }, - "to": "dynamic", - }, - }, - }), - mustMakeVarsP(map[string]interface{}{ - "var1": map[string]interface{}{ - "name": "value1", - }, - }, - "var1", - []map[string]interface{}{ - { - "add_fields": map[string]interface{}{ - "fields": map[string]interface{}{ - "custom": "value2", - }, - "to": "dynamic", - }, - }, - }), - }, - }, - } - - for name, test := range testcases { - t.Run(name, func(t *testing.T) { - v, err := renderInputs(test.input, test.varsArray) - if test.err { - require.Error(t, err) - } else { - require.NoError(t, err) - assert.Equal(t, test.expected.String(), v.String()) - } - }) - } -} - -func mustMakeVars(mapping map[string]interface{}) *transpiler.Vars { - v, err := transpiler.NewVars(mapping) - if err != nil { - panic(err) - } - return v -} - -func mustMakeVarsP(mapping map[string]interface{}, processorKey string, processors transpiler.Processors) *transpiler.Vars { - v, err := transpiler.NewVarsWithProcessors(mapping, processorKey, processors) - if err != nil { - panic(err) - } - return v -} diff --git a/x-pack/elastic-agent/pkg/agent/application/enroll_cmd.go b/x-pack/elastic-agent/pkg/agent/application/enroll_cmd.go index 65cce3193103..25564605a376 100644 --- a/x-pack/elastic-agent/pkg/agent/application/enroll_cmd.go +++ b/x-pack/elastic-agent/pkg/agent/application/enroll_cmd.go @@ -23,6 +23,7 @@ import ( "github.com/elastic/beats/v7/libbeat/common/transport/tlscommon" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/info" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/paths" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/control/client" 
"github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/control/proto" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/errors" @@ -45,12 +46,12 @@ var ( daemonTimeout = 30 * time.Second // max amount of for communication to running Agent daemon ) -type store interface { +type saver interface { Save(io.Reader) error } type storeLoad interface { - store + saver Load() (io.ReadCloser, error) } @@ -72,7 +73,7 @@ type EnrollCmd struct { log *logger.Logger options *EnrollCmdOption client clienter - configStore store + configStore saver kibanaConfig *kibana.Config agentProc *process.Info } @@ -138,7 +139,7 @@ func NewEnrollCmd( store := storage.NewReplaceOnSuccessStore( configPath, DefaultAgentFleetConfig, - storage.NewDiskStore(info.AgentConfigFile()), + storage.NewDiskStore(paths.AgentConfigFile()), ) return NewEnrollCmdWithStore( @@ -154,7 +155,7 @@ func NewEnrollCmdWithStore( log *logger.Logger, options *EnrollCmdOption, configPath string, - store store, + store saver, ) (*EnrollCmd, error) { return &EnrollCmd{ log: log, @@ -414,13 +415,13 @@ func (c *EnrollCmd) enroll(ctx context.Context) error { // clear action store // fail only if file exists and there was a failure - if err := os.Remove(info.AgentActionStoreFile()); !os.IsNotExist(err) { + if err := os.Remove(paths.AgentActionStoreFile()); !os.IsNotExist(err) { return err } // clear action store // fail only if file exists and there was a failure - if err := os.Remove(info.AgentStateStoreFile()); !os.IsNotExist(err) { + if err := os.Remove(paths.AgentStateStoreFile()); !os.IsNotExist(err) { return err } diff --git a/x-pack/elastic-agent/pkg/agent/application/fleet_gateway.go b/x-pack/elastic-agent/pkg/agent/application/fleet_gateway.go index 225c50e65e68..ed396c70cef6 100644 --- a/x-pack/elastic-agent/pkg/agent/application/fleet_gateway.go +++ b/x-pack/elastic-agent/pkg/agent/application/fleet_gateway.go @@ -14,6 +14,7 @@ import ( "github.com/elastic/beats/v7/libbeat/common/backoff" 
"github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/errors" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/storage/store" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/logger" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/status" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/fleetapi" @@ -43,6 +44,8 @@ type backoffSettings struct { Max time.Duration `config:"max"` } +type fleetAcker = store.FleetAcker + type dispatcher interface { Dispatch(acker fleetAcker, actions ...action) error } @@ -55,11 +58,6 @@ type fleetReporter interface { Events() ([]fleetapi.SerializableEvent, func()) } -type fleetAcker interface { - Ack(ctx context.Context, action fleetapi.Action) error - Commit(ctx context.Context) error -} - // FleetGateway is a gateway between the Agent and the Fleet API, it's take cares of all the // bidirectional communication requirements. The gateway aggregates events and will periodically // call the API to send the events and will receive actions to be executed locally. 
@@ -72,6 +70,14 @@ type FleetGateway interface { SetClient(clienter) } +type stateStore interface { + Add(fleetapi.Action) + AckToken() string + SetAckToken(ackToken string) + Save() error + Actions() []fleetapi.Action +} + type fleetGateway struct { bgContext context.Context log *logger.Logger @@ -88,7 +94,7 @@ type fleetGateway struct { unauthCounter int statusController status.Controller statusReporter status.Reporter - stateStore *stateStore + stateStore stateStore } func newFleetGateway( @@ -100,7 +106,7 @@ func newFleetGateway( r fleetReporter, acker fleetAcker, statusController status.Controller, - stateStore *stateStore, + stateStore stateStore, ) (FleetGateway, error) { scheduler := scheduler.NewPeriodicJitter(defaultGatewaySettings.Duration, defaultGatewaySettings.Jitter) @@ -130,7 +136,7 @@ func newFleetGatewayWithScheduler( r fleetReporter, acker fleetAcker, statusController status.Controller, - stateStore *stateStore, + stateStore stateStore, ) (FleetGateway, error) { // Backoff implementation doesn't support the using context as the shutdown mechanism. 
diff --git a/x-pack/elastic-agent/pkg/agent/application/fleet_gateway_test.go b/x-pack/elastic-agent/pkg/agent/application/fleet_gateway_test.go index 4af4836936eb..033811630697 100644 --- a/x-pack/elastic-agent/pkg/agent/application/fleet_gateway_test.go +++ b/x-pack/elastic-agent/pkg/agent/application/fleet_gateway_test.go @@ -20,11 +20,13 @@ import ( "github.com/pkg/errors" "github.com/stretchr/testify/require" - "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/info" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/paths" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/storage" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/storage/store" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/logger" repo "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/reporter" fleetreporter "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/reporter/fleet" + fleetreporterConfig "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/reporter/fleet/config" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/scheduler" ) @@ -119,7 +121,7 @@ func withGateway(agentInfo agentInfo, settings *fleetGatewaySettings, fn withGat ctx, cancel := context.WithCancel(context.Background()) defer cancel() - stateStore, err := newStateStore(log, storage.NewDiskStore(info.AgentStateStoreFile())) + stateStore, err := store.NewStateStore(log, storage.NewDiskStore(paths.AgentStateStoreFile())) require.NoError(t, err) gateway, err := newFleetGatewayWithScheduler( @@ -248,7 +250,7 @@ func TestFleetGateway(t *testing.T) { defer cancel() log, _ := logger.New("tst") - stateStore, err := newStateStore(log, storage.NewDiskStore(info.AgentStateStoreFile())) + stateStore, err := store.NewStateStore(log, storage.NewDiskStore(paths.AgentStateStoreFile())) require.NoError(t, err) gateway, err := newFleetGatewayWithScheduler( @@ -339,7 +341,7 @@ func TestFleetGateway(t *testing.T) { ctx, cancel := 
context.WithCancel(context.Background()) log, _ := logger.New("tst") - stateStore, err := newStateStore(log, storage.NewDiskStore(info.AgentStateStoreFile())) + stateStore, err := store.NewStateStore(log, storage.NewDiskStore(paths.AgentStateStoreFile())) require.NoError(t, err) gateway, err := newFleetGatewayWithScheduler( @@ -487,7 +489,7 @@ func TestRetriesOnFailures(t *testing.T) { } func getReporter(info agentInfo, log *logger.Logger, t *testing.T) *fleetreporter.Reporter { - fleetR, err := fleetreporter.NewReporter(info, log, fleetreporter.DefaultConfig()) + fleetR, err := fleetreporter.NewReporter(info, log, fleetreporterConfig.DefaultConfig()) if err != nil { t.Fatal(errors.Wrap(err, "fail to create reporters")) } diff --git a/x-pack/elastic-agent/pkg/agent/application/handler_action_unenroll.go b/x-pack/elastic-agent/pkg/agent/application/handler_action_unenroll.go index 83dca329342e..a0cec2753ee9 100644 --- a/x-pack/elastic-agent/pkg/agent/application/handler_action_unenroll.go +++ b/x-pack/elastic-agent/pkg/agent/application/handler_action_unenroll.go @@ -20,7 +20,7 @@ type handlerUnenroll struct { emitter emitterFunc dispatcher programsDispatcher closers []context.CancelFunc - stateStore *stateStore + stateStore stateStore } func (h *handlerUnenroll) Handle(ctx context.Context, a action, acker fleetAcker) error { diff --git a/x-pack/elastic-agent/pkg/agent/application/info/agent_id.go b/x-pack/elastic-agent/pkg/agent/application/info/agent_id.go index f189c43d01e5..6f067f2b3994 100644 --- a/x-pack/elastic-agent/pkg/agent/application/info/agent_id.go +++ b/x-pack/elastic-agent/pkg/agent/application/info/agent_id.go @@ -8,7 +8,6 @@ import ( "bytes" "fmt" "io" - "path/filepath" "github.com/gofrs/uuid" "gopkg.in/yaml.v2" @@ -20,14 +19,7 @@ import ( ) // defaultAgentConfigFile is a name of file used to store agent information -const defaultAgentCapabilitiesFile = "capabilities.yml" -const defaultAgentConfigFile = "fleet.yml" const agentInfoKey = "agent" - 
-// defaultAgentActionStoreFile is the file that will contains the action that can be replayed after restart. -const defaultAgentActionStoreFile = "action_store.yml" -const defaultAgentStateStoreFile = "state.yml" - const defaultLogLevel = "info" type persistentAgentInfo struct { @@ -40,26 +32,6 @@ type ioStore interface { Load() (io.ReadCloser, error) } -// AgentConfigFile is a name of file used to store agent information -func AgentConfigFile() string { - return filepath.Join(paths.Config(), defaultAgentConfigFile) -} - -// AgentCapabilitiesPath is a name of file used to store agent capabilities -func AgentCapabilitiesPath() string { - return filepath.Join(paths.Config(), defaultAgentCapabilitiesFile) -} - -// AgentActionStoreFile is the file that contains the action that can be replayed after restart. -func AgentActionStoreFile() string { - return filepath.Join(paths.Home(), defaultAgentActionStoreFile) -} - -// AgentStateStoreFile is the file that contains the persisted state of the agent including the action that can be replayed after restart. -func AgentStateStoreFile() string { - return filepath.Join(paths.Home(), defaultAgentStateStoreFile) -} - // updateLogLevel updates log level and persists it to disk. 
func updateLogLevel(level string) error { ai, err := loadAgentInfo(false, defaultLogLevel) @@ -72,7 +44,7 @@ func updateLogLevel(level string) error { return nil } - agentConfigFile := AgentConfigFile() + agentConfigFile := paths.AgentConfigFile() s := storage.NewDiskStore(agentConfigFile) ai.LogLevel = level @@ -89,7 +61,7 @@ func generateAgentID() (string, error) { } func loadAgentInfo(forceUpdate bool, logLevel string) (*persistentAgentInfo, error) { - agentConfigFile := AgentConfigFile() + agentConfigFile := paths.AgentConfigFile() s := storage.NewDiskStore(agentConfigFile) agentinfo, err := getInfoFromStore(s, logLevel) @@ -114,7 +86,7 @@ func loadAgentInfo(forceUpdate bool, logLevel string) (*persistentAgentInfo, err } func getInfoFromStore(s ioStore, logLevel string) (*persistentAgentInfo, error) { - agentConfigFile := AgentConfigFile() + agentConfigFile := paths.AgentConfigFile() reader, err := s.Load() if err != nil { return nil, err @@ -159,7 +131,7 @@ func getInfoFromStore(s ioStore, logLevel string) (*persistentAgentInfo, error) } func updateAgentInfo(s ioStore, agentInfo *persistentAgentInfo) error { - agentConfigFile := AgentConfigFile() + agentConfigFile := paths.AgentConfigFile() reader, err := s.Load() if err != nil { return err diff --git a/x-pack/elastic-agent/pkg/agent/application/info/agent_metadata.go b/x-pack/elastic-agent/pkg/agent/application/info/agent_metadata.go index af35372ac2e9..ccddf448149b 100644 --- a/x-pack/elastic-agent/pkg/agent/application/info/agent_metadata.go +++ b/x-pack/elastic-agent/pkg/agent/application/info/agent_metadata.go @@ -10,7 +10,6 @@ import ( "runtime" "strings" - "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/install" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/release" "github.com/elastic/go-sysinfo" "github.com/elastic/go-sysinfo/types" @@ -142,7 +141,7 @@ func (i *AgentInfo) ECSMetadata() (*ECSMeta, error) { BuildOriginal: release.Info().String(), // only upgradeable if running from 
Agent installer and running under the // control of the system supervisor (or built specifically with upgrading enabled) - Upgradeable: release.Upgradeable() || (install.RunningInstalled() && install.RunningUnderSupervisor()), + Upgradeable: release.Upgradeable() || (RunningInstalled() && RunningUnderSupervisor()), LogLevel: i.logLevel, }, }, diff --git a/x-pack/elastic-agent/pkg/agent/application/info/state.go b/x-pack/elastic-agent/pkg/agent/application/info/state.go new file mode 100644 index 000000000000..6df317aa61f5 --- /dev/null +++ b/x-pack/elastic-agent/pkg/agent/application/info/state.go @@ -0,0 +1,40 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package info + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/paths" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/release" +) + +// RunningInstalled returns true when executing Agent is the installed Agent. +// +// This verifies the running executable path based on hard-coded paths +// for each platform type. +func RunningInstalled() bool { + expected := filepath.Join(paths.InstallPath, paths.BinaryName) + execPath, _ := os.Executable() + execPath, _ = filepath.Abs(execPath) + execName := filepath.Base(execPath) + execDir := filepath.Dir(execPath) + if IsInsideData(execDir) { + // executable path is being reported as being down inside of data path + // move up two directories to perform the comparison + execDir = filepath.Dir(filepath.Dir(execDir)) + execPath = filepath.Join(execDir, execName) + } + return paths.ArePathsEqual(expected, execPath) +} + +// IsInsideData returns true when the exePath is inside of the current Agent's data path.
+func IsInsideData(exePath string) bool { + expectedPath := filepath.Join("data", fmt.Sprintf("elastic-agent-%s", release.ShortCommit())) + return strings.HasSuffix(exePath, expectedPath) +} diff --git a/x-pack/elastic-agent/pkg/agent/install/svc_unix.go b/x-pack/elastic-agent/pkg/agent/application/info/svc_unix.go similarity index 96% rename from x-pack/elastic-agent/pkg/agent/install/svc_unix.go rename to x-pack/elastic-agent/pkg/agent/application/info/svc_unix.go index c7acb998489f..181bda084ff5 100644 --- a/x-pack/elastic-agent/pkg/agent/install/svc_unix.go +++ b/x-pack/elastic-agent/pkg/agent/application/info/svc_unix.go @@ -4,7 +4,7 @@ // +build !windows -package install +package info import "os" diff --git a/x-pack/elastic-agent/pkg/agent/install/svc_windows.go b/x-pack/elastic-agent/pkg/agent/application/info/svc_windows.go similarity index 98% rename from x-pack/elastic-agent/pkg/agent/install/svc_windows.go rename to x-pack/elastic-agent/pkg/agent/application/info/svc_windows.go index a60aadb54944..67a2e112a818 100644 --- a/x-pack/elastic-agent/pkg/agent/install/svc_windows.go +++ b/x-pack/elastic-agent/pkg/agent/application/info/svc_windows.go @@ -4,7 +4,7 @@ // +build windows -package install +package info import ( "golang.org/x/sys/windows" diff --git a/x-pack/elastic-agent/pkg/agent/application/inspect_config_cmd.go b/x-pack/elastic-agent/pkg/agent/application/inspect_config_cmd.go index 5655aad4d3e2..bae9f82e7f60 100644 --- a/x-pack/elastic-agent/pkg/agent/application/inspect_config_cmd.go +++ b/x-pack/elastic-agent/pkg/agent/application/inspect_config_cmd.go @@ -9,12 +9,13 @@ import ( yaml "gopkg.in/yaml.v2" - "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/info" - "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/configuration" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/paths" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/errors" 
"github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/storage" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/storage/store" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/capabilities" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/config" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/config/operations" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/status" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/fleetapi" ) @@ -38,28 +39,12 @@ func (c *InspectConfigCmd) Execute() error { } func (c *InspectConfigCmd) inspectConfig() error { - rawConfig, err := loadConfig(c.cfgPath) + fullCfg, err := operations.LoadFullAgentConfig(c.cfgPath) if err != nil { return err } - cfg, err := configuration.NewFromConfig(rawConfig) - if err != nil { - return err - } - - if IsStandalone(cfg.Fleet) { - return printConfig(rawConfig) - } - - fleetConfig, err := loadFleetConfig(rawConfig) - if err != nil { - return err - } else if fleetConfig == nil { - return fmt.Errorf("no fleet config retrieved yet") - } - - return printMapStringConfig(fleetConfig) + return printConfig(fullCfg) } func loadConfig(configPath string) (*config.Config, error) { @@ -68,7 +53,7 @@ func loadConfig(configPath string) (*config.Config, error) { return nil, err } - path := info.AgentConfigFile() + path := paths.AgentConfigFile() store := storage.NewDiskStore(path) reader, err := store.Load() @@ -102,7 +87,7 @@ func loadFleetConfig(cfg *config.Config) (map[string]interface{}, error) { return nil, err } - stateStore, err := newStateStoreWithMigration(log, info.AgentActionStoreFile(), info.AgentStateStoreFile()) + stateStore, err := store.NewStateStoreWithMigration(log, paths.AgentActionStoreFile(), paths.AgentStateStoreFile()) if err != nil { return nil, err } @@ -124,7 +109,7 @@ func printMapStringConfig(mapStr map[string]interface{}) error { if err != nil { return err } - caps, err := capabilities.Load(info.AgentCapabilitiesPath(), l, 
status.NewController(l)) + caps, err := capabilities.Load(paths.AgentCapabilitiesPath(), l, status.NewController(l)) if err != nil { return err } diff --git a/x-pack/elastic-agent/pkg/agent/application/inspect_output_cmd.go b/x-pack/elastic-agent/pkg/agent/application/inspect_output_cmd.go index 2ec9280dfa0d..6303c23c2eb5 100644 --- a/x-pack/elastic-agent/pkg/agent/application/inspect_output_cmd.go +++ b/x-pack/elastic-agent/pkg/agent/application/inspect_output_cmd.go @@ -13,6 +13,7 @@ import ( "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/filters" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/info" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/paths" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/configuration" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/errors" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/program" @@ -71,7 +72,7 @@ func (c *InspectOutputCmd) inspectOutputs(agentInfo *info.AgentInfo) error { return err } - if IsStandalone(cfg.Fleet) { + if configuration.IsStandalone(cfg.Fleet) { return listOutputsFromConfig(l, agentInfo, rawConfig, true) } @@ -124,7 +125,7 @@ func (c *InspectOutputCmd) inspectOutput(agentInfo *info.AgentInfo) error { return err } - if IsStandalone(cfg.Fleet) { + if configuration.IsStandalone(cfg.Fleet) { return printOutputFromConfig(l, agentInfo, c.output, c.program, rawConfig, true) } @@ -209,7 +210,7 @@ func getProgramsFromConfig(log *logger.Logger, agentInfo *info.AgentInfo, cfg *c modifiers.Filters = append(modifiers.Filters, injectFleet(cfg, sysInfo.Info(), agentInfo)) } - caps, err := capabilities.Load(info.AgentCapabilitiesPath(), log, status.NewController(log)) + caps, err := capabilities.Load(paths.AgentCapabilitiesPath(), log, status.NewController(log)) if err != nil { return nil, err } diff --git a/x-pack/elastic-agent/pkg/agent/application/local_mode.go 
b/x-pack/elastic-agent/pkg/agent/application/local_mode.go index 2c1b33f83bae..7a16951efe82 100644 --- a/x-pack/elastic-agent/pkg/agent/application/local_mode.go +++ b/x-pack/elastic-agent/pkg/agent/application/local_mode.go @@ -9,6 +9,7 @@ import ( "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/filters" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/info" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/paths" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/upgrade" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/configrequest" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/configuration" @@ -68,7 +69,7 @@ func newLocal( uc upgraderControl, agentInfo *info.AgentInfo, ) (*Local, error) { - caps, err := capabilities.Load(info.AgentCapabilitiesPath(), log, statusCtrl) + caps, err := capabilities.Load(paths.AgentCapabilitiesPath(), log, statusCtrl) if err != nil { return nil, err } diff --git a/x-pack/elastic-agent/pkg/agent/application/managed_mode.go b/x-pack/elastic-agent/pkg/agent/application/managed_mode.go index 242b556594c2..8a4cf41c1a78 100644 --- a/x-pack/elastic-agent/pkg/agent/application/managed_mode.go +++ b/x-pack/elastic-agent/pkg/agent/application/managed_mode.go @@ -15,11 +15,13 @@ import ( "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/filters" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/info" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/paths" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/upgrade" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/configuration" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/errors" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/operation" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/storage" + 
"github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/storage/store" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/capabilities" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/composable" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/config" @@ -55,21 +57,21 @@ type Managed struct { gateway FleetGateway router *router srv *server.Server - stateStore *stateStore + stateStore stateStore upgrader *upgrade.Upgrader } func newManaged( ctx context.Context, log *logger.Logger, - store storage.Store, + storeSaver storage.Store, cfg *configuration.Configuration, rawConfig *config.Config, reexec reexecManager, statusCtrl status.Controller, agentInfo *info.AgentInfo, ) (*Managed, error) { - caps, err := capabilities.Load(info.AgentCapabilitiesPath(), log, statusCtrl) + caps, err := capabilities.Load(paths.AgentCapabilitiesPath(), log, statusCtrl) if err != nil { return nil, err } @@ -153,12 +155,12 @@ func newManaged( batchedAcker := newLazyAcker(acker, log) // Create the state store that will persist the last good policy change on disk. 
- stateStore, err := newStateStoreWithMigration(log, info.AgentActionStoreFile(), info.AgentStateStoreFile()) + stateStore, err := store.NewStateStoreWithMigration(log, paths.AgentActionStoreFile(), paths.AgentStateStoreFile()) if err != nil { - return nil, errors.New(err, fmt.Sprintf("fail to read action store '%s'", info.AgentActionStoreFile())) + return nil, errors.New(err, fmt.Sprintf("fail to read action store '%s'", paths.AgentActionStoreFile())) } managedApplication.stateStore = stateStore - actionAcker := newStateStoreActionAcker(batchedAcker, stateStore) + actionAcker := store.NewStateStoreActionAcker(batchedAcker, stateStore) actionDispatcher, err := newActionDispatcher(managedApplication.bgContext, log, &handlerDefault{log: log}) if err != nil { @@ -180,7 +182,7 @@ func newManaged( emitter: emit, agentInfo: agentInfo, config: cfg, - store: store, + store: storeSaver, } if cfg.Fleet.Server == nil { // setters only set when not running a local Fleet Server @@ -237,7 +239,7 @@ func newManaged( // TODO(ph) We will need an improvement on fleet, if there is an error while dispatching a // persisted action on disk we should be able to ask Fleet to get the latest configuration. // But at the moment this is not possible because the policy change was acked. - if err := replayActions(log, actionDispatcher, actionAcker, actions...); err != nil { + if err := store.ReplayActions(log, actionDispatcher, actionAcker, actions...); err != nil { log.Errorf("could not recover state, error %+v, skipping...", err) } } diff --git a/x-pack/elastic-agent/pkg/agent/application/paths/common.go b/x-pack/elastic-agent/pkg/agent/application/paths/common.go new file mode 100644 index 000000000000..fca3dbd88288 --- /dev/null +++ b/x-pack/elastic-agent/pkg/agent/application/paths/common.go @@ -0,0 +1,108 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package paths + +import ( + "flag" + "fmt" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/release" +) + +const ( + tempSubdir = "tmp" +) + +var ( + topPath string + configPath string + logsPath string + tmpCreator sync.Once +) + +func init() { + topPath = initialTop() + configPath = topPath + logsPath = topPath + + fs := flag.CommandLine + fs.StringVar(&topPath, "path.home", topPath, "Agent root path") + fs.StringVar(&configPath, "path.config", configPath, "Config path is the directory Agent looks for its config file") + fs.StringVar(&logsPath, "path.logs", logsPath, "Logs path contains Agent log output") +} + +// Top returns the top directory for Elastic Agent, all the versioned +// home directories live under this top-level/data/elastic-agent-${hash} +func Top() string { + return topPath +} + +// TempDir returns agent temp dir located within data dir. +func TempDir() string { + tmpDir := filepath.Join(Data(), tempSubdir) + tmpCreator.Do(func() { + // create tempdir as it probably doesn't exist + os.MkdirAll(tmpDir, 0750) + }) + return tmpDir +} + +// Home returns a directory where binary lives +func Home() string { + return versionedHome(topPath) +} + +// Config returns a directory where configuration file lives +func Config() string { + return configPath +} + +// Data returns the data directory for Agent +func Data() string { + return filepath.Join(Top(), "data") +} + +// Logs returns the log directory for Agent +func Logs() string { + return logsPath +} + +// initialTop returns the initial top-level path for the binary +// +// When nested in top-level/data/elastic-agent-${hash}/ the result is top-level/.
+func initialTop() string { + exePath := retrieveExecutablePath() + if insideData(exePath) { + return filepath.Dir(filepath.Dir(exePath)) + } + return exePath +} + +// retrieveExecutablePath returns the directory of the executing binary, even if the started binary was a symlink +func retrieveExecutablePath() string { + execPath, err := os.Executable() + if err != nil { + panic(err) + } + evalPath, err := filepath.EvalSymlinks(execPath) + if err != nil { + panic(err) + } + return filepath.Dir(evalPath) +} + +// insideData returns true when the exePath is inside of the current Agent's data path. +func insideData(exePath string) bool { + expectedPath := filepath.Join("data", fmt.Sprintf("elastic-agent-%s", release.ShortCommit())) + return strings.HasSuffix(exePath, expectedPath) +} + +func versionedHome(base string) string { + return filepath.Join(base, "data", fmt.Sprintf("elastic-agent-%s", release.ShortCommit())) +} diff --git a/x-pack/elastic-agent/pkg/agent/application/paths/files.go b/x-pack/elastic-agent/pkg/agent/application/paths/files.go new file mode 100644 index 000000000000..baae67d5621b --- /dev/null +++ b/x-pack/elastic-agent/pkg/agent/application/paths/files.go @@ -0,0 +1,37 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package paths + +import ( + "path/filepath" +) + +// defaultAgentConfigFile is a name of file used to store agent information +const defaultAgentCapabilitiesFile = "capabilities.yml" +const defaultAgentConfigFile = "fleet.yml" + +// defaultAgentActionStoreFile is the file that will contain the action that can be replayed after restart.
+const defaultAgentActionStoreFile = "action_store.yml" +const defaultAgentStateStoreFile = "state.yml" + +// AgentConfigFile is a name of file used to store agent information +func AgentConfigFile() string { + return filepath.Join(Config(), defaultAgentConfigFile) +} + +// AgentCapabilitiesPath is a name of file used to store agent capabilities +func AgentCapabilitiesPath() string { + return filepath.Join(Config(), defaultAgentCapabilitiesFile) +} + +// AgentActionStoreFile is the file that contains the action that can be replayed after restart. +func AgentActionStoreFile() string { + return filepath.Join(Home(), defaultAgentActionStoreFile) +} + +// AgentStateStoreFile is the file that contains the persisted state of the agent including the action that can be replayed after restart. +func AgentStateStoreFile() string { + return filepath.Join(Home(), defaultAgentStateStoreFile) +} diff --git a/x-pack/elastic-agent/pkg/agent/application/paths/paths.go b/x-pack/elastic-agent/pkg/agent/application/paths/paths.go index fca3dbd88288..0ee4f459c00f 100644 --- a/x-pack/elastic-agent/pkg/agent/application/paths/paths.go +++ b/x-pack/elastic-agent/pkg/agent/application/paths/paths.go @@ -2,107 +2,34 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. 
-package paths - -import ( - "flag" - "fmt" - "os" - "path/filepath" - "strings" - "sync" +// +build !darwin +// +build !windows - "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/release" -) +package paths const ( - tempSubdir = "tmp" -) - -var ( - topPath string - configPath string - logsPath string - tmpCreator sync.Once -) - -func init() { - topPath = initialTop() - configPath = topPath - logsPath = topPath - - fs := flag.CommandLine - fs.StringVar(&topPath, "path.home", topPath, "Agent root path") - fs.StringVar(&configPath, "path.config", configPath, "Config path is the directory Agent looks for its config file") - fs.StringVar(&logsPath, "path.logs", logsPath, "Logs path contains Agent log output") -} - -// Top returns the top directory for Elastic Agent, all the versioned -// home directories live under this top-level/data/elastic-agent-${hash} -func Top() string { - return topPath -} + // BinaryName is the name of the installed binary. + BinaryName = "elastic-agent" -// TempDir returns agent temp dir located within data dir. -func TempDir() string { - tmpDir := filepath.Join(Data(), tempSubdir) - tmpCreator.Do(func() { - // create tempdir as it probably don't exists - os.MkdirAll(tmpDir, 0750) - }) - return tmpDir -} - -// Home returns a directory where binary lives -func Home() string { - return versionedHome(topPath) -} - -// Config returns a directory where configuration file lives -func Config() string { - return configPath -} - -// Data returns the data directory for Agent -func Data() string { - return filepath.Join(Top(), "data") -} + // InstallPath is the installation path using for install command. + InstallPath = "/opt/Elastic/Agent" -// Logs returns a the log directory for Agent -func Logs() string { - return logsPath -} + // SocketPath is the socket path used when installed. 
+ SocketPath = "unix:///run/elastic-agent.sock" -// initialTop returns the initial top-level path for the binary -// -// When nested in top-level/data/elastic-agent-${hash}/ the result is top-level/. -func initialTop() string { - exePath := retrieveExecutablePath() - if insideData(exePath) { - return filepath.Dir(filepath.Dir(exePath)) - } - return exePath -} + // ServiceName is the service name when installed. + ServiceName = "elastic-agent" -// retrieveExecutablePath returns the executing binary, even if the started binary was a symlink -func retrieveExecutablePath() string { - execPath, err := os.Executable() - if err != nil { - panic(err) - } - evalPath, err := filepath.EvalSymlinks(execPath) - if err != nil { - panic(err) - } - return filepath.Dir(evalPath) -} + // ShellWrapperPath is the path to the installed shell wrapper. + ShellWrapperPath = "/usr/bin/elastic-agent" -// insideData returns true when the exePath is inside of the current Agents data path. -func insideData(exePath string) bool { - expectedPath := filepath.Join("data", fmt.Sprintf("elastic-agent-%s", release.ShortCommit())) - return strings.HasSuffix(exePath, expectedPath) -} + // ShellWrapper is the wrapper that is installed. + ShellWrapper = `#!/bin/sh +exec /opt/Elastic/Agent/elastic-agent $@ +` +) -func versionedHome(base string) string { - return filepath.Join(base, "data", fmt.Sprintf("elastic-agent-%s", release.ShortCommit())) +// ArePathsEqual determines whether paths are equal taking case sensitivity of os into account. 
+func ArePathsEqual(expected, actual string) bool { + return expected == actual } diff --git a/x-pack/elastic-agent/pkg/agent/install/paths_darwin.go b/x-pack/elastic-agent/pkg/agent/application/paths/paths_darwin.go similarity index 98% rename from x-pack/elastic-agent/pkg/agent/install/paths_darwin.go rename to x-pack/elastic-agent/pkg/agent/application/paths/paths_darwin.go index 3205116951cd..9a186a25e812 100644 --- a/x-pack/elastic-agent/pkg/agent/install/paths_darwin.go +++ b/x-pack/elastic-agent/pkg/agent/application/paths/paths_darwin.go @@ -4,7 +4,7 @@ // +build darwin -package install +package paths const ( // BinaryName is the name of the installed binary. diff --git a/x-pack/elastic-agent/pkg/agent/install/paths_test.go b/x-pack/elastic-agent/pkg/agent/application/paths/paths_test.go similarity index 98% rename from x-pack/elastic-agent/pkg/agent/install/paths_test.go rename to x-pack/elastic-agent/pkg/agent/application/paths/paths_test.go index 68bfcc6b69bf..2fd7584eb13c 100644 --- a/x-pack/elastic-agent/pkg/agent/install/paths_test.go +++ b/x-pack/elastic-agent/pkg/agent/application/paths/paths_test.go @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. 
-package install +package paths import ( "runtime" diff --git a/x-pack/elastic-agent/pkg/agent/install/paths_windows.go b/x-pack/elastic-agent/pkg/agent/application/paths/paths_windows.go similarity index 98% rename from x-pack/elastic-agent/pkg/agent/install/paths_windows.go rename to x-pack/elastic-agent/pkg/agent/application/paths/paths_windows.go index d068fc0c40ad..86020c270441 100644 --- a/x-pack/elastic-agent/pkg/agent/install/paths_windows.go +++ b/x-pack/elastic-agent/pkg/agent/application/paths/paths_windows.go @@ -4,7 +4,7 @@ // +build windows -package install +package paths import "strings" diff --git a/x-pack/elastic-agent/pkg/agent/application/upgrade/service.go b/x-pack/elastic-agent/pkg/agent/application/upgrade/service.go index 097fabb68556..901a7884a8eb 100644 --- a/x-pack/elastic-agent/pkg/agent/application/upgrade/service.go +++ b/x-pack/elastic-agent/pkg/agent/application/upgrade/service.go @@ -24,7 +24,6 @@ import ( "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/paths" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/errors" - "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/install" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/logger" ) @@ -90,7 +89,7 @@ func (p *upstartPidProvider) PID(ctx context.Context) (int, error) { // find line pidLine := strings.TrimSpace(string(out)) if pidLine == "" { - return 0, errors.New(fmt.Sprintf("service process not found for service '%v'", install.ServiceName)) + return 0, errors.New(fmt.Sprintf("service process not found for service '%v'", paths.ServiceName)) } re := regexp.MustCompile(agentName + ` start/running, process ([0-9]+)`) @@ -127,17 +126,17 @@ func (p *sysvPidProvider) PID(ctx context.Context) (int, error) { // find line statusLine := strings.TrimSpace(string(out)) if statusLine == "" { - return 0, errors.New(fmt.Sprintf("service process not found for service '%v'", install.ServiceName)) + return 0, errors.New(fmt.Sprintf("service 
process not found for service '%v'", paths.ServiceName)) } // sysv does not report pid, let's do best effort if !strings.HasPrefix(statusLine, "Running") { - return 0, errors.New(fmt.Sprintf("'%v' is not running", install.ServiceName)) + return 0, errors.New(fmt.Sprintf("'%v' is not running", paths.ServiceName)) } - pidofLine, err := exec.Command("pidof", filepath.Join(install.InstallPath, install.BinaryName)).Output() + pidofLine, err := exec.Command("pidof", filepath.Join(paths.InstallPath, paths.BinaryName)).Output() if err != nil { - return 0, errors.New(fmt.Sprintf("PID not found for'%v': %v", install.ServiceName, err)) + return 0, errors.New(fmt.Sprintf("PID not found for'%v': %v", paths.ServiceName, err)) } pid, err := strconv.Atoi(strings.TrimSpace(string(pidofLine))) @@ -171,7 +170,7 @@ func (p *dbusPidProvider) Close() { } func (p *dbusPidProvider) PID(ctx context.Context) (int, error) { - sn := install.ServiceName + sn := paths.ServiceName if !strings.HasSuffix(sn, ".service") { sn += ".service" } diff --git a/x-pack/elastic-agent/pkg/agent/application/upgrade/service_darwin.go b/x-pack/elastic-agent/pkg/agent/application/upgrade/service_darwin.go index 1d8d597be129..17bb28bd176b 100644 --- a/x-pack/elastic-agent/pkg/agent/application/upgrade/service_darwin.go +++ b/x-pack/elastic-agent/pkg/agent/application/upgrade/service_darwin.go @@ -24,7 +24,6 @@ import ( "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/paths" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/errors" - "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/install" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/logger" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/release" ) @@ -50,13 +49,13 @@ func (p *darwinPidProvider) Close() {} func (p *darwinPidProvider) PID(ctx context.Context) (int, error) { piders := []func(context.Context) (int, error){ - p.piderFromCmd(ctx, "launchctl", "list", install.ServiceName), + 
p.piderFromCmd(ctx, "launchctl", "list", paths.ServiceName), } // if release is specifically built to be upgradeable (using DEV flag) // we dont require to run as a service and will need sudo fallback if release.Upgradeable() { - piders = append(piders, p.piderFromCmd(ctx, "sudo", "launchctl", "list", install.ServiceName)) + piders = append(piders, p.piderFromCmd(ctx, "sudo", "launchctl", "list", paths.ServiceName)) } var pidErrors error @@ -96,7 +95,7 @@ func (p *darwinPidProvider) piderFromCmd(ctx context.Context, name string, args } if pidLine == "" { - return 0, errors.New(fmt.Sprintf("service process not found for service '%v'", install.ServiceName)) + return 0, errors.New(fmt.Sprintf("service process not found for service '%v'", paths.ServiceName)) } re := regexp.MustCompile(`"PID" = ([0-9]+);`) diff --git a/x-pack/elastic-agent/pkg/agent/application/upgrade/service_windows.go b/x-pack/elastic-agent/pkg/agent/application/upgrade/service_windows.go index 555ea098f46a..68c69979c788 100644 --- a/x-pack/elastic-agent/pkg/agent/application/upgrade/service_windows.go +++ b/x-pack/elastic-agent/pkg/agent/application/upgrade/service_windows.go @@ -16,7 +16,6 @@ import ( "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/paths" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/errors" - "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/install" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/logger" ) @@ -49,7 +48,7 @@ func (p *pidProvider) Close() {} func (p *pidProvider) Name() string { return "Windows Service Manager" } func (p *pidProvider) PID(ctx context.Context) (int, error) { - svc, err := p.winManager.OpenService(install.ServiceName) + svc, err := p.winManager.OpenService(paths.ServiceName) if err != nil { return 0, errors.New("failed to read windows service", err) } diff --git a/x-pack/elastic-agent/pkg/agent/application/upgrade/upgrade.go b/x-pack/elastic-agent/pkg/agent/application/upgrade/upgrade.go 
index e23764b90564..424cac6c9ffd 100644 --- a/x-pack/elastic-agent/pkg/agent/application/upgrade/upgrade.go +++ b/x-pack/elastic-agent/pkg/agent/application/upgrade/upgrade.go @@ -15,7 +15,6 @@ import ( "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/info" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/paths" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/errors" - "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/install" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/program" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/artifact" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/capabilities" @@ -79,7 +78,7 @@ type stateReporter interface { func IsUpgradeable() bool { // only upgradeable if running from Agent installer and running under the // control of the system supervisor (or built specifically with upgrading enabled) - return release.Upgradeable() || (install.RunningInstalled() && install.RunningUnderSupervisor()) + return release.Upgradeable() || (info.RunningInstalled() && info.RunningUnderSupervisor()) } // NewUpgrader creates an upgrader which is capable of performing upgrade operation @@ -256,7 +255,7 @@ func rollbackInstall(ctx context.Context, hash string) { } func copyActionStore(newHash string) error { - storePaths := []string{info.AgentActionStoreFile(), info.AgentStateStoreFile()} + storePaths := []string{paths.AgentActionStoreFile(), paths.AgentStateStoreFile()} for _, currentActionStorePath := range storePaths { newHome := filepath.Join(filepath.Dir(paths.Home()), fmt.Sprintf("%s-%s", agentName, newHash)) diff --git a/x-pack/elastic-agent/pkg/agent/cmd/container.go b/x-pack/elastic-agent/pkg/agent/cmd/container.go index b35442df504a..89a6239d59b3 100644 --- a/x-pack/elastic-agent/pkg/agent/cmd/container.go +++ b/x-pack/elastic-agent/pkg/agent/cmd/container.go @@ -19,7 +19,7 @@ import ( "github.com/elastic/beats/v7/libbeat/kibana" - 
"github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/info" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/paths" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/errors" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/cli" ) @@ -115,7 +115,7 @@ func containerCmd(streams *cli.IOStreams, cmd *cobra.Command, flags *globalFlags return err } - _, err = os.Stat(info.AgentConfigFile()) + _, err = os.Stat(paths.AgentConfigFile()) if !os.IsNotExist(err) && !envBool("FLEET_FORCE") { // already enrolled, just run the standard run return run(flags, streams) diff --git a/x-pack/elastic-agent/pkg/agent/cmd/install.go b/x-pack/elastic-agent/pkg/agent/cmd/install.go index 979966cbf13b..bcd2e8c223fe 100644 --- a/x-pack/elastic-agent/pkg/agent/cmd/install.go +++ b/x-pack/elastic-agent/pkg/agent/cmd/install.go @@ -54,7 +54,7 @@ func installCmd(streams *cli.IOStreams, cmd *cobra.Command, flags *globalFlags, status, reason := install.Status() force, _ := cmd.Flags().GetBool("force") if status == install.Installed && !force { - return fmt.Errorf("already installed at: %s", install.InstallPath) + return fmt.Errorf("already installed at: %s", paths.InstallPath) } // check the lock to ensure that elastic-agent is not already running in this directory @@ -72,7 +72,7 @@ func installCmd(streams *cli.IOStreams, cmd *cobra.Command, flags *globalFlags, if status == install.Broken { if !force { fmt.Fprintf(streams.Out, "Elastic Agent is installed but currently broken: %s\n", reason) - confirm, err := c.Confirm(fmt.Sprintf("Continuing will re-install Elastic Agent over the current installation at %s. Do you want to continue?", install.InstallPath), true) + confirm, err := c.Confirm(fmt.Sprintf("Continuing will re-install Elastic Agent over the current installation at %s. 
Do you want to continue?", paths.InstallPath), true) if err != nil { return fmt.Errorf("problem reading prompt response") } @@ -82,7 +82,7 @@ func installCmd(streams *cli.IOStreams, cmd *cobra.Command, flags *globalFlags, } } else { if !force { - confirm, err := c.Confirm(fmt.Sprintf("Elastic Agent will be installed at %s and will run as a service. Do you want to continue?", install.InstallPath), true) + confirm, err := c.Confirm(fmt.Sprintf("Elastic Agent will be installed at %s and will run as a service. Do you want to continue?", paths.InstallPath), true) if err != nil { return fmt.Errorf("problem reading prompt response") } @@ -142,15 +142,15 @@ func installCmd(streams *cli.IOStreams, cmd *cobra.Command, flags *globalFlags, } } } - - err = install.Install() + cfgFile := flags.Config() + err = install.Install(cfgFile) if err != nil { return err } defer func() { if err != nil { - install.Uninstall() + install.Uninstall(cfgFile) } }() @@ -179,7 +179,7 @@ func installCmd(streams *cli.IOStreams, cmd *cobra.Command, flags *globalFlags, } err = enrollCmd.Wait() if err != nil { - install.Uninstall() + install.Uninstall(cfgFile) exitErr, ok := err.(*exec.ExitError) if ok { return fmt.Errorf("enroll command failed with exit code: %d", exitErr.ExitCode()) diff --git a/x-pack/elastic-agent/pkg/agent/cmd/run.go b/x-pack/elastic-agent/pkg/agent/cmd/run.go index 8d4157fdadd9..897e0f1a1649 100644 --- a/x-pack/elastic-agent/pkg/agent/cmd/run.go +++ b/x-pack/elastic-agent/pkg/agent/cmd/run.go @@ -206,7 +206,7 @@ func reexecPath() (string, error) { } func getOverwrites(rawConfig *config.Config) error { - path := info.AgentConfigFile() + path := paths.AgentConfigFile() store := storage.NewDiskStore(path) reader, err := store.Load() @@ -239,7 +239,7 @@ func getOverwrites(rawConfig *config.Config) error { } func defaultLogLevel(cfg *configuration.Configuration) string { - if application.IsStandalone(cfg.Fleet) { + if configuration.IsStandalone(cfg.Fleet) { // for standalone always 
take the one from config and don't override return "" } diff --git a/x-pack/elastic-agent/pkg/agent/cmd/uninstall.go b/x-pack/elastic-agent/pkg/agent/cmd/uninstall.go index fbc97ea94bd0..f21f157a2d36 100644 --- a/x-pack/elastic-agent/pkg/agent/cmd/uninstall.go +++ b/x-pack/elastic-agent/pkg/agent/cmd/uninstall.go @@ -11,6 +11,8 @@ import ( "github.com/spf13/cobra" c "github.com/elastic/beats/v7/libbeat/common/cli" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/info" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/paths" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/install" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/cli" ) @@ -48,7 +50,7 @@ func uninstallCmd(streams *cli.IOStreams, cmd *cobra.Command, flags *globalFlags if status == install.NotInstalled { return fmt.Errorf("not installed") } - if status == install.Installed && !install.RunningInstalled() { + if status == install.Installed && !info.RunningInstalled() { return fmt.Errorf("can only be uninstall by executing the installed Elastic Agent at: %s", install.ExecutablePath()) } @@ -56,7 +58,7 @@ func uninstallCmd(streams *cli.IOStreams, cmd *cobra.Command, flags *globalFlags if status == install.Broken { if !force { fmt.Fprintf(streams.Out, "Elastic Agent is installed but currently broken: %s\n", reason) - confirm, err := c.Confirm(fmt.Sprintf("Continuing will uninstall the broken Elastic Agent at %s. Do you want to continue?", install.InstallPath), true) + confirm, err := c.Confirm(fmt.Sprintf("Continuing will uninstall the broken Elastic Agent at %s. Do you want to continue?", paths.InstallPath), true) if err != nil { return fmt.Errorf("problem reading prompt response") } @@ -66,7 +68,7 @@ func uninstallCmd(streams *cli.IOStreams, cmd *cobra.Command, flags *globalFlags } } else { if !force { - confirm, err := c.Confirm(fmt.Sprintf("Elastic Agent will be uninstalled from your system at %s. 
Do you want to continue?", install.InstallPath), true) + confirm, err := c.Confirm(fmt.Sprintf("Elastic Agent will be uninstalled from your system at %s. Do you want to continue?", paths.InstallPath), true) if err != nil { return fmt.Errorf("problem reading prompt response") } @@ -76,12 +78,12 @@ func uninstallCmd(streams *cli.IOStreams, cmd *cobra.Command, flags *globalFlags } } - err = install.Uninstall() + err = install.Uninstall(flags.Config()) if err != nil { return err } fmt.Fprintf(streams.Out, "Elastic Agent has been uninstalled.\n") - install.RemovePath(install.InstallPath) + install.RemovePath(paths.InstallPath) return nil } diff --git a/x-pack/elastic-agent/pkg/agent/configuration/fleet.go b/x-pack/elastic-agent/pkg/agent/configuration/fleet.go index af60651a3626..60a6b7ced22e 100644 --- a/x-pack/elastic-agent/pkg/agent/configuration/fleet.go +++ b/x-pack/elastic-agent/pkg/agent/configuration/fleet.go @@ -7,18 +7,18 @@ package configuration import ( "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/errors" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/kibana" - fleetreporter "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/reporter/fleet" + fleetreporterConfig "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/reporter/fleet/config" ) // FleetAgentConfig is the internal configuration of the agent after the enrollment is done, // this configuration is not exposed in anyway in the elastic-agent.yml and is only internal configuration. 
type FleetAgentConfig struct { - Enabled bool `config:"enabled" yaml:"enabled"` - AccessAPIKey string `config:"access_api_key" yaml:"access_api_key"` - Kibana *kibana.Config `config:"kibana" yaml:"kibana"` - Reporting *fleetreporter.Config `config:"reporting" yaml:"reporting"` - Info *AgentInfo `config:"agent" yaml:"agent"` - Server *FleetServerConfig `config:"server" yaml:"server,omitempty"` + Enabled bool `config:"enabled" yaml:"enabled"` + AccessAPIKey string `config:"access_api_key" yaml:"access_api_key"` + Kibana *kibana.Config `config:"kibana" yaml:"kibana"` + Reporting *fleetreporterConfig.Config `config:"reporting" yaml:"reporting"` + Info *AgentInfo `config:"agent" yaml:"agent"` + Server *FleetServerConfig `config:"server" yaml:"server,omitempty"` } // Valid validates the required fields for accessing the API. @@ -46,7 +46,7 @@ func DefaultFleetAgentConfig() *FleetAgentConfig { return &FleetAgentConfig{ Enabled: false, Kibana: kibana.DefaultClientConfig(), - Reporting: fleetreporter.DefaultConfig(), + Reporting: fleetreporterConfig.DefaultConfig(), Info: &AgentInfo{}, } } diff --git a/x-pack/elastic-agent/pkg/agent/configuration/info.go b/x-pack/elastic-agent/pkg/agent/configuration/info.go new file mode 100644 index 000000000000..37eebe096b32 --- /dev/null +++ b/x-pack/elastic-agent/pkg/agent/configuration/info.go @@ -0,0 +1,15 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package configuration + +// IsStandalone decides based on missing of fleet.enabled: true or fleet.{access_token,kibana} will place Elastic Agent into standalone mode. +func IsStandalone(cfg *FleetAgentConfig) bool { + return cfg == nil || !cfg.Enabled +} + +// IsFleetServerBootstrap decides if Elastic Agent is started in bootstrap mode. 
+func IsFleetServerBootstrap(cfg *FleetAgentConfig) bool { + return cfg != nil && cfg.Server != nil && cfg.Server.Bootstrap +} diff --git a/x-pack/elastic-agent/pkg/agent/control/addr.go b/x-pack/elastic-agent/pkg/agent/control/addr.go index 31005e8e34d8..116f9d8dd95d 100644 --- a/x-pack/elastic-agent/pkg/agent/control/addr.go +++ b/x-pack/elastic-agent/pkg/agent/control/addr.go @@ -10,15 +10,15 @@ import ( "crypto/sha256" "fmt" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/info" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/paths" - "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/install" ) // Address returns the address to connect to Elastic Agent daemon. func Address() string { // when installed the control address is fixed - if install.RunningInstalled() { - return install.SocketPath + if info.RunningInstalled() { + return paths.SocketPath } // not install, adjust the path based on data path diff --git a/x-pack/elastic-agent/pkg/agent/control/addr_windows.go b/x-pack/elastic-agent/pkg/agent/control/addr_windows.go index cbfcdf2c99ea..49c6e75407b8 100644 --- a/x-pack/elastic-agent/pkg/agent/control/addr_windows.go +++ b/x-pack/elastic-agent/pkg/agent/control/addr_windows.go @@ -10,15 +10,15 @@ import ( "crypto/sha256" "fmt" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/info" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/paths" - "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/install" ) // Address returns the address to connect to Elastic Agent daemon. 
func Address() string { // when installed the control address is fixed - if install.RunningInstalled() { - return install.SocketPath + if info.RunningInstalled() { + return paths.SocketPath } // not install, adjust the path based on data path diff --git a/x-pack/elastic-agent/pkg/agent/install/install.go b/x-pack/elastic-agent/pkg/agent/install/install.go index 3e7df33ccd7f..20491680b9c1 100644 --- a/x-pack/elastic-agent/pkg/agent/install/install.go +++ b/x-pack/elastic-agent/pkg/agent/install/install.go @@ -9,36 +9,36 @@ import ( "io/ioutil" "os" "path/filepath" - "strings" "github.com/otiai10/copy" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/info" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/paths" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/errors" - "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/release" ) // Install installs Elastic Agent persistently on the system including creating and starting its service. 
-func Install() error { +func Install(cfgFile string) error { dir, err := findDirectory() if err != nil { return errors.New(err, "failed to discover the source directory for installation", errors.TypeFilesystem) } // uninstall current installation - err = Uninstall() + err = Uninstall(cfgFile) if err != nil { return err } // ensure parent directory exists, copy source into install path - err = os.MkdirAll(filepath.Dir(InstallPath), 0755) + err = os.MkdirAll(filepath.Dir(paths.InstallPath), 0755) if err != nil { return errors.New( err, - fmt.Sprintf("failed to create installation parent directory (%s)", filepath.Dir(InstallPath)), - errors.M("directory", filepath.Dir(InstallPath))) + fmt.Sprintf("failed to create installation parent directory (%s)", filepath.Dir(paths.InstallPath)), + errors.M("directory", filepath.Dir(paths.InstallPath))) } - err = copy.Copy(dir, InstallPath, copy.Options{ + err = copy.Copy(dir, paths.InstallPath, copy.Options{ OnSymlink: func(_ string) copy.SymlinkAction { return copy.Shallow }, @@ -47,21 +47,21 @@ func Install() error { if err != nil { return errors.New( err, - fmt.Sprintf("failed to copy source directory (%s) to destination (%s)", dir, InstallPath), - errors.M("source", dir), errors.M("destination", InstallPath)) + fmt.Sprintf("failed to copy source directory (%s) to destination (%s)", dir, paths.InstallPath), + errors.M("source", dir), errors.M("destination", paths.InstallPath)) } // place shell wrapper, if present on platform - if ShellWrapperPath != "" { - err = os.MkdirAll(filepath.Dir(ShellWrapperPath), 0755) + if paths.ShellWrapperPath != "" { + err = os.MkdirAll(filepath.Dir(paths.ShellWrapperPath), 0755) if err == nil { - err = ioutil.WriteFile(ShellWrapperPath, []byte(ShellWrapper), 0755) + err = ioutil.WriteFile(paths.ShellWrapperPath, []byte(paths.ShellWrapper), 0755) } if err != nil { return errors.New( err, - fmt.Sprintf("failed to write shell wrapper (%s)", ShellWrapperPath), - errors.M("destination", 
ShellWrapperPath)) + fmt.Sprintf("failed to write shell wrapper (%s)", paths.ShellWrapperPath), + errors.M("destination", paths.ShellWrapperPath)) } } @@ -80,8 +80,8 @@ func Install() error { if err != nil { return errors.New( err, - fmt.Sprintf("failed to install service (%s)", ServiceName), - errors.M("service", ServiceName)) + fmt.Sprintf("failed to install service (%s)", paths.ServiceName), + errors.M("service", paths.ServiceName)) } return nil } @@ -98,8 +98,8 @@ func StartService() error { if err != nil { return errors.New( err, - fmt.Sprintf("failed to start service (%s)", ServiceName), - errors.M("service", ServiceName)) + fmt.Sprintf("failed to start service (%s)", paths.ServiceName), + errors.M("service", paths.ServiceName)) } return nil } @@ -114,8 +114,8 @@ func StopService() error { if err != nil { return errors.New( err, - fmt.Sprintf("failed to stop service (%s)", ServiceName), - errors.M("service", ServiceName)) + fmt.Sprintf("failed to stop service (%s)", paths.ServiceName), + errors.M("service", paths.ServiceName)) } return nil } @@ -133,7 +133,7 @@ func findDirectory() (string, error) { return "", err } sourceDir := filepath.Dir(execPath) - if insideData(sourceDir) { + if info.IsInsideData(sourceDir) { // executable path is being reported as being down inside of data path // move up to directories to perform the copy sourceDir = filepath.Dir(filepath.Dir(sourceDir)) @@ -147,15 +147,9 @@ func findDirectory() (string, error) { // verifyDirectory ensures that the directory includes the executable. func verifyDirectory(dir string) error { - _, err := os.Stat(filepath.Join(dir, BinaryName)) + _, err := os.Stat(filepath.Join(dir, paths.BinaryName)) if os.IsNotExist(err) { - return fmt.Errorf("missing %s", BinaryName) + return fmt.Errorf("missing %s", paths.BinaryName) } return nil } - -// insideData returns true when the exePath is inside of the current Agents data path. 
-func insideData(exePath string) bool { - expectedPath := filepath.Join("data", fmt.Sprintf("elastic-agent-%s", release.ShortCommit())) - return strings.HasSuffix(exePath, expectedPath) -} diff --git a/x-pack/elastic-agent/pkg/agent/install/install_windows.go b/x-pack/elastic-agent/pkg/agent/install/install_windows.go index ec10467ce79a..247e774b36b9 100644 --- a/x-pack/elastic-agent/pkg/agent/install/install_windows.go +++ b/x-pack/elastic-agent/pkg/agent/install/install_windows.go @@ -11,13 +11,14 @@ import ( "os" "path/filepath" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/paths" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/release" ) // postInstall performs post installation for Windows systems. func postInstall() error { // delete the top-level elastic-agent.exe - binary := filepath.Join(InstallPath, BinaryName) + binary := filepath.Join(paths.InstallPath, paths.BinaryName) err := os.Remove(binary) if err != nil { // do not handle does not exist, it should have existed @@ -25,7 +26,7 @@ func postInstall() error { } // create top-level symlink to nested binary - realBinary := filepath.Join(InstallPath, "data", fmt.Sprintf("elastic-agent-%s", release.ShortCommit()), BinaryName) + realBinary := filepath.Join(paths.InstallPath, "data", fmt.Sprintf("elastic-agent-%s", release.ShortCommit()), paths.BinaryName) err = os.Symlink(realBinary, binary) if err != nil { return err diff --git a/x-pack/elastic-agent/pkg/agent/install/installed.go b/x-pack/elastic-agent/pkg/agent/install/installed.go index cc39bc15eac8..18964e0831bc 100644 --- a/x-pack/elastic-agent/pkg/agent/install/installed.go +++ b/x-pack/elastic-agent/pkg/agent/install/installed.go @@ -9,6 +9,8 @@ import ( "path/filepath" "github.com/kardianos/service" + + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/paths" ) // StatusType is the return status types. @@ -25,7 +27,7 @@ const ( // Status returns the installation status of Agent. 
func Status() (StatusType, string) { - expected := filepath.Join(InstallPath, BinaryName) + expected := filepath.Join(paths.InstallPath, paths.BinaryName) status, reason := checkService() _, err := os.Stat(expected) if os.IsNotExist(err) { @@ -42,25 +44,6 @@ func Status() (StatusType, string) { return Installed, "" } -// RunningInstalled returns true when executing Agent is the installed Agent. -// -// This verifies the running executable path based on hard-coded paths -// for each platform type. -func RunningInstalled() bool { - expected := filepath.Join(InstallPath, BinaryName) - execPath, _ := os.Executable() - execPath, _ = filepath.Abs(execPath) - execName := filepath.Base(execPath) - execDir := filepath.Dir(execPath) - if insideData(execDir) { - // executable path is being reported as being down inside of data path - // move up to directories to perform the comparison - execDir = filepath.Dir(filepath.Dir(execDir)) - execPath = filepath.Join(execDir, execName) - } - return ArePathsEqual(expected, execPath) -} - // checkService only checks the status of the service. func checkService() (StatusType, string) { svc, err := newService() diff --git a/x-pack/elastic-agent/pkg/agent/install/paths.go b/x-pack/elastic-agent/pkg/agent/install/paths.go deleted file mode 100644 index b6dc11fb4900..000000000000 --- a/x-pack/elastic-agent/pkg/agent/install/paths.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -// +build !darwin -// +build !windows - -package install - -const ( - // BinaryName is the name of the installed binary. - BinaryName = "elastic-agent" - - // InstallPath is the installation path using for install command. - InstallPath = "/opt/Elastic/Agent" - - // SocketPath is the socket path used when installed. 
- SocketPath = "unix:///run/elastic-agent.sock" - - // ServiceName is the service name when installed. - ServiceName = "elastic-agent" - - // ShellWrapperPath is the path to the installed shell wrapper. - ShellWrapperPath = "/usr/bin/elastic-agent" - - // ShellWrapper is the wrapper that is installed. - ShellWrapper = `#!/bin/sh -exec /opt/Elastic/Agent/elastic-agent $@ -` -) - -// ArePathsEqual determines whether paths are equal taking case sensitivity of os into account. -func ArePathsEqual(expected, actual string) bool { - return expected == actual -} diff --git a/x-pack/elastic-agent/pkg/agent/install/svc.go b/x-pack/elastic-agent/pkg/agent/install/svc.go index 18cbc6d840f5..9276922da411 100644 --- a/x-pack/elastic-agent/pkg/agent/install/svc.go +++ b/x-pack/elastic-agent/pkg/agent/install/svc.go @@ -8,6 +8,8 @@ import ( "path/filepath" "github.com/kardianos/service" + + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/paths" ) const ( @@ -20,19 +22,19 @@ const ( // ExecutablePath returns the path for the installed Agents executable. 
func ExecutablePath() string { - exec := filepath.Join(InstallPath, BinaryName) - if ShellWrapperPath != "" { - exec = ShellWrapperPath + exec := filepath.Join(paths.InstallPath, paths.BinaryName) + if paths.ShellWrapperPath != "" { + exec = paths.ShellWrapperPath } return exec } func newService() (service.Service, error) { return service.New(nil, &service.Config{ - Name: ServiceName, + Name: paths.ServiceName, DisplayName: ServiceDisplayName, Description: ServiceDescription, Executable: ExecutablePath(), - WorkingDirectory: InstallPath, + WorkingDirectory: paths.InstallPath, }) } diff --git a/x-pack/elastic-agent/pkg/agent/install/uninstall.go b/x-pack/elastic-agent/pkg/agent/install/uninstall.go index 001ba7430242..4601585416e2 100644 --- a/x-pack/elastic-agent/pkg/agent/install/uninstall.go +++ b/x-pack/elastic-agent/pkg/agent/install/uninstall.go @@ -5,20 +5,37 @@ package install import ( + "context" "fmt" "os" "os/exec" "path/filepath" "runtime" "strings" + "sync" "github.com/kardianos/service" + "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/info" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/paths" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/errors" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/program" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/transpiler" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/artifact" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/artifact/uninstall" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/capabilities" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/composable" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/config" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/config/operations" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/app" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/logger" + 
"github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/status" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/release" ) // Uninstall uninstalls persistently Elastic Agent on the system. -func Uninstall() error { +func Uninstall(cfgFile string) error { // uninstall the current service svc, err := newService() if err != nil { @@ -30,26 +47,30 @@ func Uninstall() error { if err != nil { return errors.New( err, - fmt.Sprintf("failed to stop service (%s)", ServiceName), - errors.M("service", ServiceName)) + fmt.Sprintf("failed to stop service (%s)", paths.ServiceName), + errors.M("service", paths.ServiceName)) } status = service.StatusStopped } _ = svc.Uninstall() + if err := uninstallPrograms(context.Background(), cfgFile); err != nil { + return err + } + // remove, if present on platform - if ShellWrapperPath != "" { - err = os.Remove(ShellWrapperPath) + if paths.ShellWrapperPath != "" { + err = os.Remove(paths.ShellWrapperPath) if !os.IsNotExist(err) && err != nil { return errors.New( err, - fmt.Sprintf("failed to remove shell wrapper (%s)", ShellWrapperPath), - errors.M("destination", ShellWrapperPath)) + fmt.Sprintf("failed to remove shell wrapper (%s)", paths.ShellWrapperPath), + errors.M("destination", paths.ShellWrapperPath)) } } // remove existing directory - err = os.RemoveAll(InstallPath) + err = os.RemoveAll(paths.InstallPath) if err != nil { if runtime.GOOS == "windows" { // possible to fail on Windows, because elastic-agent.exe is running from @@ -58,8 +79,8 @@ func Uninstall() error { } return errors.New( err, - fmt.Sprintf("failed to remove installation directory (%s)", InstallPath), - errors.M("directory", InstallPath)) + fmt.Sprintf("failed to remove installation directory (%s)", paths.InstallPath), + errors.M("directory", paths.InstallPath)) } return nil @@ -96,3 +117,134 @@ func delayedRemoval(path string) { _ = rmdir.Start() } + +func uninstallPrograms(ctx context.Context, cfgFile string) error { + log, err := 
logger.NewWithLogpLevel("", logp.ErrorLevel) + if err != nil { + return err + } + + cfg, err := operations.LoadFullAgentConfig(cfgFile) + if err != nil { + return err + } + + cfg, err = applyDynamics(ctx, log, cfg) + if err != nil { + return err + } + + pp, err := programsFromConfig(cfg) + if err != nil { + return err + } + + uninstaller, err := uninstall.NewUninstaller() + if err != nil { + return err + } + + currentVersion := release.Version() + if release.Snapshot() { + currentVersion = fmt.Sprintf("%s-SNAPSHOT", currentVersion) + } + artifactConfig := artifact.DefaultConfig() + + for _, p := range pp { + descriptor := app.NewDescriptor(p.Spec, currentVersion, artifactConfig, nil) + if err := uninstaller.Uninstall(ctx, p.Spec, currentVersion, descriptor.Directory()); err != nil { + fmt.Printf("failed to uninstall '%s': %v\n", p.Spec.Name, err) + } + } + + return nil +} + +func programsFromConfig(cfg *config.Config) ([]program.Program, error) { + mm, err := cfg.ToMapStr() + if err != nil { + return nil, errors.New("failed to create a map from config", err) + } + ast, err := transpiler.NewAST(mm) + if err != nil { + return nil, errors.New("failed to create a ast from config", err) + } + + agentInfo, err := info.NewAgentInfo() + if err != nil { + return nil, errors.New("failed to get an agent info", err) + } + + ppMap, err := program.Programs(agentInfo, ast) + + var pp []program.Program + check := make(map[string]bool) + + for _, v := range ppMap { + for _, p := range v { + if _, found := check[p.Spec.Cmd]; found { + continue + } + + pp = append(pp, p) + check[p.Spec.Cmd] = true + } + } + + return pp, nil +} + +func applyDynamics(ctx context.Context, log *logger.Logger, cfg *config.Config) (*config.Config, error) { + cfgMap, err := cfg.ToMapStr() + ast, err := transpiler.NewAST(cfgMap) + if err != nil { + return nil, err + } + + // apply dynamic inputs + inputs, ok := transpiler.Lookup(ast, "inputs") + if ok { + varsArray := make([]*transpiler.Vars, 0, 0) + var wg 
sync.WaitGroup + wg.Add(1) + varsCallback := func(vv []*transpiler.Vars) { + varsArray = vv + wg.Done() + } + + ctrl, err := composable.New(log, cfg) + if err != nil { + return nil, err + } + ctrl.Run(ctx, varsCallback) + wg.Wait() + + renderedInputs, err := transpiler.RenderInputs(inputs, varsArray) + if err != nil { + return nil, err + } + err = transpiler.Insert(ast, renderedInputs, "inputs") + if err != nil { + return nil, err + } + } + + // apply caps + caps, err := capabilities.Load(paths.AgentCapabilitiesPath(), log, status.NewController(log)) + if err != nil { + return nil, err + } + + astIface, err := caps.Apply(ast) + if err != nil { + return nil, err + } + + newAst, ok := astIface.(*transpiler.AST) + if ok { + ast = newAst + } + + finalConfig, err := newAst.Map() + return config.NewConfigFrom(finalConfig) +} diff --git a/x-pack/elastic-agent/pkg/agent/application/action_store.go b/x-pack/elastic-agent/pkg/agent/storage/store/action_store.go similarity index 89% rename from x-pack/elastic-agent/pkg/agent/application/action_store.go rename to x-pack/elastic-agent/pkg/agent/storage/store/action_store.go index 646ab828ef5e..a0dbbf61b799 100644 --- a/x-pack/elastic-agent/pkg/agent/application/action_store.go +++ b/x-pack/elastic-agent/pkg/agent/storage/store/action_store.go @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. -package application +package store import ( "fmt" @@ -14,24 +14,25 @@ import ( "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/fleetapi" ) -// actionStore receives multiples actions to persist to disk, the implementation of the store only +// ActionStore receives multiples actions to persist to disk, the implementation of the store only // take care of action policy change every other action are discarded. 
The store will only keep the // last good action on disk, we assume that the action is added to the store after it was ACK with // Fleet. The store is not threadsafe. // ATTN!!!: THE actionStore is deprecated, please use and extend the stateStore instead. The actionStore will be eventually removed. -type actionStore struct { +type ActionStore struct { log *logger.Logger store storeLoad dirty bool action action } -func newActionStore(log *logger.Logger, store storeLoad) (*actionStore, error) { +// NewActionStore creates a new action store. +func NewActionStore(log *logger.Logger, store storeLoad) (*ActionStore, error) { // If the store exists we will read it, if any errors is returned we assume we do not have anything // persisted and we return an empty store. reader, err := store.Load() if err != nil { - return &actionStore{log: log, store: store}, nil + return &ActionStore{log: log, store: store}, nil } defer reader.Close() @@ -40,7 +41,7 @@ func newActionStore(log *logger.Logger, store storeLoad) (*actionStore, error) { dec := yaml.NewDecoder(reader) err = dec.Decode(&action) if err == io.EOF { - return &actionStore{ + return &ActionStore{ log: log, store: store, }, nil @@ -51,7 +52,7 @@ func newActionStore(log *logger.Logger, store storeLoad) (*actionStore, error) { apc := fleetapi.ActionPolicyChange(action) - return &actionStore{ + return &ActionStore{ log: log, store: store, action: &apc, @@ -60,7 +61,7 @@ func newActionStore(log *logger.Logger, store storeLoad) (*actionStore, error) { // Add is only taking care of ActionPolicyChange for now and will only keep the last one it receive, // any other type of action will be silently ignored. -func (s *actionStore) Add(a action) { +func (s *ActionStore) Add(a action) { switch v := a.(type) { case *fleetapi.ActionPolicyChange, *fleetapi.ActionUnenroll: // Only persist the action if the action is different. 
@@ -72,7 +73,8 @@ func (s *actionStore) Add(a action) { } } -func (s *actionStore) Save() error { +// Save saves actions to backing store. +func (s *ActionStore) Save() error { defer func() { s.dirty = false }() if !s.dirty { return nil @@ -112,7 +114,7 @@ func (s *actionStore) Save() error { // Actions returns a slice of action to execute in order, currently only a action policy change is // persisted. -func (s *actionStore) Actions() []action { +func (s *ActionStore) Actions() []action { if s.action == nil { return []action{} } diff --git a/x-pack/elastic-agent/pkg/agent/application/action_store_test.go b/x-pack/elastic-agent/pkg/agent/storage/store/action_store_test.go similarity index 92% rename from x-pack/elastic-agent/pkg/agent/application/action_store_test.go rename to x-pack/elastic-agent/pkg/agent/storage/store/action_store_test.go index cc5aa47ebca6..6f5fbe4046a1 100644 --- a/x-pack/elastic-agent/pkg/agent/application/action_store_test.go +++ b/x-pack/elastic-agent/pkg/agent/storage/store/action_store_test.go @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. 
-package application +package store import ( "io/ioutil" @@ -32,7 +32,7 @@ func TestActionStore(t *testing.T) { t.Run("action returns empty when no action is saved on disk", withFile(func(t *testing.T, file string) { s := storage.NewDiskStore(file) - store, err := newActionStore(log, s) + store, err := NewActionStore(log, s) require.NoError(t, err) require.Equal(t, 0, len(store.Actions())) })) @@ -44,7 +44,7 @@ func TestActionStore(t *testing.T) { } s := storage.NewDiskStore(file) - store, err := newActionStore(log, s) + store, err := NewActionStore(log, s) require.NoError(t, err) require.Equal(t, 0, len(store.Actions())) @@ -65,7 +65,7 @@ func TestActionStore(t *testing.T) { } s := storage.NewDiskStore(file) - store, err := newActionStore(log, s) + store, err := NewActionStore(log, s) require.NoError(t, err) require.Equal(t, 0, len(store.Actions())) @@ -75,7 +75,7 @@ func TestActionStore(t *testing.T) { require.Equal(t, 1, len(store.Actions())) s = storage.NewDiskStore(file) - store1, err := newActionStore(log, s) + store1, err := NewActionStore(log, s) require.NoError(t, err) actions := store1.Actions() diff --git a/x-pack/elastic-agent/pkg/agent/application/state_store.go b/x-pack/elastic-agent/pkg/agent/storage/store/state_store.go similarity index 74% rename from x-pack/elastic-agent/pkg/agent/application/state_store.go rename to x-pack/elastic-agent/pkg/agent/storage/store/state_store.go index 283ab8e480dc..902e5f9f746c 100644 --- a/x-pack/elastic-agent/pkg/agent/application/state_store.go +++ b/x-pack/elastic-agent/pkg/agent/storage/store/state_store.go @@ -2,9 +2,10 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. 
-package application +package store import ( + "bytes" "context" "fmt" "io" @@ -12,19 +13,41 @@ import ( yaml "gopkg.in/yaml.v2" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/errors" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/storage" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/logger" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/fleetapi" ) -// stateStore is a combined agent state storage initially derived from the former actionStore +type dispatcher interface { + Dispatch(acker FleetAcker, actions ...action) error +} + +type store interface { + Save(io.Reader) error +} + +// FleetAcker is an acker of actions to fleet. +type FleetAcker interface { + Ack(ctx context.Context, action fleetapi.Action) error + Commit(ctx context.Context) error +} + +type storeLoad interface { + store + Load() (io.ReadCloser, error) +} + +type action = fleetapi.Action + +// StateStore is a combined agent state storage initially derived from the former actionStore // and modified to allow persistence of additional agent specific state information. // The following is the original actionStore implementation description: // receives multiples actions to persist to disk, the implementation of the store only // take care of action policy change every other action are discarded. The store will only keep the // last good action on disk, we assume that the action is added to the store after it was ACK with // Fleet. The store is not threadsafe. -type stateStore struct { +type StateStore struct { log *logger.Logger store storeLoad dirty bool @@ -51,6 +74,73 @@ type stateSerializer struct { AckToken string `yaml:"ack_token,omitempty"` } +// NewStateStoreWithMigration creates a new state store and migrates the old one. 
+func NewStateStoreWithMigration(log *logger.Logger, actionStorePath, stateStorePath string) (*StateStore, error) { + err := migrateStateStore(log, actionStorePath, stateStorePath) + if err != nil { + return nil, err + } + + return NewStateStore(log, storage.NewDiskStore(stateStorePath)) +} + +// NewStateStoreActionAcker creates a new state store backed action acker. +func NewStateStoreActionAcker(acker FleetAcker, store *StateStore) *StateStoreActionAcker { + return &StateStoreActionAcker{acker: acker, store: store} +} + +// NewStateStore creates a new state store. +func NewStateStore(log *logger.Logger, store storeLoad) (*StateStore, error) { + // If the store exists we will read it, if any errors is returned we assume we do not have anything + // persisted and we return an empty store. + reader, err := store.Load() + if err != nil { + return &StateStore{log: log, store: store}, nil + } + defer reader.Close() + + var sr stateSerializer + + dec := yaml.NewDecoder(reader) + err = dec.Decode(&sr) + if err == io.EOF { + return &StateStore{ + log: log, + store: store, + }, nil + } + + if err != nil { + return nil, err + } + + state := stateT{ + ackToken: sr.AckToken, + } + + if sr.Action != nil { + if sr.Action.IsDetected != nil { + state.action = &fleetapi.ActionUnenroll{ + ActionID: sr.Action.ID, + ActionType: sr.Action.Type, + IsDetected: *sr.Action.IsDetected, + } + } else { + state.action = &fleetapi.ActionPolicyChange{ + ActionID: sr.Action.ID, + ActionType: sr.Action.Type, + Policy: sr.Action.Policy, + } + } + } + + return &StateStore{ + log: log, + store: store, + state: state, + }, nil +} + func migrateStateStore(log *logger.Logger, actionStorePath, stateStorePath string) (err error) { log = log.Named("state_migration") actionDiskStore := storage.NewDiskStore(actionStorePath) @@ -91,7 +181,7 @@ func migrateStateStore(log *logger.Logger, actionStorePath, stateStorePath strin return nil } - actionStore, err := newActionStore(log, actionDiskStore) + actionStore, 
err := NewActionStore(log, actionDiskStore) if err != nil { log.Errorf("failed to create action store %s: %v", actionStorePath, err) return err @@ -103,7 +193,7 @@ func migrateStateStore(log *logger.Logger, actionStorePath, stateStorePath strin return nil } - stateStore, err := newStateStore(log, stateDiskStore) + stateStore, err := NewStateStore(log, stateDiskStore) if err != nil { return err } @@ -118,69 +208,9 @@ func migrateStateStore(log *logger.Logger, actionStorePath, stateStorePath strin return err } -func newStateStoreWithMigration(log *logger.Logger, actionStorePath, stateStorePath string) (*stateStore, error) { - err := migrateStateStore(log, actionStorePath, stateStorePath) - if err != nil { - return nil, err - } - - return newStateStore(log, storage.NewDiskStore(stateStorePath)) -} - -func newStateStore(log *logger.Logger, store storeLoad) (*stateStore, error) { - // If the store exists we will read it, if any errors is returned we assume we do not have anything - // persisted and we return an empty store. 
- reader, err := store.Load() - if err != nil { - return &stateStore{log: log, store: store}, nil - } - defer reader.Close() - - var sr stateSerializer - - dec := yaml.NewDecoder(reader) - err = dec.Decode(&sr) - if err == io.EOF { - return &stateStore{ - log: log, - store: store, - }, nil - } - - if err != nil { - return nil, err - } - - state := stateT{ - ackToken: sr.AckToken, - } - - if sr.Action != nil { - if sr.Action.IsDetected != nil { - state.action = &fleetapi.ActionUnenroll{ - ActionID: sr.Action.ID, - ActionType: sr.Action.Type, - IsDetected: *sr.Action.IsDetected, - } - } else { - state.action = &fleetapi.ActionPolicyChange{ - ActionID: sr.Action.ID, - ActionType: sr.Action.Type, - Policy: sr.Action.Policy, - } - } - } - - return &stateStore{ - log: log, - store: store, - state: state, - }, nil -} - // Add is only taking care of ActionPolicyChange for now and will only keep the last one it receive, // any other type of action will be silently ignored. -func (s *stateStore) Add(a action) { +func (s *StateStore) Add(a action) { s.mx.Lock() defer s.mx.Unlock() @@ -196,7 +226,7 @@ func (s *stateStore) Add(a action) { } // SetAckToken set ack token to the agent state -func (s *stateStore) SetAckToken(ackToken string) { +func (s *StateStore) SetAckToken(ackToken string) { s.mx.Lock() defer s.mx.Unlock() @@ -207,7 +237,8 @@ func (s *stateStore) SetAckToken(ackToken string) { s.state.ackToken = ackToken } -func (s *stateStore) Save() error { +// Save saves the actions into a state store. +func (s *StateStore) Save() error { s.mx.Lock() defer s.mx.Unlock() @@ -245,7 +276,7 @@ func (s *stateStore) Save() error { // Actions returns a slice of action to execute in order, currently only a action policy change is // persisted. 
-func (s *stateStore) Actions() []action { +func (s *StateStore) Actions() []action { s.mx.RLock() defer s.mx.RUnlock() @@ -257,21 +288,23 @@ func (s *stateStore) Actions() []action { } // AckToken return the agent state persisted ack_token -func (s *stateStore) AckToken() string { +func (s *StateStore) AckToken() string { s.mx.RLock() defer s.mx.RUnlock() return s.state.ackToken } -// actionStoreAcker wraps an existing acker and will send any acked event to the action store, +// StateStoreActionAcker wraps an existing acker and will send any acked event to the action store, // its up to the action store to decide if we need to persist the event for future replay or just // discard the event. -type stateStoreActionAcker struct { - acker fleetAcker - store *stateStore +type StateStoreActionAcker struct { + acker FleetAcker + store *StateStore } -func (a *stateStoreActionAcker) Ack(ctx context.Context, action fleetapi.Action) error { +// Ack acks action using underlying acker. +// After action is acked it is stored to backing store. +func (a *StateStoreActionAcker) Ack(ctx context.Context, action fleetapi.Action) error { if err := a.acker.Ack(ctx, action); err != nil { return err } @@ -279,18 +312,16 @@ func (a *stateStoreActionAcker) Ack(ctx context.Context, action fleetapi.Action) return a.store.Save() } -func (a *stateStoreActionAcker) Commit(ctx context.Context) error { +// Commit commits acks. +func (a *StateStoreActionAcker) Commit(ctx context.Context) error { return a.acker.Commit(ctx) } -func newStateStoreActionAcker(acker fleetAcker, store *stateStore) *stateStoreActionAcker { - return &stateStoreActionAcker{acker: acker, store: store} -} - -func replayActions( +// ReplayActions replays list of actions. 
+func ReplayActions( log *logger.Logger, dispatcher dispatcher, - acker fleetAcker, + acker FleetAcker, actions ...action, ) error { log.Info("restoring current policy from disk") @@ -301,3 +332,11 @@ func replayActions( return nil } + +func yamlToReader(in interface{}) (io.Reader, error) { + data, err := yaml.Marshal(in) + if err != nil { + return nil, errors.New(err, "could not marshal to YAML") + } + return bytes.NewReader(data), nil +} diff --git a/x-pack/elastic-agent/pkg/agent/application/state_store_test.go b/x-pack/elastic-agent/pkg/agent/storage/store/state_store_test.go similarity index 82% rename from x-pack/elastic-agent/pkg/agent/application/state_store_test.go rename to x-pack/elastic-agent/pkg/agent/storage/store/state_store_test.go index 1c6a7bfd7319..1b575599dd3b 100644 --- a/x-pack/elastic-agent/pkg/agent/application/state_store_test.go +++ b/x-pack/elastic-agent/pkg/agent/storage/store/state_store_test.go @@ -2,13 +2,14 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. 
-package application +package store import ( "context" "io/ioutil" "os" "path/filepath" + "sync" "testing" "github.com/google/go-cmp/cmp" @@ -44,7 +45,7 @@ func runTestStateStore(t *testing.T, ackToken string) { t.Run("action returns empty when no action is saved on disk", withFile(func(t *testing.T, file string) { s := storage.NewDiskStore(file) - store, err := newStateStore(log, s) + store, err := NewStateStore(log, s) require.NoError(t, err) require.Equal(t, 0, len(store.Actions())) })) @@ -56,7 +57,7 @@ func runTestStateStore(t *testing.T, ackToken string) { } s := storage.NewDiskStore(file) - store, err := newStateStore(log, s) + store, err := NewStateStore(log, s) require.NoError(t, err) require.Equal(t, 0, len(store.Actions())) @@ -79,7 +80,7 @@ func runTestStateStore(t *testing.T, ackToken string) { } s := storage.NewDiskStore(file) - store, err := newStateStore(log, s) + store, err := NewStateStore(log, s) require.NoError(t, err) require.Equal(t, 0, len(store.Actions())) @@ -91,7 +92,7 @@ func runTestStateStore(t *testing.T, ackToken string) { require.Equal(t, ackToken, store.AckToken()) s = storage.NewDiskStore(file) - store1, err := newStateStore(log, s) + store1, err := NewStateStore(log, s) require.NoError(t, err) actions := store1.Actions() @@ -109,7 +110,7 @@ func runTestStateStore(t *testing.T, ackToken string) { } s := storage.NewDiskStore(file) - store, err := newStateStore(log, s) + store, err := NewStateStore(log, s) require.NoError(t, err) require.Equal(t, 0, len(store.Actions())) @@ -121,7 +122,7 @@ func runTestStateStore(t *testing.T, ackToken string) { require.Equal(t, ackToken, store.AckToken()) s = storage.NewDiskStore(file) - store1, err := newStateStore(log, s) + store1, err := NewStateStore(log, s) require.NoError(t, err) actions := store1.Actions() @@ -138,11 +139,11 @@ func runTestStateStore(t *testing.T, ackToken string) { } s := storage.NewDiskStore(file) - store, err := newStateStore(log, s) + store, err := NewStateStore(log, s) 
require.NoError(t, err) store.SetAckToken(ackToken) - acker := newStateStoreActionAcker(&testAcker{}, store) + acker := NewStateStoreActionAcker(&testAcker{}, store) require.Equal(t, 0, len(store.Actions())) require.NoError(t, acker.Ack(context.Background(), ActionPolicyChange)) @@ -155,7 +156,7 @@ func runTestStateStore(t *testing.T, ackToken string) { withFile(func(t *testing.T, stateStorePath string) { err := migrateStateStore(log, actionStorePath, stateStorePath) require.NoError(t, err) - stateStore, err := newStateStore(log, storage.NewDiskStore(stateStorePath)) + stateStore, err := NewStateStore(log, storage.NewDiskStore(stateStorePath)) require.NoError(t, err) stateStore.SetAckToken(ackToken) require.Equal(t, 0, len(stateStore.Actions())) @@ -173,7 +174,7 @@ func runTestStateStore(t *testing.T, ackToken string) { }, } - actionStore, err := newActionStore(log, storage.NewDiskStore(actionStorePath)) + actionStore, err := NewActionStore(log, storage.NewDiskStore(actionStorePath)) require.NoError(t, err) require.Equal(t, 0, len(actionStore.Actions())) @@ -186,7 +187,7 @@ func runTestStateStore(t *testing.T, ackToken string) { err = migrateStateStore(log, actionStorePath, stateStorePath) require.NoError(t, err) - stateStore, err := newStateStore(log, storage.NewDiskStore(stateStorePath)) + stateStore, err := NewStateStore(log, storage.NewDiskStore(stateStorePath)) require.NoError(t, err) stateStore.SetAckToken(ackToken) diff := cmp.Diff(actionStore.Actions(), stateStore.Actions()) @@ -198,3 +199,37 @@ func runTestStateStore(t *testing.T, ackToken string) { })) } + +type testAcker struct { + acked []string + ackedLock sync.Mutex +} + +func (t *testAcker) Ack(_ context.Context, action fleetapi.Action) error { + t.ackedLock.Lock() + defer t.ackedLock.Unlock() + + if t.acked == nil { + t.acked = make([]string, 0) + } + + t.acked = append(t.acked, action.ID()) + return nil +} + +func (t *testAcker) Commit(_ context.Context) error { + return nil +} + +func (t 
*testAcker) Clear() { + t.ackedLock.Lock() + defer t.ackedLock.Unlock() + + t.acked = make([]string, 0) +} + +func (t *testAcker) Items() []string { + t.ackedLock.Lock() + defer t.ackedLock.Unlock() + return t.acked +} diff --git a/x-pack/elastic-agent/pkg/agent/transpiler/utils..go b/x-pack/elastic-agent/pkg/agent/transpiler/utils..go new file mode 100644 index 000000000000..0777ad6affd7 --- /dev/null +++ b/x-pack/elastic-agent/pkg/agent/transpiler/utils..go @@ -0,0 +1,95 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package transpiler + +import "fmt" + +// RenderInputs renders dynamic inputs section +func RenderInputs(inputs Node, varsArray []*Vars) (Node, error) { + l, ok := inputs.Value().(*List) + if !ok { + return nil, fmt.Errorf("inputs must be an array") + } + nodes := []*Dict{} + nodesMap := map[string]*Dict{} + for _, vars := range varsArray { + for _, node := range l.Value().([]Node) { + dict, ok := node.Clone().(*Dict) + if !ok { + continue + } + n, err := dict.Apply(vars) + if err == ErrNoMatch { + // has a variable that didn't exist, so we ignore it + continue + } + if err != nil { + // another error that needs to be reported + return nil, err + } + if n == nil { + // condition removed it + continue + } + dict = n.(*Dict) + hash := string(dict.Hash()) + _, exists := nodesMap[hash] + if !exists { + nodesMap[hash] = dict + nodes = append(nodes, dict) + } + } + } + nInputs := []Node{} + for _, node := range nodes { + nInputs = append(nInputs, promoteProcessors(node)) + } + return NewList(nInputs), nil +} + +func promoteProcessors(dict *Dict) *Dict { + p := dict.Processors() + if p == nil { + return dict + } + var currentList *List + current, ok := dict.Find("processors") + if ok { + currentList, ok = current.Value().(*List) + if !ok { + return dict + } + } + 
ast, _ := NewAST(map[string]interface{}{ + "processors": p, + }) + procs, _ := Lookup(ast, "processors") + nodes := nodesFromList(procs.Value().(*List)) + if ok && currentList != nil { + nodes = append(nodes, nodesFromList(currentList)...) + } + dictNodes := dict.Value().([]Node) + set := false + for i, node := range dictNodes { + switch n := node.(type) { + case *Key: + if n.Name() == "processors" { + dictNodes[i] = NewKey("processors", NewList(nodes)) + set = true + } + } + if set { + break + } + } + if !set { + dictNodes = append(dictNodes, NewKey("processors", NewList(nodes))) + } + return NewDict(dictNodes) +} + +func nodesFromList(list *List) []Node { + return list.Value().([]Node) +} diff --git a/x-pack/elastic-agent/pkg/agent/transpiler/utils_test.go b/x-pack/elastic-agent/pkg/agent/transpiler/utils_test.go new file mode 100644 index 000000000000..f94c87f64997 --- /dev/null +++ b/x-pack/elastic-agent/pkg/agent/transpiler/utils_test.go @@ -0,0 +1,745 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package transpiler + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestRenderInputs(t *testing.T) { + testcases := map[string]struct { + input Node + expected Node + varsArray []*Vars + err bool + }{ + "inputs not list": { + input: NewKey("inputs", NewStrVal("not list")), + err: true, + varsArray: []*Vars{ + mustMakeVars(map[string]interface{}{}), + }, + }, + "bad variable error": { + input: NewKey("inputs", NewList([]Node{ + NewDict([]Node{ + NewKey("key", NewStrVal("${var1.name|'missing ending quote}")), + }), + })), + err: true, + varsArray: []*Vars{ + mustMakeVars(map[string]interface{}{ + "var1": map[string]interface{}{ + "name": "value1", + }, + }), + }, + }, + "basic single var": { + input: NewKey("inputs", NewList([]Node{ + NewDict([]Node{ + NewKey("key", NewStrVal("${var1.name}")), + }), + })), + expected: NewList([]Node{ + NewDict([]Node{ + NewKey("key", NewStrVal("value1")), + }), + }), + varsArray: []*Vars{ + mustMakeVars(map[string]interface{}{ + "var1": map[string]interface{}{ + "name": "value1", + }, + }), + }, + }, + "duplicate result is removed": { + input: NewKey("inputs", NewList([]Node{ + NewDict([]Node{ + NewKey("key", NewStrVal("${var1.name}")), + }), + NewDict([]Node{ + NewKey("key", NewStrVal("${var1.diff}")), + }), + })), + expected: NewList([]Node{ + NewDict([]Node{ + NewKey("key", NewStrVal("value1")), + }), + }), + varsArray: []*Vars{ + mustMakeVars(map[string]interface{}{ + "var1": map[string]interface{}{ + "name": "value1", + "diff": "value1", + }, + }), + }, + }, + "missing var removes input": { + input: NewKey("inputs", NewList([]Node{ + NewDict([]Node{ + NewKey("key", NewStrVal("${var1.name}")), + }), + NewDict([]Node{ + NewKey("key", NewStrVal("${var1.missing|var1.diff}")), + }), + NewDict([]Node{ + NewKey("key", NewStrVal("${var1.removed}")), + }), + })), + expected: NewList([]Node{ + NewDict([]Node{ + NewKey("key", NewStrVal("value1")), + }), + }), + 
varsArray: []*Vars{ + mustMakeVars(map[string]interface{}{ + "var1": map[string]interface{}{ + "name": "value1", + "diff": "value1", + }, + }), + }, + }, + "duplicate var result but unique input not removed": { + input: NewKey("inputs", NewList([]Node{ + NewDict([]Node{ + NewKey("key", NewStrVal("${var1.name}")), + NewKey("unique", NewStrVal("0")), + }), + NewDict([]Node{ + NewKey("key", NewStrVal("${var1.diff}")), + NewKey("unique", NewStrVal("1")), + }), + })), + expected: NewList([]Node{ + NewDict([]Node{ + NewKey("key", NewStrVal("value1")), + NewKey("unique", NewStrVal("0")), + }), + NewDict([]Node{ + NewKey("key", NewStrVal("value1")), + NewKey("unique", NewStrVal("1")), + }), + }), + varsArray: []*Vars{ + mustMakeVars(map[string]interface{}{ + "var1": map[string]interface{}{ + "name": "value1", + "diff": "value1", + }, + }), + }, + }, + "duplicates across vars array handled": { + input: NewKey("inputs", NewList([]Node{ + NewDict([]Node{ + NewKey("key", NewStrVal("${var1.name}")), + }), + NewDict([]Node{ + NewKey("key", NewStrVal("${var1.diff}")), + }), + })), + expected: NewList([]Node{ + NewDict([]Node{ + NewKey("key", NewStrVal("value1")), + }), + NewDict([]Node{ + NewKey("key", NewStrVal("value2")), + }), + NewDict([]Node{ + NewKey("key", NewStrVal("value3")), + }), + NewDict([]Node{ + NewKey("key", NewStrVal("value4")), + }), + }), + varsArray: []*Vars{ + mustMakeVars(map[string]interface{}{ + "var1": map[string]interface{}{ + "name": "value1", + "diff": "value1", + }, + }), + mustMakeVars(map[string]interface{}{ + "var1": map[string]interface{}{ + "name": "value1", + "diff": "value2", + }, + }), + mustMakeVars(map[string]interface{}{ + "var1": map[string]interface{}{ + "name": "value1", + "diff": "value3", + }, + }), + mustMakeVars(map[string]interface{}{ + "var1": map[string]interface{}{ + "name": "value1", + "diff": "value2", + }, + }), + mustMakeVars(map[string]interface{}{ + "var1": map[string]interface{}{ + "name": "value1", + "diff": "value4", + 
}, + }), + }, + }, + "nested in streams": { + input: NewKey("inputs", NewList([]Node{ + NewDict([]Node{ + NewKey("type", NewStrVal("logfile")), + NewKey("streams", NewList([]Node{ + NewDict([]Node{ + NewKey("paths", NewList([]Node{ + NewStrVal("/var/log/${var1.name}.log"), + })), + }), + })), + }), + })), + expected: NewList([]Node{ + NewDict([]Node{ + NewKey("type", NewStrVal("logfile")), + NewKey("streams", NewList([]Node{ + NewDict([]Node{ + NewKey("paths", NewList([]Node{ + NewStrVal("/var/log/value1.log"), + })), + }), + })), + }), + NewDict([]Node{ + NewKey("type", NewStrVal("logfile")), + NewKey("streams", NewList([]Node{ + NewDict([]Node{ + NewKey("paths", NewList([]Node{ + NewStrVal("/var/log/value2.log"), + })), + }), + })), + }), + NewDict([]Node{ + NewKey("type", NewStrVal("logfile")), + NewKey("streams", NewList([]Node{ + NewDict([]Node{ + NewKey("paths", NewList([]Node{ + NewStrVal("/var/log/value3.log"), + })), + }), + })), + }), + NewDict([]Node{ + NewKey("type", NewStrVal("logfile")), + NewKey("streams", NewList([]Node{ + NewDict([]Node{ + NewKey("paths", NewList([]Node{ + NewStrVal("/var/log/value4.log"), + })), + }), + })), + }), + }), + varsArray: []*Vars{ + mustMakeVars(map[string]interface{}{ + "var1": map[string]interface{}{ + "name": "value1", + }, + }), + mustMakeVars(map[string]interface{}{ + "var1": map[string]interface{}{ + "name": "value2", + }, + }), + mustMakeVars(map[string]interface{}{ + "var1": map[string]interface{}{ + "name": "value2", + }, + }), + mustMakeVars(map[string]interface{}{ + "var1": map[string]interface{}{ + "name": "value3", + }, + }), + mustMakeVars(map[string]interface{}{ + "var1": map[string]interface{}{ + "name": "value4", + }, + }), + mustMakeVars(map[string]interface{}{ + "var1": map[string]interface{}{ + "missing": "other", + }, + }), + }, + }, + "inputs with processors": { + input: NewKey("inputs", NewList([]Node{ + NewDict([]Node{ + NewKey("type", NewStrVal("logfile")), + NewKey("streams", NewList([]Node{ + 
NewDict([]Node{ + NewKey("paths", NewList([]Node{ + NewStrVal("/var/log/${var1.name}.log"), + })), + }), + })), + NewKey("processors", NewList([]Node{ + NewDict([]Node{ + NewKey("add_fields", NewDict([]Node{ + NewKey("fields", NewDict([]Node{ + NewKey("user", NewStrVal("user1")), + })), + NewKey("to", NewStrVal("user")), + })), + }), + })), + }), + })), + expected: NewList([]Node{ + NewDict([]Node{ + NewKey("type", NewStrVal("logfile")), + NewKey("streams", NewList([]Node{ + NewDict([]Node{ + NewKey("paths", NewList([]Node{ + NewStrVal("/var/log/value1.log"), + })), + }), + })), + NewKey("processors", NewList([]Node{ + NewDict([]Node{ + NewKey("add_fields", NewDict([]Node{ + NewKey("fields", NewDict([]Node{ + NewKey("user", NewStrVal("user1")), + })), + NewKey("to", NewStrVal("user")), + })), + }), + })), + }), + NewDict([]Node{ + NewKey("type", NewStrVal("logfile")), + NewKey("streams", NewList([]Node{ + NewDict([]Node{ + NewKey("paths", NewList([]Node{ + NewStrVal("/var/log/value2.log"), + })), + }), + })), + NewKey("processors", NewList([]Node{ + NewDict([]Node{ + NewKey("add_fields", NewDict([]Node{ + NewKey("fields", NewDict([]Node{ + NewKey("user", NewStrVal("user1")), + })), + NewKey("to", NewStrVal("user")), + })), + }), + })), + }), + }), + varsArray: []*Vars{ + mustMakeVars(map[string]interface{}{ + "var1": map[string]interface{}{ + "name": "value1", + }, + }), + mustMakeVars(map[string]interface{}{ + "var1": map[string]interface{}{ + "name": "value2", + }, + }), + }, + }, + "vars with processors": { + input: NewKey("inputs", NewList([]Node{ + NewDict([]Node{ + NewKey("type", NewStrVal("logfile")), + NewKey("streams", NewList([]Node{ + NewDict([]Node{ + NewKey("paths", NewList([]Node{ + NewStrVal("/var/log/${var1.name}.log"), + })), + }), + })), + NewKey("processors", NewList([]Node{ + NewDict([]Node{ + NewKey("add_fields", NewDict([]Node{ + NewKey("fields", NewDict([]Node{ + NewKey("user", NewStrVal("user1")), + })), + NewKey("to", NewStrVal("user")), + 
})), + }), + })), + }), + })), + expected: NewList([]Node{ + NewDict([]Node{ + NewKey("type", NewStrVal("logfile")), + NewKey("streams", NewList([]Node{ + NewDict([]Node{ + NewKey("paths", NewList([]Node{ + NewStrVal("/var/log/value1.log"), + })), + }), + })), + NewKey("processors", NewList([]Node{ + NewDict([]Node{ + NewKey("add_fields", NewDict([]Node{ + NewKey("fields", NewDict([]Node{ + NewKey("custom", NewStrVal("value1")), + })), + NewKey("to", NewStrVal("dynamic")), + })), + }), + NewDict([]Node{ + NewKey("add_fields", NewDict([]Node{ + NewKey("fields", NewDict([]Node{ + NewKey("user", NewStrVal("user1")), + })), + NewKey("to", NewStrVal("user")), + })), + }), + })), + }), + NewDict([]Node{ + NewKey("type", NewStrVal("logfile")), + NewKey("streams", NewList([]Node{ + NewDict([]Node{ + NewKey("paths", NewList([]Node{ + NewStrVal("/var/log/value2.log"), + })), + }), + })), + NewKey("processors", NewList([]Node{ + NewDict([]Node{ + NewKey("add_fields", NewDict([]Node{ + NewKey("fields", NewDict([]Node{ + NewKey("custom", NewStrVal("value2")), + })), + NewKey("to", NewStrVal("dynamic")), + })), + }), + NewDict([]Node{ + NewKey("add_fields", NewDict([]Node{ + NewKey("fields", NewDict([]Node{ + NewKey("user", NewStrVal("user1")), + })), + NewKey("to", NewStrVal("user")), + })), + }), + })), + }), + }), + varsArray: []*Vars{ + mustMakeVarsP(map[string]interface{}{ + "var1": map[string]interface{}{ + "name": "value1", + }, + }, + "var1", + []map[string]interface{}{ + { + "add_fields": map[string]interface{}{ + "fields": map[string]interface{}{ + "custom": "value1", + }, + "to": "dynamic", + }, + }, + }), + mustMakeVarsP(map[string]interface{}{ + "var1": map[string]interface{}{ + "name": "value2", + }, + }, + "var1", + []map[string]interface{}{ + { + "add_fields": map[string]interface{}{ + "fields": map[string]interface{}{ + "custom": "value2", + }, + "to": "dynamic", + }, + }, + }), + }, + }, + "inputs without processors and vars with processors": { + input: 
NewKey("inputs", NewList([]Node{ + NewDict([]Node{ + NewKey("type", NewStrVal("logfile")), + NewKey("streams", NewList([]Node{ + NewDict([]Node{ + NewKey("paths", NewList([]Node{ + NewStrVal("/var/log/${var1.name}.log"), + })), + }), + })), + }), + })), + expected: NewList([]Node{ + NewDict([]Node{ + NewKey("type", NewStrVal("logfile")), + NewKey("streams", NewList([]Node{ + NewDict([]Node{ + NewKey("paths", NewList([]Node{ + NewStrVal("/var/log/value1.log"), + })), + }), + })), + NewKey("processors", NewList([]Node{ + NewDict([]Node{ + NewKey("add_fields", NewDict([]Node{ + NewKey("fields", NewDict([]Node{ + NewKey("custom", NewStrVal("value1")), + })), + NewKey("to", NewStrVal("dynamic")), + })), + }), + })), + }), + NewDict([]Node{ + NewKey("type", NewStrVal("logfile")), + NewKey("streams", NewList([]Node{ + NewDict([]Node{ + NewKey("paths", NewList([]Node{ + NewStrVal("/var/log/value2.log"), + })), + }), + })), + NewKey("processors", NewList([]Node{ + NewDict([]Node{ + NewKey("add_fields", NewDict([]Node{ + NewKey("fields", NewDict([]Node{ + NewKey("custom", NewStrVal("value2")), + })), + NewKey("to", NewStrVal("dynamic")), + })), + }), + })), + }), + }), + varsArray: []*Vars{ + mustMakeVarsP(map[string]interface{}{ + "var1": map[string]interface{}{ + "name": "value1", + }, + }, + "var1", + []map[string]interface{}{ + { + "add_fields": map[string]interface{}{ + "fields": map[string]interface{}{ + "custom": "value1", + }, + "to": "dynamic", + }, + }, + }), + mustMakeVarsP(map[string]interface{}{ + "var1": map[string]interface{}{ + "name": "value2", + }, + }, + "var1", + []map[string]interface{}{ + { + "add_fields": map[string]interface{}{ + "fields": map[string]interface{}{ + "custom": "value2", + }, + "to": "dynamic", + }, + }, + }), + }, + }, + "processors incorrectly a map": { + input: NewKey("inputs", NewList([]Node{ + NewDict([]Node{ + NewKey("type", NewStrVal("logfile")), + NewKey("streams", NewList([]Node{ + NewDict([]Node{ + NewKey("paths", 
NewList([]Node{ + NewStrVal("/var/log/${var1.name}.log"), + })), + }), + })), + NewKey("processors", NewDict([]Node{ + NewKey("add_fields", NewDict([]Node{ + NewKey("invalid", NewStrVal("value")), + })), + })), + }), + })), + expected: NewList([]Node{ + NewDict([]Node{ + NewKey("type", NewStrVal("logfile")), + NewKey("streams", NewList([]Node{ + NewDict([]Node{ + NewKey("paths", NewList([]Node{ + NewStrVal("/var/log/value1.log"), + })), + }), + })), + NewKey("processors", NewDict([]Node{ + NewKey("add_fields", NewDict([]Node{ + NewKey("invalid", NewStrVal("value")), + })), + })), + }), + NewDict([]Node{ + NewKey("type", NewStrVal("logfile")), + NewKey("streams", NewList([]Node{ + NewDict([]Node{ + NewKey("paths", NewList([]Node{ + NewStrVal("/var/log/value2.log"), + })), + }), + })), + NewKey("processors", NewDict([]Node{ + NewKey("add_fields", NewDict([]Node{ + NewKey("invalid", NewStrVal("value")), + })), + })), + }), + }), + varsArray: []*Vars{ + mustMakeVarsP(map[string]interface{}{ + "var1": map[string]interface{}{ + "name": "value1", + }, + }, + "var1", + []map[string]interface{}{ + { + "add_fields": map[string]interface{}{ + "fields": map[string]interface{}{ + "custom": "value1", + }, + "to": "dynamic", + }, + }, + }), + mustMakeVarsP(map[string]interface{}{ + "var1": map[string]interface{}{ + "name": "value2", + }, + }, + "var1", + []map[string]interface{}{ + { + "add_fields": map[string]interface{}{ + "fields": map[string]interface{}{ + "custom": "value2", + }, + "to": "dynamic", + }, + }, + }), + }, + }, + "same var result with different processors": { + input: NewKey("inputs", NewList([]Node{ + NewDict([]Node{ + NewKey("type", NewStrVal("logfile")), + NewKey("streams", NewList([]Node{ + NewDict([]Node{ + NewKey("paths", NewList([]Node{ + NewStrVal("/var/log/${var1.name}.log"), + })), + }), + })), + }), + })), + expected: NewList([]Node{ + NewDict([]Node{ + NewKey("type", NewStrVal("logfile")), + NewKey("streams", NewList([]Node{ + NewDict([]Node{ + 
NewKey("paths", NewList([]Node{ + NewStrVal("/var/log/value1.log"), + })), + }), + })), + NewKey("processors", NewList([]Node{ + NewDict([]Node{ + NewKey("add_fields", NewDict([]Node{ + NewKey("fields", NewDict([]Node{ + NewKey("custom", NewStrVal("value1")), + })), + NewKey("to", NewStrVal("dynamic")), + })), + }), + })), + }), + }), + varsArray: []*Vars{ + mustMakeVarsP(map[string]interface{}{ + "var1": map[string]interface{}{ + "name": "value1", + }, + }, + "var1", + []map[string]interface{}{ + { + "add_fields": map[string]interface{}{ + "fields": map[string]interface{}{ + "custom": "value1", + }, + "to": "dynamic", + }, + }, + }), + mustMakeVarsP(map[string]interface{}{ + "var1": map[string]interface{}{ + "name": "value1", + }, + }, + "var1", + []map[string]interface{}{ + { + "add_fields": map[string]interface{}{ + "fields": map[string]interface{}{ + "custom": "value2", + }, + "to": "dynamic", + }, + }, + }), + }, + }, + } + + for name, test := range testcases { + t.Run(name, func(t *testing.T) { + v, err := RenderInputs(test.input, test.varsArray) + if test.err { + require.Error(t, err) + } else { + require.NoError(t, err) + assert.Equal(t, test.expected.String(), v.String()) + } + }) + } +} + +func mustMakeVarsP(mapping map[string]interface{}, processorKey string, processors Processors) *Vars { + v, err := NewVarsWithProcessors(mapping, processorKey, processors) + if err != nil { + panic(err) + } + return v +} diff --git a/x-pack/elastic-agent/pkg/config/operations/enricher.go b/x-pack/elastic-agent/pkg/config/operations/enricher.go new file mode 100644 index 000000000000..24e2234b00cf --- /dev/null +++ b/x-pack/elastic-agent/pkg/config/operations/enricher.go @@ -0,0 +1,39 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package operations + +import ( + "runtime" + + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/paths" + + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/errors" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/config" +) + +// InjectAgentConfig injects config to a provided configuration. +func InjectAgentConfig(c *config.Config) error { + globalConfig := agentGlobalConfig() + if err := c.Merge(globalConfig); err != nil { + return errors.New("failed to inject agent global config", err, errors.TypeConfig) + } + + return nil +} + +// agentGlobalConfig gets global config used for resolution of variables inside configuration +// such as ${path.data}. +func agentGlobalConfig() map[string]interface{} { + return map[string]interface{}{ + "path": map[string]interface{}{ + "data": paths.Data(), + "config": paths.Config(), + "home": paths.Home(), + "logs": paths.Logs(), + }, + "runtime.os": runtime.GOOS, + "runtime.arch": runtime.GOARCH, + } +} diff --git a/x-pack/elastic-agent/pkg/config/operations/inspector.go b/x-pack/elastic-agent/pkg/config/operations/inspector.go new file mode 100644 index 000000000000..6a0697e06846 --- /dev/null +++ b/x-pack/elastic-agent/pkg/config/operations/inspector.go @@ -0,0 +1,106 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package operations + +import ( + "fmt" + + "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/paths" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/configuration" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/errors" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/storage" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/storage/store" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/config" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/logger" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/fleetapi" +) + +// LoadFullAgentConfig load agent config based on provided paths and defined capabilities. +// In case fleet is used, config from policy action is returned. +func LoadFullAgentConfig(cfgPath string) (*config.Config, error) { + rawConfig, err := loadConfig(cfgPath) + if err != nil { + return nil, err + } + + cfg, err := configuration.NewFromConfig(rawConfig) + if err != nil { + return nil, err + } + + if configuration.IsStandalone(cfg.Fleet) { + return rawConfig, nil + } + + fleetConfig, err := loadFleetConfig(rawConfig) + if err != nil { + return nil, err + } else if fleetConfig == nil { + return nil, fmt.Errorf("no fleet config retrieved yet") + } + + return config.NewConfigFrom(fleetConfig) +} + +func loadConfig(configPath string) (*config.Config, error) { + rawConfig, err := config.LoadFile(configPath) + if err != nil { + return nil, err + } + + path := paths.AgentConfigFile() + + store := storage.NewDiskStore(path) + reader, err := store.Load() + if err != nil { + return nil, errors.New(err, "could not initialize config store", + errors.TypeFilesystem, + errors.M(errors.MetaKeyPath, path)) + } + + config, err := config.NewConfigFrom(reader) + if err != nil { + return nil, errors.New(err, + fmt.Sprintf("fail to read configuration %s for the elastic-agent", path), + errors.TypeFilesystem, + 
errors.M(errors.MetaKeyPath, path)) + } + + // merge local configuration and configuration persisted from fleet. + rawConfig.Merge(config) + + if err := InjectAgentConfig(rawConfig); err != nil { + return nil, err + } + + return rawConfig, nil +} + +func loadFleetConfig(cfg *config.Config) (map[string]interface{}, error) { + log, err := newErrorLogger() + if err != nil { + return nil, err + } + + stateStore, err := store.NewStateStoreWithMigration(log, paths.AgentActionStoreFile(), paths.AgentStateStoreFile()) + if err != nil { + return nil, err + } + + for _, c := range stateStore.Actions() { + cfgChange, ok := c.(*fleetapi.ActionPolicyChange) + if !ok { + continue + } + + return cfgChange.Policy, nil + } + return nil, nil +} + +func newErrorLogger() (*logger.Logger, error) { + return logger.NewWithLogpLevel("", logp.ErrorLevel) +} diff --git a/x-pack/elastic-agent/pkg/config/operations/svc_unix.go b/x-pack/elastic-agent/pkg/config/operations/svc_unix.go new file mode 100644 index 000000000000..ba63130b89f5 --- /dev/null +++ b/x-pack/elastic-agent/pkg/config/operations/svc_unix.go @@ -0,0 +1,15 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +// +build !windows + +package operations + +import "os" + +// RunningUnderSupervisor returns true when executing Agent is running under +// the supervisor processes of the OS. +func RunningUnderSupervisor() bool { + return os.Getppid() == 1 +} diff --git a/x-pack/elastic-agent/pkg/config/operations/svc_windows.go b/x-pack/elastic-agent/pkg/config/operations/svc_windows.go new file mode 100644 index 000000000000..1d976d9be2b9 --- /dev/null +++ b/x-pack/elastic-agent/pkg/config/operations/svc_windows.go @@ -0,0 +1,53 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +// +build windows + +package operations + +import ( + "golang.org/x/sys/windows" +) + +const ( + ML_SYSTEM_RID = 0x4000 +) + +// RunningUnderSupervisor returns true when executing Agent is running under +// the supervisor processes of the OS. +func RunningUnderSupervisor() bool { + serviceSid, err := allocSid(ML_SYSTEM_RID) + if err != nil { + return false + } + defer windows.FreeSid(serviceSid) + + t, err := windows.OpenCurrentProcessToken() + if err != nil { + return false + } + defer t.Close() + + gs, err := t.GetTokenGroups() + if err != nil { + return false + } + + for _, g := range gs.AllGroups() { + if windows.EqualSid(g.Sid, serviceSid) { + return true + } + } + return false +} + +func allocSid(subAuth0 uint32) (*windows.SID, error) { + var sid *windows.SID + err := windows.AllocateAndInitializeSid(&windows.SECURITY_MANDATORY_LABEL_AUTHORITY, + 1, subAuth0, 0, 0, 0, 0, 0, 0, 0, &sid) + if err != nil { + return nil, err + } + return sid, nil +} diff --git a/x-pack/elastic-agent/pkg/reporter/fleet/config.go b/x-pack/elastic-agent/pkg/reporter/fleet/config/config.go similarity index 97% rename from x-pack/elastic-agent/pkg/reporter/fleet/config.go rename to x-pack/elastic-agent/pkg/reporter/fleet/config/config.go index cfc85d73a63c..1e42b956ee85 100644 --- a/x-pack/elastic-agent/pkg/reporter/fleet/config.go +++ b/x-pack/elastic-agent/pkg/reporter/fleet/config/config.go @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. 
-package fleet +package config // Config is a configuration describing fleet connected parts type Config struct { diff --git a/x-pack/elastic-agent/pkg/reporter/fleet/reporter.go b/x-pack/elastic-agent/pkg/reporter/fleet/reporter.go index fcc1d28f3aa8..b4bd233d205c 100644 --- a/x-pack/elastic-agent/pkg/reporter/fleet/reporter.go +++ b/x-pack/elastic-agent/pkg/reporter/fleet/reporter.go @@ -12,6 +12,7 @@ import ( "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/logger" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/fleetapi" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/reporter" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/reporter/fleet/config" ) const ( @@ -57,7 +58,7 @@ type agentInfo interface { } // NewReporter creates a new fleet reporter. -func NewReporter(agentInfo agentInfo, l *logger.Logger, c *Config) (*Reporter, error) { +func NewReporter(agentInfo agentInfo, l *logger.Logger, c *config.Config) (*Reporter, error) { r := &Reporter{ info: agentInfo, queue: make([]fleetapi.SerializableEvent, 0),