From 17043143716299818bd13fca93126e876d92b5d4 Mon Sep 17 00:00:00 2001 From: Nicolas Guerguadj <35628945+Kaderinho@users.noreply.github.com> Date: Thu, 14 Dec 2023 20:47:35 +0100 Subject: [PATCH 01/66] Add secret e2e tests for Windows (#21234) Add secret e2e tests for Windows --- .../secret/fixtures/setup_secret.ps1 | 30 ++++++++ .../secret/secret_common_test.go | 22 ++++++ .../secret_nix_test.go} | 34 +++++---- .../secret/secret_win_test.go | 75 +++++++++++++++++++ 4 files changed, 147 insertions(+), 14 deletions(-) create mode 100644 test/new-e2e/tests/agent-subcommands/secret/fixtures/setup_secret.ps1 create mode 100644 test/new-e2e/tests/agent-subcommands/secret/secret_common_test.go rename test/new-e2e/tests/agent-subcommands/{secret_test.go => secret/secret_nix_test.go} (63%) create mode 100644 test/new-e2e/tests/agent-subcommands/secret/secret_win_test.go diff --git a/test/new-e2e/tests/agent-subcommands/secret/fixtures/setup_secret.ps1 b/test/new-e2e/tests/agent-subcommands/secret/fixtures/setup_secret.ps1 new file mode 100644 index 00000000000000..978823ea547207 --- /dev/null +++ b/test/new-e2e/tests/agent-subcommands/secret/fixtures/setup_secret.ps1 @@ -0,0 +1,30 @@ +param ( + [string]$FilePath, + [string]$FileContent +) + +echo Filepath = $FilePath +echo FileContnet = $FileContent + +$user = "ddagentuser" +$permissions = "Read", "ReadAndExecute" + + +# Create the file and get its permissions +$FileContent | Set-Content -Path $FilePath +$acl = Get-Acl -Path $FilePath + +# Disable inheritance and remove all existing access rules +$acl.SetAccessRuleProtection($true, $false) +$acl.Access | ForEach-Object { + $acl.RemoveAccessRule($_) +} + +# Add the desired access rule for the specific user +foreach ($permission in $permissions) { + $rule = New-Object System.Security.AccessControl.FileSystemAccessRule($user, $permission, "Allow") + $acl.AddAccessRule($rule) +} + +# Set the modified ACL on the file +Set-Acl -Path $FilePath -AclObject $acl \ No newline at end 
of file diff --git a/test/new-e2e/tests/agent-subcommands/secret/secret_common_test.go b/test/new-e2e/tests/agent-subcommands/secret/secret_common_test.go new file mode 100644 index 00000000000000..f888c3e2fbdf2e --- /dev/null +++ b/test/new-e2e/tests/agent-subcommands/secret/secret_common_test.go @@ -0,0 +1,22 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package secret + +import ( + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e" + + "github.com/stretchr/testify/assert" +) + +type baseSecretSuite struct { + e2e.Suite[e2e.AgentEnv] +} + +func (v *baseSecretSuite) TestAgentSecretNotEnabledByDefault() { + secret := v.Env().Agent.Secret() + + assert.Contains(v.T(), secret, "No secret_backend_command set") +} diff --git a/test/new-e2e/tests/agent-subcommands/secret_test.go b/test/new-e2e/tests/agent-subcommands/secret/secret_nix_test.go similarity index 63% rename from test/new-e2e/tests/agent-subcommands/secret_test.go rename to test/new-e2e/tests/agent-subcommands/secret/secret_nix_test.go index aa09893e9c93e7..f2c5e5f662620a 100644 --- a/test/new-e2e/tests/agent-subcommands/secret_test.go +++ b/test/new-e2e/tests/agent-subcommands/secret/secret_nix_test.go @@ -3,32 +3,37 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
-package agentsubcommands +package secret import ( "testing" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e" "github.com/DataDog/test-infra-definitions/components/datadog/agentparams" - + "github.com/DataDog/test-infra-definitions/scenarios/aws/vm/ec2os" + "github.com/DataDog/test-infra-definitions/scenarios/aws/vm/ec2params" "github.com/stretchr/testify/assert" ) -type agentSecretSuite struct { - e2e.Suite[e2e.AgentEnv] +type linuxSecretSuite struct { + baseSecretSuite } -func TestAgentSecretSuite(t *testing.T) { - e2e.Run(t, &agentSecretSuite{}, e2e.AgentStackDef()) +func TestLinuxSecretSuite(t *testing.T) { + t.Parallel() + e2e.Run(t, &linuxSecretSuite{}, e2e.AgentStackDef(e2e.WithVMParams(ec2params.WithOS(ec2os.UbuntuOS)))) } -func (v *agentSecretSuite) TestAgentSecretNotEnabledByDefault() { - secret := v.Env().Agent.Secret() - - assert.Contains(v.T(), secret, "No secret_backend_command set") +func (v *linuxSecretSuite) TestAgentSecretExecDoesNotExist() { + v.UpdateEnv(e2e.AgentStackDef(e2e.WithAgentParams(agentparams.WithAgentConfig("secret_backend_command: /does/not/exist")))) + output := v.Env().Agent.Secret() + assert.Contains(v.T(), output, "=== Checking executable permissions ===") + assert.Contains(v.T(), output, "Executable path: /does/not/exist") + assert.Contains(v.T(), output, "Executable permissions: error: invalid executable '/does/not/exist': can't stat it: no such file or directory") + assert.Regexp(v.T(), "Number of secrets .+: 0", output) } -func (v *agentSecretSuite) TestAgentSecretChecksExecutablePermissions() { +func (v *linuxSecretSuite) TestAgentSecretChecksExecutablePermissions() { v.UpdateEnv(e2e.AgentStackDef(e2e.WithAgentParams(agentparams.WithAgentConfig("secret_backend_command: /usr/bin/echo")))) output := v.Env().Agent.Secret() @@ -36,9 +41,10 @@ func (v *agentSecretSuite) TestAgentSecretChecksExecutablePermissions() { assert.Contains(v.T(), output, "=== Checking executable permissions ===") assert.Contains(v.T(), 
output, "Executable path: /usr/bin/echo") assert.Contains(v.T(), output, "Executable permissions: error: invalid executable: '/usr/bin/echo' isn't owned by this user") + assert.Regexp(v.T(), "Number of secrets .+: 0", output) } -func (v *agentSecretSuite) TestAgentSecretCorrectPermissions() { +func (v *linuxSecretSuite) TestAgentSecretCorrectPermissions() { secretScript := `#!/usr/bin/env sh printf '{"alias_secret": {"value": "a_super_secret_string"}}\n'` config := `secret_backend_command: /tmp/bin/secret.sh @@ -57,8 +63,8 @@ host_aliases: assert.Contains(v.T(), output, "File mode: 100700") assert.Contains(v.T(), output, "Owner: dd-agent") assert.Contains(v.T(), output, "Group: dd-agent") - assert.Contains(v.T(), output, "Number of secrets resolved: 1") + assert.Regexp(v.T(), "Number of secrets .+: 1", output) assert.Contains(v.T(), output, "- 'alias_secret':\n\tused in 'datadog.yaml' configuration in entry 'host_aliases'") - // assert we don't output the decrypted secret + // assert we don't output the resolved secret assert.NotContains(v.T(), output, "a_super_secret_string") } diff --git a/test/new-e2e/tests/agent-subcommands/secret/secret_win_test.go b/test/new-e2e/tests/agent-subcommands/secret/secret_win_test.go new file mode 100644 index 00000000000000..d77d94ceaf527e --- /dev/null +++ b/test/new-e2e/tests/agent-subcommands/secret/secret_win_test.go @@ -0,0 +1,75 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package secret + +import ( + _ "embed" + "testing" + + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e" + "github.com/DataDog/test-infra-definitions/components/datadog/agentparams" + "github.com/DataDog/test-infra-definitions/scenarios/aws/vm/ec2os" + "github.com/DataDog/test-infra-definitions/scenarios/aws/vm/ec2params" + "github.com/stretchr/testify/assert" +) + +type windowsSecretSuite struct { + baseSecretSuite +} + +func TestWindowsSecretSuite(t *testing.T) { + t.Parallel() + e2e.Run(t, &windowsSecretSuite{}, e2e.AgentStackDef(e2e.WithVMParams(ec2params.WithOS(ec2os.WindowsOS)))) +} + +func (v *windowsSecretSuite) TestAgentSecretExecDoesNotExist() { + v.UpdateEnv(e2e.AgentStackDef(e2e.WithVMParams(ec2params.WithOS(ec2os.WindowsOS)), e2e.WithAgentParams(agentparams.WithAgentConfig("secret_backend_command: /does/not/exist")))) + output := v.Env().Agent.Secret() + assert.Contains(v.T(), output, "=== Checking executable permissions ===") + assert.Contains(v.T(), output, "Executable path: /does/not/exist") + assert.Contains(v.T(), output, "Executable permissions: error: secretBackendCommand '/does/not/exist' does not exist") + assert.Regexp(v.T(), "Number of secrets .+: 0", output) +} + +func (v *windowsSecretSuite) TestAgentSecretChecksExecutablePermissions() { + v.UpdateEnv(e2e.AgentStackDef(e2e.WithVMParams(ec2params.WithOS(ec2os.WindowsOS)), e2e.WithAgentParams(agentparams.WithAgentConfig("secret_backend_command: C:\\Windows\\system32\\cmd.exe")))) + + output := v.Env().Agent.Secret() + + assert.Contains(v.T(), output, "=== Checking executable permissions ===") + assert.Contains(v.T(), output, "Executable path: C:\\Windows\\system32\\cmd.exe") + assert.Regexp(v.T(), "Executable permissions: error: invalid executable 'C:\\\\Windows\\\\system32\\\\cmd.exe': other users/groups than LOCAL_SYSTEM, .+ have rights on it", output) + assert.Regexp(v.T(), "Number of secrets .+: 0", output) +} + +//go:embed fixtures/setup_secret.ps1 +var secretSetupScript 
[]byte + +func (v *windowsSecretSuite) TestAgentSecretCorrectPermissions() { + config := `secret_backend_command: C:\secret.bat +host_aliases: + - ENC[alias_secret]` + + // We embed a script that file create the secret binary (C:\secret.bat) with the correct permissions + v.UpdateEnv(e2e.AgentStackDef(e2e.WithVMParams(ec2params.WithOS(ec2os.WindowsOS)), + e2e.WithAgentParams(agentparams.WithFile(`C:/secret.bat`, string(secretSetupScript), true)), + e2e.WithAgentParams(agentparams.WithFile(`C:/Users/Administator/scripts/setup_secret.ps1`, string(secretSetupScript), true)))) + v.Env().VM.Execute(`C:/Users/Administator/scripts/setup_secret.ps1 -FilePath "C:/secret.bat" -FileContent '@echo {"alias_secret": {"value": "a_super_secret_string"}}'`) + v.UpdateEnv(e2e.AgentStackDef(e2e.WithVMParams(ec2params.WithOS(ec2os.WindowsOS)), e2e.WithAgentParams(agentparams.WithAgentConfig(config)))) + + output := v.Env().Agent.Secret() + + assert.Contains(v.T(), output, "=== Checking executable permissions ===") + assert.Contains(v.T(), output, "Executable path: C:\\secret.bat") + assert.Contains(v.T(), output, "Executable permissions: OK, the executable has the correct permissions") + + ddagentRegex := `Access : .+\\ddagentuser Allow ReadAndExecute` + assert.Regexp(v.T(), ddagentRegex, output) + assert.Regexp(v.T(), "Number of secrets .+: 1", output) + assert.Contains(v.T(), output, "- 'alias_secret':\r\n\tused in 'datadog.yaml' configuration in entry 'host_aliases'") + // assert we don't output the resolved secret + assert.NotContains(v.T(), output, "a_super_secret_string") +} From b8cc04c1e13f5cd2ca5e7b8845512ba9438f638a Mon Sep 17 00:00:00 2001 From: Guy Arbitman Date: Thu, 14 Dec 2023 15:26:17 -0500 Subject: [PATCH 02/66] usm: http: statskeeper: make incomplete buffer interface (#21519) usm: http: statskeeper: make incomplete buffer interface --- .../protocols/http/incomplete_iface.go | 19 +++++++++++++++++++ .../protocols/http/incomplete_stats.go | 3 ++- 
.../protocols/http/incomplete_stats_test.go | 5 +++-- .../http/incomplete_stats_windows.go | 8 ++++---- pkg/network/protocols/http/protocol.go | 2 +- pkg/network/protocols/http/statkeeper.go | 6 +++--- pkg/network/protocols/http/statkeeper_test.go | 16 ++++++++-------- pkg/network/protocols/http2/protocol.go | 3 ++- pkg/network/usm/monitor_windows.go | 2 +- 9 files changed, 43 insertions(+), 21 deletions(-) create mode 100644 pkg/network/protocols/http/incomplete_iface.go diff --git a/pkg/network/protocols/http/incomplete_iface.go b/pkg/network/protocols/http/incomplete_iface.go new file mode 100644 index 00000000000000..ae4b11b32f9472 --- /dev/null +++ b/pkg/network/protocols/http/incomplete_iface.go @@ -0,0 +1,19 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2022-present Datadog, Inc. + +//go:build linux_bpf || (windows && npm) + +package http + +import ( + "time" +) + +// IncompleteBuffer is responsible for buffering incomplete transactions +// (eg. 
httpTX objects that have either only the request or response information) +type IncompleteBuffer interface { + Add(tx Transaction) + Flush(now time.Time) []Transaction +} diff --git a/pkg/network/protocols/http/incomplete_stats.go b/pkg/network/protocols/http/incomplete_stats.go index 2c2b47a3e9bd83..841a2cd0ec157a 100644 --- a/pkg/network/protocols/http/incomplete_stats.go +++ b/pkg/network/protocols/http/incomplete_stats.go @@ -63,7 +63,8 @@ func newTXParts(requestCapacity, responseCapacity int) *txParts { } } -func newIncompleteBuffer(c *config.Config, telemetry *Telemetry) *incompleteBuffer { +// NewIncompleteBuffer returns a new incompleteBuffer instance +func NewIncompleteBuffer(c *config.Config, telemetry *Telemetry) IncompleteBuffer { return &incompleteBuffer{ data: make(map[types.ConnectionKey]*txParts), maxEntries: c.MaxHTTPStatsBuffered, diff --git a/pkg/network/protocols/http/incomplete_stats_test.go b/pkg/network/protocols/http/incomplete_stats_test.go index fa17bf2c5a5146..20b00449919e99 100644 --- a/pkg/network/protocols/http/incomplete_stats_test.go +++ b/pkg/network/protocols/http/incomplete_stats_test.go @@ -21,7 +21,7 @@ func TestOrphanEntries(t *testing.T) { t.Run("orphan entries can be joined even after flushing", func(t *testing.T) { now := time.Now() tel := NewTelemetry("http") - buffer := newIncompleteBuffer(config.New(), tel) + buffer := NewIncompleteBuffer(config.New(), tel) request := &EbpfEvent{ Http: EbpfTx{ Request_fragment: requestFragment([]byte("GET /foo/bar")), @@ -54,7 +54,8 @@ func TestOrphanEntries(t *testing.T) { t.Run("orphan entries are not kept indefinitely", func(t *testing.T) { tel := NewTelemetry("http") - buffer := newIncompleteBuffer(config.New(), tel) + // Temporary cast until we introduce a HTTP2 dedicated implementation for incompleteBuffer. 
+ buffer := NewIncompleteBuffer(config.New(), tel).(*incompleteBuffer) now := time.Now() buffer.minAgeNano = (30 * time.Second).Nanoseconds() request := &EbpfEvent{ diff --git a/pkg/network/protocols/http/incomplete_stats_windows.go b/pkg/network/protocols/http/incomplete_stats_windows.go index 6d298134cf575a..291d444a791787 100644 --- a/pkg/network/protocols/http/incomplete_stats_windows.go +++ b/pkg/network/protocols/http/incomplete_stats_windows.go @@ -17,13 +17,13 @@ import ( // see both directions of traffic type incompleteBuffer struct{} -//nolint:revive // TODO(WKIT) Fix revive linter -func newIncompleteBuffer(c *config.Config, telemetry *Telemetry) *incompleteBuffer { +// NewIncompleteBuffer returns a new incompleteBuffer instance +func NewIncompleteBuffer(*config.Config, *Telemetry) IncompleteBuffer { return &incompleteBuffer{} } //nolint:revive // TODO(WKIT) Fix revive linter -func (b *incompleteBuffer) Add(tx Transaction) {} +func (b *incompleteBuffer) Add(Transaction) {} //nolint:revive // TODO(WKIT) Fix revive linter -func (b *incompleteBuffer) Flush(now time.Time) []Transaction { return nil } +func (b *incompleteBuffer) Flush(time.Time) []Transaction { return nil } diff --git a/pkg/network/protocols/http/protocol.go b/pkg/network/protocols/http/protocol.go index 0a1ec6de9a5b94..f4e0106fa24212 100644 --- a/pkg/network/protocols/http/protocol.go +++ b/pkg/network/protocols/http/protocol.go @@ -127,7 +127,7 @@ func (p *protocol) PreStart(mgr *manager.Manager) (err error) { return } - p.statkeeper = NewStatkeeper(p.cfg, p.telemetry) + p.statkeeper = NewStatkeeper(p.cfg, p.telemetry, NewIncompleteBuffer(p.cfg, p.telemetry)) p.eventsConsumer.Start() return diff --git a/pkg/network/protocols/http/statkeeper.go b/pkg/network/protocols/http/statkeeper.go index f7e78ddbd4b097..c87ba9a3a40f8a 100644 --- a/pkg/network/protocols/http/statkeeper.go +++ b/pkg/network/protocols/http/statkeeper.go @@ -21,7 +21,7 @@ import ( type StatKeeper struct { mux sync.Mutex 
stats map[Key]*RequestStats - incomplete *incompleteBuffer + incomplete IncompleteBuffer maxEntries int quantizer *URLQuantizer telemetry *Telemetry @@ -37,7 +37,7 @@ type StatKeeper struct { } // NewStatkeeper returns a new StatKeeper. -func NewStatkeeper(c *config.Config, telemetry *Telemetry) *StatKeeper { +func NewStatkeeper(c *config.Config, telemetry *Telemetry, incompleteBuffer IncompleteBuffer) *StatKeeper { var quantizer *URLQuantizer // For now we're only enabling path quantization for HTTP/1 traffic if c.EnableUSMQuantization && telemetry.protocol == "http" { @@ -46,7 +46,7 @@ func NewStatkeeper(c *config.Config, telemetry *Telemetry) *StatKeeper { return &StatKeeper{ stats: make(map[Key]*RequestStats), - incomplete: newIncompleteBuffer(c, telemetry), + incomplete: incompleteBuffer, maxEntries: c.MaxHTTPStatsBuffered, quantizer: quantizer, replaceRules: c.HTTPReplaceRules, diff --git a/pkg/network/protocols/http/statkeeper_test.go b/pkg/network/protocols/http/statkeeper_test.go index 603f7707c80d0b..8ae60da8b58592 100644 --- a/pkg/network/protocols/http/statkeeper_test.go +++ b/pkg/network/protocols/http/statkeeper_test.go @@ -24,7 +24,7 @@ func TestProcessHTTPTransactions(t *testing.T) { cfg := config.New() cfg.MaxHTTPStatsBuffered = 1000 tel := NewTelemetry("http") - sk := NewStatkeeper(cfg, tel) + sk := NewStatkeeper(cfg, tel, NewIncompleteBuffer(cfg, tel)) srcString := "1.1.1.1" dstString := "2.2.2.2" @@ -71,7 +71,7 @@ func BenchmarkProcessHTTPTransactions(b *testing.B) { cfg := config.New() cfg.MaxHTTPStatsBuffered = 100000 tel := NewTelemetry("http") - sk := NewStatkeeper(cfg, tel) + sk := NewStatkeeper(cfg, tel, NewIncompleteBuffer(cfg, tel)) srcString := "1.1.1.1" dstString := "2.2.2.2" @@ -104,7 +104,7 @@ func BenchmarkProcessHTTPTransactions(b *testing.B) { func BenchmarkProcessSameConn(b *testing.B) { cfg := &config.Config{MaxHTTPStatsBuffered: 1000} tel := NewTelemetry("http") - sk := NewStatkeeper(cfg, tel) + sk := NewStatkeeper(cfg, tel, 
NewIncompleteBuffer(cfg, tel)) tx := generateIPv4HTTPTransaction( util.AddressFromString("1.1.1.1"), util.AddressFromString("2.2.2.2"), @@ -138,7 +138,7 @@ func TestPathProcessing(t *testing.T) { c.HTTPReplaceRules = rules tel := NewTelemetry("http") - return NewStatkeeper(c, tel) + return NewStatkeeper(c, tel, NewIncompleteBuffer(cfg, tel)) } t.Run("reject rule", func(t *testing.T) { @@ -230,7 +230,7 @@ func TestHTTPCorrectness(t *testing.T) { cfg.MaxHTTPStatsBuffered = 1000 libtelemetry.Clear() tel := NewTelemetry("http") - sk := NewStatkeeper(cfg, tel) + sk := NewStatkeeper(cfg, tel, NewIncompleteBuffer(cfg, tel)) tx := generateIPv4HTTPTransaction( util.AddressFromString("1.1.1.1"), util.AddressFromString("2.2.2.2"), @@ -254,7 +254,7 @@ func TestHTTPCorrectness(t *testing.T) { cfg.MaxHTTPStatsBuffered = 1000 libtelemetry.Clear() tel := NewTelemetry("http") - sk := NewStatkeeper(cfg, tel) + sk := NewStatkeeper(cfg, tel, NewIncompleteBuffer(cfg, tel)) tx := generateIPv4HTTPTransaction( util.AddressFromString("1.1.1.1"), util.AddressFromString("2.2.2.2"), @@ -279,7 +279,7 @@ func TestHTTPCorrectness(t *testing.T) { cfg.MaxHTTPStatsBuffered = 1000 libtelemetry.Clear() tel := NewTelemetry("http") - sk := NewStatkeeper(cfg, tel) + sk := NewStatkeeper(cfg, tel, NewIncompleteBuffer(cfg, tel)) tx := generateIPv4HTTPTransaction( util.AddressFromString("1.1.1.1"), util.AddressFromString("2.2.2.2"), @@ -303,7 +303,7 @@ func TestHTTPCorrectness(t *testing.T) { cfg.MaxHTTPStatsBuffered = 1000 libtelemetry.Clear() tel := NewTelemetry("http") - sk := NewStatkeeper(cfg, tel) + sk := NewStatkeeper(cfg, tel, NewIncompleteBuffer(cfg, tel)) tx := generateIPv4HTTPTransaction( util.AddressFromString("1.1.1.1"), util.AddressFromString("2.2.2.2"), diff --git a/pkg/network/protocols/http2/protocol.go b/pkg/network/protocols/http2/protocol.go index a4103d0a130bf0..d8522c6fd67432 100644 --- a/pkg/network/protocols/http2/protocol.go +++ b/pkg/network/protocols/http2/protocol.go @@ -196,7 
+196,8 @@ func (p *Protocol) PreStart(mgr *manager.Manager) (err error) { if err = p.dynamicTable.preStart(mgr); err != nil { return } - p.statkeeper = http.NewStatkeeper(p.cfg, p.telemetry) + + p.statkeeper = http.NewStatkeeper(p.cfg, p.telemetry, http.NewIncompleteBuffer(p.cfg, p.telemetry)) p.eventsConsumer.Start() if err = p.createStaticTable(mgr); err != nil { diff --git a/pkg/network/usm/monitor_windows.go b/pkg/network/usm/monitor_windows.go index 234e30b32d3730..8d02c89542287b 100644 --- a/pkg/network/usm/monitor_windows.go +++ b/pkg/network/usm/monitor_windows.go @@ -55,7 +55,7 @@ func NewWindowsMonitor(c *config.Config, dh driver.Handle) (Monitor, error) { di: di, hei: hei, telemetry: telemetry, - statkeeper: http.NewStatkeeper(c, telemetry), + statkeeper: http.NewStatkeeper(c, telemetry, http.NewIncompleteBuffer(c, telemetry)), }, nil } From 4c5f9d69388bf806b895695fd1e76990f0450715 Mon Sep 17 00:00:00 2001 From: Bryce Kahle Date: Thu, 14 Dec 2023 15:01:01 -0800 Subject: [PATCH 03/66] remove force-recreate (#21549) remove force-recreate from docker-compose up --- pkg/network/protocols/testutil/serverutils.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/network/protocols/testutil/serverutils.go b/pkg/network/protocols/testutil/serverutils.go index 07e53d88746823..a94f972a1c3942 100644 --- a/pkg/network/protocols/testutil/serverutils.go +++ b/pkg/network/protocols/testutil/serverutils.go @@ -49,7 +49,7 @@ func runDockerServer(t testing.TB, serverName, dockerPath string, env []string, ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) - cmd := exec.CommandContext(ctx, "docker-compose", "-f", dockerPath, "up", "--force-recreate", "--remove-orphans", "-V") + cmd := exec.CommandContext(ctx, "docker-compose", "-f", dockerPath, "up", "--remove-orphans", "-V") patternScanner := NewScanner(serverStartRegex, make(chan struct{}, 1)) cmd.Stdout = patternScanner From 73b5bc31aeb992a71567e42183f9bc9952f75a4e Mon Sep 17 
00:00:00 2001 From: "Brian L. Troutwine" Date: Thu, 14 Dec 2023 15:20:47 -0800 Subject: [PATCH 04/66] Update lading and smp to latest release in Regression Detector (#21577) This commit updates `lading` and `smp` to their latest releases. This most important change here is the introduction of memory/CPU optimization goals. These new goals are demonstrated in this PR. Signed-off-by: Brian L. Troutwine --- .gitlab/functional_test/regression_detector.yml | 4 ++-- test/regression/cases/file_to_blackhole/experiment.yaml | 2 +- test/regression/cases/file_tree/experiment.yaml | 2 +- test/regression/cases/idle/experiment.yaml | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.gitlab/functional_test/regression_detector.yml b/.gitlab/functional_test/regression_detector.yml index 00861cbebae8c9..6823f39571aa38 100644 --- a/.gitlab/functional_test/regression_detector.yml +++ b/.gitlab/functional_test/regression_detector.yml @@ -17,8 +17,8 @@ single-machine-performance-regression_detector: - outputs/report.html # for debugging, also on S3 when: always variables: - SMP_VERSION: 0.11.0 - LADING_VERSION: 0.20.1 + SMP_VERSION: 0.12.0 + LADING_VERSION: 0.20.4 CPUS: 7 MEMORY: "30g" # At present we require two artifacts to exist for the 'baseline' and the diff --git a/test/regression/cases/file_to_blackhole/experiment.yaml b/test/regression/cases/file_to_blackhole/experiment.yaml index ceb8abc46adf71..138b1d9767b561 100644 --- a/test/regression/cases/file_to_blackhole/experiment.yaml +++ b/test/regression/cases/file_to_blackhole/experiment.yaml @@ -1,4 +1,4 @@ -optimization_goal: egress_throughput +optimization_goal: cpu erratic: true environment: diff --git a/test/regression/cases/file_tree/experiment.yaml b/test/regression/cases/file_tree/experiment.yaml index ddae44307f7c3e..fac17ffafd70aa 100644 --- a/test/regression/cases/file_tree/experiment.yaml +++ b/test/regression/cases/file_tree/experiment.yaml @@ -1,4 +1,4 @@ -optimization_goal: egress_throughput 
+optimization_goal: memory erratic: true environment: diff --git a/test/regression/cases/idle/experiment.yaml b/test/regression/cases/idle/experiment.yaml index 483bd4674c74f1..d5e4a79fb0edda 100644 --- a/test/regression/cases/idle/experiment.yaml +++ b/test/regression/cases/idle/experiment.yaml @@ -1,4 +1,4 @@ -optimization_goal: egress_throughput +optimization_goal: memory erratic: true environment: From 2de567fa072385d44b155f06b3d33b4b4d70b693 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 15 Dec 2023 08:35:42 +0000 Subject: [PATCH 05/66] CWS: sync BTFhub constants (#21579) CWS: sync BTFHub constants Co-authored-by: paulcacheux --- .../probe/constantfetch/btfhub/constants.json | 23 ++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/pkg/security/probe/constantfetch/btfhub/constants.json b/pkg/security/probe/constantfetch/btfhub/constants.json index 53575660be3c9e..0bed8a2bfefbee 100644 --- a/pkg/security/probe/constantfetch/btfhub/constants.json +++ b/pkg/security/probe/constantfetch/btfhub/constants.json @@ -1,5 +1,5 @@ { - "commit": "12986b7127d3a55b9737b527c50b0141e4cab6e9", + "commit": "332a1326b48dbf33aad402eb274fbffb37ec8c68", "constants": [ { "binprm_file_offset": 168, @@ -8183,6 +8183,13 @@ "uname_release": "4.14.328-248.540.amzn2.aarch64", "cindex": 3 }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.330-250.540.amzn2.aarch64", + "cindex": 3 + }, { "distrib": "amzn", "version": "2", @@ -8855,6 +8862,13 @@ "uname_release": "4.14.33-59.37.amzn2.x86_64", "cindex": 8 }, + { + "distrib": "amzn", + "version": "2", + "arch": "x86_64", + "uname_release": "4.14.330-250.540.amzn2.x86_64", + "cindex": 7 + }, { "distrib": "amzn", "version": "2", @@ -10066,6 +10080,13 @@ "uname_release": "3.10.0-1160.102.1.el7.x86_64", "cindex": 34 }, + { + "distrib": "centos", + "version": "7", + "arch": "x86_64", + "uname_release": 
"3.10.0-1160.105.1.el7.x86_64", + "cindex": 34 + }, { "distrib": "centos", "version": "7", From 23313f373d47e08758221884d90a964d64a52175 Mon Sep 17 00:00:00 2001 From: Paul Cacheux Date: Fri, 15 Dec 2023 12:02:27 +0100 Subject: [PATCH 06/66] cleanup pid entry at the end of the loop instead of the function (#21581) [CWS] cleanup pid entry at the end of the loop instead of the function --- pkg/security/probe/probe_windows.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/pkg/security/probe/probe_windows.go b/pkg/security/probe/probe_windows.go index 46fdb6314599ab..6e119b6fd85d53 100644 --- a/pkg/security/probe/probe_windows.go +++ b/pkg/security/probe/probe_windows.go @@ -79,6 +79,8 @@ func (p *WindowsProbe) Start() error { for { var pce *model.ProcessCacheEntry ev := p.zeroEvent() + var pidToCleanup uint32 + select { case <-p.ctx.Done(): return @@ -113,7 +115,6 @@ func (p *WindowsProbe) Start() error { log.Infof("Received stop %v", stop) pce = p.Resolvers.ProcessResolver.GetEntry(pid) - defer p.Resolvers.ProcessResolver.DeleteEntry(pid, time.Now()) ev.Type = uint32(model.ExitEventType) if pce == nil { @@ -132,6 +133,11 @@ func (p *WindowsProbe) Start() error { ev.ProcessContext = &pce.ProcessContext p.DispatchEvent(ev) + + if pidToCleanup != 0 { + p.Resolvers.ProcessResolver.DeleteEntry(pidToCleanup, time.Now()) + pidToCleanup = 0 + } } }() return p.pm.Start() From e54fe9ef80a84c94d0ae9ffb67f0d5a16a9a3eb5 Mon Sep 17 00:00:00 2001 From: Sylvain Afchain Date: Fri, 15 Dec 2023 12:30:01 +0100 Subject: [PATCH 07/66] [CWS] add an option to change UID/GID (#21558) --- .../subcommands/tracecmd/trace.go | 19 ++++++++++++++++++- pkg/security/ptracer/cws.go | 3 ++- pkg/security/ptracer/exec.go | 17 ++++++++++++++++- pkg/security/ptracer/ptracer.go | 9 ++++++++- 4 files changed, 44 insertions(+), 4 deletions(-) diff --git a/cmd/cws-instrumentation/subcommands/tracecmd/trace.go b/cmd/cws-instrumentation/subcommands/tracecmd/trace.go index 
a44349d0e7cef5..379573e97d4f44 100644 --- a/cmd/cws-instrumentation/subcommands/tracecmd/trace.go +++ b/cmd/cws-instrumentation/subcommands/tracecmd/trace.go @@ -19,11 +19,17 @@ const ( probeAddr = "probe-addr" // logLevel defines the log level verbose = "verbose" + // uid used to start the tracee + uid = "uid" + // gid used to start the tracee + gid = "gid" ) type traceCliParams struct { ProbeAddr string Verbose bool + UID int32 + GID int32 } // Command returns the commands for the trace subcommand @@ -34,12 +40,23 @@ func Command() []*cobra.Command { Use: "trace", Short: "trace the syscalls and signals of the given binary", RunE: func(cmd *cobra.Command, args []string) error { - return ptracer.StartCWSPtracer(args, params.ProbeAddr, params.Verbose) + creds := ptracer.Creds{} + if params.UID != -1 { + uid := uint32(params.UID) + creds.UID = &uid + } + if params.GID != -1 { + gid := uint32(params.GID) + creds.GID = &gid + } + return ptracer.StartCWSPtracer(args, params.ProbeAddr, creds, params.Verbose) }, } traceCmd.Flags().StringVar(¶ms.ProbeAddr, probeAddr, "localhost:5678", "system-probe eBPF less GRPC address") traceCmd.Flags().BoolVar(¶ms.Verbose, verbose, false, "enable verbose output") + traceCmd.Flags().Int32Var(¶ms.UID, uid, -1, "uid used to start the tracee") + traceCmd.Flags().Int32Var(¶ms.GID, gid, -1, "gid used to start the tracee") return []*cobra.Command{traceCmd} } diff --git a/pkg/security/ptracer/cws.go b/pkg/security/ptracer/cws.go index 85391a833bbe57..23ea5421325a95 100644 --- a/pkg/security/ptracer/cws.go +++ b/pkg/security/ptracer/cws.go @@ -368,7 +368,7 @@ func checkEntryPoint(path string) (string, error) { } // StartCWSPtracer start the ptracer -func StartCWSPtracer(args []string, probeAddr string, verbose bool) error { +func StartCWSPtracer(args []string, probeAddr string, creds Creds, verbose bool) error { entry, err := checkEntryPoint(args[0]) if err != nil { return err @@ -419,6 +419,7 @@ func StartCWSPtracer(args []string, probeAddr 
string, verbose bool) error { opts := Opts{ Syscalls: PtracedSyscalls, + Creds: creds, } tracer, err := NewTracer(entry, args, opts) diff --git a/pkg/security/ptracer/exec.go b/pkg/security/ptracer/exec.go index e1d73ae18d9bd1..3673bff4365753 100644 --- a/pkg/security/ptracer/exec.go +++ b/pkg/security/ptracer/exec.go @@ -25,7 +25,7 @@ func runtimeAfterForkInChild() //go:norace //nolint:unused -func forkExec(argv0 string, argv []string, envv []string, prog *syscall.SockFprog) (int, error) { +func forkExec(argv0 string, argv []string, envv []string, creds Creds, prog *syscall.SockFprog) (int, error) { argv0p, err := syscall.BytePtrFromString(argv0) if err != nil { return 0, err @@ -92,6 +92,21 @@ func forkExec(argv0 string, argv []string, envv []string, prog *syscall.SockFpro exit(errno) } + if creds.GID != nil { + _, _, errno = syscall.RawSyscall(syscall.SYS_SETGID, uintptr(*creds.GID), 0, 0) + if errno != 0 { + exit(errno) + } + + } + + if creds.UID != nil { + _, _, errno = syscall.RawSyscall(syscall.SYS_SETUID, uintptr(*creds.UID), 0, 0) + if errno != 0 { + exit(errno) + } + } + _, _, err = syscall.RawSyscall(syscall.SYS_EXECVE, uintptr(unsafe.Pointer(argv0p)), uintptr(unsafe.Pointer(&argvp[0])), diff --git a/pkg/security/ptracer/ptracer.go b/pkg/security/ptracer/ptracer.go index ceed9b89a01515..80fda54e41a9d5 100644 --- a/pkg/security/ptracer/ptracer.go +++ b/pkg/security/ptracer/ptracer.go @@ -50,9 +50,16 @@ type Tracer struct { info *arch.Info } +// Creds defines credentials +type Creds struct { + UID *uint32 + GID *uint32 +} + // Opts defines syscall filters type Opts struct { Syscalls []string + Creds Creds } func processVMReadv(pid int, addr uintptr, data []byte) (int, error) { @@ -303,7 +310,7 @@ func NewTracer(path string, args []string, opts Opts) (*Tracer, error) { runtime.LockOSThread() - pid, err := forkExec(path, args, os.Environ(), prog) + pid, err := forkExec(path, args, os.Environ(), opts.Creds, prog) if err != nil { return nil, err } From 
03d95887253bb3380233fe57fa5439b57458b27f Mon Sep 17 00:00:00 2001 From: Nicolas Guerguadj <35628945+Kaderinho@users.noreply.github.com> Date: Fri, 15 Dec 2023 12:47:09 +0100 Subject: [PATCH 08/66] Move args from StartAgent to Api component dependencies (#21530) Move args from StartAgent to Api component dependencies --- cmd/agent/subcommands/jmx/command.go | 23 +++++++ cmd/agent/subcommands/run/command.go | 26 +------- cmd/agent/subcommands/run/command_windows.go | 12 +--- comp/api/api/apiimpl/api.go | 70 ++++++++++++++------ comp/api/api/apiimpl/api_mock.go | 20 ------ comp/api/api/component.go | 28 ++------ pkg/cli/standalone/jmx.go | 2 +- pkg/cli/subcommands/check/command.go | 18 +++++ 8 files changed, 96 insertions(+), 103 deletions(-) diff --git a/cmd/agent/subcommands/jmx/command.go b/cmd/agent/subcommands/jmx/command.go index 12da87423f2413..edf20dc7cf31e4 100644 --- a/cmd/agent/subcommands/jmx/command.go +++ b/cmd/agent/subcommands/jmx/command.go @@ -22,16 +22,26 @@ import ( "github.com/DataDog/datadog-agent/cmd/agent/command" "github.com/DataDog/datadog-agent/cmd/agent/common" "github.com/DataDog/datadog-agent/cmd/agent/common/path" + "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer" "github.com/DataDog/datadog-agent/comp/aggregator/diagnosesendermanager" "github.com/DataDog/datadog-agent/comp/aggregator/diagnosesendermanager/diagnosesendermanagerimpl" internalAPI "github.com/DataDog/datadog-agent/comp/api/api" + "github.com/DataDog/datadog-agent/comp/api/api/apiimpl" "github.com/DataDog/datadog-agent/comp/core" "github.com/DataDog/datadog-agent/comp/core/config" + "github.com/DataDog/datadog-agent/comp/core/flare" "github.com/DataDog/datadog-agent/comp/core/log/logimpl" "github.com/DataDog/datadog-agent/comp/core/secrets" "github.com/DataDog/datadog-agent/comp/core/workloadmeta" "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors" + "github.com/DataDog/datadog-agent/comp/dogstatsd/replay" + dogstatsdServer 
"github.com/DataDog/datadog-agent/comp/dogstatsd/server" + serverdebug "github.com/DataDog/datadog-agent/comp/dogstatsd/serverDebug" + "github.com/DataDog/datadog-agent/comp/metadata/host" + "github.com/DataDog/datadog-agent/comp/metadata/inventoryagent" + "github.com/DataDog/datadog-agent/comp/metadata/inventorychecks" + "github.com/DataDog/datadog-agent/comp/metadata/inventoryhost" "github.com/DataDog/datadog-agent/pkg/autodiscovery/integration" "github.com/DataDog/datadog-agent/pkg/cli/standalone" "github.com/DataDog/datadog-agent/pkg/collector" @@ -112,6 +122,19 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { }), workloadmeta.Module(), apiimpl.Module(), + + // TODO(components): this is a temporary hack as the StartServer() method of the API package was previously called with nil arguments + // This highlights the fact that the API Server created by JMX (through ExecJmx... function) should be different from the ones created + // in other commands such as run. 
+ fx.Provide(func() flare.Component { return nil }), + fx.Provide(func() dogstatsdServer.Component { return nil }), + fx.Provide(func() replay.Component { return nil }), + fx.Provide(func() serverdebug.Component { return nil }), + fx.Provide(func() host.Component { return nil }), + fx.Provide(func() inventoryagent.Component { return nil }), + fx.Provide(func() inventoryhost.Component { return nil }), + fx.Provide(func() demultiplexer.Component { return nil }), + fx.Provide(func() inventorychecks.Component { return nil }), ) } diff --git a/cmd/agent/subcommands/run/command.go b/cmd/agent/subcommands/run/command.go index 6f17783f34f0bc..14fd7b756c05b5 100644 --- a/cmd/agent/subcommands/run/command.go +++ b/cmd/agent/subcommands/run/command.go @@ -52,7 +52,6 @@ import ( "github.com/DataDog/datadog-agent/comp/core/workloadmeta" "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors" "github.com/DataDog/datadog-agent/comp/dogstatsd" - "github.com/DataDog/datadog-agent/comp/dogstatsd/replay" dogstatsdServer "github.com/DataDog/datadog-agent/comp/dogstatsd/server" dogstatsddebug "github.com/DataDog/datadog-agent/comp/dogstatsd/serverDebug" "github.com/DataDog/datadog-agent/comp/forwarder" @@ -66,10 +65,8 @@ import ( "github.com/DataDog/datadog-agent/comp/logs" logsAgent "github.com/DataDog/datadog-agent/comp/logs/agent" "github.com/DataDog/datadog-agent/comp/metadata" - "github.com/DataDog/datadog-agent/comp/metadata/host" "github.com/DataDog/datadog-agent/comp/metadata/inventoryagent" "github.com/DataDog/datadog-agent/comp/metadata/inventorychecks" - "github.com/DataDog/datadog-agent/comp/metadata/inventoryhost" "github.com/DataDog/datadog-agent/comp/metadata/runner" "github.com/DataDog/datadog-agent/comp/ndmtmp" "github.com/DataDog/datadog-agent/comp/netflow" @@ -200,7 +197,6 @@ func run(log log.Component, telemetry telemetry.Component, sysprobeconfig sysprobeconfig.Component, server dogstatsdServer.Component, - capture replay.Component, serverDebug 
dogstatsddebug.Component, forwarder defaultforwarder.Component, wmeta workloadmeta.Component, @@ -212,10 +208,8 @@ func run(log log.Component, cliParams *cliParams, logsAgent optional.Option[logsAgent.Component], otelcollector otelcollector.Component, - hostMetadata host.Component, invAgent inventoryagent.Component, - invHost inventoryhost.Component, - secretResolver secrets.Component, + _ secrets.Component, invChecks inventorychecks.Component, _ netflowServer.Component, _ langDetectionCl.Component, @@ -268,7 +262,6 @@ func run(log log.Component, telemetry, sysprobeconfig, server, - capture, serverDebug, wmeta, rcclient, @@ -277,10 +270,7 @@ func run(log log.Component, sharedSerializer, otelcollector, demultiplexer, - hostMetadata, invAgent, - invHost, - secretResolver, agentAPI, invChecks, ); err != nil { @@ -386,7 +376,6 @@ func startAgent( //nolint:revive // TODO(ASC) Fix revive linter sysprobeconfig sysprobeconfig.Component, server dogstatsdServer.Component, - capture replay.Component, serverDebug dogstatsddebug.Component, wmeta workloadmeta.Component, rcclient rcclient.Component, @@ -397,10 +386,7 @@ func startAgent( sharedSerializer serializer.MetricSerializer, otelcollector otelcollector.Component, demultiplexer demultiplexer.Component, - hostMetadata host.Component, invAgent inventoryagent.Component, - invHost inventoryhost.Component, - secretResolver secrets.Component, agentAPI internalAPI.Component, invChecks inventorychecks.Component, ) error { @@ -544,19 +530,9 @@ func startAgent( // start the cmd HTTP server if err = agentAPI.StartServer( configService, - flare, - server, - capture, - serverDebug, wmeta, logsAgent, demultiplexer, - hostMetadata, - invAgent, - demultiplexer, - invHost, - secretResolver, - invChecks, ); err != nil { return log.Errorf("Error while starting api server, exiting: %v", err) } diff --git a/cmd/agent/subcommands/run/command_windows.go b/cmd/agent/subcommands/run/command_windows.go index 6a402d16f20b3d..6b928d17e0c8bf 100644 --- 
a/cmd/agent/subcommands/run/command_windows.go +++ b/cmd/agent/subcommands/run/command_windows.go @@ -44,15 +44,12 @@ import ( "github.com/DataDog/datadog-agent/comp/core/sysprobeconfig/sysprobeconfigimpl" "github.com/DataDog/datadog-agent/comp/core/telemetry" "github.com/DataDog/datadog-agent/comp/core/workloadmeta" - "github.com/DataDog/datadog-agent/comp/dogstatsd/replay" dogstatsdServer "github.com/DataDog/datadog-agent/comp/dogstatsd/server" dogstatsddebug "github.com/DataDog/datadog-agent/comp/dogstatsd/serverDebug" "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder" logsAgent "github.com/DataDog/datadog-agent/comp/logs/agent" - "github.com/DataDog/datadog-agent/comp/metadata/host" "github.com/DataDog/datadog-agent/comp/metadata/inventoryagent" "github.com/DataDog/datadog-agent/comp/metadata/inventorychecks" - "github.com/DataDog/datadog-agent/comp/metadata/inventoryhost" "github.com/DataDog/datadog-agent/comp/metadata/runner" netflowServer "github.com/DataDog/datadog-agent/comp/netflow/server" otelcollector "github.com/DataDog/datadog-agent/comp/otelcol/collector" @@ -84,7 +81,6 @@ func StartAgentWithDefaults(ctxChan <-chan context.Context) (<-chan error, error sysprobeconfig sysprobeconfig.Component, server dogstatsdServer.Component, serverDebug dogstatsddebug.Component, - capture replay.Component, wmeta workloadmeta.Component, rcclient rcclient.Component, forwarder defaultforwarder.Component, @@ -93,10 +89,8 @@ func StartAgentWithDefaults(ctxChan <-chan context.Context) (<-chan error, error sharedSerializer serializer.MetricSerializer, otelcollector otelcollector.Component, demultiplexer demultiplexer.Component, - hostMetadata host.Component, invAgent inventoryagent.Component, - invHost inventoryhost.Component, - secretResolver secrets.Component, + _ secrets.Component, invChecks inventorychecks.Component, _ netflowServer.Component, agentAPI internalAPI.Component, @@ -111,7 +105,6 @@ func StartAgentWithDefaults(ctxChan <-chan context.Context) 
(<-chan error, error telemetry, sysprobeconfig, server, - capture, serverDebug, wmeta, rcclient, @@ -120,10 +113,7 @@ func StartAgentWithDefaults(ctxChan <-chan context.Context) (<-chan error, error sharedSerializer, otelcollector, demultiplexer, - hostMetadata, invAgent, - invHost, - secretResolver, agentAPI, invChecks, ) diff --git a/comp/api/api/apiimpl/api.go b/comp/api/api/apiimpl/api.go index ece88229387313..b870b0f8303f76 100644 --- a/comp/api/api/apiimpl/api.go +++ b/comp/api/api/apiimpl/api.go @@ -37,45 +37,71 @@ func Module() fxutil.Module { } type apiServer struct { + flare flare.Component + dogstatsdServer dogstatsdServer.Component + capture replay.Component + serverDebug dogstatsddebug.Component + hostMetadata host.Component + invAgent inventoryagent.Component + demux demultiplexer.Component + invHost inventoryhost.Component + secretResolver secrets.Component + invChecks inventorychecks.Component +} + +type dependencies struct { + fx.In + + Flare flare.Component + DogstatsdServer dogstatsdServer.Component + Capture replay.Component + ServerDebug dogstatsddebug.Component + HostMetadata host.Component + InvAgent inventoryagent.Component + Demux demultiplexer.Component + InvHost inventoryhost.Component + SecretResolver secrets.Component + InvChecks inventorychecks.Component } var _ api.Component = (*apiServer)(nil) -func newAPIServer() api.Component { - return &apiServer{} +func newAPIServer(deps dependencies) api.Component { + return &apiServer{ + flare: deps.Flare, + dogstatsdServer: deps.DogstatsdServer, + capture: deps.Capture, + serverDebug: deps.ServerDebug, + hostMetadata: deps.HostMetadata, + invAgent: deps.InvAgent, + demux: deps.Demux, + invHost: deps.InvHost, + secretResolver: deps.SecretResolver, + invChecks: deps.InvChecks, + } } // StartServer creates the router and starts the HTTP server func (server *apiServer) StartServer( configService *remoteconfig.Service, - flare flare.Component, - dogstatsdServer dogstatsdServer.Component, - capture 
replay.Component, - serverDebug dogstatsddebug.Component, wmeta workloadmeta.Component, logsAgent optional.Option[logsAgent.Component], senderManager sender.DiagnoseSenderManager, - hostMetadata host.Component, - invAgent inventoryagent.Component, - demux demultiplexer.Component, - invHost inventoryhost.Component, - secretResolver secrets.Component, - invChecks inventorychecks.Component, ) error { return StartServers(configService, - flare, - dogstatsdServer, - capture, - serverDebug, + server.flare, + server.dogstatsdServer, + server.capture, + server.serverDebug, wmeta, logsAgent, senderManager, - hostMetadata, - invAgent, - demux, - invHost, - secretResolver, - invChecks, + server.hostMetadata, + server.invAgent, + server.demux, + server.invHost, + server.secretResolver, + server.invChecks, ) } diff --git a/comp/api/api/apiimpl/api_mock.go b/comp/api/api/apiimpl/api_mock.go index 1431b722ed44a6..85b2076254a0c3 100644 --- a/comp/api/api/apiimpl/api_mock.go +++ b/comp/api/api/apiimpl/api_mock.go @@ -12,19 +12,9 @@ import ( "go.uber.org/fx" - "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer" "github.com/DataDog/datadog-agent/comp/api/api" - "github.com/DataDog/datadog-agent/comp/core/flare" - "github.com/DataDog/datadog-agent/comp/core/secrets" "github.com/DataDog/datadog-agent/comp/core/workloadmeta" - "github.com/DataDog/datadog-agent/comp/dogstatsd/replay" - dogstatsdServer "github.com/DataDog/datadog-agent/comp/dogstatsd/server" - dogstatsddebug "github.com/DataDog/datadog-agent/comp/dogstatsd/serverDebug" logsAgent "github.com/DataDog/datadog-agent/comp/logs/agent" - "github.com/DataDog/datadog-agent/comp/metadata/host" - "github.com/DataDog/datadog-agent/comp/metadata/inventoryagent" - "github.com/DataDog/datadog-agent/comp/metadata/inventorychecks" - "github.com/DataDog/datadog-agent/comp/metadata/inventoryhost" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" remoteconfig "github.com/DataDog/datadog-agent/pkg/config/remote/service" 
"github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -49,19 +39,9 @@ func newMock() api.Mock { // StartServer creates the router and starts the HTTP server func (mock *mockAPIServer) StartServer( _ *remoteconfig.Service, - _ flare.Component, - _ dogstatsdServer.Component, - _ replay.Component, - _ dogstatsddebug.Component, _ workloadmeta.Component, _ optional.Option[logsAgent.Component], _ sender.DiagnoseSenderManager, - _ host.Component, - _ inventoryagent.Component, - _ demultiplexer.Component, - _ inventoryhost.Component, - _ secrets.Component, - _ inventorychecks.Component, ) error { return nil } diff --git a/comp/api/api/component.go b/comp/api/api/component.go index 0a5d01dff526a2..f84839a1dc60e4 100644 --- a/comp/api/api/component.go +++ b/comp/api/api/component.go @@ -9,18 +9,8 @@ package api import ( "net" - "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer" - "github.com/DataDog/datadog-agent/comp/core/flare" - "github.com/DataDog/datadog-agent/comp/core/secrets" "github.com/DataDog/datadog-agent/comp/core/workloadmeta" - "github.com/DataDog/datadog-agent/comp/dogstatsd/replay" - dogstatsdServer "github.com/DataDog/datadog-agent/comp/dogstatsd/server" - dogstatsddebug "github.com/DataDog/datadog-agent/comp/dogstatsd/serverDebug" logsAgent "github.com/DataDog/datadog-agent/comp/logs/agent" - "github.com/DataDog/datadog-agent/comp/metadata/host" - "github.com/DataDog/datadog-agent/comp/metadata/inventoryagent" - "github.com/DataDog/datadog-agent/comp/metadata/inventorychecks" - "github.com/DataDog/datadog-agent/comp/metadata/inventoryhost" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" remoteconfig "github.com/DataDog/datadog-agent/pkg/config/remote/service" "github.com/DataDog/datadog-agent/pkg/util/optional" @@ -28,28 +18,18 @@ import ( // team: agent-shared-components -// TODO(components): This component is still in progress: -// * Code from /cmd/agent/internal/api will be moved in the apiimpl folder -// * StartServer args will 
be moved into the Component struct directly -// * Lifecycle will be used instead of directly calling StartServer and StopServer +// TODO(components): +// * Lifecycle can't be used atm because: +// - logsAgent and remoteconfig.Service are modified in `startAgent` in the run subcommand +// - Same for workloadmeta and senderManager in `execJmxCommand` in the jmx subcommand // Component is the component type. type Component interface { StartServer( configService *remoteconfig.Service, - flare flare.Component, - dogstatsdServer dogstatsdServer.Component, - capture replay.Component, - serverDebug dogstatsddebug.Component, wmeta workloadmeta.Component, logsAgent optional.Option[logsAgent.Component], senderManager sender.DiagnoseSenderManager, - hostMetadata host.Component, - invAgent inventoryagent.Component, - demux demultiplexer.Component, - invHost inventoryhost.Component, - secretResolver secrets.Component, - invChecks inventorychecks.Component, ) error StopServer() ServerAddress() *net.TCPAddr diff --git a/pkg/cli/standalone/jmx.go b/pkg/cli/standalone/jmx.go index b7b0adf192ae7c..bf45eb2c78ef2c 100644 --- a/pkg/cli/standalone/jmx.go +++ b/pkg/cli/standalone/jmx.go @@ -58,7 +58,7 @@ func ExecJmxListWithRateMetricsJSON(selectedChecks []string, logLevel string, co // The common utils, including AutoConfig, must have already been initialized. 
func execJmxCommand(command string, selectedChecks []string, reporter jmxfetch.JMXReporter, output func(...interface{}), logLevel string, configs []integration.Config, wmeta workloadmeta.Component, senderManager sender.DiagnoseSenderManager, agentAPI internalAPI.Component) error { // start the cmd HTTP server - if err := agentAPI.StartServer(nil, nil, nil, nil, nil, wmeta, optional.NewNoneOption[logsAgent.Component](), senderManager, nil, nil, nil, nil, nil, nil); err != nil { + if err := agentAPI.StartServer(nil, wmeta, optional.NewNoneOption[logsAgent.Component](), senderManager); err != nil { return fmt.Errorf("Error while starting api server, exiting: %v", err) } diff --git a/pkg/cli/subcommands/check/command.go b/pkg/cli/subcommands/check/command.go index dfea061db4e18d..71d0dff4d2a98c 100644 --- a/pkg/cli/subcommands/check/command.go +++ b/pkg/cli/subcommands/check/command.go @@ -31,15 +31,22 @@ import ( "github.com/DataDog/datadog-agent/comp/api/api/apiimpl" "github.com/DataDog/datadog-agent/comp/core" "github.com/DataDog/datadog-agent/comp/core/config" + "github.com/DataDog/datadog-agent/comp/core/flare" "github.com/DataDog/datadog-agent/comp/core/log/logimpl" "github.com/DataDog/datadog-agent/comp/core/secrets" "github.com/DataDog/datadog-agent/comp/core/sysprobeconfig/sysprobeconfigimpl" "github.com/DataDog/datadog-agent/comp/core/workloadmeta" + "github.com/DataDog/datadog-agent/comp/dogstatsd/replay" + "github.com/DataDog/datadog-agent/comp/dogstatsd/server" + serverdebug "github.com/DataDog/datadog-agent/comp/dogstatsd/serverDebug" "github.com/DataDog/datadog-agent/comp/forwarder" "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder" orchestratorForwarderImpl "github.com/DataDog/datadog-agent/comp/forwarder/orchestrator/orchestratorimpl" + "github.com/DataDog/datadog-agent/comp/metadata/host" + "github.com/DataDog/datadog-agent/comp/metadata/inventoryagent" "github.com/DataDog/datadog-agent/comp/metadata/inventorychecks" 
"github.com/DataDog/datadog-agent/comp/metadata/inventorychecks/inventorychecksimpl" + "github.com/DataDog/datadog-agent/comp/metadata/inventoryhost" "github.com/DataDog/datadog-agent/pkg/aggregator" "github.com/DataDog/datadog-agent/pkg/autodiscovery" "github.com/DataDog/datadog-agent/pkg/autodiscovery/integration" @@ -158,6 +165,17 @@ func MakeCommand(globalParamsGetter func() GlobalParams) *cobra.Command { opts.UseNoopEventPlatformForwarder = true return demultiplexer.Params{Options: opts} }), + + // TODO(components): this is a temporary hack as the StartServer() method of the API package was previously called with nil arguments + // This highlights the fact that the API Server created by JMX (through ExecJmx... function) should be different from the ones created + // in other commands such as run. + fx.Provide(func() flare.Component { return nil }), + fx.Provide(func() server.Component { return nil }), + fx.Provide(func() replay.Component { return nil }), + fx.Provide(func() serverdebug.Component { return nil }), + fx.Provide(func() host.Component { return nil }), + fx.Provide(func() inventoryagent.Component { return nil }), + fx.Provide(func() inventoryhost.Component { return nil }), ) }, } From 48c5069846530f7a9e83cd6ed0697bd36b8af41e Mon Sep 17 00:00:00 2001 From: Yoann Ghigoff Date: Fri, 15 Dec 2023 14:35:27 +0100 Subject: [PATCH 09/66] [CWS] fix ebpfless probe status stacktrace (#21572) [CWS] fix stacktrace when reporting status of ebpfless probe --- pkg/security/module/server_linux.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pkg/security/module/server_linux.go b/pkg/security/module/server_linux.go index 3e044916dc738f..4ca3c5d4ae339f 100644 --- a/pkg/security/module/server_linux.go +++ b/pkg/security/module/server_linux.go @@ -183,13 +183,13 @@ func (a *APIServer) GetStatus(_ context.Context, _ *api.GetStatusParams) (*api.S UseMmapableMaps: p.GetKernelVersion().HaveMmapableMaps(), UseRingBuffer: p.UseRingBuffers(), }
- } - envErrors := p.VerifyEnvironment() - if envErrors != nil { - apiStatus.Environment.Warnings = make([]string, len(envErrors.Errors)) - for i, err := range envErrors.Errors { - apiStatus.Environment.Warnings[i] = err.Error() + envErrors := p.VerifyEnvironment() + if envErrors != nil { + apiStatus.Environment.Warnings = make([]string, len(envErrors.Errors)) + for i, err := range envErrors.Errors { + apiStatus.Environment.Warnings[i] = err.Error() + } } } From 18169a18e5e8dacffca8a98e5e081c21607e2aa1 Mon Sep 17 00:00:00 2001 From: Hasan Mahmood <6599778+hmahmood@users.noreply.github.com> Date: Fri, 15 Dec 2023 09:39:55 -0500 Subject: [PATCH 10/66] [NPM][USM] Move probes on sockfd_lookup_light to USM (#21259) --- pkg/network/ebpf/c/co-re/tracer-fentry.c | 51 +-------- pkg/network/ebpf/c/prebuilt/usm.c | 1 + pkg/network/ebpf/c/protocols/http/http.h | 2 +- pkg/network/ebpf/c/protocols/sockfd-probes.h | 101 ++++++++++++++++++ pkg/network/ebpf/c/{ => protocols}/sockfd.h | 0 pkg/network/ebpf/c/runtime/usm.c | 1 + pkg/network/ebpf/c/tracer.c | 86 +-------------- .../tracer/connection/fentry/manager.go | 3 +- .../tracer/connection/fentry/probes.go | 4 - .../tracer/connection/kprobe/config.go | 6 -- pkg/network/usm/ebpf_main.go | 44 ++++---- 11 files changed, 131 insertions(+), 168 deletions(-) create mode 100644 pkg/network/ebpf/c/protocols/sockfd-probes.h rename pkg/network/ebpf/c/{ => protocols}/sockfd.h (100%) diff --git a/pkg/network/ebpf/c/co-re/tracer-fentry.c b/pkg/network/ebpf/c/co-re/tracer-fentry.c index 9a6efcd3506081..37312140a2de38 100644 --- a/pkg/network/ebpf/c/co-re/tracer-fentry.c +++ b/pkg/network/ebpf/c/co-re/tracer-fentry.c @@ -3,7 +3,6 @@ #include "bpf_endian.h" #include "bpf_tracing.h" -#include "sockfd.h" #include "ip.h" #include "ipv6.h" #include "sock.h" @@ -17,6 +16,8 @@ #include "tracer/telemetry.h" #include "tracer/port.h" +#include "protocols/sockfd.h" + BPF_PERCPU_HASH_MAP(udp6_send_skb_args, u64, u64, 1024) 
BPF_PERCPU_HASH_MAP(udp_send_skb_args, u64, conn_tuple_t, 1024) @@ -607,52 +608,4 @@ int BPF_PROG(inet6_bind_exit, struct socket *sock, struct sockaddr *uaddr, int a return sys_exit_bind(rc); } -// this kretprobe is essentially creating: -// * an index of pid_fd_t to a struct sock*; -// * an index of struct sock* to pid_fd_t; -SEC("fexit/sockfd_lookup_light") -int BPF_PROG(sockfd_lookup_light_exit, int fd, int *err, int *fput_needed, struct socket *socket) { - RETURN_IF_NOT_IN_SYSPROBE_TASK("fexit/sockfd_lookup_light"); - u64 pid_tgid = bpf_get_current_pid_tgid(); - // Check if have already a map entry for this pid_fd_t - // TODO: This lookup eliminates *4* map operations for existing entries - // but can reduce the accuracy of programs relying on socket FDs for - // processes with a lot of FD churn - pid_fd_t key = { - .pid = pid_tgid >> 32, - .fd = fd, - }; - - struct sock **skpp = bpf_map_lookup_elem(&sock_by_pid_fd, &key); - if (skpp != NULL) { - return 0; - } - - // For now let's only store information for TCP sockets - const struct proto_ops *proto_ops = BPF_CORE_READ(socket, ops); - if (!proto_ops) { - return 0; - } - - enum sock_type sock_type = BPF_CORE_READ(socket, type); - int family = BPF_CORE_READ(proto_ops, family); - if (sock_type != SOCK_STREAM || !(family == AF_INET || family == AF_INET6)) { - return 0; - } - - // Retrieve struct sock* pointer from struct socket* - struct sock *sock = BPF_CORE_READ(socket, sk); - - pid_fd_t pid_fd = { - .pid = pid_tgid >> 32, - .fd = fd, - }; - - // These entries are cleaned up by tcp_close - bpf_map_update_with_telemetry(pid_fd_by_sock, &sock, &pid_fd, BPF_ANY); - bpf_map_update_with_telemetry(sock_by_pid_fd, &pid_fd, &sock, BPF_ANY); - - return 0; -} - char _license[] SEC("license") = "GPL"; diff --git a/pkg/network/ebpf/c/prebuilt/usm.c b/pkg/network/ebpf/c/prebuilt/usm.c index 52831d6dcb848c..963b50d1037748 100644 --- a/pkg/network/ebpf/c/prebuilt/usm.c +++ b/pkg/network/ebpf/c/prebuilt/usm.c @@ -10,6 +10,7 @@ 
#include "protocols/http/http.h" #include "protocols/http2/decoding.h" #include "protocols/kafka/kafka-parsing.h" +#include "protocols/sockfd-probes.h" #include "protocols/tls/java/erpc_dispatcher.h" #include "protocols/tls/java/erpc_handlers.h" #include "protocols/tls/https.h" diff --git a/pkg/network/ebpf/c/protocols/http/http.h b/pkg/network/ebpf/c/protocols/http/http.h index b3ca4ca9d99720..92f514573f842f 100644 --- a/pkg/network/ebpf/c/protocols/http/http.h +++ b/pkg/network/ebpf/c/protocols/http/http.h @@ -4,7 +4,7 @@ #include "bpf_builtins.h" #include "bpf_telemetry.h" -#include "sockfd.h" +#include "protocols/sockfd.h" #include "protocols/classification/common.h" diff --git a/pkg/network/ebpf/c/protocols/sockfd-probes.h b/pkg/network/ebpf/c/protocols/sockfd-probes.h new file mode 100644 index 00000000000000..69b5e7a3913dff --- /dev/null +++ b/pkg/network/ebpf/c/protocols/sockfd-probes.h @@ -0,0 +1,101 @@ +#ifndef __SOCKFD_PROBES_H +#define __SOCKFD_PROBES_H + +#include "ktypes.h" +#include "bpf_builtins.h" +#include "map-defs.h" + +#ifndef COMPILE_CORE +#include +#include +#endif + +#include "sock.h" +#include "sockfd.h" + +SEC("kprobe/sockfd_lookup_light") +int kprobe__sockfd_lookup_light(struct pt_regs *ctx) { + int sockfd = (int)PT_REGS_PARM1(ctx); + u64 pid_tgid = bpf_get_current_pid_tgid(); + + // Check if have already a map entry for this pid_fd_t + // TODO: This lookup eliminates *4* map operations for existing entries + // but can reduce the accuracy of programs relying on socket FDs for + // processes with a lot of FD churn + pid_fd_t key = { + .pid = pid_tgid >> 32, + .fd = sockfd, + }; + struct sock **sock = bpf_map_lookup_elem(&sock_by_pid_fd, &key); + if (sock != NULL) { + return 0; + } + + bpf_map_update_with_telemetry(sockfd_lookup_args, &pid_tgid, &sockfd, BPF_ANY); + return 0; +} + +static __always_inline const struct proto_ops * socket_proto_ops(struct socket *sock) { + const struct proto_ops *proto_ops = NULL; +#ifdef COMPILE_PREBUILT + 
// (struct socket).ops is always directly after (struct socket).sk, + // which is a pointer. + u64 ops_offset = offset_socket_sk() + sizeof(void *); + bpf_probe_read_kernel_with_telemetry(&proto_ops, sizeof(proto_ops), (char*)sock + ops_offset); +#elif defined(COMPILE_RUNTIME) || defined(COMPILE_CORE) + BPF_CORE_READ_INTO(&proto_ops, sock, ops); +#endif + + return proto_ops; +} + +// this kretprobe is essentially creating: +// * an index of pid_fd_t to a struct sock*; +// * an index of struct sock* to pid_fd_t; +SEC("kretprobe/sockfd_lookup_light") +int kretprobe__sockfd_lookup_light(struct pt_regs *ctx) { + u64 pid_tgid = bpf_get_current_pid_tgid(); + int *sockfd = bpf_map_lookup_elem(&sockfd_lookup_args, &pid_tgid); + if (sockfd == NULL) { + return 0; + } + + // For now let's only store information for TCP sockets + struct socket *socket = (struct socket *)PT_REGS_RC(ctx); + if (!socket) + goto cleanup; + + enum sock_type sock_type = 0; + bpf_probe_read_kernel_with_telemetry(&sock_type, sizeof(short), &socket->type); + + const struct proto_ops *proto_ops = socket_proto_ops(socket); + if (!proto_ops) { + goto cleanup; + } + + int family = 0; + bpf_probe_read_kernel_with_telemetry(&family, sizeof(family), &proto_ops->family); + if (sock_type != SOCK_STREAM || !(family == AF_INET || family == AF_INET6)) { + goto cleanup; + } + + // Retrieve struct sock* pointer from struct socket* + struct sock *sock = socket_sk(socket); + if (!sock) { + goto cleanup; + } + + pid_fd_t pid_fd = { + .pid = pid_tgid >> 32, + .fd = (*sockfd), + }; + + // These entries are cleaned up by tcp_close + bpf_map_update_with_telemetry(pid_fd_by_sock, &sock, &pid_fd, BPF_ANY); + bpf_map_update_with_telemetry(sock_by_pid_fd, &pid_fd, &sock, BPF_ANY); +cleanup: + bpf_map_delete_elem(&sockfd_lookup_args, &pid_tgid); + return 0; +} + +#endif // __SOCKFD_PROBES_H diff --git a/pkg/network/ebpf/c/sockfd.h b/pkg/network/ebpf/c/protocols/sockfd.h similarity index 100% rename from 
pkg/network/ebpf/c/sockfd.h rename to pkg/network/ebpf/c/protocols/sockfd.h diff --git a/pkg/network/ebpf/c/runtime/usm.c b/pkg/network/ebpf/c/runtime/usm.c index 0aa1110e746342..ba2656f256c957 100644 --- a/pkg/network/ebpf/c/runtime/usm.c +++ b/pkg/network/ebpf/c/runtime/usm.c @@ -17,6 +17,7 @@ #include "protocols/http/http.h" #include "protocols/http2/decoding.h" #include "protocols/kafka/kafka-parsing.h" +#include "protocols/sockfd-probes.h" #include "protocols/tls/java/erpc_dispatcher.h" #include "protocols/tls/java/erpc_handlers.h" #include "protocols/tls/go-tls-types.h" diff --git a/pkg/network/ebpf/c/tracer.c b/pkg/network/ebpf/c/tracer.c index 936e8946ad002a..213cd76e11f1df 100644 --- a/pkg/network/ebpf/c/tracer.c +++ b/pkg/network/ebpf/c/tracer.c @@ -11,13 +11,13 @@ #include "prebuilt/offsets.h" #endif #include "skb.h" -#include "sockfd.h" #include "tracer/bind.h" #include "tracer/events.h" #include "tracer/maps.h" #include "tracer/port.h" #include "tracer/tcp_recv.h" #include "protocols/classification/protocol-classification.h" +#include "protocols/sockfd.h" SEC("socket/classifier_entry") int socket__classifier_entry(struct __sk_buff *skb) { @@ -1017,90 +1017,6 @@ int kretprobe__inet6_bind(struct pt_regs *ctx) { return sys_exit_bind(ret); } -SEC("kprobe/sockfd_lookup_light") -int kprobe__sockfd_lookup_light(struct pt_regs *ctx) { - int sockfd = (int)PT_REGS_PARM1(ctx); - u64 pid_tgid = bpf_get_current_pid_tgid(); - - // Check if have already a map entry for this pid_fd_t - // TODO: This lookup eliminates *4* map operations for existing entries - // but can reduce the accuracy of programs relying on socket FDs for - // processes with a lot of FD churn - pid_fd_t key = { - .pid = pid_tgid >> 32, - .fd = sockfd, - }; - struct sock **sock = bpf_map_lookup_elem(&sock_by_pid_fd, &key); - if (sock != NULL) { - return 0; - } - - bpf_map_update_with_telemetry(sockfd_lookup_args, &pid_tgid, &sockfd, BPF_ANY); - return 0; -} - -static __always_inline const struct 
proto_ops * socket_proto_ops(struct socket *sock) { - const struct proto_ops *proto_ops = NULL; -#ifdef COMPILE_PREBUILT - // (struct socket).ops is always directly after (struct socket).sk, - // which is a pointer. - u64 ops_offset = offset_socket_sk() + sizeof(void *); - bpf_probe_read_kernel_with_telemetry(&proto_ops, sizeof(proto_ops), (char*)sock + ops_offset); -#elif defined(COMPILE_RUNTIME) || defined(COMPILE_CORE) - BPF_CORE_READ_INTO(&proto_ops, sock, ops); -#endif - - return proto_ops; -} - -// this kretprobe is essentially creating: -// * an index of pid_fd_t to a struct sock*; -// * an index of struct sock* to pid_fd_t; -SEC("kretprobe/sockfd_lookup_light") -int kretprobe__sockfd_lookup_light(struct pt_regs *ctx) { - u64 pid_tgid = bpf_get_current_pid_tgid(); - int *sockfd = bpf_map_lookup_elem(&sockfd_lookup_args, &pid_tgid); - if (sockfd == NULL) { - return 0; - } - - // For now let's only store information for TCP sockets - struct socket *socket = (struct socket *)PT_REGS_RC(ctx); - if (!socket) - goto cleanup; - - enum sock_type sock_type = 0; - bpf_probe_read_kernel_with_telemetry(&sock_type, sizeof(short), &socket->type); - - const struct proto_ops *proto_ops = socket_proto_ops(socket); - if (!proto_ops) { - goto cleanup; - } - - int family = 0; - bpf_probe_read_kernel_with_telemetry(&family, sizeof(family), &proto_ops->family); - if (sock_type != SOCK_STREAM || !(family == AF_INET || family == AF_INET6)) { - goto cleanup; - } - - // Retrieve struct sock* pointer from struct socket* - struct sock *sock = socket_sk(socket); - if (!sock) { - goto cleanup; - } - - pid_fd_t pid_fd = { - .pid = pid_tgid >> 32, - .fd = (*sockfd), - }; - - // These entries are cleaned up by tcp_close - bpf_map_update_with_telemetry(pid_fd_by_sock, &sock, &pid_fd, BPF_ANY); - bpf_map_update_with_telemetry(sock_by_pid_fd, &pid_fd, &sock, BPF_ANY); -cleanup: - bpf_map_delete_elem(&sockfd_lookup_args, &pid_tgid); - return 0; -} // Represents the parameters being passed to 
the tracepoint net/net_dev_queue struct net_dev_queue_ctx { u64 unused; diff --git a/pkg/network/tracer/connection/fentry/manager.go b/pkg/network/tracer/connection/fentry/manager.go index 7241a1fe5c8357..e26b2080e23f05 100644 --- a/pkg/network/tracer/connection/fentry/manager.go +++ b/pkg/network/tracer/connection/fentry/manager.go @@ -11,12 +11,11 @@ package fentry import ( "os" - manager "github.com/DataDog/ebpf-manager" - "github.com/DataDog/datadog-agent/pkg/ebpf" "github.com/DataDog/datadog-agent/pkg/network/config" "github.com/DataDog/datadog-agent/pkg/network/ebpf/probes" errtelemetry "github.com/DataDog/datadog-agent/pkg/network/telemetry" + manager "github.com/DataDog/ebpf-manager" ) func initManager(mgr *errtelemetry.Manager, closedHandler *ebpf.PerfHandler, cfg *config.Config) { diff --git a/pkg/network/tracer/connection/fentry/probes.go b/pkg/network/tracer/connection/fentry/probes.go index cc4d43af2ad596..4fa6ecc383384c 100644 --- a/pkg/network/tracer/connection/fentry/probes.go +++ b/pkg/network/tracer/connection/fentry/probes.go @@ -81,9 +81,6 @@ const ( inetBindRet = "inet_bind_exit" // inet6BindRet traces the bind() syscall for IPv6 inet6BindRet = "inet6_bind_exit" - - // sockFDLookupRet is the kretprobe used for mapping socket FDs to kernel sock structs - sockFDLookupRet = "sockfd_lookup_light_exit" ) var programs = map[string]struct{}{ @@ -93,7 +90,6 @@ var programs = map[string]struct{}{ inetBindRet: {}, inetCskAcceptReturn: {}, inetCskListenStop: {}, - sockFDLookupRet: {}, // TODO: not available on certain kernels, will have to one or more hooks to get equivalent functionality; affects HTTPS monitoring (OpenSSL/GnuTLS/GoTLS) tcpRecvMsgReturn: {}, tcpClose: {}, tcpCloseReturn: {}, diff --git a/pkg/network/tracer/connection/kprobe/config.go b/pkg/network/tracer/connection/kprobe/config.go index 35ce14f824d75e..62cb571a2bf3a7 100644 --- a/pkg/network/tracer/connection/kprobe/config.go +++ b/pkg/network/tracer/connection/kprobe/config.go @@ -67,12 
+67,6 @@ func enabledProbes(c *config.Config, runtimeTracer, coreTracer bool) (map[probes // runtime compiled implementation enableProbe(enabled, selectVersionBasedProbe(runtimeTracer || coreTracer, kv, probes.TCPRetransmit, probes.TCPRetransmitPre470, kv470)) enableProbe(enabled, probes.TCPRetransmitRet) - - missing, err := ebpf.VerifyKernelFuncs("sockfd_lookup_light") - if err == nil && len(missing) == 0 { - enableProbe(enabled, probes.SockFDLookup) - enableProbe(enabled, probes.SockFDLookupRet) - } } if c.CollectUDPv4Conns { diff --git a/pkg/network/usm/ebpf_main.go b/pkg/network/usm/ebpf_main.go index 2da9b7d57b1e5d..f41c7ea6f77621 100644 --- a/pkg/network/usm/ebpf_main.go +++ b/pkg/network/usm/ebpf_main.go @@ -19,8 +19,6 @@ import ( "github.com/davecgh/go-spew/spew" "golang.org/x/sys/unix" - manager "github.com/DataDog/ebpf-manager" - ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" "github.com/DataDog/datadog-agent/pkg/ebpf/bytecode" "github.com/DataDog/datadog-agent/pkg/network" @@ -36,6 +34,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/network/usm/buildmode" "github.com/DataDog/datadog-agent/pkg/network/usm/utils" "github.com/DataDog/datadog-agent/pkg/util/log" + manager "github.com/DataDog/ebpf-manager" ) var ( @@ -112,6 +111,26 @@ func newEBPFProgram(c *config.Config, sockFD, connectionProtocolMap *ebpf.Map, b }, } + if c.CollectTCPv4Conns || c.CollectTCPv6Conns { + missing, err := ddebpf.VerifyKernelFuncs("sockfd_lookup_light") + if err == nil && len(missing) == 0 { + mgr.Probes = append(mgr.Probes, []*manager.Probe{ + { + ProbeIdentificationPair: manager.ProbeIdentificationPair{ + EBPFFuncName: probes.SockFDLookup, + UID: probeUID, + }, + }, + { + ProbeIdentificationPair: manager.ProbeIdentificationPair{ + EBPFFuncName: probes.SockFDLookupRet, + UID: probeUID, + }, + }, + }...) 
+ } + } + program := &ebpfProgram{ Manager: errtelemetry.NewManager(mgr, bpfTelemetry), cfg: c, @@ -334,25 +353,8 @@ func (e *ebpfProgram) init(buf bytecode.AssetReader, options manager.Options) er manager.ConstantEditor{Name: "ephemeral_range_begin", Value: uint64(begin)}, manager.ConstantEditor{Name: "ephemeral_range_end", Value: uint64(end)}) - options.ActivatedProbes = []manager.ProbesSelector{ - &manager.ProbeSelector{ - ProbeIdentificationPair: manager.ProbeIdentificationPair{ - EBPFFuncName: protocolDispatcherSocketFilterFunction, - UID: probeUID, - }, - }, - &manager.ProbeSelector{ - ProbeIdentificationPair: manager.ProbeIdentificationPair{ - EBPFFuncName: "kprobe__tcp_sendmsg", - UID: probeUID, - }, - }, - &manager.ProbeSelector{ - ProbeIdentificationPair: manager.ProbeIdentificationPair{ - EBPFFuncName: "tracepoint__net__netif_receive_skb", - UID: probeUID, - }, - }, + for _, p := range e.Manager.Probes { + options.ActivatedProbes = append(options.ActivatedProbes, &manager.ProbeSelector{ProbeIdentificationPair: p.ProbeIdentificationPair}) } // Some parts of USM (https capturing, and part of the classification) use `read_conn_tuple`, and has some if From 348f6584002a9cdb8a01b68d1a1234dd39f784ad Mon Sep 17 00:00:00 2001 From: Lucas Liseth <36653792+soberpeach@users.noreply.github.com> Date: Fri, 15 Dec 2023 15:59:47 +0100 Subject: [PATCH 11/66] AMLII-1110 - Fix Additional UseSSL setting for additional endpionts in logs agent (#21551) * Modified additional endpoint UseSSL setting to respect config * Fixed UseSSL errors and implemented comment suggestions * Fix failing tests * Use pointer creation helper * Fix failing test due to change from bool to *bool * Add testing for UseSSL boolean pointer * Amend UseSSL pointer logic * Fixed tests and useSSL setting for additional endpoints * Add warning for upcoming breaking change * Added release note * Fixed failing test that relied on old allocation behavior * Update 
releasenotes/notes/fixed-ssl-settings-for-addition-endpoints-4e63b4271b21ff17.yaml Co-authored-by: Brett Blue <84536271+brett0000FF@users.noreply.github.com> * Remove unnecessary import naming * Implemented PR suggestions * Remove unnecessary package name * Remove unnecessary pointer naming * Implemented PR suggestions * Revert mistaken change --------- Co-authored-by: Brett Blue <84536271+brett0000FF@users.noreply.github.com> --- comp/logs/agent/config/config.go | 26 ++- comp/logs/agent/config/config_test.go | 44 ++--- comp/logs/agent/config/endpoints.go | 15 +- comp/logs/agent/config/endpoints_test.go | 156 +++++++++++++++++- pkg/logs/client/http/destination.go | 2 +- pkg/logs/client/http/destination_test.go | 9 +- pkg/logs/client/http/test_utils.go | 4 +- pkg/logs/client/tcp/connection_manager.go | 2 +- .../client/tcp/connection_manager_test.go | 3 +- pkg/logs/client/tcp/test_utils.go | 3 +- pkg/security/utils/endpoint.go | 2 +- ...r-addition-endpoints-4e63b4271b21ff17.yaml | 17 ++ 12 files changed, 230 insertions(+), 53 deletions(-) create mode 100644 releasenotes/notes/fixed-ssl-settings-for-addition-endpoints-4e63b4271b21ff17.yaml diff --git a/comp/logs/agent/config/config.go b/comp/logs/agent/config/config.go index 138bd491143752..bde99373c71c68 100644 --- a/comp/logs/agent/config/config.go +++ b/comp/logs/agent/config/config.go @@ -17,6 +17,7 @@ import ( pkgConfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/util/log" + "github.com/DataDog/datadog-agent/pkg/util/pointer" ) // ContainerCollectAll is the name of the docker integration that collect logs from all containers @@ -144,6 +145,7 @@ func buildTCPEndpoints(coreConfig pkgConfig.Reader, logsConfig *LogsConfigKeys) APIKey: logsConfig.getLogsAPIKey(), ProxyAddress: proxyAddress, ConnectionResetInterval: logsConfig.connectionResetInterval(), + UseSSL: pointer.Ptr(logsConfig.logsNoSSL()), } if logsDDURL, defined := 
logsConfig.logsDDURL(); defined { @@ -156,11 +158,11 @@ func buildTCPEndpoints(coreConfig pkgConfig.Reader, logsConfig *LogsConfigKeys) } main.Host = host main.Port = port - main.UseSSL = !logsConfig.logsNoSSL() + *main.UseSSL = !logsConfig.logsNoSSL() } else if logsConfig.usePort443() { main.Host = logsConfig.ddURL443() main.Port = 443 - main.UseSSL = true + *main.UseSSL = true } else { // If no proxy is set, we default to 'logs_config.dd_url' if set, or to 'site'. // if none of them is set, we default to the US agent endpoint. @@ -170,12 +172,14 @@ func buildTCPEndpoints(coreConfig pkgConfig.Reader, logsConfig *LogsConfigKeys) } else { main.Port = logsConfig.ddPort() } - main.UseSSL = !logsConfig.devModeNoSSL() + *main.UseSSL = !logsConfig.devModeNoSSL() } additionals := logsConfig.getAdditionalEndpoints() for i := 0; i < len(additionals); i++ { - additionals[i].UseSSL = main.UseSSL + if additionals[i].UseSSL == nil { + additionals[i].UseSSL = main.UseSSL + } additionals[i].ProxyAddress = proxyAddress additionals[i].APIKey = utils.SanitizeAPIKey(additionals[i].APIKey) } @@ -207,6 +211,7 @@ func BuildHTTPEndpointsWithConfig(coreConfig pkgConfig.Reader, logsConfig *LogsC BackoffFactor: logsConfig.senderBackoffFactor(), RecoveryInterval: logsConfig.senderRecoveryInterval(), RecoveryReset: logsConfig.senderRecoveryReset(), + UseSSL: pointer.Ptr(defaultNoSSL), } if logsConfig.useV2API() && intakeTrackType != "" { @@ -225,7 +230,7 @@ func BuildHTTPEndpointsWithConfig(coreConfig pkgConfig.Reader, logsConfig *LogsC } main.Host = host main.Port = port - main.UseSSL = useSSL + *main.UseSSL = useSSL } else if logsDDURL, logsDDURLDefined := logsConfig.logsDDURL(); logsDDURLDefined { host, port, useSSL, err := parseAddressWithScheme(logsDDURL, defaultNoSSL, parseAddress) if err != nil { @@ -233,7 +238,7 @@ func BuildHTTPEndpointsWithConfig(coreConfig pkgConfig.Reader, logsConfig *LogsC } main.Host = host main.Port = port - main.UseSSL = useSSL + *main.UseSSL = useSSL } else { 
addr := utils.GetMainEndpoint(coreConfig, endpointPrefix, logsConfig.getConfigKey("dd_url")) host, port, useSSL, err := parseAddressWithScheme(addr, logsConfig.devModeNoSSL(), parseAddressAsHost) @@ -243,12 +248,14 @@ func BuildHTTPEndpointsWithConfig(coreConfig pkgConfig.Reader, logsConfig *LogsC main.Host = host main.Port = port - main.UseSSL = useSSL + *main.UseSSL = useSSL } additionals := logsConfig.getAdditionalEndpoints() for i := 0; i < len(additionals); i++ { - additionals[i].UseSSL = main.UseSSL + if additionals[i].UseSSL == nil { + additionals[i].UseSSL = main.UseSSL + } additionals[i].APIKey = utils.SanitizeAPIKey(additionals[i].APIKey) additionals[i].UseCompression = main.UseCompression additionals[i].CompressionLevel = main.CompressionLevel @@ -281,6 +288,9 @@ type defaultParseAddressFunc func(string) (host string, port int, err error) func parseAddressWithScheme(address string, defaultNoSSL bool, defaultParser defaultParseAddressFunc) (host string, port int, useSSL bool, err error) { if strings.HasPrefix(address, "https://") || strings.HasPrefix(address, "http://") { + if strings.HasPrefix(address, "https://") && !defaultNoSSL { + log.Warn("dd_url set to a URL with an HTTPS prefix and logs_no_ssl set to true. These are conflicting options. 
In a future release logs_no_ssl will override the dd_url prefix.") + } host, port, useSSL, err = parseURL(address) } else { host, port, err = defaultParser(address) diff --git a/comp/logs/agent/config/config_test.go b/comp/logs/agent/config/config_test.go index 8a256f93e96086..ad7107df79aa46 100644 --- a/comp/logs/agent/config/config_test.go +++ b/comp/logs/agent/config/config_test.go @@ -13,6 +13,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" coreConfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/util/fxutil" + "github.com/DataDog/datadog-agent/pkg/util/pointer" "github.com/stretchr/testify/suite" "go.uber.org/fx" ) @@ -162,7 +163,7 @@ func (suite *ConfigTestSuite) TestMultipleHttpEndpointsEnvVar() { APIKey: "123", Host: "agent-http-intake.logs.datadoghq.com", Port: 443, - UseSSL: true, + UseSSL: pointer.Ptr(true), UseCompression: true, CompressionLevel: 6, BackoffFactor: 3, @@ -176,7 +177,7 @@ func (suite *ConfigTestSuite) TestMultipleHttpEndpointsEnvVar() { APIKey: "456", Host: "additional.endpoint.1", Port: 1234, - UseSSL: true, + UseSSL: pointer.Ptr(true), UseCompression: true, CompressionLevel: 6, BackoffFactor: 3, @@ -190,7 +191,7 @@ func (suite *ConfigTestSuite) TestMultipleHttpEndpointsEnvVar() { APIKey: "789", Host: "additional.endpoint.2", Port: 1234, - UseSSL: true, + UseSSL: pointer.Ptr(true), UseCompression: true, CompressionLevel: 6, BackoffFactor: 3, @@ -222,7 +223,7 @@ func (suite *ConfigTestSuite) TestMultipleTCPEndpointsEnvVar() { APIKey: "123", Host: "agent-http-intake.logs.datadoghq.com", Port: 443, - UseSSL: true, + UseSSL: pointer.Ptr(true), UseCompression: false, CompressionLevel: 0, ProxyAddress: "proxy.test:3128", @@ -231,7 +232,7 @@ func (suite *ConfigTestSuite) TestMultipleTCPEndpointsEnvVar() { APIKey: "456", Host: "additional.endpoint", Port: 1234, - UseSSL: true, + UseSSL: pointer.Ptr(true), UseCompression: false, CompressionLevel: 0, ProxyAddress: "proxy.test:3128", @@ -273,7 
+274,7 @@ func (suite *ConfigTestSuite) TestMultipleHttpEndpointsInConfig() { APIKey: "123", Host: "agent-http-intake.logs.datadoghq.com", Port: 443, - UseSSL: true, + UseSSL: pointer.Ptr(true), UseCompression: true, CompressionLevel: 6, BackoffFactor: coreConfig.DefaultLogsSenderBackoffFactor, @@ -286,7 +287,7 @@ func (suite *ConfigTestSuite) TestMultipleHttpEndpointsInConfig() { APIKey: "456", Host: "additional.endpoint.1", Port: 1234, - UseSSL: true, + UseSSL: pointer.Ptr(true), UseCompression: true, CompressionLevel: 6, BackoffFactor: coreConfig.DefaultLogsSenderBackoffFactor, @@ -299,7 +300,7 @@ func (suite *ConfigTestSuite) TestMultipleHttpEndpointsInConfig() { APIKey: "789", Host: "additional.endpoint.2", Port: 1234, - UseSSL: true, + UseSSL: pointer.Ptr(true), UseCompression: true, CompressionLevel: 6, BackoffFactor: coreConfig.DefaultLogsSenderBackoffFactor, @@ -345,7 +346,7 @@ func (suite *ConfigTestSuite) TestMultipleHttpEndpointsInConfig2() { APIKey: "123", Host: "agent-http-intake.logs.datadoghq.com", Port: 443, - UseSSL: true, + UseSSL: pointer.Ptr(true), UseCompression: true, CompressionLevel: 6, BackoffFactor: coreConfig.DefaultLogsSenderBackoffFactor, @@ -361,7 +362,7 @@ func (suite *ConfigTestSuite) TestMultipleHttpEndpointsInConfig2() { APIKey: "456", Host: "additional.endpoint.1", Port: 1234, - UseSSL: true, + UseSSL: pointer.Ptr(true), UseCompression: true, CompressionLevel: 6, BackoffFactor: coreConfig.DefaultLogsSenderBackoffFactor, @@ -374,7 +375,7 @@ func (suite *ConfigTestSuite) TestMultipleHttpEndpointsInConfig2() { APIKey: "789", Host: "additional.endpoint.2", Port: 1234, - UseSSL: true, + UseSSL: pointer.Ptr(true), UseCompression: true, CompressionLevel: 6, BackoffFactor: coreConfig.DefaultLogsSenderBackoffFactor, @@ -413,7 +414,7 @@ func (suite *ConfigTestSuite) TestMultipleTCPEndpointsInConf() { APIKey: "123", Host: "agent-http-intake.logs.datadoghq.com", Port: 443, - UseSSL: true, + UseSSL: pointer.Ptr(true), UseCompression: false, 
CompressionLevel: 0, ProxyAddress: "proxy.test:3128", @@ -422,7 +423,7 @@ func (suite *ConfigTestSuite) TestMultipleTCPEndpointsInConf() { APIKey: "456", Host: "additional.endpoint", Port: 1234, - UseSSL: true, + UseSSL: pointer.Ptr(true), UseCompression: false, CompressionLevel: 0, ProxyAddress: "proxy.test:3128", @@ -448,7 +449,7 @@ func (suite *ConfigTestSuite) TestEndpointsSetLogsDDUrl() { APIKey: "123", Host: "my-proxy", Port: 443, - UseSSL: true, + UseSSL: pointer.Ptr(true), UseCompression: true, CompressionLevel: 6, BackoffFactor: coreConfig.DefaultLogsSenderBackoffFactor, @@ -492,7 +493,7 @@ func (suite *ConfigTestSuite) TestEndpointsSetDDSite() { APIKey: "123", Host: "default-intake.logs.mydomain.com", Port: 0, - UseSSL: true, + UseSSL: pointer.Ptr(true), UseCompression: true, CompressionLevel: 6, BackoffFactor: coreConfig.DefaultLogsSenderBackoffFactor, @@ -528,7 +529,7 @@ func (suite *ConfigTestSuite) TestBuildServerlessEndpoints() { APIKey: "123", Host: "http-intake.logs.datadoghq.com", Port: 0, - UseSSL: true, + UseSSL: pointer.Ptr(true), UseCompression: true, CompressionLevel: 6, BackoffFactor: coreConfig.DefaultLogsSenderBackoffFactor, @@ -559,11 +560,10 @@ func (suite *ConfigTestSuite) TestBuildServerlessEndpoints() { } func getTestEndpoint(host string, port int, ssl bool) Endpoint { - return Endpoint{ + e := Endpoint{ APIKey: "123", Host: host, Port: port, - UseSSL: ssl, UseCompression: true, CompressionLevel: 6, BackoffFactor: coreConfig.DefaultLogsSenderBackoffFactor, @@ -574,7 +574,9 @@ func getTestEndpoint(host string, port int, ssl bool) Endpoint { TrackType: "test-track", Protocol: "test-proto", Origin: "test-source", + UseSSL: pointer.Ptr(ssl), } + return e } func getTestEndpoints(e Endpoint) *Endpoints { @@ -674,7 +676,7 @@ func (suite *ConfigTestSuite) TestEndpointsSetNonDefaultCustomConfigs() { APIKey: "123", Host: "ndmflow-intake.datadoghq.com", Port: 0, - UseSSL: true, + UseSSL: pointer.Ptr(true), UseCompression: false, 
CompressionLevel: 10, BackoffFactor: 4, @@ -717,7 +719,7 @@ func (suite *ConfigTestSuite) TestEndpointsSetLogsDDUrlWithPrefix() { APIKey: "123", Host: "my-proxy.com", Port: 443, - UseSSL: true, + UseSSL: pointer.Ptr(true), UseCompression: true, CompressionLevel: 6, BackoffFactor: coreConfig.DefaultLogsSenderBackoffFactor, @@ -758,7 +760,7 @@ func (suite *ConfigTestSuite) TestEndpointsSetDDUrlWithPrefix() { APIKey: "123", Host: "my-proxy.com", Port: 443, - UseSSL: true, + UseSSL: pointer.Ptr(true), UseCompression: true, CompressionLevel: 6, BackoffFactor: coreConfig.DefaultLogsSenderBackoffFactor, diff --git a/comp/logs/agent/config/endpoints.go b/comp/logs/agent/config/endpoints.go index ce06c4f90be929..cf12f9e394f21b 100644 --- a/comp/logs/agent/config/endpoints.go +++ b/comp/logs/agent/config/endpoints.go @@ -37,9 +37,9 @@ type Endpoint struct { APIKey string `mapstructure:"api_key" json:"api_key"` Host string Port int - UseSSL bool - UseCompression bool `mapstructure:"use_compression" json:"use_compression"` - CompressionLevel int `mapstructure:"compression_level" json:"compression_level"` + UseSSL *bool `mapstructure:"use_ssl" json:"use_ssl"` + UseCompression bool `mapstructure:"use_compression" json:"use_compression"` + CompressionLevel int `mapstructure:"compression_level" json:"compression_level"` ProxyAddress string IsReliable *bool `mapstructure:"is_reliable" json:"is_reliable"` ConnectionResetInterval time.Duration @@ -56,6 +56,11 @@ type Endpoint struct { Origin IntakeOrigin } +// GetUseSSL returns the UseSSL config setting +func (e *Endpoint) GetUseSSL() bool { + return e.UseSSL == nil || *e.UseSSL +} + // GetStatus returns the endpoint status func (e *Endpoint) GetStatus(prefix string, useHTTP bool) string { compression := "uncompressed" @@ -68,7 +73,7 @@ func (e *Endpoint) GetStatus(prefix string, useHTTP bool) string { var protocol string if useHTTP { - if e.UseSSL { + if e.GetUseSSL() { protocol = "HTTPS" if port == 0 { port = 443 // use default 
port @@ -83,7 +88,7 @@ func (e *Endpoint) GetStatus(prefix string, useHTTP bool) string { } } } else { - if e.UseSSL { + if e.GetUseSSL() { protocol = "SSL encrypted TCP" } else { protocol = "TCP" diff --git a/comp/logs/agent/config/endpoints_test.go b/comp/logs/agent/config/endpoints_test.go index 45cad7c4432d15..dcd6f62e7fff7f 100644 --- a/comp/logs/agent/config/endpoints_test.go +++ b/comp/logs/agent/config/endpoints_test.go @@ -80,7 +80,7 @@ func (suite *EndpointsTestSuite) TestBuildEndpointsShouldSucceedWithDefaultAndVa suite.Equal("azerty", endpoint.APIKey) suite.Equal("agent-intake.logs.datadoghq.com", endpoint.Host) suite.Equal(10516, endpoint.Port) - suite.True(endpoint.UseSSL) + suite.True(endpoint.GetUseSSL()) suite.Equal("boz:1234", endpoint.ProxyAddress) suite.Equal(1, len(endpoints.Endpoints)) @@ -91,7 +91,7 @@ func (suite *EndpointsTestSuite) TestBuildEndpointsShouldSucceedWithDefaultAndVa suite.Equal("azerty", endpoint.APIKey) suite.Equal("agent-443-intake.logs.datadoghq.com", endpoint.Host) suite.Equal(443, endpoint.Port) - suite.True(endpoint.UseSSL) + suite.True(endpoint.GetUseSSL()) suite.Equal("boz:1234", endpoint.ProxyAddress) suite.Equal(1, len(endpoints.Endpoints)) @@ -103,7 +103,7 @@ func (suite *EndpointsTestSuite) TestBuildEndpointsShouldSucceedWithDefaultAndVa suite.Equal("azerty", endpoint.APIKey) suite.Equal("host", endpoint.Host) suite.Equal(1234, endpoint.Port) - suite.False(endpoint.UseSSL) + suite.False(endpoint.GetUseSSL()) suite.Equal("boz:1234", endpoint.ProxyAddress) suite.Equal(1, len(endpoints.Endpoints)) @@ -115,7 +115,7 @@ func (suite *EndpointsTestSuite) TestBuildEndpointsShouldSucceedWithDefaultAndVa suite.Equal("azerty", endpoint.APIKey) suite.Equal("", endpoint.Host) suite.Equal(1234, endpoint.Port) - suite.True(endpoint.UseSSL) + suite.True(endpoint.GetUseSSL()) suite.Equal("boz:1234", endpoint.ProxyAddress) suite.Equal(1, len(endpoints.Endpoints)) } @@ -133,7 +133,7 @@ func (suite *EndpointsTestSuite) 
TestBuildEndpointsShouldSucceedWithValidHTTPCon suite.Equal(endpoints.BatchWait, 5*time.Second) endpoint = endpoints.Main - suite.True(endpoint.UseSSL) + suite.True(endpoint.GetUseSSL()) suite.Equal("agent-http-intake.logs.datadoghq.com", endpoint.Host) } @@ -187,7 +187,7 @@ func (suite *EndpointsTestSuite) TestBuildEndpointsShouldSucceedWithValidHTTPCon suite.Equal(endpoints.BatchWait, 9*time.Second) endpoint = endpoints.Main - suite.True(endpoint.UseSSL) + suite.True(endpoint.GetUseSSL()) suite.Equal("foo", endpoint.Host) } @@ -204,7 +204,7 @@ func (suite *EndpointsTestSuite) TestBuildEndpointsShouldSucceedWithValidProxyCo suite.True(endpoints.UseHTTP) endpoint = endpoints.Main - suite.True(endpoint.UseSSL) + suite.True(endpoint.GetUseSSL()) suite.Equal("foo", endpoint.Host) suite.Equal(1234, endpoint.Port) } @@ -396,7 +396,7 @@ func (suite *EndpointsTestSuite) TestAdditionalEndpoints() { endpoint = endpoints.Endpoints[1] suite.Equal("foo", endpoint.Host) suite.Equal("1234", endpoint.APIKey) - suite.True(endpoint.UseSSL) + suite.True(endpoint.GetUseSSL()) suite.config.SetWithoutSource("logs_config.use_http", true) endpoints, err = BuildEndpoints(suite.config, HTTPConnectivityFailure, "test-track", "test-proto", "test-source") @@ -411,7 +411,7 @@ func (suite *EndpointsTestSuite) TestAdditionalEndpoints() { suite.True(endpoint.UseCompression) suite.Equal(6, endpoint.CompressionLevel) - suite.True(endpoint.UseSSL) + suite.True(endpoint.GetUseSSL()) } func (suite *EndpointsTestSuite) TestAdditionalEndpointsMappedCorrectly() { @@ -488,6 +488,144 @@ func (suite *EndpointsTestSuite) TestIsReliableDefaultTrue() { suite.Len(endpoints.GetReliableEndpoints(), 3) } +func (suite *EndpointsTestSuite) TestAdditionalEndpointsUseSSLTCPMainEndpointTrue() { + var ( + endpoints *Endpoints + err error + ) + + suite.config.SetWithoutSource("logs_config.logs_no_ssl", "true") + suite.config.SetWithoutSource("logs_config.logs_dd_url", "rand_url.com:1") + + 
suite.config.SetWithoutSource("logs_config.additional_endpoints", []map[string]interface{}{ + { + "host": "a", + "api_key": "1", + }, + { + "host": "b", + "api_key": "2", + "use_ssl": true, + }, + { + "host": "c", + "api_key": "3", + "use_ssl": false, + }, + }) + + endpoints, err = BuildEndpoints(suite.config, HTTPConnectivityFailure, "test-track", "test-proto", "test-source") + suite.Nil(err) + suite.Len(endpoints.Endpoints, 4) + suite.False(endpoints.Endpoints[1].GetUseSSL()) + suite.True(endpoints.Endpoints[2].GetUseSSL()) + suite.False(endpoints.Endpoints[3].GetUseSSL()) +} + +func (suite *EndpointsTestSuite) TestAdditionalEndpointsUseSSLTCPMainEndpointFalse() { + var ( + endpoints *Endpoints + err error + ) + + suite.config.SetWithoutSource("logs_config.logs_no_ssl", "false") + suite.config.SetWithoutSource("logs_config.logs_dd_url", "rand_url.com:1") + + suite.config.SetWithoutSource("logs_config.additional_endpoints", []map[string]interface{}{ + { + "host": "a", + "api_key": "1", + }, + { + "host": "b", + "api_key": "2", + "use_ssl": true, + }, + { + "host": "c", + "api_key": "3", + "use_ssl": false, + }, + }) + + endpoints, err = BuildEndpoints(suite.config, HTTPConnectivityFailure, "test-track", "test-proto", "test-source") + suite.Nil(err) + suite.Len(endpoints.Endpoints, 4) + suite.True(endpoints.Endpoints[1].GetUseSSL()) + suite.True(endpoints.Endpoints[2].GetUseSSL()) + suite.False(endpoints.Endpoints[3].GetUseSSL()) +} + +func (suite *EndpointsTestSuite) TestAdditionalEndpointsUseSSLHTTPMainEndpointTrue() { + var ( + endpoints *Endpoints + err error + ) + + suite.config.SetWithoutSource("logs_config.logs_no_ssl", "true") + suite.config.SetWithoutSource("logs_config.use_http", "true") + suite.config.SetWithoutSource("logs_config.logs_dd_url", "http://rand_url.com:1") + + suite.config.SetWithoutSource("logs_config.additional_endpoints", []map[string]interface{}{ + { + "host": "a", + "api_key": "1", + }, + { + "host": "b", + "api_key": "2", + "use_ssl": 
true, + }, + { + "host": "c", + "api_key": "3", + "use_ssl": false, + }, + }) + + endpoints, err = BuildEndpoints(suite.config, HTTPConnectivitySuccess, "test-track", "test-proto", "test-source") + suite.Nil(err) + suite.Len(endpoints.Endpoints, 4) + suite.False(endpoints.Endpoints[1].GetUseSSL()) + suite.True(endpoints.Endpoints[2].GetUseSSL()) + suite.False(endpoints.Endpoints[3].GetUseSSL()) +} + +func (suite *EndpointsTestSuite) TestAdditionalEndpointsUseSSLHTTPMainEndpointFalse() { + var ( + endpoints *Endpoints + err error + ) + + suite.config.SetWithoutSource("logs_config.logs_no_ssl", "false") + suite.config.SetWithoutSource("logs_config.use_http", "true") + suite.config.SetWithoutSource("logs_config.logs_dd_url", "http://rand_url.com:1") + + suite.config.SetWithoutSource("logs_config.additional_endpoints", []map[string]interface{}{ + { + "host": "a", + "api_key": "1", + }, + { + "host": "b", + "api_key": "2", + "use_ssl": true, + }, + { + "host": "c", + "api_key": "3", + "use_ssl": false, + }, + }) + + endpoints, err = BuildEndpoints(suite.config, HTTPConnectivitySuccess, "test-track", "test-proto", "test-source") + suite.Nil(err) + suite.Len(endpoints.Endpoints, 4) + suite.False(endpoints.Endpoints[1].GetUseSSL()) + suite.True(endpoints.Endpoints[2].GetUseSSL()) + suite.False(endpoints.Endpoints[3].GetUseSSL()) +} + func TestEndpointsTestSuite(t *testing.T) { suite.Run(t, new(EndpointsTestSuite)) } diff --git a/pkg/logs/client/http/destination.go b/pkg/logs/client/http/destination.go index bb39132c981cb9..6de438f777b0d2 100644 --- a/pkg/logs/client/http/destination.go +++ b/pkg/logs/client/http/destination.go @@ -360,7 +360,7 @@ func httpClientFactory(timeout time.Duration) func() *http.Client { // buildURL buils a url from a config endpoint. 
func buildURL(endpoint config.Endpoint) string { var scheme string - if endpoint.UseSSL { + if endpoint.GetUseSSL() { scheme = "https" } else { scheme = "http" diff --git a/pkg/logs/client/http/destination_test.go b/pkg/logs/client/http/destination_test.go index 24df9adbb51c09..b91ceec31bc005 100644 --- a/pkg/logs/client/http/destination_test.go +++ b/pkg/logs/client/http/destination_test.go @@ -17,13 +17,14 @@ import ( "github.com/DataDog/datadog-agent/pkg/logs/message" "github.com/DataDog/datadog-agent/comp/logs/agent/config" + "github.com/DataDog/datadog-agent/pkg/util/pointer" ) func TestBuildURLShouldReturnHTTPSWithUseSSL(t *testing.T) { url := buildURL(config.Endpoint{ APIKey: "bar", Host: "foo", - UseSSL: true, + UseSSL: pointer.Ptr(true), }) assert.Equal(t, "https://foo/v1/input", url) } @@ -32,7 +33,7 @@ func TestBuildURLShouldReturnHTTPWithoutUseSSL(t *testing.T) { url := buildURL(config.Endpoint{ APIKey: "bar", Host: "foo", - UseSSL: false, + UseSSL: pointer.Ptr(false), }) assert.Equal(t, "http://foo/v1/input", url) } @@ -42,7 +43,7 @@ func TestBuildURLShouldReturnAddressWithPortWhenDefined(t *testing.T) { APIKey: "bar", Host: "foo", Port: 1234, - UseSSL: false, + UseSSL: pointer.Ptr(false), }) assert.Equal(t, "http://foo:1234/v1/input", url) } @@ -51,7 +52,7 @@ func TestBuildURLShouldReturnAddressForVersion2(t *testing.T) { url := buildURL(config.Endpoint{ APIKey: "bar", Host: "foo", - UseSSL: false, + UseSSL: pointer.Ptr(false), Version: config.EPIntakeVersion2, TrackType: "test-track", }) diff --git a/pkg/logs/client/http/test_utils.go b/pkg/logs/client/http/test_utils.go index 41a86133738bcb..581523e02f38d3 100644 --- a/pkg/logs/client/http/test_utils.go +++ b/pkg/logs/client/http/test_utils.go @@ -15,6 +15,8 @@ import ( "github.com/DataDog/datadog-agent/pkg/logs/client" "github.com/DataDog/datadog-agent/comp/logs/agent/config" + + "github.com/DataDog/datadog-agent/pkg/util/pointer" ) // StatusCodeContainer is a lock around the status code to return 
@@ -76,7 +78,7 @@ func NewTestServerWithOptions(statusCode int, senders int, retryDestination bool APIKey: "test", Host: strings.Replace(url[1], "/", "", -1), Port: port, - UseSSL: false, + UseSSL: pointer.Ptr(false), BackoffFactor: 1, BackoffBase: 1, BackoffMax: 10, diff --git a/pkg/logs/client/tcp/connection_manager.go b/pkg/logs/client/tcp/connection_manager.go index 4ce169a3015d07..2e2d165c380834 100644 --- a/pkg/logs/client/tcp/connection_manager.go +++ b/pkg/logs/client/tcp/connection_manager.go @@ -108,7 +108,7 @@ func (cm *ConnectionManager) NewConnection(ctx context.Context) (net.Conn, error } log.Debugf("connected to %v", cm.address()) - if cm.endpoint.UseSSL { + if cm.endpoint.GetUseSSL() { sslConn := tls.Client(conn, &tls.Config{ ServerName: cm.endpoint.Host, }) diff --git a/pkg/logs/client/tcp/connection_manager_test.go b/pkg/logs/client/tcp/connection_manager_test.go index b63c4c9054fb78..785d20992697e4 100644 --- a/pkg/logs/client/tcp/connection_manager_test.go +++ b/pkg/logs/client/tcp/connection_manager_test.go @@ -18,6 +18,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/logs/client/mock" "github.com/DataDog/datadog-agent/pkg/logs/internal/util" "github.com/DataDog/datadog-agent/pkg/logs/sources" + "github.com/DataDog/datadog-agent/pkg/util/pointer" ) func newConnectionManagerForAddr(addr net.Addr) *ConnectionManager { @@ -26,7 +27,7 @@ func newConnectionManagerForAddr(addr net.Addr) *ConnectionManager { } func newConnectionManagerForHostPort(host string, port int) *ConnectionManager { - endpoint := config.Endpoint{Host: host, Port: port} + endpoint := config.Endpoint{Host: host, Port: port, UseSSL: pointer.Ptr(false)} return NewConnectionManager(endpoint) } diff --git a/pkg/logs/client/tcp/test_utils.go b/pkg/logs/client/tcp/test_utils.go index bf8fe9c3c42159..3312ffcd83cdf2 100644 --- a/pkg/logs/client/tcp/test_utils.go +++ b/pkg/logs/client/tcp/test_utils.go @@ -10,6 +10,7 @@ import ( "github.com/DataDog/datadog-agent/comp/logs/agent/config" 
"github.com/DataDog/datadog-agent/pkg/logs/client" + "github.com/DataDog/datadog-agent/pkg/util/pointer" ) // AddrToHostPort converts a net.Addr to a (string, int). @@ -26,7 +27,7 @@ func AddrToHostPort(remoteAddr net.Addr) (string, int) { // AddrToEndPoint creates an EndPoint from an Addr. func AddrToEndPoint(addr net.Addr) config.Endpoint { host, port := AddrToHostPort(addr) - return config.Endpoint{Host: host, Port: port} + return config.Endpoint{Host: host, Port: port, UseSSL: pointer.Ptr(false)} } // AddrToDestination creates a Destination from an Addr diff --git a/pkg/security/utils/endpoint.go b/pkg/security/utils/endpoint.go index 4e2cae8597f9b1..e5e07743a92451 100644 --- a/pkg/security/utils/endpoint.go +++ b/pkg/security/utils/endpoint.go @@ -16,7 +16,7 @@ import ( func GetEndpointURL(endpoint logsconfig.Endpoint, uri string) string { port := endpoint.Port var protocol string - if endpoint.UseSSL { + if endpoint.GetUseSSL() { protocol = "https" if port == 0 { port = 443 // use default port diff --git a/releasenotes/notes/fixed-ssl-settings-for-addition-endpoints-4e63b4271b21ff17.yaml b/releasenotes/notes/fixed-ssl-settings-for-addition-endpoints-4e63b4271b21ff17.yaml new file mode 100644 index 00000000000000..5ac61465dcd68d --- /dev/null +++ b/releasenotes/notes/fixed-ssl-settings-for-addition-endpoints-4e63b4271b21ff17.yaml @@ -0,0 +1,17 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +fixes: + - | + Fixed Logs Agent additional endpoints to respect their + logs_no_ssl setting. +other: + - | + Added a warning when ``logs_no_ssl`` is set and ``dd_url`` + contains an https prefix. 
``logs_no_ssl`` will take precedence + over the prefix in a future version. From c1ebd1e4585be8b934e5430eea38bc3460489963 Mon Sep 17 00:00:00 2001 From: Paul Cacheux Date: Fri, 15 Dec 2023 16:09:47 +0100 Subject: [PATCH 12/66] fix missing `pidToCleanup` when handling exit event in windows probe (#21585) fix missing `pidToCleanup` when handling exit event in windows probe --- pkg/security/probe/probe_windows.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/security/probe/probe_windows.go b/pkg/security/probe/probe_windows.go index 6e119b6fd85d53..7bc239c793cbb7 100644 --- a/pkg/security/probe/probe_windows.go +++ b/pkg/security/probe/probe_windows.go @@ -115,6 +115,7 @@ func (p *WindowsProbe) Start() error { log.Infof("Received stop %v", stop) pce = p.Resolvers.ProcessResolver.GetEntry(pid) + pidToCleanup = pid ev.Type = uint32(model.ExitEventType) if pce == nil { From 1001fbce8bf8d284699750a64c5c0725dd69ae9b Mon Sep 17 00:00:00 2001 From: Pedro Lambert Date: Fri, 15 Dec 2023 10:20:37 -0500 Subject: [PATCH 13/66] [usm] Handle invalid events (#21550) --- pkg/network/protocols/events/consumer.go | 72 +++++++++++++++++++----- 1 file changed, 57 insertions(+), 15 deletions(-) diff --git a/pkg/network/protocols/events/consumer.go b/pkg/network/protocols/events/consumer.go index 08c96d75d01b7d..0e8771a3a4abe8 100644 --- a/pkg/network/protocols/events/consumer.go +++ b/pkg/network/protocols/events/consumer.go @@ -8,6 +8,7 @@ package events import ( + "errors" "fmt" "sync" "unsafe" @@ -23,8 +24,11 @@ import ( const ( batchMapSuffix = "_batches" eventsMapSuffix = "_batch_events" + sizeOfBatch = int(unsafe.Sizeof(batch{})) ) +var errInvalidPerfEvent = errors.New("invalid perf event") + // Consumer provides a standardized abstraction for consuming (batched) events from eBPF type Consumer[V any] struct { mux sync.Mutex @@ -40,11 +44,12 @@ type Consumer[V any] struct { stopped bool // telemetry - metricGroup *telemetry.MetricGroup - eventsCount *telemetry.Counter - 
missesCount *telemetry.Counter - kernelDropsCount *telemetry.Counter - batchSize *atomic.Int64 + metricGroup *telemetry.MetricGroup + eventsCount *telemetry.Counter + missesCount *telemetry.Counter + kernelDropsCount *telemetry.Counter + invalidEventsCount *telemetry.Counter + batchSize *atomic.Int64 } // NewConsumer instantiates a new event Consumer @@ -85,6 +90,7 @@ func NewConsumer[V any](proto string, ebpf *manager.Manager, callback func([]V)) eventsCount := metricGroup.NewCounter("events_captured") missesCount := metricGroup.NewCounter("events_missed") kernelDropsCount := metricGroup.NewCounter("kernel_dropped_events") + invalidEventsCount := metricGroup.NewCounter("invalid_events") return &Consumer[V]{ proto: proto, @@ -95,11 +101,12 @@ func NewConsumer[V any](proto string, ebpf *manager.Manager, callback func([]V)) batchReader: batchReader, // telemetry - metricGroup: metricGroup, - eventsCount: eventsCount, - missesCount: missesCount, - kernelDropsCount: kernelDropsCount, - batchSize: atomic.NewInt64(0), + metricGroup: metricGroup, + eventsCount: eventsCount, + missesCount: missesCount, + kernelDropsCount: kernelDropsCount, + invalidEventsCount: invalidEventsCount, + batchSize: atomic.NewInt64(0), }, nil } @@ -115,8 +122,12 @@ func (c *Consumer[V]) Start() { return } - b := batchFromEventData(dataEvent.Data) - c.process(dataEvent.CPU, b, false) + b, err := batchFromEventData(dataEvent.Data) + if err == nil { + c.process(dataEvent.CPU, b, false) + } else { + c.invalidEventsCount.Add(1) + } dataEvent.Done() case _, ok := <-c.handler.LostChannel: if !ok { @@ -174,7 +185,26 @@ func (c *Consumer[V]) Stop() { } func (c *Consumer[V]) process(cpu int, b *batch, syncing bool) { + // Determine the subset of data we're interested in as we might have read + // part of this batch before during a Sync() call begin, end := c.offsets.Get(cpu, b, syncing) + length := end - begin + + // This can happen in the context of a low-traffic host + // (that is, when no events are 
enqueued in a batch between two consecutive + // calls to `Sync()`) + if length == 0 { + return + } + + // Sanity check. Ideally none of these conditions should evaluate to + // true. In case they do we bail out and increment the counter tracking + // invalid events + // TODO: investigate why we're sometimes getting invalid offsets + if length < 0 || length > int(b.Cap) { + c.invalidEventsCount.Add(1) + return + } // telemetry stuff c.batchSize.Store(int64(b.Cap)) @@ -182,15 +212,27 @@ func (c *Consumer[V]) process(cpu int, b *batch, syncing bool) { c.kernelDropsCount.Add(int64(b.Dropped_events)) // generate a slice of type []V from the batch - length := end - begin ptr := pointerToElement[V](b, begin) events := unsafe.Slice(ptr, length) c.callback(events) } -func batchFromEventData(data []byte) *batch { - return (*batch)(unsafe.Pointer(&data[0])) +func batchFromEventData(data []byte) (*batch, error) { + if len(data) < sizeOfBatch { + // For some reason the eBPF program sent us a perf event with a size + // different from what we're expecting. + // + // TODO: we're not ensuring that len(data) == sizeOfBatch, because we're + // consistently getting events that have a few bytes more than + // `sizeof(batch_event_t)`. I haven't determined yet where these extra + // bytes are coming from, but I already validated that is not padding + // coming from the clang/LLVM toolchain for alignment purposes, so it's + // something happening *after* the call to bpf_perf_event_output. 
+ return nil, errInvalidPerfEvent + } + + return (*batch)(unsafe.Pointer(&data[0])), nil } func pointerToElement[V any](b *batch, elementIdx int) *V { From 81dd765245af0efda477baab7bf7ef6edb0e0cd1 Mon Sep 17 00:00:00 2001 From: Olivier G <52180542+ogaca-dd@users.noreply.github.com> Date: Fri, 15 Dec 2023 16:56:32 +0100 Subject: [PATCH 14/66] Migrate demultiplexer to new file hierarchy (#21478) Migrate demultiplexer to new file hierarchy --- cmd/agent/subcommands/run/command.go | 7 ++++--- .../internal/settings/runtime_settings_test.go | 3 ++- .../subcommands/run/command.go | 7 ++++--- cmd/cluster-agent/subcommands/start/command.go | 7 ++++--- cmd/dogstatsd/subcommands/start/command.go | 7 ++++--- cmd/otel-agent/main.go | 8 ++++---- cmd/security-agent/main_windows.go | 7 ++++--- cmd/security-agent/subcommands/start/command.go | 7 ++++--- comp/aggregator/bundle.go | 4 ++-- comp/aggregator/bundle_test.go | 4 ++-- comp/aggregator/demultiplexer/component.go | 9 --------- comp/aggregator/demultiplexer/component_mock.go | 9 --------- .../{ => demultiplexerimpl}/demultiplexer.go | 15 ++++++++++++--- .../{ => demultiplexerimpl}/demultiplexer_mock.go | 14 +++++++++++--- .../demultiplexer_mock_test.go | 7 ++++--- .../{ => demultiplexerimpl}/params.go | 2 +- comp/ndmtmp/bundle_test.go | 6 +++--- comp/netflow/server/server_test.go | 4 ++-- pkg/cli/subcommands/check/command.go | 7 ++++--- pkg/collector/corechecks/snmp/snmp_test.go | 3 ++- pkg/metadata/scheduler_test.go | 3 ++- 21 files changed, 75 insertions(+), 65 deletions(-) rename comp/aggregator/demultiplexer/{ => demultiplexerimpl}/demultiplexer.go (82%) rename comp/aggregator/demultiplexer/{ => demultiplexerimpl}/demultiplexer_mock.go (76%) rename comp/aggregator/demultiplexer/{ => demultiplexerimpl}/demultiplexer_mock_test.go (82%) rename comp/aggregator/demultiplexer/{ => demultiplexerimpl}/params.go (94%) diff --git a/cmd/agent/subcommands/run/command.go b/cmd/agent/subcommands/run/command.go index 
14fd7b756c05b5..07b4eadb3a7c7f 100644 --- a/cmd/agent/subcommands/run/command.go +++ b/cmd/agent/subcommands/run/command.go @@ -33,6 +33,7 @@ import ( "github.com/DataDog/datadog-agent/cmd/agent/subcommands/run/internal/clcrunnerapi" "github.com/DataDog/datadog-agent/cmd/manager" "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer" + "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer/demultiplexerimpl" netflowServer "github.com/DataDog/datadog-agent/comp/netflow/server" // checks implemented as components @@ -346,12 +347,12 @@ func getSharedFxOption() fx.Option { metadata.Bundle(), // injecting the aggregator demultiplexer to FX until we migrate it to a proper component. This allows // other already migrated components to request it. - fx.Provide(func(config config.Component) demultiplexer.Params { + fx.Provide(func(config config.Component) demultiplexerimpl.Params { opts := aggregator.DefaultAgentDemultiplexerOptions() opts.EnableNoAggregationPipeline = config.GetBool("dogstatsd_no_aggregation_pipeline") - return demultiplexer.Params{Options: opts} + return demultiplexerimpl.Params{Options: opts} }), - demultiplexer.Module(), + demultiplexerimpl.Module(), orchestratorForwarderImpl.Module(), fx.Supply(orchestratorForwarderImpl.NewDefaultParams()), // injecting the shared Serializer to FX until we migrate it to a prpoper component. 
This allows other diff --git a/cmd/agent/subcommands/run/internal/settings/runtime_settings_test.go b/cmd/agent/subcommands/run/internal/settings/runtime_settings_test.go index 7c96c38cd05626..0c6aaba6806ce7 100644 --- a/cmd/agent/subcommands/run/internal/settings/runtime_settings_test.go +++ b/cmd/agent/subcommands/run/internal/settings/runtime_settings_test.go @@ -10,6 +10,7 @@ import ( global "github.com/DataDog/datadog-agent/cmd/agent/dogstatsd" "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer" + "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer/demultiplexerimpl" "github.com/DataDog/datadog-agent/comp/core" "github.com/DataDog/datadog-agent/comp/dogstatsd" "github.com/DataDog/datadog-agent/comp/dogstatsd/server" @@ -45,7 +46,7 @@ func TestDogstatsdMetricsStats(t *testing.T) { }), dogstatsd.Bundle(), defaultforwarder.MockModule(), - demultiplexer.MockModule(), + demultiplexerimpl.MockModule(), )) demux := deps.Demultiplexer diff --git a/cmd/cluster-agent-cloudfoundry/subcommands/run/command.go b/cmd/cluster-agent-cloudfoundry/subcommands/run/command.go index d05160ca4f2a59..309a57b17a4ec4 100644 --- a/cmd/cluster-agent-cloudfoundry/subcommands/run/command.go +++ b/cmd/cluster-agent-cloudfoundry/subcommands/run/command.go @@ -26,6 +26,7 @@ import ( "github.com/DataDog/datadog-agent/cmd/cluster-agent/api" dcav1 "github.com/DataDog/datadog-agent/cmd/cluster-agent/api/v1" "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer" + "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer/demultiplexerimpl" "github.com/DataDog/datadog-agent/comp/core" "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/comp/core/log" @@ -69,13 +70,13 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { core.Bundle(), forwarder.Bundle(), fx.Provide(defaultforwarder.NewParamsWithResolvers), - demultiplexer.Module(), + demultiplexerimpl.Module(), orchestratorForwarderImpl.Module(), 
fx.Supply(orchestratorForwarderImpl.NewDisabledParams()), - fx.Provide(func() demultiplexer.Params { + fx.Provide(func() demultiplexerimpl.Params { opts := aggregator.DefaultAgentDemultiplexerOptions() opts.UseEventPlatformForwarder = false - return demultiplexer.Params{Options: opts} + return demultiplexerimpl.Params{Options: opts} }), // setup workloadmeta collectors.GetCatalog(), diff --git a/cmd/cluster-agent/subcommands/start/command.go b/cmd/cluster-agent/subcommands/start/command.go index 4dfcd291e12ae6..fe38311c2e1222 100644 --- a/cmd/cluster-agent/subcommands/start/command.go +++ b/cmd/cluster-agent/subcommands/start/command.go @@ -26,6 +26,7 @@ import ( "github.com/DataDog/datadog-agent/cmd/cluster-agent/command" "github.com/DataDog/datadog-agent/cmd/cluster-agent/custommetrics" "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer" + "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer/demultiplexerimpl" "github.com/DataDog/datadog-agent/comp/core" "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/comp/core/log" @@ -94,13 +95,13 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { params.Options.DisableAPIKeyChecking = true return params }), - demultiplexer.Module(), + demultiplexerimpl.Module(), orchestratorForwarderImpl.Module(), fx.Supply(orchestratorForwarderImpl.NewDefaultParams()), - fx.Provide(func() demultiplexer.Params { + fx.Provide(func() demultiplexerimpl.Params { opts := aggregator.DefaultAgentDemultiplexerOptions() opts.UseEventPlatformForwarder = false - return demultiplexer.Params{Options: opts} + return demultiplexerimpl.Params{Options: opts} }), // setup workloadmeta collectors.GetCatalog(), diff --git a/cmd/dogstatsd/subcommands/start/command.go b/cmd/dogstatsd/subcommands/start/command.go index f0a64a3df0dd02..5887c779532dde 100644 --- a/cmd/dogstatsd/subcommands/start/command.go +++ b/cmd/dogstatsd/subcommands/start/command.go @@ -19,6 +19,7 @@ import ( 
"github.com/DataDog/datadog-agent/cmd/agent/common" "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer" + "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer/demultiplexerimpl" "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/comp/core/log" @@ -133,7 +134,7 @@ func RunDogstatsdFct(cliParams *CLIParams, defaultConfPath string, defaultLogFil } }), workloadmeta.OptionalModule(), - demultiplexer.Module(), + demultiplexerimpl.Module(), secretsimpl.Module(), orchestratorForwarderImpl.Module(), fx.Supply(orchestratorForwarderImpl.NewDisabledParams()), @@ -142,11 +143,11 @@ func RunDogstatsdFct(cliParams *CLIParams, defaultConfPath string, defaultLogFil fx.Provide(func(demuxInstance demultiplexer.Component) serializer.MetricSerializer { return demuxInstance.Serializer() }), - fx.Provide(func(config config.Component) demultiplexer.Params { + fx.Provide(func(config config.Component) demultiplexerimpl.Params { opts := aggregator.DefaultAgentDemultiplexerOptions() opts.UseEventPlatformForwarder = false opts.EnableNoAggregationPipeline = config.GetBool("dogstatsd_no_aggregation_pipeline") - return demultiplexer.Params{Options: opts, ContinueOnMissingHostname: true} + return demultiplexerimpl.Params{Options: opts, ContinueOnMissingHostname: true} }), fx.Supply(resourcesimpl.Disabled()), metadatarunnerimpl.Module(), diff --git a/cmd/otel-agent/main.go b/cmd/otel-agent/main.go index a585a8b5a36780..8b5a2ae121f7e3 100644 --- a/cmd/otel-agent/main.go +++ b/cmd/otel-agent/main.go @@ -17,7 +17,7 @@ import ( "os" "os/signal" - "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer" + "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer/demultiplexerimpl" "github.com/DataDog/datadog-agent/comp/core" "github.com/DataDog/datadog-agent/comp/core/config" corelog "github.com/DataDog/datadog-agent/comp/core/log" @@ -73,14 +73,14 @@ func main() { }, ), fx.Provide(newForwarderParams), - demultiplexer.Module(), + 
demultiplexerimpl.Module(), orchestratorForwarderImpl.Module(), fx.Supply(orchestratorForwarderImpl.NewDisabledParams()), fx.Provide(newSerializer), - fx.Provide(func(cfg config.Component) demultiplexer.Params { + fx.Provide(func(cfg config.Component) demultiplexerimpl.Params { opts := aggregator.DefaultAgentDemultiplexerOptions() opts.EnableNoAggregationPipeline = cfg.GetBool("dogstatsd_no_aggregation_pipeline") - return demultiplexer.Params{Options: opts} + return demultiplexerimpl.Params{Options: opts} }), ) if err != nil { diff --git a/cmd/security-agent/main_windows.go b/cmd/security-agent/main_windows.go index 3f5744c24f40d4..7fe177d79941be 100644 --- a/cmd/security-agent/main_windows.go +++ b/cmd/security-agent/main_windows.go @@ -22,6 +22,7 @@ import ( "github.com/DataDog/datadog-agent/cmd/security-agent/subcommands" "github.com/DataDog/datadog-agent/cmd/security-agent/subcommands/start" "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer" + "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer/demultiplexerimpl" "github.com/DataDog/datadog-agent/comp/core" "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/comp/core/log" @@ -103,13 +104,13 @@ func (s *service) Run(svcctx context.Context) error { dogstatsd.ClientBundle, forwarder.Bundle(), fx.Provide(defaultforwarder.NewParamsWithResolvers), - demultiplexer.Module(), + demultiplexerimpl.Module(), orchestratorForwarderImpl.Module(), fx.Supply(orchestratorForwarderImpl.NewDisabledParams()), - fx.Provide(func() demultiplexer.Params { + fx.Provide(func() demultiplexerimpl.Params { opts := aggregator.DefaultAgentDemultiplexerOptions() opts.UseEventPlatformForwarder = false - return demultiplexer.Params{Options: opts} + return demultiplexerimpl.Params{Options: opts} }), // workloadmeta setup diff --git a/cmd/security-agent/subcommands/start/command.go b/cmd/security-agent/subcommands/start/command.go index de7523b67fe329..23eca06e719fcf 100644 --- 
a/cmd/security-agent/subcommands/start/command.go +++ b/cmd/security-agent/subcommands/start/command.go @@ -28,6 +28,7 @@ import ( "github.com/DataDog/datadog-agent/cmd/security-agent/subcommands/compliance" "github.com/DataDog/datadog-agent/cmd/security-agent/subcommands/runtime" "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer" + "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer/demultiplexerimpl" "github.com/DataDog/datadog-agent/comp/core" "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/comp/core/log" @@ -93,14 +94,14 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { dogstatsd.ClientBundle, forwarder.Bundle(), fx.Provide(defaultforwarder.NewParamsWithResolvers), - demultiplexer.Module(), + demultiplexerimpl.Module(), orchestratorForwarderImpl.Module(), fx.Supply(orchestratorForwarderImpl.NewDisabledParams()), - fx.Provide(func() demultiplexer.Params { + fx.Provide(func() demultiplexerimpl.Params { opts := aggregator.DefaultAgentDemultiplexerOptions() opts.UseEventPlatformForwarder = false - return demultiplexer.Params{Options: opts} + return demultiplexerimpl.Params{Options: opts} }), // workloadmeta setup collectors.GetCatalog(), diff --git a/comp/aggregator/bundle.go b/comp/aggregator/bundle.go index f0203ec24b807e..f806fe33fb7785 100644 --- a/comp/aggregator/bundle.go +++ b/comp/aggregator/bundle.go @@ -7,7 +7,7 @@ package aggregator import ( - "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer" + "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer/demultiplexerimpl" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -16,5 +16,5 @@ import ( // Bundle defines the fx options for this bundle. 
func Bundle() fxutil.BundleOptions { return fxutil.Bundle( - demultiplexer.Module()) + demultiplexerimpl.Module()) } diff --git a/comp/aggregator/bundle_test.go b/comp/aggregator/bundle_test.go index f8f4794716dff2..88822dc07d4109 100644 --- a/comp/aggregator/bundle_test.go +++ b/comp/aggregator/bundle_test.go @@ -8,7 +8,7 @@ package aggregator import ( "testing" - "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer" + "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer/demultiplexerimpl" "github.com/DataDog/datadog-agent/comp/core" "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder" orchestratorForwarderImpl "github.com/DataDog/datadog-agent/comp/forwarder/orchestrator/orchestratorimpl" @@ -21,6 +21,6 @@ func TestBundleDependencies(t *testing.T) { core.MockBundle(), defaultforwarder.MockModule(), orchestratorForwarderImpl.MockModule(), - fx.Supply(demultiplexer.Params{}), + fx.Supply(demultiplexerimpl.Params{}), ) } diff --git a/comp/aggregator/demultiplexer/component.go b/comp/aggregator/demultiplexer/component.go index 74a8c3163f8107..93860e3ea060d6 100644 --- a/comp/aggregator/demultiplexer/component.go +++ b/comp/aggregator/demultiplexer/component.go @@ -7,12 +7,9 @@ package demultiplexer import ( - "go.uber.org/fx" - "github.com/DataDog/datadog-agent/pkg/aggregator" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" "github.com/DataDog/datadog-agent/pkg/serializer" - "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) // team: agent-shared-components @@ -32,9 +29,3 @@ type Component interface { sender.DiagnoseSenderManager } - -// Module defines the fx options for this component. 
-func Module() fxutil.Module { - return fxutil.Component( - fx.Provide(newDemultiplexer)) -} diff --git a/comp/aggregator/demultiplexer/component_mock.go b/comp/aggregator/demultiplexer/component_mock.go index dc453df5f9bc2f..6181190464adb0 100644 --- a/comp/aggregator/demultiplexer/component_mock.go +++ b/comp/aggregator/demultiplexer/component_mock.go @@ -8,10 +8,7 @@ package demultiplexer import ( - "go.uber.org/fx" - "github.com/DataDog/datadog-agent/pkg/aggregator/sender" - "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) // Mock implements mock-specific methods. @@ -19,9 +16,3 @@ type Mock interface { SetDefaultSender(sender.Sender) Component } - -// MockModule defines the fx options for this component. -func MockModule() fxutil.Module { - return fxutil.Component( - fx.Provide(newMock)) -} diff --git a/comp/aggregator/demultiplexer/demultiplexer.go b/comp/aggregator/demultiplexer/demultiplexerimpl/demultiplexer.go similarity index 82% rename from comp/aggregator/demultiplexer/demultiplexer.go rename to comp/aggregator/demultiplexer/demultiplexerimpl/demultiplexer.go index 215f929e48fe82..3cfa579c000ae8 100644 --- a/comp/aggregator/demultiplexer/demultiplexer.go +++ b/comp/aggregator/demultiplexer/demultiplexerimpl/demultiplexer.go @@ -3,21 +3,30 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2023-present Datadog, Inc. 
-package demultiplexer +// Package demultiplexerimpl defines the aggregator demultiplexer +package demultiplexerimpl import ( "context" + demultiplexerComp "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer" "github.com/DataDog/datadog-agent/comp/aggregator/diagnosesendermanager" "github.com/DataDog/datadog-agent/comp/core/log" "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder" orchestratorforwarder "github.com/DataDog/datadog-agent/comp/forwarder/orchestrator" "github.com/DataDog/datadog-agent/pkg/aggregator" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" + "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/hostname" "go.uber.org/fx" ) +// Module defines the fx options for this component. +func Module() fxutil.Module { + return fxutil.Component( + fx.Provide(newDemultiplexer)) +} + type dependencies struct { fx.In Log log.Component @@ -33,14 +42,14 @@ type demultiplexer struct { type provides struct { fx.Out - Comp Component + Comp demultiplexerComp.Component // Both demultiplexer.Component and diagnosesendermanager.Component expose a different instance of SenderManager. // It means that diagnosesendermanager.Component must not be used when there is demultiplexer.Component instance. // // newDemultiplexer returns both demultiplexer.Component and diagnosesendermanager.Component (Note: demultiplexer.Component // implements diagnosesendermanager.Component). This has the nice consequence of preventing having - // demultiplexer.Module and diagnosesendermanagerimpl.Module in the same fx.App because there would + // demultiplexerimpl.Module and diagnosesendermanagerimpl.Module in the same fx.App because there would // be two ways to create diagnosesendermanager.Component. 
SenderManager diagnosesendermanager.Component } diff --git a/comp/aggregator/demultiplexer/demultiplexer_mock.go b/comp/aggregator/demultiplexer/demultiplexerimpl/demultiplexer_mock.go similarity index 76% rename from comp/aggregator/demultiplexer/demultiplexer_mock.go rename to comp/aggregator/demultiplexer/demultiplexerimpl/demultiplexer_mock.go index 140635e91da444..66ef0c0443ae21 100644 --- a/comp/aggregator/demultiplexer/demultiplexer_mock.go +++ b/comp/aggregator/demultiplexer/demultiplexerimpl/demultiplexer_mock.go @@ -5,18 +5,26 @@ //go:build test -package demultiplexer +package demultiplexerimpl import ( + demultiplexerComp "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer" "github.com/DataDog/datadog-agent/comp/core/log" "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder" "github.com/DataDog/datadog-agent/pkg/aggregator" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" + "github.com/DataDog/datadog-agent/pkg/util/fxutil" "go.uber.org/fx" ) +// MockModule defines the fx options for this component. 
+func MockModule() fxutil.Module { + return fxutil.Component( + fx.Provide(newMock)) +} + type mock struct { - Component + demultiplexerComp.Component sender *sender.Sender } @@ -40,7 +48,7 @@ type mockDependencies struct { Log log.Component } -func newMock(deps mockDependencies) (Component, Mock) { +func newMock(deps mockDependencies) (demultiplexerComp.Component, demultiplexerComp.Mock) { opts := aggregator.DefaultAgentDemultiplexerOptions() opts.DontStartForwarders = true diff --git a/comp/aggregator/demultiplexer/demultiplexer_mock_test.go b/comp/aggregator/demultiplexer/demultiplexerimpl/demultiplexer_mock_test.go similarity index 82% rename from comp/aggregator/demultiplexer/demultiplexer_mock_test.go rename to comp/aggregator/demultiplexer/demultiplexerimpl/demultiplexer_mock_test.go index 8967030a7a12a4..9988c8dbe87f5f 100644 --- a/comp/aggregator/demultiplexer/demultiplexer_mock_test.go +++ b/comp/aggregator/demultiplexer/demultiplexerimpl/demultiplexer_mock_test.go @@ -5,11 +5,12 @@ //go:build test -package demultiplexer +package demultiplexerimpl import ( "testing" + demultiplexerComp "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer" "github.com/DataDog/datadog-agent/comp/core" "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder" "github.com/DataDog/datadog-agent/pkg/aggregator/mocksender" @@ -18,14 +19,14 @@ import ( ) func TestSetDefaultSender(t *testing.T) { - mock := fxutil.Test[Mock](t, MockModule(), + mock := fxutil.Test[demultiplexerComp.Mock](t, MockModule(), core.MockBundle(), defaultforwarder.MockModule()) sender := &mocksender.MockSender{} mock.SetDefaultSender(sender) - var component Component = mock + var component demultiplexerComp.Component = mock lazySenderManager, err := component.LazyGetSenderManager() require.NoError(t, err) diff --git a/comp/aggregator/demultiplexer/params.go b/comp/aggregator/demultiplexer/demultiplexerimpl/params.go similarity index 94% rename from comp/aggregator/demultiplexer/params.go 
rename to comp/aggregator/demultiplexer/demultiplexerimpl/params.go index cd7b433a1c57e5..3472622932dd46 100644 --- a/comp/aggregator/demultiplexer/params.go +++ b/comp/aggregator/demultiplexer/demultiplexerimpl/params.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2023-present Datadog, Inc. -package demultiplexer +package demultiplexerimpl import "github.com/DataDog/datadog-agent/pkg/aggregator" diff --git a/comp/ndmtmp/bundle_test.go b/comp/ndmtmp/bundle_test.go index 1d9f851a159d52..0d7dd977e7f2fd 100644 --- a/comp/ndmtmp/bundle_test.go +++ b/comp/ndmtmp/bundle_test.go @@ -8,7 +8,7 @@ package ndmtmp import ( "testing" - "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer" + "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer/demultiplexerimpl" "github.com/DataDog/datadog-agent/comp/core" "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder" orchestratorForwarderImpl "github.com/DataDog/datadog-agent/comp/forwarder/orchestrator/orchestratorimpl" @@ -19,10 +19,10 @@ import ( func TestBundleDependencies(t *testing.T) { fxutil.TestBundle(t, Bundle(), - demultiplexer.Module(), + demultiplexerimpl.Module(), orchestratorForwarderImpl.MockModule(), defaultforwarder.Module(), - fx.Supply(demultiplexer.Params{}), + fx.Supply(demultiplexerimpl.Params{}), fx.Supply(defaultforwarder.Params{}), core.MockBundle(), ) diff --git a/comp/netflow/server/server_test.go b/comp/netflow/server/server_test.go index 47377e880446fd..a3ad0b38d8d344 100644 --- a/comp/netflow/server/server_test.go +++ b/comp/netflow/server/server_test.go @@ -19,7 +19,7 @@ import ( "go.uber.org/fx" "go.uber.org/fx/fxtest" - "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer" + "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer/demultiplexerimpl" "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/comp/core/hostname/hostnameimpl" 
"github.com/DataDog/datadog-agent/comp/core/log/logimpl" @@ -68,7 +68,7 @@ var testOptions = fx.Options( forwarderimpl.MockModule(), hostnameimpl.MockModule(), logimpl.MockModule(), - demultiplexer.MockModule(), + demultiplexerimpl.MockModule(), defaultforwarder.MockModule(), config.MockModule(), fx.Invoke(func(lc fx.Lifecycle, c Component) { diff --git a/pkg/cli/subcommands/check/command.go b/pkg/cli/subcommands/check/command.go index 71d0dff4d2a98c..2b1857a98aea26 100644 --- a/pkg/cli/subcommands/check/command.go +++ b/pkg/cli/subcommands/check/command.go @@ -27,6 +27,7 @@ import ( "github.com/DataDog/datadog-agent/cmd/agent/common" "github.com/DataDog/datadog-agent/cmd/agent/common/path" "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer" + "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer/demultiplexerimpl" internalAPI "github.com/DataDog/datadog-agent/comp/api/api" "github.com/DataDog/datadog-agent/comp/api/api/apiimpl" "github.com/DataDog/datadog-agent/comp/core" @@ -155,15 +156,15 @@ func MakeCommand(globalParamsGetter func() GlobalParams) *cobra.Command { }), fx.Provide(func() serializer.MetricSerializer { return nil }), fx.Supply(defaultforwarder.Params{UseNoopForwarder: true}), - demultiplexer.Module(), + demultiplexerimpl.Module(), orchestratorForwarderImpl.Module(), fx.Supply(orchestratorForwarderImpl.NewNoopParams()), - fx.Provide(func() demultiplexer.Params { + fx.Provide(func() demultiplexerimpl.Params { // Initializing the aggregator with a flush interval of 0 (to disable the flush goroutines) opts := aggregator.DefaultAgentDemultiplexerOptions() opts.FlushInterval = 0 opts.UseNoopEventPlatformForwarder = true - return demultiplexer.Params{Options: opts} + return demultiplexerimpl.Params{Options: opts} }), // TODO(components): this is a temporary hack as the StartServer() method of the API package was previously called with nil arguments diff --git a/pkg/collector/corechecks/snmp/snmp_test.go 
b/pkg/collector/corechecks/snmp/snmp_test.go index bff2ffe0b86ab0..057d3833c04a4e 100644 --- a/pkg/collector/corechecks/snmp/snmp_test.go +++ b/pkg/collector/corechecks/snmp/snmp_test.go @@ -21,6 +21,7 @@ import ( "go.uber.org/fx" "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer" + "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer/demultiplexerimpl" "github.com/DataDog/datadog-agent/comp/core/log/logimpl" "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder" "github.com/DataDog/datadog-agent/comp/netflow/config" @@ -58,7 +59,7 @@ type deps struct { } func createDeps(t *testing.T) deps { - return fxutil.Test[deps](t, demultiplexer.MockModule(), defaultforwarder.MockModule(), config.MockModule(), logimpl.MockModule()) + return fxutil.Test[deps](t, demultiplexerimpl.MockModule(), defaultforwarder.MockModule(), config.MockModule(), logimpl.MockModule()) } func Test_Run_simpleCase(t *testing.T) { diff --git a/pkg/metadata/scheduler_test.go b/pkg/metadata/scheduler_test.go index a459fa9df952a9..11e5e795b31eb4 100644 --- a/pkg/metadata/scheduler_test.go +++ b/pkg/metadata/scheduler_test.go @@ -14,6 +14,7 @@ import ( "go.uber.org/fx" "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer" + "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer/demultiplexerimpl" "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/comp/core/log/logimpl" "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder" @@ -234,5 +235,5 @@ type deps struct { func buildDeps(t *testing.T) deps { opts := aggregator.DefaultAgentDemultiplexerOptions() opts.DontStartForwarders = true - return fxutil.Test[deps](t, defaultforwarder.MockModule(), config.MockModule(), logimpl.MockModule(), demultiplexer.MockModule()) + return fxutil.Test[deps](t, defaultforwarder.MockModule(), config.MockModule(), logimpl.MockModule(), demultiplexerimpl.MockModule()) } From da32cbe9b20f3da628cd6da9f68815df4315b727 Mon Sep 17 
00:00:00 2001 From: Sylvain Afchain Date: Fri, 15 Dec 2023 17:43:32 +0100 Subject: [PATCH 15/66] [CWS] handle process credentials in the ptracer (#21559) --- pkg/security/probe/probe_epbfless.go | 13 +++ pkg/security/proto/ebpfless/msg.go | 51 +++++++-- pkg/security/ptracer/cws.go | 100 +++++++++++++++++- pkg/security/ptracer/syscalls_amd64.go | 8 ++ pkg/security/ptracer/syscalls_arm64.go | 26 +++-- .../resolvers/process/resolver_ebpfless.go | 34 ++++++ 6 files changed, 210 insertions(+), 22 deletions(-) diff --git a/pkg/security/probe/probe_epbfless.go b/pkg/security/probe/probe_epbfless.go index ae4af546fbe2e5..3bec17a96917e1 100644 --- a/pkg/security/probe/probe_epbfless.go +++ b/pkg/security/probe/probe_epbfless.go @@ -82,6 +82,14 @@ func (p *EBPFLessProbe) handleClientMsg(msg *clientMsg) { case ebpfless.SyscallTypeExec: event.Type = uint32(model.ExecEventType) entry := p.Resolvers.ProcessResolver.AddExecEntry(process.CacheResolverKey{Pid: syscallMsg.PID, NSID: syscallMsg.NSID}, syscallMsg.Exec.Filename, syscallMsg.Exec.Args, syscallMsg.Exec.Envs, syscallMsg.ContainerContext.ID) + + if syscallMsg.Exec.Credentials != nil { + entry.Credentials.UID = syscallMsg.Exec.Credentials.UID + entry.Credentials.EUID = syscallMsg.Exec.Credentials.EUID + entry.Credentials.GID = syscallMsg.Exec.Credentials.GID + entry.Credentials.EGID = syscallMsg.Exec.Credentials.EGID + } + event.Exec.Process = &entry.Process case ebpfless.SyscallTypeFork: event.Type = uint32(model.ForkEventType) @@ -92,6 +100,11 @@ func (p *EBPFLessProbe) handleClientMsg(msg *clientMsg) { event.Open.File.BasenameStr = filepath.Base(syscallMsg.Open.Filename) event.Open.Flags = syscallMsg.Open.Flags event.Open.Mode = syscallMsg.Open.Mode + case ebpfless.SyscallTypeSetUID: + p.Resolvers.ProcessResolver.UpdateUID(process.CacheResolverKey{Pid: syscallMsg.PID, NSID: syscallMsg.NSID}, syscallMsg.SetUID.UID, syscallMsg.SetUID.EUID) + + case ebpfless.SyscallTypeSetGID: + 
p.Resolvers.ProcessResolver.UpdateGID(process.CacheResolverKey{Pid: syscallMsg.PID, NSID: syscallMsg.NSID}, syscallMsg.SetGID.GID, syscallMsg.SetGID.EGID) } // container context diff --git a/pkg/security/proto/ebpfless/msg.go b/pkg/security/proto/ebpfless/msg.go index c8a51ca0bc6114..37f7c2c990135d 100644 --- a/pkg/security/proto/ebpfless/msg.go +++ b/pkg/security/proto/ebpfless/msg.go @@ -13,17 +13,21 @@ type SyscallType int32 const ( // SyscallTypeUnknown unknown type - SyscallTypeUnknown SyscallType = 0 + SyscallTypeUnknown SyscallType = iota // SyscallTypeExec exec type - SyscallTypeExec SyscallType = 1 + SyscallTypeExec // SyscallTypeFork fork type - SyscallTypeFork SyscallType = 2 + SyscallTypeFork // SyscallTypeOpen open type - SyscallTypeOpen SyscallType = 3 + SyscallTypeOpen // SyscallTypeExit exit type - SyscallTypeExit SyscallType = 4 + SyscallTypeExit // SyscallTypeFcntl fcntl type - SyscallTypeFcntl SyscallType = 5 + SyscallTypeFcntl + // SyscallTypeSetUID setuid/setreuid type + SyscallTypeSetUID + // SyscallTypeSetGID setgid/setregid type + SyscallTypeSetGID ) // ContainerContext defines a container context @@ -40,11 +44,20 @@ type FcntlSyscallMsg struct { Cmd uint32 } +// Credentials defines process credentials +type Credentials struct { + UID uint32 + EUID uint32 + GID uint32 + EGID uint32 +} + // ExecSyscallMsg defines an exec message type ExecSyscallMsg struct { - Filename string - Args []string - Envs []string + Filename string + Args []string + Envs []string + Credentials *Credentials } // ForkSyscallMsg defines a fork message @@ -72,6 +85,18 @@ type ChdirSyscallFakeMsg struct { Path string } +// SetUIDSyscallMsg defines a setreuid message +type SetUIDSyscallMsg struct { + UID int32 + EUID int32 +} + +// SetGIDSyscallMsg defines a setregid message +type SetGIDSyscallMsg struct { + GID int32 + EGID int32 +} + // SyscallMsg defines a syscall message type SyscallMsg struct { SeqNum uint64 @@ -84,8 +109,12 @@ type SyscallMsg struct { Fork 
*ForkSyscallMsg Exit *ExitSyscallMsg Fcntl *FcntlSyscallMsg - Dup *DupSyscallFakeMsg - Chdir *ChdirSyscallFakeMsg + SetUID *SetUIDSyscallMsg + SetGID *SetGIDSyscallMsg + + // internals + Dup *DupSyscallFakeMsg + Chdir *ChdirSyscallFakeMsg } // String returns string representation diff --git a/pkg/security/ptracer/cws.go b/pkg/security/ptracer/cws.go index 23ea5421325a95..134314a8463fce 100644 --- a/pkg/security/ptracer/cws.go +++ b/pkg/security/ptracer/cws.go @@ -238,6 +238,41 @@ func handleFchdir(tracer *Tracer, process *Process, msg *ebpfless.SyscallMsg, re return nil } +func handleSetuid(tracer *Tracer, _ *Process, msg *ebpfless.SyscallMsg, regs syscall.PtraceRegs) error { + msg.Type = ebpfless.SyscallTypeSetUID + msg.SetUID = &ebpfless.SetUIDSyscallMsg{ + UID: tracer.ReadArgInt32(regs, 0), + EUID: -1, + } + return nil +} + +func handleSetgid(tracer *Tracer, _ *Process, msg *ebpfless.SyscallMsg, regs syscall.PtraceRegs) error { + msg.Type = ebpfless.SyscallTypeSetGID + msg.SetGID = &ebpfless.SetGIDSyscallMsg{ + GID: tracer.ReadArgInt32(regs, 0), + } + return nil +} + +func handleSetreuid(tracer *Tracer, _ *Process, msg *ebpfless.SyscallMsg, regs syscall.PtraceRegs) error { + msg.Type = ebpfless.SyscallTypeSetUID + msg.SetUID = &ebpfless.SetUIDSyscallMsg{ + UID: tracer.ReadArgInt32(regs, 0), + EUID: tracer.ReadArgInt32(regs, 1), + } + return nil +} + +func handleSetregid(tracer *Tracer, _ *Process, msg *ebpfless.SyscallMsg, regs syscall.PtraceRegs) error { + msg.Type = ebpfless.SyscallTypeSetGID + msg.SetGID = &ebpfless.SetGIDSyscallMsg{ + GID: tracer.ReadArgInt32(regs, 0), + EGID: tracer.ReadArgInt32(regs, 1), + } + return nil +} + // ECSMetadata defines ECS metadatas type ECSMetadata struct { DockerID string `json:"DockerId"` @@ -436,6 +471,14 @@ func StartCWSPtracer(args []string, probeAddr string, creds Creds, verbose bool) return err } + // first process + process := &Process{ + Pid: tracer.PID, + Nr: make(map[int]*ebpfless.SyscallMsg), + Fd: 
make(map[int32]string), + } + cache.Add(tracer.PID, process) + go func() { var seq uint64 @@ -517,6 +560,30 @@ func StartCWSPtracer(args []string, probeAddr string, creds Creds, verbose bool) logErrorf("unable to handle execve: %v", err) return } + + // Top level pid, add creds. For the other PIDs the creds will be propagated at the probe side + if process.Pid == tracer.PID { + var uid, gid uint32 + + if creds.UID != nil { + uid = *creds.UID + } else { + uid = uint32(os.Getuid()) + } + + if creds.GID != nil { + gid = *creds.GID + } else { + gid = uint32(os.Getgid()) + } + + msg.Exec.Credentials = &ebpfless.Credentials{ + UID: uid, + EUID: uid, + GID: gid, + EGID: gid, + } + } case ExecveatNr: if err = handleExecveAt(tracer, process, msg, regs); err != nil { logErrorf("unable to handle execveat: %v", err) @@ -539,7 +606,26 @@ func StartCWSPtracer(args []string, probeAddr string, creds Creds, verbose bool) logErrorf("unable to handle fchdir: %v", err) return } - + case SetuidNr: + if err = handleSetuid(tracer, process, msg, regs); err != nil { + logErrorf("unable to handle fchdir: %v", err) + return + } + case SetgidNr: + if err = handleSetgid(tracer, process, msg, regs); err != nil { + logErrorf("unable to handle fchdir: %v", err) + return + } + case SetreuidNr: + if err = handleSetreuid(tracer, process, msg, regs); err != nil { + logErrorf("unable to handle fchdir: %v", err) + return + } + case SetregidNr: + if err = handleSetregid(tracer, process, msg, regs); err != nil { + logErrorf("unable to handle fchdir: %v", err) + return + } } case CallbackPostType: switch nr { @@ -547,17 +633,25 @@ func StartCWSPtracer(args []string, probeAddr string, creds Creds, verbose bool) send(process.Nr[nr]) case OpenNr, OpenatNr: if ret := tracer.ReadRet(regs); ret >= 0 { - msg, exists := process.Nr[nr] if !exists { return } - send(process.Nr[nr]) + send(msg) // maintain fd/path mapping process.Fd[int32(ret)] = msg.Open.Filename } + case SetuidNr, SetgidNr, SetreuidNr, SetregidNr: 
+ if ret := tracer.ReadRet(regs); ret >= 0 { + msg, exists := process.Nr[nr] + if !exists { + return + } + + send(msg) + } case ForkNr, VforkNr, CloneNr: msg := &ebpfless.SyscallMsg{ ContainerContext: &containerCtx, diff --git a/pkg/security/ptracer/syscalls_amd64.go b/pkg/security/ptracer/syscalls_amd64.go index 92cad5f4f8f8cd..3f602a93b4e242 100644 --- a/pkg/security/ptracer/syscalls_amd64.go +++ b/pkg/security/ptracer/syscalls_amd64.go @@ -29,6 +29,10 @@ const ( Dup3Nr = 292 // Dup3Nr defines the syscall ID for amd64 ChdirNr = 80 // ChdirNr defines the syscall ID for amd64 FchdirNr = 81 // FchdirNr defines the syscall ID for amd64 + SetuidNr = 105 // SetuidNr defines the syscall ID for amd64 + SetgidNr = 106 // SetgidNr defines the syscall ID for amd64 + SetreuidNr = 113 // SetreuidNr defines the syscall ID for amd64 + SetregidNr = 114 // SetregidNr defines the syscall ID for amd64 ptraceFlags = 0 | syscall.PTRACE_O_TRACEVFORK | @@ -57,6 +61,10 @@ var ( "dup3", "chdir", "fchdir", + "setuid", + "setgid", + "setreuid", + "setregid", } ) diff --git a/pkg/security/ptracer/syscalls_arm64.go b/pkg/security/ptracer/syscalls_arm64.go index 35561f458ec20b..86a8cff2d2ac6f 100644 --- a/pkg/security/ptracer/syscalls_arm64.go +++ b/pkg/security/ptracer/syscalls_arm64.go @@ -21,15 +21,19 @@ const ( CloneNr = 220 // CloneNr defines the syscall ID for arm64 ExitNr = 93 // ExitNr defines the syscall ID for arm64 FcntlNr = 25 // FcntlNr defines the syscall ID for arm64 - DupNr = 23 // DupNr defines the syscall ID for amd64 - Dup3Nr = 24 // Dup3Nr defines the syscall ID for amd64 - ChdirNr = 49 // ChdirNr defines the syscall ID for amd64 - FchdirNr = 50 // FchdirNr defines the syscall ID for amd64 + DupNr = 23 // DupNr defines the syscall ID for arm64 + Dup3Nr = 24 // Dup3Nr defines the syscall ID for arm64 + ChdirNr = 49 // ChdirNr defines the syscall ID for arm64 + FchdirNr = 50 // FchdirNr defines the syscall ID for arm64 + SetuidNr = 146 // SetuidNr defines the syscall ID for 
arm64 + SetgidNr = 144 // SetgidNr defines the syscall ID for arm64 + SetreuidNr = 145 // SetreuidNr defines the syscall ID for arm64 + SetregidNr = 143 // SetregidNr defines the syscall ID for arm64 - OpenNr = 9990 // OpenNr not available on arm64 - ForkNr = 9991 // ForkNr not available on arm64 - VforkNr = 9992 // VforkNr not available on arm64 - Dup2Nr = 9993 // Dup2Nr not available on arm64 + OpenNr = -1 // OpenNr not available on arm64 + ForkNr = -2 // ForkNr not available on arm64 + VforkNr = -3 // VforkNr not available on arm64 + Dup2Nr = -4 // Dup2Nr not available on arm64 ptraceFlags = 0 | syscall.PTRACE_O_TRACECLONE | @@ -52,6 +56,12 @@ var ( "dup3", "chdir", "fchdir", + "setuid", + "setgid", + "setuid", + "setgid", + "setreuid", + "setregid", } ) diff --git a/pkg/security/resolvers/process/resolver_ebpfless.go b/pkg/security/resolvers/process/resolver_ebpfless.go index fe0da8f9735638..4e79dcabf83d19 100644 --- a/pkg/security/resolvers/process/resolver_ebpfless.go +++ b/pkg/security/resolvers/process/resolver_ebpfless.go @@ -156,6 +156,8 @@ func (p *EBPFLessResolver) insertForkEntry(key CacheResolverKey, entry *model.Pr func (p *EBPFLessResolver) insertExecEntry(key CacheResolverKey, entry *model.ProcessCacheEntry) { prev := p.entryCache[key] if prev != nil { + // inheritate credentials as exec event, uid/gid can be update by setuid/setgid events + entry.Credentials = prev.Credentials prev.Exec(entry) } @@ -172,6 +174,38 @@ func (p *EBPFLessResolver) Resolve(key CacheResolverKey) *model.ProcessCacheEntr return nil } +// UpdateUID updates the credentials of the provided pid +func (p *EBPFLessResolver) UpdateUID(key CacheResolverKey, uid int32, euid int32) { + p.Lock() + defer p.Unlock() + + entry := p.entryCache[key] + if entry != nil { + if uid != -1 { + entry.Credentials.UID = uint32(uid) + } + if euid != -1 { + entry.Credentials.EUID = uint32(euid) + } + } +} + +// UpdateGID updates the credentials of the provided pid +func (p *EBPFLessResolver) 
UpdateGID(key CacheResolverKey, gid int32, egid int32) { + p.Lock() + defer p.Unlock() + + entry := p.entryCache[key] + if entry != nil { + if gid != -1 { + entry.Credentials.GID = uint32(gid) + } + if egid != -1 { + entry.Credentials.EGID = uint32(egid) + } + } +} + // getCacheSize returns the cache size of the process resolver func (p *EBPFLessResolver) getCacheSize() float64 { p.RLock() From bc10f388637f3eb22d26ae4b8c20f2f5a7f8bee2 Mon Sep 17 00:00:00 2001 From: Joshua Lineaweaver Date: Fri, 15 Dec 2023 12:02:32 -0500 Subject: [PATCH 16/66] [Orchestrator] Set HPA to stable in cluster agent (#21589) * Set HPA to stable in cluster agent * Add release note --- .../orchestrator/collectors/k8s/horizontalpodautoscaler.go | 2 +- .../notes/HorizontalPodAutoscalerEnable-353a13c703f9dc48.yaml | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) create mode 100644 releasenotes-dca/notes/HorizontalPodAutoscalerEnable-353a13c703f9dc48.yaml diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/horizontalpodautoscaler.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/horizontalpodautoscaler.go index c91450daeac689..8747a95c19f9aa 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/horizontalpodautoscaler.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/horizontalpodautoscaler.go @@ -41,7 +41,7 @@ func NewHorizontalPodAutoscalerCollector() *HorizontalPodAutoscalerCollector { return &HorizontalPodAutoscalerCollector{ metadata: &collectors.CollectorMetadata{ IsDefaultVersion: true, - IsStable: false, + IsStable: true, IsMetadataProducer: true, IsManifestProducer: true, SupportsManifestBuffering: true, diff --git a/releasenotes-dca/notes/HorizontalPodAutoscalerEnable-353a13c703f9dc48.yaml b/releasenotes-dca/notes/HorizontalPodAutoscalerEnable-353a13c703f9dc48.yaml new file mode 100644 index 00000000000000..ff645c6e9ae783 --- /dev/null +++ 
b/releasenotes-dca/notes/HorizontalPodAutoscalerEnable-353a13c703f9dc48.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Enable Horizontal Pod Autoscaler collection for the Orchestrator by default From 578dbd4e7eb4366bebc11d10d6260ab545ce2ac2 Mon Sep 17 00:00:00 2001 From: Yakov Shapiro Date: Fri, 15 Dec 2023 12:45:41 -0500 Subject: [PATCH 17/66] Allow the install type to be unset for the trace agent (#21578) --- pkg/trace/agent/agent.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/trace/agent/agent.go b/pkg/trace/agent/agent.go index 54649a968f6864..56c0a622f93ec6 100644 --- a/pkg/trace/agent/agent.go +++ b/pkg/trace/agent/agent.go @@ -234,7 +234,7 @@ func (a *Agent) setRootSpanTags(root *pb.Span) { // setFirstTraceTags sets additional tags on the first trace ever processed by the agent, // so that we can see that the customer has successfully onboarded onto APM. func (a *Agent) setFirstTraceTags(root *pb.Span) { - if a.conf == nil || a.conf.InstallSignature.InstallType == "" || root == nil { + if a.conf == nil || a.conf.InstallSignature.InstallID == "" || root == nil { return } a.firstSpanOnce.Do(func() { From d5a0f884e6ab2bef9f8f9325c27cd637039dbd62 Mon Sep 17 00:00:00 2001 From: Pierre Gimalac Date: Fri, 15 Dec 2023 19:44:21 +0100 Subject: [PATCH 18/66] Add used_by_otel field in modules.py and update update-go invoke task (#21588) Add used_by_otel field in modules.py and update update-go invoke task --- tasks/modules.py | 20 ++++++++++++-------- tasks/update_go.py | 14 ++++++++++---- 2 files changed, 22 insertions(+), 12 deletions(-) diff --git a/tasks/modules.py b/tasks/modules.py index 5837a86d59c348..a04b7ab3782112 100644 --- a/tasks/modules.py +++ b/tasks/modules.py @@ -27,6 +27,7 @@ def __init__( importable=True, independent=False, lint_targets=None, + used_by_otel=False, ): self.path = path self.targets = targets if targets else ["."] @@ -39,6 +40,7 @@ def __init__( # at the cost of spending some time parsing the module. 
self.importable = importable self.independent = independent + self.used_by_otel = used_by_otel self._dependencies = None @@ -154,10 +156,10 @@ def dependency_path(self, agent_version): "test/fakeintake": GoModule("test/fakeintake", independent=True), "pkg/aggregator/ckey": GoModule("pkg/aggregator/ckey", independent=True), "pkg/errors": GoModule("pkg/errors", independent=True), - "pkg/obfuscate": GoModule("pkg/obfuscate", independent=True), + "pkg/obfuscate": GoModule("pkg/obfuscate", independent=True, used_by_otel=True), "pkg/gohai": GoModule("pkg/gohai", independent=True, importable=False), - "pkg/proto": GoModule("pkg/proto", independent=True), - "pkg/trace": GoModule("pkg/trace", independent=True), + "pkg/proto": GoModule("pkg/proto", independent=True, used_by_otel=True), + "pkg/trace": GoModule("pkg/trace", independent=True, used_by_otel=True), "pkg/tagset": GoModule("pkg/tagset", independent=True), "pkg/metrics": GoModule("pkg/metrics", independent=True), "pkg/telemetry": GoModule("pkg/telemetry", independent=True), @@ -170,12 +172,14 @@ def dependency_path(self, agent_version): "pkg/config/remote": GoModule("pkg/config/remote", independent=True), "pkg/security/secl": GoModule("pkg/security/secl", independent=True), "pkg/status/health": GoModule("pkg/status/health", independent=True), - "pkg/remoteconfig/state": GoModule("pkg/remoteconfig/state", independent=True), - "pkg/util/cgroups": GoModule("pkg/util/cgroups", independent=True, condition=lambda: sys.platform == "linux"), + "pkg/remoteconfig/state": GoModule("pkg/remoteconfig/state", independent=True, used_by_otel=True), + "pkg/util/cgroups": GoModule( + "pkg/util/cgroups", independent=True, condition=lambda: sys.platform == "linux", used_by_otel=True + ), "pkg/util/http": GoModule("pkg/util/http", independent=True), - "pkg/util/log": GoModule("pkg/util/log", independent=True), - "pkg/util/pointer": GoModule("pkg/util/pointer", independent=True), - "pkg/util/scrubber": GoModule("pkg/util/scrubber", 
independent=True), + "pkg/util/log": GoModule("pkg/util/log", independent=True, used_by_otel=True), + "pkg/util/pointer": GoModule("pkg/util/pointer", independent=True, used_by_otel=True), + "pkg/util/scrubber": GoModule("pkg/util/scrubber", independent=True, used_by_otel=True), "pkg/util/backoff": GoModule("pkg/util/backoff", independent=True), "pkg/util/cache": GoModule("pkg/util/cache", independent=True), "pkg/util/common": GoModule("pkg/util/common", independent=True), diff --git a/tasks/update_go.py b/tasks/update_go.py index 7b3d6e3463ab60..153979db0e3b0b 100644 --- a/tasks/update_go.py +++ b/tasks/update_go.py @@ -24,6 +24,7 @@ def go_version(_): "test_version": "Whether the image is a test image or not", "warn": "Don't exit in case of matching error, just warn.", "release_note": "Whether to create a release note or not. The default behaviour is to create a release note", + "include_otel_modules": "Whether to update the version in go.mod files used by otel.", } ) def update_go( @@ -33,6 +34,7 @@ def update_go( test_version: Optional[bool] = False, warn: Optional[bool] = False, release_note: Optional[bool] = True, + include_otel_modules: Optional[bool] = False, ): """ Updates the version of Go and build images. 
@@ -68,7 +70,7 @@ def update_go( _update_root_readme(warn, new_major) _update_fakeintake_readme(warn, new_major) - _update_go_mods(warn, new_major) + _update_go_mods(warn, new_major, include_otel_modules) _update_process_agent_readme(warn, new_major) _update_windowsevent_readme(warn, new_major) _update_go_version_file(warn, version) @@ -217,9 +219,13 @@ def _update_windowsevent_readme(warn: bool, major: str): _update_file(warn, path, pattern, replace) -def _update_go_mods(warn: bool, major: str): - mod_files = [f"./{module}/go.mod" for module in DEFAULT_MODULES] - for mod_file in mod_files: +def _update_go_mods(warn: bool, major: str, include_otel_modules: bool): + for path, module in DEFAULT_MODULES.items(): + if not include_otel_modules and module.used_by_otel: + # only update the go directives in go.mod files not used by otel + # to allow them to keep using the modules + continue + mod_file = f"./{path}/go.mod" _update_file(warn, mod_file, "^go [.0-9]+$", f"go {major}") From d15268d030a6b06f163623c6940248ba81ec38df Mon Sep 17 00:00:00 2001 From: Bryce Kahle Date: Fri, 15 Dec 2023 11:26:02 -0800 Subject: [PATCH 19/66] fix KMT cleanup null check (#21576) fix KMT cleanup null check --- .gitlab/kernel_version_testing/system_probe.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab/kernel_version_testing/system_probe.yml b/.gitlab/kernel_version_testing/system_probe.yml index c7745d7a56d64e..0d169eaf0d164c 100644 --- a/.gitlab/kernel_version_testing/system_probe.yml +++ b/.gitlab/kernel_version_testing/system_probe.yml @@ -437,7 +437,7 @@ kernel_matrix_testing_run_tests_arm64: - INSTANCE_ID="$(jq -r '.[0][0]' < instance.json)" - echo ${INSTANCE_ID} - | - if [[ "${INSTANCE_ID}" != "" ]]; then + if [[ "${INSTANCE_ID}" != "" ]] && [[ "${INSTANCE_ID}" != "null" ]]; then aws ec2 terminate-instances --instance-ids "${INSTANCE_ID}" fi From 9c2a358144c1bec888f089d785a2555de1a2188a Mon Sep 17 00:00:00 2001 From: Pierre Gimalac Date: Fri, 15 Dec 2023 
21:09:46 +0100 Subject: [PATCH 20/66] [ASCII-977] Go update auto-comment old versions (#21476) [ASCII-977] Go update auto-comment old versions --- .github/CODEOWNERS | 2 + .github/workflows/go-update-commenter.yml | 65 ++++++++++++++++ tools/go-update/detect-old-version.sh | 92 +++++++++++++++++++++++ 3 files changed, 159 insertions(+) create mode 100644 .github/workflows/go-update-commenter.yml create mode 100644 tools/go-update/detect-old-version.sh diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 200c83e62ab97e..bcf2998e991e34 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -47,6 +47,7 @@ /.github/workflows/windows-*.yml @DataDog/windows-agent /.github/workflows/cws-btfhub-sync.yml @DataDog/agent-security /.github/workflows/gohai.yml @DataDog/agent-shared-components +/.github/workflows/go-update-commenter.yml @DataDog/agent-shared-components # Gitlab files # Files containing job contents are owned by teams in charge of the jobs + agent-platform @@ -507,6 +508,7 @@ /tools/ @DataDog/agent-platform /tools/ebpf/ @DataDog/ebpf-platform /tools/gdb/ @DataDog/agent-shared-components +/tools/go-update/ @DataDog/agent-shared-components /tools/retry_file_dump/ @DataDog/agent-metrics-logs /tools/windows/ @DataDog/windows-agent /tools/windows/DatadogAgentInstaller/WixSetup/localization-en-us.wxl @DataDog/windows-agent @DataDog/documentation diff --git a/.github/workflows/go-update-commenter.yml b/.github/workflows/go-update-commenter.yml new file mode 100644 index 00000000000000..c25a907d753ecc --- /dev/null +++ b/.github/workflows/go-update-commenter.yml @@ -0,0 +1,65 @@ +name: "Go update commenter" + +on: + pull_request: + # Only run on PR label events (in particular not on every commit) + types: [ labeled ] + +jobs: + old-versions-match: + # Only run if the PR is labeled with 'go-update' + if: ${{ github.event.label.name == 'go-update' }} + runs-on: ubuntu-latest + steps: + # get the Go version of the target branch + - uses: actions/checkout@v3 + 
with: + ref: ${{ github.base_ref }} + - name: Get former Go version + id: former_go_version + run: | + echo version="$(cat .go-version)" >> $GITHUB_OUTPUT + + # get the Go version of the PR branch + - uses: actions/checkout@v3 + - name: Get current Go version + id: new_go_version + run: | + echo version="$(cat .go-version)" >> $GITHUB_OUTPUT + + # build the comment + - name: Build full comment + id: old_versions + run: | + set -euxo pipefail + # build the base of the Github URL to the current commit + GITHUB_HEAD_URL='${{ github.server_url }}/${{ github.repository }}/blob/${{ github.sha }}' + { + echo "matches<> $GITHUB_OUTPUT + + # and display it + - uses: actions/github-script@v7 + env: + # We need to store the output in an environment variable and not use it directly in the createComment, + # as it will likely not be a valid JS string (eg. if it contains a quote character) + CONTENT: ${{ steps.old_versions.outputs.matches }} + with: + script: | + github.rest.issues.createComment({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + body: process.env.CONTENT + }) diff --git a/tools/go-update/detect-old-version.sh b/tools/go-update/detect-old-version.sh new file mode 100644 index 00000000000000..d8cc714e1420fb --- /dev/null +++ b/tools/go-update/detect-old-version.sh @@ -0,0 +1,92 @@ +#!/bin/bash + +set -euo pipefail + +# This script is used to display files which might contain a reference to an old Go version. + +# It can be explicitly given a Go version as argument to look for. 
+# Otherwise, it assumes that the current branch is the one which contains the new Go version, +# and compares it to $GITHUB_BASE_REF if it is defined, or "main" otherwise + +if [ $# -gt 2 ]; then + echo "This script takes at most two arguments, the old Go version we want to look for and the new one" 1>&2 + echo "If no argument is given, they are fetched from the '.go-version' file, respectively of the branch from GITHUB_BASE_REF, or main if it is not defined." 1>&2 + echo "" 1>&2 + echo "If only one version is given, it is looked for without any particular checking." 1>&2 + echo "If zero or two versions are given, the script checks whether the minor changed or only the bugfix, and uses either accordingly." 1>&2 + exit 1 +fi + +if [ $# -eq 1 ]; then + # use the version given as argument + PATTERN_GO_VERSION="$1" +else + if [ $# -eq 0 ]; then + # use the version from the .go-version file, and compare it to the one from GITHUB_BASE_REF, or main + GO_VERSION_PREV_BUGFIX=$(git show "${GITHUB_BASE_REF:-main}":.go-version) + GO_VERSION_NEW_BUGFIX=$(cat .go-version) + else + GO_VERSION_PREV_BUGFIX="$1" + GO_VERSION_NEW_BUGFIX="$2" + fi + + GO_VERSION_PREV_MINOR="${GO_VERSION_PREV_BUGFIX%.*}" + echo "Former bugfix version: $GO_VERSION_PREV_BUGFIX" 1>&2 + echo "Former minor version: $GO_VERSION_PREV_MINOR" 1>&2 + + GO_VERSION_NEW_MINOR="${GO_VERSION_NEW_BUGFIX%.*}" + echo "New bugfix version: $GO_VERSION_NEW_BUGFIX" 1>&2 + echo "New minor version: $GO_VERSION_NEW_MINOR" 1>&2 + + # if the old bugfix is the same as the new one, return + if [ "$GO_VERSION_PREV_BUGFIX" == "$GO_VERSION_NEW_BUGFIX" ]; then + echo "This branch doesn't change the Go version" 1>&2 + exit 1 + fi + + if [ "$GO_VERSION_PREV_MINOR" != "$GO_VERSION_NEW_MINOR" ]; then + # minor update + PATTERN_GO_VERSION="$GO_VERSION_PREV_MINOR" + else + # bugfix update + PATTERN_GO_VERSION="$GO_VERSION_PREV_BUGFIX" + fi +fi + +echo "Looking for Go version: $PATTERN_GO_VERSION" 1>&2 + +# Go versions can be preceded by a 
'g' (golang), 'o' (go), 'v', or a non-alphanumerical character +# Prevent matching when preceded by a dot, as it is likely not a Go version in that case +PATTERN_PREFIX='(^|[^.a-fh-np-uw-z0-9])' +# Go versions in go.dev/dl URLs are followed by a dot, so we need to allow it in the regex +PATTERN_SUFFIX='($|[^a-z0-9])' + +# Go versions contain dots, which are special characters in regexes, so we need to escape them +# Safely assume that the version only contains numbers and dots +PATTERN_GO_VERSION_ESCAPED="$(echo "$PATTERN_GO_VERSION" | sed 's/\./\\./g')" +# The regex is not perfect, but it should be good enough +PATTERN="${PATTERN_PREFIX}${PATTERN_GO_VERSION_ESCAPED}${PATTERN_SUFFIX}" +echo "Using pattern: $PATTERN" 1>&2 + +# grep returns 1 when no match is found, which would cause the script to fail, wrap it to return 0 in that case. +# It returns a non-zero value if grep returned >1. +function safegrep() { + grep "$@" || [ $? -eq 1 ] +} + +# -r: recursive +# -I: ignore binary files +# -i: ignore case +# -n: print line number +# -E: extended regexp pattern to match +# --exclude-dir: exclude directories +# --exclude: exclude file name patterns +safegrep -r -I -i -n -E "$PATTERN" . 
\ + --exclude-dir fixtures --exclude-dir .git --exclude-dir releasenotes \ + --exclude-dir omnibus --exclude-dir snmp --exclude-dir testdata \ + --exclude '*.rst' --exclude '*.sum' --exclude '*generated*.go' --exclude '*.svg' | \ +# -v: invert match +# exclude matches in go.mod files that are dependency versions (note the 'v' before the version) +safegrep -v -E "go\.mod:.*v${PATTERN_GO_VERSION_ESCAPED}" | \ +# grep outputs paths starting with ./, so we remove that +sed -e 's|^./||' From 5771179a3ea3a466b6660e2a26d256b8508fb090 Mon Sep 17 00:00:00 2001 From: Bryce Kahle Date: Fri, 15 Dec 2023 12:55:28 -0800 Subject: [PATCH 21/66] fix ebpf-platform revive issues (#21574) fix ebpf-platform revive issues --- cmd/system-probe/api/client.go | 2 +- cmd/system-probe/api/module/common.go | 2 +- cmd/system-probe/command/command.go | 2 +- cmd/system-probe/common/common.go | 2 +- cmd/system-probe/common/common_nix.go | 2 +- cmd/system-probe/common/common_unsupported.go | 2 +- cmd/system-probe/common/common_windows.go | 2 +- cmd/system-probe/config/adjust.go | 2 +- cmd/system-probe/config/config.go | 14 ++-- cmd/system-probe/config/config_test.go | 43 ++++++----- cmd/system-probe/main.go | 1 - cmd/system-probe/main_common.go | 2 +- cmd/system-probe/modules/all_linux.go | 5 +- cmd/system-probe/modules/all_unsupported.go | 2 +- cmd/system-probe/modules/all_windows.go | 2 +- .../modules/dynamic_instrumentation.go | 2 +- .../modules/language_detection.go | 2 +- cmd/system-probe/subcommands/debug/command.go | 2 +- .../subcommands/modrestart/command.go | 2 +- cmd/system-probe/subcommands/run/command.go | 12 ++- cmd/system-probe/subcommands/subcommands.go | 2 +- .../subcommands/version/command.go | 4 +- cmd/system-probe/utils/limiter.go | 2 +- .../corechecks/ebpf/probe/ebpfcheck/probe.go | 7 +- .../corechecks/ebpf/probe/ebpfcheck/prog.go | 49 ++++++------ .../corechecks/ebpf/probe/ebpfcheck/utils.go | 10 --- pkg/ebpf/bytecode/asset_reader.go | 2 +- 
pkg/ebpf/bytecode/runtime/all_helpers.go | 2 +- pkg/ebpf/bytecode/runtime/asset.go | 2 +- pkg/ebpf/bytecode/runtime/generated_asset.go | 4 +- pkg/ebpf/bytecode/runtime/helpers_test.go | 2 +- pkg/ebpf/bytecode/runtime/protected_file.go | 17 ++--- .../runtime/runtime_compilation_helpers.go | 2 +- pkg/ebpf/cgo/genpost.go | 2 +- pkg/ebpf/common.go | 2 +- pkg/ebpf/compiler/compiler.go | 6 +- pkg/ebpf/debugfs_stat_collector_linux.go | 7 +- pkg/ebpf/ebpftest/bpfdebug_linux.go | 2 +- pkg/ebpf/ebpftest/bpfdebug_unsupported.go | 2 +- pkg/ebpf/ebpftest/buildmode.go | 2 +- pkg/ebpf/ebpftest/buildmode_linux.go | 6 +- pkg/ebpf/ebpftest/buildmode_windows.go | 6 +- pkg/ebpf/ebpftest/log.go | 2 +- pkg/ebpf/perf.go | 8 +- pkg/network/telemetry/stat_counter_wrapper.go | 11 +-- pkg/network/telemetry/stat_gauge_wrapper.go | 12 +-- pkg/util/kernel/arch.go | 2 +- pkg/util/kernel/download_headers_test.go | 3 +- pkg/util/kernel/find_headers.go | 8 +- pkg/util/kernel/find_headers_test.go | 2 +- pkg/util/kernel/fs_nolinux.go | 4 +- test/new-e2e/scenarios/system-probe/main.go | 9 +-- .../system-probe/system-probe-test-env.go | 75 ++++++++----------- .../system-probe/test-json-review/main.go | 2 +- test/new-e2e/system-probe/test-runner/main.go | 9 +-- 55 files changed, 171 insertions(+), 221 deletions(-) diff --git a/cmd/system-probe/api/client.go b/cmd/system-probe/api/client.go index 58d7350e0dc8ca..e6ffb8e341fd34 100644 --- a/cmd/system-probe/api/client.go +++ b/cmd/system-probe/api/client.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
-//nolint:revive // TODO(EBPF) Fix revive linter +// Package api contains the API exposed by system-probe package api import ( diff --git a/cmd/system-probe/api/module/common.go b/cmd/system-probe/api/module/common.go index e50a10cbf9f818..7a303623cd95fa 100644 --- a/cmd/system-probe/api/module/common.go +++ b/cmd/system-probe/api/module/common.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//nolint:revive // TODO(EBPF) Fix revive linter +// Package module is the scaffolding for a system-probe module and the loader used upon start package module import ( diff --git a/cmd/system-probe/command/command.go b/cmd/system-probe/command/command.go index 6206e134f2e4a2..2dec9a443b4afd 100644 --- a/cmd/system-probe/command/command.go +++ b/cmd/system-probe/command/command.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//nolint:revive // TODO(EBPF) Fix revive linter +// Package command contains utilities for creating system-probe commands package command import ( diff --git a/cmd/system-probe/common/common.go b/cmd/system-probe/common/common.go index ef66dd5bfb3da3..60e55c9e71896a 100644 --- a/cmd/system-probe/common/common.go +++ b/cmd/system-probe/common/common.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
-//nolint:revive // TODO(EBPF) Fix revive linter +// Package common is global variables for the system-probe process package common import ( diff --git a/cmd/system-probe/common/common_nix.go b/cmd/system-probe/common/common_nix.go index 77aa9cae82c8b5..bb50ddef3b86c2 100644 --- a/cmd/system-probe/common/common_nix.go +++ b/cmd/system-probe/common/common_nix.go @@ -8,6 +8,6 @@ package common const ( - //nolint:revive // TODO(EBPF) Fix revive linter + // DefaultLogFile is the default path to the system-probe log file DefaultLogFile = "/var/log/datadog/system-probe.log" ) diff --git a/cmd/system-probe/common/common_unsupported.go b/cmd/system-probe/common/common_unsupported.go index 81f2ac7f2bfec9..024ab6ccb3cfe7 100644 --- a/cmd/system-probe/common/common_unsupported.go +++ b/cmd/system-probe/common/common_unsupported.go @@ -8,6 +8,6 @@ package common const ( - //nolint:revive // TODO(EBPF) Fix revive linter + // DefaultLogFile is the default path to the system-probe log file DefaultLogFile = "" ) diff --git a/cmd/system-probe/common/common_windows.go b/cmd/system-probe/common/common_windows.go index 7da3eeef06321a..a88a8001241bc7 100644 --- a/cmd/system-probe/common/common_windows.go +++ b/cmd/system-probe/common/common_windows.go @@ -6,6 +6,6 @@ package common const ( - //nolint:revive // TODO(EBPF) Fix revive linter + // DefaultLogFile is the default path to the system-probe log file DefaultLogFile = "c:\\programdata\\datadog\\logs\\system-probe.log" ) diff --git a/cmd/system-probe/config/adjust.go b/cmd/system-probe/config/adjust.go index d2b8e30a01a1bc..1856aa69a791e3 100644 --- a/cmd/system-probe/config/adjust.go +++ b/cmd/system-probe/config/adjust.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
-//nolint:revive // TODO(EBPF) Fix revive linter +// Package config contains the general configuration for system-probe package config import ( diff --git a/cmd/system-probe/config/config.go b/cmd/system-probe/config/config.go index 1d8830400a72c8..100bbd1ae31ad1 100644 --- a/cmd/system-probe/config/config.go +++ b/cmd/system-probe/config/config.go @@ -95,18 +95,16 @@ func newSysprobeConfig(configPath string) (*Config, error) { // load the configuration _, err := aconfig.LoadCustom(aconfig.SystemProbe, "system-probe", optional.NewNoneOption[secrets.Component](), aconfig.Datadog.GetEnvVars()) if err != nil { - var e viper.ConfigFileNotFoundError - //nolint:revive // TODO(EBPF) Fix revive linter - if errors.As(err, &e) || errors.Is(err, os.ErrNotExist) { - // do nothing, we can ignore a missing system-probe.yaml config file - } else if errors.Is(err, fs.ErrPermission) { + if errors.Is(err, fs.ErrPermission) { // special-case permission-denied with a clearer error message if runtime.GOOS == "windows" { return nil, fmt.Errorf(`cannot access the system-probe config file (%w); try running the command in an Administrator shell"`, err) - } else { //nolint:revive // TODO(EBPF) Fix revive linter - return nil, fmt.Errorf("cannot access the system-probe config file (%w); try running the command under the same user as the Datadog Agent", err) } - } else { + return nil, fmt.Errorf("cannot access the system-probe config file (%w); try running the command under the same user as the Datadog Agent", err) + } + + var e viper.ConfigFileNotFoundError + if !errors.As(err, &e) && !errors.Is(err, os.ErrNotExist) { return nil, fmt.Errorf("unable to load system-probe config file: %w", err) } } diff --git a/cmd/system-probe/config/config_test.go b/cmd/system-probe/config/config_test.go index 051bb611c20a8a..e50baaf38f9ea8 100644 --- a/cmd/system-probe/config/config_test.go +++ b/cmd/system-probe/config/config_test.go @@ -32,34 +32,33 @@ func TestEventMonitor(t *testing.T) { newConfig(t) for 
i, tc := range []struct { - //nolint:revive // TODO(EBPF) Fix revive linter - cws, fim, process_events, network_events bool - enabled bool + cws, fim, processEvents, networkEvents bool + enabled bool }{ - {cws: false, fim: false, process_events: false, network_events: false, enabled: false}, - {cws: false, fim: false, process_events: true, network_events: false, enabled: true}, - {cws: false, fim: true, process_events: false, network_events: false, enabled: true}, - {cws: false, fim: true, process_events: true, network_events: false, enabled: true}, - {cws: true, fim: false, process_events: false, network_events: false, enabled: true}, - {cws: true, fim: false, process_events: true, network_events: false, enabled: true}, - {cws: true, fim: true, process_events: false, network_events: false, enabled: true}, - {cws: true, fim: true, process_events: true, network_events: false, enabled: true}, - {cws: false, fim: false, process_events: false, network_events: true, enabled: true}, - {cws: false, fim: false, process_events: true, network_events: true, enabled: true}, - {cws: false, fim: true, process_events: false, network_events: true, enabled: true}, - {cws: false, fim: true, process_events: true, network_events: true, enabled: true}, - {cws: true, fim: false, process_events: false, network_events: true, enabled: true}, - {cws: true, fim: false, process_events: true, network_events: true, enabled: true}, - {cws: true, fim: true, process_events: false, network_events: true, enabled: true}, - {cws: true, fim: true, process_events: true, network_events: true, enabled: true}, + {cws: false, fim: false, processEvents: false, networkEvents: false, enabled: false}, + {cws: false, fim: false, processEvents: true, networkEvents: false, enabled: true}, + {cws: false, fim: true, processEvents: false, networkEvents: false, enabled: true}, + {cws: false, fim: true, processEvents: true, networkEvents: false, enabled: true}, + {cws: true, fim: false, processEvents: false, 
networkEvents: false, enabled: true}, + {cws: true, fim: false, processEvents: true, networkEvents: false, enabled: true}, + {cws: true, fim: true, processEvents: false, networkEvents: false, enabled: true}, + {cws: true, fim: true, processEvents: true, networkEvents: false, enabled: true}, + {cws: false, fim: false, processEvents: false, networkEvents: true, enabled: true}, + {cws: false, fim: false, processEvents: true, networkEvents: true, enabled: true}, + {cws: false, fim: true, processEvents: false, networkEvents: true, enabled: true}, + {cws: false, fim: true, processEvents: true, networkEvents: true, enabled: true}, + {cws: true, fim: false, processEvents: false, networkEvents: true, enabled: true}, + {cws: true, fim: false, processEvents: true, networkEvents: true, enabled: true}, + {cws: true, fim: true, processEvents: false, networkEvents: true, enabled: true}, + {cws: true, fim: true, processEvents: true, networkEvents: true, enabled: true}, } { t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { t.Logf("%+v\n", tc) t.Setenv("DD_RUNTIME_SECURITY_CONFIG_ENABLED", strconv.FormatBool(tc.cws)) t.Setenv("DD_RUNTIME_SECURITY_CONFIG_FIM_ENABLED", strconv.FormatBool(tc.fim)) - t.Setenv("DD_SYSTEM_PROBE_EVENT_MONITORING_PROCESS_ENABLED", strconv.FormatBool(tc.process_events)) - t.Setenv("DD_SYSTEM_PROBE_EVENT_MONITORING_NETWORK_PROCESS_ENABLED", strconv.FormatBool(tc.network_events)) - t.Setenv("DD_SYSTEM_PROBE_NETWORK_ENABLED", strconv.FormatBool(tc.network_events)) + t.Setenv("DD_SYSTEM_PROBE_EVENT_MONITORING_PROCESS_ENABLED", strconv.FormatBool(tc.processEvents)) + t.Setenv("DD_SYSTEM_PROBE_EVENT_MONITORING_NETWORK_PROCESS_ENABLED", strconv.FormatBool(tc.networkEvents)) + t.Setenv("DD_SYSTEM_PROBE_NETWORK_ENABLED", strconv.FormatBool(tc.networkEvents)) cfg, err := New("/doesnotexist") t.Logf("%+v\n", cfg) diff --git a/cmd/system-probe/main.go b/cmd/system-probe/main.go index 20084530887fc7..5a40451d37a805 100644 --- a/cmd/system-probe/main.go +++ 
b/cmd/system-probe/main.go @@ -5,7 +5,6 @@ //go:build linux -//nolint:revive // TODO(EBPF) Fix revive linter package main import ( diff --git a/cmd/system-probe/main_common.go b/cmd/system-probe/main_common.go index 8fa26b5d90b085..b27a87970430ca 100644 --- a/cmd/system-probe/main_common.go +++ b/cmd/system-probe/main_common.go @@ -5,7 +5,7 @@ //go:build linux || windows -//nolint:revive // TODO(EBPF) Fix revive linter +// Package main is the entrypoint for system-probe process package main import ( diff --git a/cmd/system-probe/modules/all_linux.go b/cmd/system-probe/modules/all_linux.go index de2f5429c9427b..6b34b702dec73e 100644 --- a/cmd/system-probe/modules/all_linux.go +++ b/cmd/system-probe/modules/all_linux.go @@ -5,7 +5,7 @@ //go:build linux -//nolint:revive // TODO(EBPF) Fix revive linter +// Package modules is all the module definitions for system-probe package modules import ( @@ -27,7 +27,6 @@ var All = []module.Factory{ ComplianceModule, } -//nolint:revive // TODO(EBPF) Fix revive linter -func inactivityEventLog(duration time.Duration) { +func inactivityEventLog(_ time.Duration) { } diff --git a/cmd/system-probe/modules/all_unsupported.go b/cmd/system-probe/modules/all_unsupported.go index 22eb6ca73f7416..59e3fd37f5edd0 100644 --- a/cmd/system-probe/modules/all_unsupported.go +++ b/cmd/system-probe/modules/all_unsupported.go @@ -5,7 +5,7 @@ //go:build !linux && !windows -//nolint:revive // TODO(EBPF) Fix revive linter +// Package modules is all the module definitions for system-probe package modules import "github.com/DataDog/datadog-agent/cmd/system-probe/api/module" diff --git a/cmd/system-probe/modules/all_windows.go b/cmd/system-probe/modules/all_windows.go index fdf9b4beba52d4..554c4a3eda9392 100644 --- a/cmd/system-probe/modules/all_windows.go +++ b/cmd/system-probe/modules/all_windows.go @@ -5,7 +5,7 @@ //go:build windows -//nolint:revive // TODO(EBPF) Fix revive linter +// Package modules is all the module definitions for system-probe package 
modules import ( diff --git a/cmd/system-probe/modules/dynamic_instrumentation.go b/cmd/system-probe/modules/dynamic_instrumentation.go index bdb089772ff996..343c89e269bf34 100644 --- a/cmd/system-probe/modules/dynamic_instrumentation.go +++ b/cmd/system-probe/modules/dynamic_instrumentation.go @@ -16,7 +16,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/ebpf" ) -//nolint:revive // TODO(EBPF) Fix revive linter +// DynamicInstrumentation is the dynamic instrumentation module factory var DynamicInstrumentation = module.Factory{ Name: config.DynamicInstrumentationModule, ConfigNamespaces: []string{}, diff --git a/cmd/system-probe/modules/language_detection.go b/cmd/system-probe/modules/language_detection.go index 9e6f8ef33c85c2..6bbc6ed7d96f54 100644 --- a/cmd/system-probe/modules/language_detection.go +++ b/cmd/system-probe/modules/language_detection.go @@ -23,7 +23,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/log" ) -//nolint:revive // TODO(EBPF) Fix revive linter +// LanguageDetectionModule is the language detection module factory var LanguageDetectionModule = module.Factory{ Name: config.LanguageDetectionModule, ConfigNamespaces: []string{"language_detection"}, diff --git a/cmd/system-probe/subcommands/debug/command.go b/cmd/system-probe/subcommands/debug/command.go index 963801f8c3f524..763ad53334c871 100644 --- a/cmd/system-probe/subcommands/debug/command.go +++ b/cmd/system-probe/subcommands/debug/command.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
-//nolint:revive // TODO(EBPF) Fix revive linter +// Package debug is the debug system-probe subcommand package debug import ( diff --git a/cmd/system-probe/subcommands/modrestart/command.go b/cmd/system-probe/subcommands/modrestart/command.go index b63730250365e9..f94c7f37245672 100644 --- a/cmd/system-probe/subcommands/modrestart/command.go +++ b/cmd/system-probe/subcommands/modrestart/command.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//nolint:revive // TODO(EBPF) Fix revive linter +// Package modrestart is the module-restart system-probe subcommand package modrestart import ( diff --git a/cmd/system-probe/subcommands/run/command.go b/cmd/system-probe/subcommands/run/command.go index 0e37ba0dce1c24..37588c8f4ba741 100644 --- a/cmd/system-probe/subcommands/run/command.go +++ b/cmd/system-probe/subcommands/run/command.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
-//nolint:revive // TODO(EBPF) Fix revive linter +// Package run is the run system-probe subcommand package run import ( @@ -11,9 +11,7 @@ import ( "errors" "fmt" "net/http" - - //nolint:revive // TODO(EBPF) Fix revive linter - _ "net/http/pprof" + _ "net/http/pprof" // activate pprof profiling "os" "os/signal" "os/user" @@ -132,14 +130,14 @@ func run(log log.Component, _ config.Component, statsd compstatsd.Component, tel sigpipeCh := make(chan os.Signal, 1) signal.Notify(sigpipeCh, syscall.SIGPIPE) go func() { - //nolint:revive // TODO(EBPF) Fix revive linter + //nolint:revive for range sigpipeCh { - // do nothing + // intentionally drain channel } }() if err := startSystemProbe(cliParams, log, statsd, telemetry, sysprobeconfig, rcclient); err != nil { - if err == ErrNotEnabled { + if errors.Is(err, ErrNotEnabled) { // A sleep is necessary to ensure that supervisor registers this process as "STARTED" // If the exit is "too quick", we enter a BACKOFF->FATAL loop even though this is an expected exit // http://supervisord.org/subprocess.html#process-states diff --git a/cmd/system-probe/subcommands/subcommands.go b/cmd/system-probe/subcommands/subcommands.go index ae9b8662626d7b..dd2cdaa3b27d1a 100644 --- a/cmd/system-probe/subcommands/subcommands.go +++ b/cmd/system-probe/subcommands/subcommands.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//nolint:revive // TODO(EBPF) Fix revive linter +// Package subcommands contains the subcommands for system-probe package subcommands import ( diff --git a/cmd/system-probe/subcommands/version/command.go b/cmd/system-probe/subcommands/version/command.go index 90508b45430c5c..b2630d9aa383be 100644 --- a/cmd/system-probe/subcommands/version/command.go +++ b/cmd/system-probe/subcommands/version/command.go @@ -14,8 +14,6 @@ import ( ) // Commands returns a slice of subcommands for the 'agent' command. 
-// -//nolint:revive // TODO(EBPF) Fix revive linter -func Commands(globalParams *command.GlobalParams) []*cobra.Command { +func Commands(_ *command.GlobalParams) []*cobra.Command { return []*cobra.Command{version.MakeCommand("System Probe")} } diff --git a/cmd/system-probe/utils/limiter.go b/cmd/system-probe/utils/limiter.go index 49fc17c552ed2e..6ec6aa71f6e9f5 100644 --- a/cmd/system-probe/utils/limiter.go +++ b/cmd/system-probe/utils/limiter.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2021-present Datadog, Inc. -//nolint:revive // TODO(EBPF) Fix revive linter +// Package utils are utilities for system-probe package utils import ( diff --git a/pkg/collector/corechecks/ebpf/probe/ebpfcheck/probe.go b/pkg/collector/corechecks/ebpf/probe/ebpfcheck/probe.go index 74ac1d26ecaa83..fe1398eb8e8e58 100644 --- a/pkg/collector/corechecks/ebpf/probe/ebpfcheck/probe.go +++ b/pkg/collector/corechecks/ebpf/probe/ebpfcheck/probe.go @@ -146,10 +146,9 @@ func (k *Probe) attach(collSpec *ebpf.CollectionSpec) (err error) { spec := collSpec.Programs[name] switch prog.Type() { case ebpf.Kprobe: - //nolint:revive // TODO(EBPF) Fix revive linter - const kProbePrefix, kretprobePrefix = "kprobe/", "kretprobe/" - if strings.HasPrefix(spec.SectionName, kProbePrefix) { - attachPoint := spec.SectionName[len(kProbePrefix):] + const kprobePrefix, kretprobePrefix = "kprobe/", "kretprobe/" + if strings.HasPrefix(spec.SectionName, kprobePrefix) { + attachPoint := spec.SectionName[len(kprobePrefix):] l, err := link.Kprobe(attachPoint, prog, &link.KprobeOptions{ TraceFSPrefix: "ddebpfc", }) diff --git a/pkg/collector/corechecks/ebpf/probe/ebpfcheck/prog.go b/pkg/collector/corechecks/ebpf/probe/ebpfcheck/prog.go index 981d99cef0d7e2..26873926fe0b50 100644 --- a/pkg/collector/corechecks/ebpf/probe/ebpfcheck/prog.go +++ b/pkg/collector/corechecks/ebpf/probe/ebpfcheck/prog.go @@ -24,14 +24,14 @@ func ProgObjInfo(fd uint32, info 
*ProgInfo) error { return err } -//nolint:revive // TODO(EBPF) Fix revive linter +// ObjGetInfoByFdAttr is the attributes for the BPF_OBJ_GET_INFO_BY_FD mode of the bpf syscall type ObjGetInfoByFdAttr struct { BpfFd uint32 InfoLen uint32 Info Pointer } -//nolint:revive // TODO(EBPF) Fix revive linter +// ObjGetInfoByFd implements the BPF_OBJ_GET_INFO_BY_FD mode of the bpf syscall func ObjGetInfoByFd(attr *ObjGetInfoByFdAttr) error { _, _, errNo := unix.Syscall(unix.SYS_BPF, uintptr(unix.BPF_OBJ_GET_INFO_BY_FD), uintptr(unsafe.Pointer(attr)), unsafe.Sizeof(*attr)) if errNo != 0 { @@ -58,30 +58,27 @@ func ProgGetFdByID(attr *ProgGetFdByIDAttr) (uint32, error) { // ProgInfo corresponds to kernel C type `bpf_prog_info` type ProgInfo struct { - Type uint32 - //nolint:revive // TODO(EBPF) Fix revive linter - Id uint32 - Tag [8]uint8 - JitedProgLen uint32 - XlatedProgLen uint32 - JitedProgInsns uint64 - XlatedProgInsns Pointer - LoadTime uint64 - //nolint:revive // TODO(EBPF) Fix revive linter - CreatedByUid uint32 - NrMapIds uint32 - MapIds Pointer - Name ObjName - Ifindex uint32 - _ [4]byte /* unsupported bitfield */ - NetnsDev uint64 - NetnsIno uint64 - NrJitedKsyms uint32 - NrJitedFuncLens uint32 - JitedKsyms uint64 - JitedFuncLens uint64 - //nolint:revive // TODO(EBPF) Fix revive linter - BtfId BTFID + Type uint32 + ID uint32 + Tag [8]uint8 + JitedProgLen uint32 + XlatedProgLen uint32 + JitedProgInsns uint64 + XlatedProgInsns Pointer + LoadTime uint64 + CreatedByUID uint32 + NrMapIds uint32 + MapIds Pointer + Name ObjName + Ifindex uint32 + _ [4]byte /* unsupported bitfield */ + NetnsDev uint64 + NetnsIno uint64 + NrJitedKsyms uint32 + NrJitedFuncLens uint32 + JitedKsyms uint64 + JitedFuncLens uint64 + BtfID BTFID FuncInfoRecSize uint32 FuncInfo uint64 NrFuncInfo uint32 diff --git a/pkg/collector/corechecks/ebpf/probe/ebpfcheck/utils.go b/pkg/collector/corechecks/ebpf/probe/ebpfcheck/utils.go index 672354b408916e..36b5a30bc264c7 100644 --- 
a/pkg/collector/corechecks/ebpf/probe/ebpfcheck/utils.go +++ b/pkg/collector/corechecks/ebpf/probe/ebpfcheck/utils.go @@ -42,13 +42,3 @@ func pageAlign[T constraints.Integer](x T) T { func align[T constraints.Integer](x, a T) T { return (x + (a - 1)) & ^(a - 1) } - -// generic slice deletion at position i without concern for slice order -// -//nolint:unused // TODO(EBPF) Fix unused linter -func deleteAtNoOrder[S ~[]E, E any](s S, i int) S { - s[i] = s[len(s)-1] - // use zero value here to ensure no memory leaks - s[len(s)-1] = *new(E) - return s[:len(s)-1] -} diff --git a/pkg/ebpf/bytecode/asset_reader.go b/pkg/ebpf/bytecode/asset_reader.go index a0d8c2080358fd..885f1a60d15ef7 100644 --- a/pkg/ebpf/bytecode/asset_reader.go +++ b/pkg/ebpf/bytecode/asset_reader.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//nolint:revive // TODO(EBPF) Fix revive linter +// Package bytecode contains types and functions for eBPF bytecode package bytecode import ( diff --git a/pkg/ebpf/bytecode/runtime/all_helpers.go b/pkg/ebpf/bytecode/runtime/all_helpers.go index 50fd1858239a10..485e07133b7b73 100644 --- a/pkg/ebpf/bytecode/runtime/all_helpers.go +++ b/pkg/ebpf/bytecode/runtime/all_helpers.go @@ -5,7 +5,7 @@ //go:build linux_bpf -//nolint:revive // TODO(EBPF) Fix revive linter +// Package runtime is for runtime compilation related types and functions package runtime // updated as of Linux v6.0 commit 4fe89d07dcc2804c8b562f6c7896a45643d34b2f diff --git a/pkg/ebpf/bytecode/runtime/asset.go b/pkg/ebpf/bytecode/runtime/asset.go index 998ac217dd4165..2d0812368b5b2f 100644 --- a/pkg/ebpf/bytecode/runtime/asset.go +++ b/pkg/ebpf/bytecode/runtime/asset.go @@ -51,7 +51,7 @@ func (a *asset) Compile(config *ebpf.Config, additionalFlags []string, client st } }() - opts := kernel.KernelHeaderOptions{ + opts := kernel.HeaderOptions{ DownloadEnabled: config.EnableKernelHeaderDownload, Dirs: 
config.KernelHeadersDirs, DownloadDir: config.KernelHeadersDownloadDir, diff --git a/pkg/ebpf/bytecode/runtime/generated_asset.go b/pkg/ebpf/bytecode/runtime/generated_asset.go index 622a6221fd7389..59f4153741a7e5 100644 --- a/pkg/ebpf/bytecode/runtime/generated_asset.go +++ b/pkg/ebpf/bytecode/runtime/generated_asset.go @@ -20,7 +20,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/log" ) -//nolint:revive // TODO(EBPF) Fix revive linter +// ConstantFetcher is the generated asset for constant_fetcher.c var ConstantFetcher = newGeneratedAsset("constant_fetcher.c") // generatedAsset represents an asset whose contents will be dynamically generated at runtime @@ -50,7 +50,7 @@ func (a *generatedAsset) Compile(config *ebpf.Config, inputCode string, addition } }() - opts := kernel.KernelHeaderOptions{ + opts := kernel.HeaderOptions{ DownloadEnabled: config.EnableKernelHeaderDownload, Dirs: config.KernelHeadersDirs, DownloadDir: config.KernelHeadersDownloadDir, diff --git a/pkg/ebpf/bytecode/runtime/helpers_test.go b/pkg/ebpf/bytecode/runtime/helpers_test.go index 87bacedc218226..33f6fa044b9b52 100644 --- a/pkg/ebpf/bytecode/runtime/helpers_test.go +++ b/pkg/ebpf/bytecode/runtime/helpers_test.go @@ -30,7 +30,7 @@ func TestGetAvailableHelpers(t *testing.T) { } cfg := ebpf.NewConfig() - opts := kernel.KernelHeaderOptions{ + opts := kernel.HeaderOptions{ DownloadEnabled: cfg.EnableKernelHeaderDownload, Dirs: cfg.KernelHeadersDirs, DownloadDir: cfg.KernelHeadersDownloadDir, diff --git a/pkg/ebpf/bytecode/runtime/protected_file.go b/pkg/ebpf/bytecode/runtime/protected_file.go index 555aaf58b550df..a59224f09cf191 100644 --- a/pkg/ebpf/bytecode/runtime/protected_file.go +++ b/pkg/ebpf/bytecode/runtime/protected_file.go @@ -13,13 +13,12 @@ import ( "os" "path/filepath" - "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/justincormack/go-memfd" + + "github.com/DataDog/datadog-agent/pkg/util/log" ) -// This represent a symlink to a sealed ram-backed file -// 
-//nolint:revive // TODO(EBPF) Fix revive linter +// ProtectedFile represents a symlink to a sealed ram-backed file type ProtectedFile interface { Close() error Reader() io.Reader @@ -31,9 +30,7 @@ type ramBackedFile struct { file *memfd.Memfd } -// This function returns a sealed ram backed file -// -//nolint:revive // TODO(EBPF) Fix revive linter +// NewProtectedFile returns a sealed ram backed file func NewProtectedFile(name, dir string, source io.Reader) (ProtectedFile, error) { var err error @@ -66,8 +63,7 @@ func NewProtectedFile(name, dir string, source io.Reader) (ProtectedFile, error) return nil, fmt.Errorf("failed to create symlink %s from target %s: %w", tmpFile, target, err) } - //nolint:staticcheck // TODO(EBPF) Fix staticcheck linter - if _, err := memfdFile.Seek(0, os.SEEK_SET); err != nil { + if _, err := memfdFile.Seek(0, io.SeekStart); err != nil { return nil, fmt.Errorf("failed to reset cursor: %w", err) } @@ -95,8 +91,7 @@ func setupSourceInfoFile(source io.Reader, path string) error { func (m *ramBackedFile) Close() error { os.Remove(m.symlink) - //nolint:staticcheck // TODO(EBPF) Fix staticcheck linter - if _, err := m.file.Seek(0, os.SEEK_SET); err != nil { + if _, err := m.file.Seek(0, io.SeekStart); err != nil { log.Debug(err) } if err := setupSourceInfoFile(m.file, m.symlink); err != nil { diff --git a/pkg/ebpf/bytecode/runtime/runtime_compilation_helpers.go b/pkg/ebpf/bytecode/runtime/runtime_compilation_helpers.go index 9f5b74f231c0a9..58b4a961ec2ceb 100644 --- a/pkg/ebpf/bytecode/runtime/runtime_compilation_helpers.go +++ b/pkg/ebpf/bytecode/runtime/runtime_compilation_helpers.go @@ -24,7 +24,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/log" ) -//nolint:revive // TODO(EBPF) Fix revive linter +// CompiledOutput is the interface for a compiled output from runtime compilation type CompiledOutput interface { io.Reader io.ReaderAt diff --git a/pkg/ebpf/cgo/genpost.go b/pkg/ebpf/cgo/genpost.go index 6186df1e1760b2..e21570299ab501 
100644 --- a/pkg/ebpf/cgo/genpost.go +++ b/pkg/ebpf/cgo/genpost.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//nolint:revive // TODO(EBPF) Fix revive linter +// Package main is the program to fixup cgo generated types package main import ( diff --git a/pkg/ebpf/common.go b/pkg/ebpf/common.go index 7a0f9071ac28a6..cc6c913eaa83e0 100644 --- a/pkg/ebpf/common.go +++ b/pkg/ebpf/common.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//nolint:revive // TODO(EBPF) Fix revive linter +// Package ebpf contains general eBPF related types and functions package ebpf import ( diff --git a/pkg/ebpf/compiler/compiler.go b/pkg/ebpf/compiler/compiler.go index 46f2e5dec6ed3d..55b88d342d0d41 100644 --- a/pkg/ebpf/compiler/compiler.go +++ b/pkg/ebpf/compiler/compiler.go @@ -5,7 +5,7 @@ //go:build linux_bpf -//nolint:revive // TODO(EBPF) Fix revive linter +// Package compiler is the runtime compiler for eBPF package compiler import ( @@ -92,14 +92,14 @@ func CompileToObjectFile(inFile, outputFile string, cflags []string, headerDirs return nil } -//nolint:revive // TODO(EBPF) Fix revive linter +// WithStdin assigns the provided io.Reader as Stdin for the command func WithStdin(in io.Reader) func(*exec.Cmd) { return func(c *exec.Cmd) { c.Stdin = in } } -//nolint:revive // TODO(EBPF) Fix revive linter +// WithStdout assigns the provided io.Writer as Stdout for the command func WithStdout(out io.Writer) func(*exec.Cmd) { return func(c *exec.Cmd) { c.Stdout = out diff --git a/pkg/ebpf/debugfs_stat_collector_linux.go b/pkg/ebpf/debugfs_stat_collector_linux.go index 02684e60298639..b611b44479540c 100644 --- a/pkg/ebpf/debugfs_stat_collector_linux.go +++ b/pkg/ebpf/debugfs_stat_collector_linux.go @@ -19,8 +19,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/log" ) -//nolint:revive // 
TODO(EBPF) Fix revive linter -const kProbeTelemetryName = "ebpf__probes" +const kprobeTelemetryName = "ebpf__probes" type profileType byte @@ -82,8 +81,8 @@ func NewDebugFsStatCollector() prometheus.Collector { return &NoopDebugFsStatCollector{} } return &DebugFsStatCollector{ - hits: prometheus.NewDesc(kProbeTelemetryName+"__hits", "Counter tracking number of probe hits", []string{"probe_name", "probe_type"}, nil), - misses: prometheus.NewDesc(kProbeTelemetryName+"__misses", "Counter tracking number of probe misses", []string{"probe_name", "probe_type"}, nil), + hits: prometheus.NewDesc(kprobeTelemetryName+"__hits", "Counter tracking number of probe hits", []string{"probe_name", "probe_type"}, nil), + misses: prometheus.NewDesc(kprobeTelemetryName+"__misses", "Counter tracking number of probe misses", []string{"probe_name", "probe_type"}, nil), lastProbeStats: make(map[eventKey]int), tracefsRoot: root, } diff --git a/pkg/ebpf/ebpftest/bpfdebug_linux.go b/pkg/ebpf/ebpftest/bpfdebug_linux.go index 877a870a180cf7..9aa7674db03429 100644 --- a/pkg/ebpf/ebpftest/bpfdebug_linux.go +++ b/pkg/ebpf/ebpftest/bpfdebug_linux.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
-//nolint:revive // TODO(EBPF) Fix revive linter +// Package ebpftest is utilities for tests against eBPF package ebpftest import ( diff --git a/pkg/ebpf/ebpftest/bpfdebug_unsupported.go b/pkg/ebpf/ebpftest/bpfdebug_unsupported.go index e8f77b46ed6e5c..74097c8878c239 100644 --- a/pkg/ebpf/ebpftest/bpfdebug_unsupported.go +++ b/pkg/ebpf/ebpftest/bpfdebug_unsupported.go @@ -5,7 +5,7 @@ //go:build !linux -//nolint:revive // TODO(EBPF) Fix revive linter +// Package ebpftest is utilities for tests against eBPF package ebpftest import "testing" diff --git a/pkg/ebpf/ebpftest/buildmode.go b/pkg/ebpf/ebpftest/buildmode.go index f4b5c46ef30f8b..4a2a990dfc2551 100644 --- a/pkg/ebpf/ebpftest/buildmode.go +++ b/pkg/ebpf/ebpftest/buildmode.go @@ -24,7 +24,7 @@ func init() { Fentry = fentry{} } -//nolint:revive // TODO(EBPF) Fix revive linter +// BuildMode is an eBPF build mode type BuildMode interface { fmt.Stringer Env() map[string]string diff --git a/pkg/ebpf/ebpftest/buildmode_linux.go b/pkg/ebpf/ebpftest/buildmode_linux.go index 761c1eaddf8be6..e68bcf4ac0192a 100644 --- a/pkg/ebpf/ebpftest/buildmode_linux.go +++ b/pkg/ebpf/ebpftest/buildmode_linux.go @@ -22,7 +22,7 @@ func init() { hostinfo, _ = host.Info() } -//nolint:revive // TODO(EBPF) Fix revive linter +// SupportedBuildModes returns the build modes supported on the current host func SupportedBuildModes() []BuildMode { modes := []BuildMode{Prebuilt, RuntimeCompiled, CORE} if os.Getenv("TEST_FENTRY_OVERRIDE") == "true" || (runtime.GOARCH == "amd64" && (hostinfo.Platform == "amazon" || hostinfo.Platform == "amzn") && kv.Major() == 5 && kv.Minor() == 10) { @@ -31,14 +31,14 @@ func SupportedBuildModes() []BuildMode { return modes } -//nolint:revive // TODO(EBPF) Fix revive linter +// TestBuildModes runs the test under all the provided build modes func TestBuildModes(t *testing.T, modes []BuildMode, name string, fn func(t *testing.T)) { for _, mode := range modes { TestBuildMode(t, mode, name, fn) } } -//nolint:revive // 
TODO(EBPF) Fix revive linter +// TestBuildMode runs the test under the provided build mode func TestBuildMode(t *testing.T, mode BuildMode, name string, fn func(t *testing.T)) { t.Run(mode.String(), func(t *testing.T) { for k, v := range mode.Env() { diff --git a/pkg/ebpf/ebpftest/buildmode_windows.go b/pkg/ebpf/ebpftest/buildmode_windows.go index 770a28bcbd26ad..7a0da5e343c161 100644 --- a/pkg/ebpf/ebpftest/buildmode_windows.go +++ b/pkg/ebpf/ebpftest/buildmode_windows.go @@ -7,18 +7,18 @@ package ebpftest import "testing" -//nolint:revive // TODO(EBPF) Fix revive linter +// SupportedBuildModes returns the build modes supported on the current host func SupportedBuildModes() []BuildMode { return []BuildMode{Prebuilt} } -//nolint:revive // TODO(EBPF) Fix revive linter +// TestBuildModes runs the test under all the provided build modes func TestBuildModes(t *testing.T, modes []BuildMode, name string, fn func(t *testing.T)) { //nolint:revive // TODO fix revive unused-parameter // ignore provided modes and only use prebuilt TestBuildMode(t, Prebuilt, name, fn) } -//nolint:revive // TODO(EBPF) Fix revive linter +// TestBuildMode runs the test under the provided build mode func TestBuildMode(t *testing.T, mode BuildMode, name string, fn func(t *testing.T)) { if mode != Prebuilt { t.Skipf("unsupported build mode %s", mode) diff --git a/pkg/ebpf/ebpftest/log.go b/pkg/ebpf/ebpftest/log.go index 7eb02542f3bc14..33ceef7aa3f9ba 100644 --- a/pkg/ebpf/ebpftest/log.go +++ b/pkg/ebpf/ebpftest/log.go @@ -14,7 +14,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/log" ) -//nolint:revive // TODO(EBPF) Fix revive linter +// LogLevel sets the logger level for this test only func LogLevel(t testing.TB, level string) { t.Cleanup(func() { log.SetupLogger(seelog.Default, "off") diff --git a/pkg/ebpf/perf.go b/pkg/ebpf/perf.go index 3254c33a0b3443..cf362331a32cfc 100644 --- a/pkg/ebpf/perf.go +++ b/pkg/ebpf/perf.go @@ -55,9 +55,7 @@ func NewPerfHandler(dataChannelSize int) 
*PerfHandler { } // LostHandler is the callback intended to be used when configuring PerfMapOptions -// -//nolint:revive // TODO(EBPF) Fix revive linter -func (c *PerfHandler) LostHandler(CPU int, lostCount uint64, perfMap *manager.PerfMap, manager *manager.Manager) { +func (c *PerfHandler) LostHandler(_ int, lostCount uint64, _ *manager.PerfMap, _ *manager.Manager) { if c.closed { return } @@ -65,9 +63,7 @@ func (c *PerfHandler) LostHandler(CPU int, lostCount uint64, perfMap *manager.Pe } // RecordHandler is the callback intended to be used when configuring PerfMapOptions -// -//nolint:revive // TODO(EBPF) Fix revive linter -func (c *PerfHandler) RecordHandler(record *perf.Record, perfMap *manager.PerfMap, manager *manager.Manager) { +func (c *PerfHandler) RecordHandler(record *perf.Record, _ *manager.PerfMap, _ *manager.Manager) { if c.closed { return } diff --git a/pkg/network/telemetry/stat_counter_wrapper.go b/pkg/network/telemetry/stat_counter_wrapper.go index 5a6be346e76915..de1e4f948dfee4 100644 --- a/pkg/network/telemetry/stat_counter_wrapper.go +++ b/pkg/network/telemetry/stat_counter_wrapper.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//nolint:revive // TODO(EBPF) Fix revive linter +// Package telemetry provides types and functions for internal telemetry package telemetry import ( @@ -19,29 +19,30 @@ type StatCounterWrapper struct { counter telemetry.Counter } -//nolint:revive // TODO(EBPF) Fix revive linter +// Inc increments the counter with the given tags value. func (sgw *StatCounterWrapper) Inc(tags ...string) { sgw.stat.Inc() sgw.counter.Inc(tags...) } -//nolint:revive // TODO(EBPF) Fix revive linter +// Delete deletes the value for the counter with the given tags value. 
func (sgw *StatCounterWrapper) Delete() { sgw.stat.Store(0) sgw.counter.Delete() } -//nolint:revive // TODO(EBPF) Fix revive linter +// Add adds the given value to the counter with the given tags value. func (sgw *StatCounterWrapper) Add(v int64, tags ...string) { sgw.stat.Add(v) sgw.counter.Add(float64(v), tags...) } -//nolint:revive // TODO(EBPF) Fix revive linter +// Load atomically loads the wrapped value. func (sgw *StatCounterWrapper) Load() int64 { return sgw.stat.Load() } +// NewStatCounterWrapper returns a new StatCounterWrapper func NewStatCounterWrapper(subsystem string, statName string, tags []string, description string) *StatCounterWrapper { return &StatCounterWrapper{ stat: atomic.NewInt64(0), diff --git a/pkg/network/telemetry/stat_gauge_wrapper.go b/pkg/network/telemetry/stat_gauge_wrapper.go index 746de999ec96f0..3c0ae4db4254ff 100644 --- a/pkg/network/telemetry/stat_gauge_wrapper.go +++ b/pkg/network/telemetry/stat_gauge_wrapper.go @@ -18,36 +18,36 @@ type StatGaugeWrapper struct { gauge telemetry.Gauge } -//nolint:revive // TODO(EBPF) Fix revive linter +// Inc increments the Gauge value. func (sgw *StatGaugeWrapper) Inc() { sgw.stat.Inc() sgw.gauge.Inc() } -//nolint:revive // TODO(EBPF) Fix revive linter +// Dec decrements the Gauge value. func (sgw *StatGaugeWrapper) Dec() { sgw.stat.Dec() sgw.gauge.Dec() } -//nolint:revive // TODO(EBPF) Fix revive linter +// Add adds the value to the Gauge value. func (sgw *StatGaugeWrapper) Add(v int64) { sgw.stat.Add(v) sgw.gauge.Add(float64(v)) } -//nolint:revive // TODO(EBPF) Fix revive linter +// Set stores the value for the given tags. func (sgw *StatGaugeWrapper) Set(v int64) { sgw.stat.Store(v) sgw.gauge.Set(float64(v)) } -//nolint:revive // TODO(EBPF) Fix revive linter +// Load atomically loads the wrapped value. 
func (sgw *StatGaugeWrapper) Load() int64 { return sgw.stat.Load() } -//nolint:revive // TODO(EBPF) Fix revive linter +// NewStatGaugeWrapper returns a new StatGaugeWrapper func NewStatGaugeWrapper(subsystem string, statName string, tags []string, description string) *StatGaugeWrapper { return &StatGaugeWrapper{ stat: atomic.NewInt64(0), diff --git a/pkg/util/kernel/arch.go b/pkg/util/kernel/arch.go index 9a9b724a79b8ab..e7b6558accbe87 100644 --- a/pkg/util/kernel/arch.go +++ b/pkg/util/kernel/arch.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//nolint:revive // TODO(EBPF) Fix revive linter +// Package kernel is utilities for the Linux kernel package kernel import "runtime" diff --git a/pkg/util/kernel/download_headers_test.go b/pkg/util/kernel/download_headers_test.go index 81ae433d88a29c..239306544cf1a1 100644 --- a/pkg/util/kernel/download_headers_test.go +++ b/pkg/util/kernel/download_headers_test.go @@ -102,8 +102,7 @@ var targets = map[string]TargetSetup{ }, } -//nolint:revive // TODO(EBPF) Fix revive linter -func setup(target types.Target, repos []string, dname string) error { +func setup(_ types.Target, repos []string, dname string) error { // Make source-list.d sources := fmt.Sprintf(reposSourceDir, dname) if err := os.MkdirAll(sources, 0744); err != nil { diff --git a/pkg/util/kernel/find_headers.go b/pkg/util/kernel/find_headers.go index 64ea8bb44e279c..1937e4fadcb8d4 100644 --- a/pkg/util/kernel/find_headers.go +++ b/pkg/util/kernel/find_headers.go @@ -78,8 +78,8 @@ type headerProvider struct { kernelHeaders []string } -//nolint:revive // TODO(EBPF) Fix revive linter -type KernelHeaderOptions struct { +// HeaderOptions are options for the kernel header download process +type HeaderOptions struct { DownloadEnabled bool Dirs []string DownloadDir string @@ -89,7 +89,7 @@ type KernelHeaderOptions struct { ZypperReposDir string } -func initProvider(opts 
KernelHeaderOptions) { +func initProvider(opts HeaderOptions) { HeaderProvider = &headerProvider{ downloadEnabled: opts.DownloadEnabled, headerDirs: opts.Dirs, @@ -114,7 +114,7 @@ func initProvider(opts KernelHeaderOptions) { // Any subsequent calls to GetKernelHeaders will return the result of the first call. This is because // kernel header downloading can be a resource intensive process, so we don't want to retry it an unlimited // number of times. -func GetKernelHeaders(opts KernelHeaderOptions, client statsd.ClientInterface) []string { +func GetKernelHeaders(opts HeaderOptions, client statsd.ClientInterface) []string { providerMu.Lock() defer providerMu.Unlock() diff --git a/pkg/util/kernel/find_headers_test.go b/pkg/util/kernel/find_headers_test.go index 9529bdad16679c..9de3abd0684456 100644 --- a/pkg/util/kernel/find_headers_test.go +++ b/pkg/util/kernel/find_headers_test.go @@ -20,7 +20,7 @@ func TestGetKernelHeaders(t *testing.T) { t.Skip("set INTEGRATION environment variable to run") } - opts := KernelHeaderOptions{} + opts := HeaderOptions{} dirs := GetKernelHeaders(opts, nil) assert.NotZero(t, len(dirs), "expected to find header directories") t.Log(dirs) diff --git a/pkg/util/kernel/fs_nolinux.go b/pkg/util/kernel/fs_nolinux.go index f2e710276ccd07..244924539ea142 100644 --- a/pkg/util/kernel/fs_nolinux.go +++ b/pkg/util/kernel/fs_nolinux.go @@ -9,12 +9,12 @@ package kernel import "github.com/DataDog/datadog-agent/pkg/util/funcs" -//nolint:revive // TODO(EBPF) Fix revive linter +// ProcFSRoot is the path to procfs var ProcFSRoot = funcs.MemoizeNoError(func() string { return "" }) -//nolint:revive // TODO(EBPF) Fix revive linter +// SysFSRoot is the path to sysfs var SysFSRoot = funcs.MemoizeNoError(func() string { return "" }) diff --git a/test/new-e2e/scenarios/system-probe/main.go b/test/new-e2e/scenarios/system-probe/main.go index 5d33c87a901119..2ac9154d456e1f 100644 --- a/test/new-e2e/scenarios/system-probe/main.go +++ 
b/test/new-e2e/scenarios/system-probe/main.go @@ -5,7 +5,7 @@ //go:build !windows -//nolint:revive // TODO(EBPF) Fix revive linter +// Package main is the entrypoint for the system-probe e2e testing scenario package main import ( @@ -18,10 +18,9 @@ import ( systemProbe "github.com/DataDog/datadog-agent/test/new-e2e/system-probe" ) -var DD_AGENT_TESTING_DIR = os.Getenv("DD_AGENT_TESTING_DIR") var defaultVMConfigPath = filepath.Join(".", "system-probe", "config", "vmconfig.json") -func run(envName, x86InstanceType, armInstanceType string, destroy bool, opts *systemProbe.SystemProbeEnvOpts) error { +func run(envName, x86InstanceType, armInstanceType string, destroy bool, opts *systemProbe.EnvOpts) error { if destroy { return systemProbe.Destroy(envName) } @@ -49,7 +48,7 @@ func main() { sshKeyFile := flag.String("ssh-key-path", "", "path of private ssh key for ec2 instances") sshKeyName := flag.String("ssh-key-name", "", "name of ssh key pair to use for ec2 instances") infraEnv := flag.String("infra-env", "", "name of infra env to use") - dependenciesDirectoryPtr := flag.String("dependencies-dir", DD_AGENT_TESTING_DIR, "directory where dependencies package is present") + dependenciesDirectoryPtr := flag.String("dependencies-dir", os.Getenv("DD_AGENT_TESTING_DIR"), "directory where dependencies package is present") vmconfigPathPtr := flag.String("vmconfig", defaultVMConfigPath, "vmconfig path") local := flag.Bool("local", false, "is scenario running locally") runAgentPtr := flag.Bool("run-agent", false, "Run datadog agent on the metal instance") @@ -62,7 +61,7 @@ func main() { failOnMissing = true } - opts := systemProbe.SystemProbeEnvOpts{ + opts := systemProbe.EnvOpts{ X86AmiID: *x86AmiIDPtr, ArmAmiID: *armAmiIDPtr, ShutdownPeriod: *shutdownPtr, diff --git a/test/new-e2e/system-probe/system-probe-test-env.go b/test/new-e2e/system-probe/system-probe-test-env.go index ad6b9cff2b0c75..e19cbb749626e4 100644 --- a/test/new-e2e/system-probe/system-probe-test-env.go +++ 
b/test/new-e2e/system-probe/system-probe-test-env.go @@ -10,8 +10,7 @@ package systemprobe import ( "context" - //nolint:revive // TODO(EBPF) Fix revive linter - _ "embed" + _ "embed" // embed files used in this scenario "fmt" "os" "path/filepath" @@ -31,35 +30,26 @@ import ( ) const ( - //nolint:revive // TODO(EBPF) Fix revive linter - AgentQAPrimaryAZ = "subnet-03061a1647c63c3c3" - //nolint:revive // TODO(EBPF) Fix revive linter - AgentQASecondaryAZ = "subnet-0f1ca3e929eb3fb8b" - //nolint:revive // TODO(EBPF) Fix revive linter - AgentQABackupAZ = "subnet-071213aedb0e1ae54" - - //nolint:revive // TODO(EBPF) Fix revive linter - SandboxPrimaryAz = "subnet-b89e00e2" - //nolint:revive // TODO(EBPF) Fix revive linter - SandboxSecondaryAz = "subnet-8ee8b1c6" - //nolint:revive // TODO(EBPF) Fix revive linter - SandboxBackupAz = "subnet-3f5db45b" - - //nolint:revive // TODO(EBPF) Fix revive linter - DatadogAgentQAEnv = "aws/agent-qa" - //nolint:revive // TODO(EBPF) Fix revive linter - SandboxEnv = "aws/sandbox" - //nolint:revive // TODO(EBPF) Fix revive linter - EC2TagsEnvVar = "RESOURCE_TAGS" + agentQAPrimaryAZ = "subnet-03061a1647c63c3c3" + agentQASecondaryAZ = "subnet-0f1ca3e929eb3fb8b" + agentQABackupAZ = "subnet-071213aedb0e1ae54" + + sandboxPrimaryAz = "subnet-b89e00e2" + sandboxSecondaryAz = "subnet-8ee8b1c6" + sandboxBackupAz = "subnet-3f5db45b" + + datadogAgentQAEnv = "aws/agent-qa" + sandboxEnv = "aws/sandbox" + ec2TagsEnvVar = "RESOURCE_TAGS" ) var availabilityZones = map[string][]string{ - DatadogAgentQAEnv: {AgentQAPrimaryAZ, AgentQASecondaryAZ, AgentQABackupAZ}, - SandboxEnv: {SandboxPrimaryAz, SandboxSecondaryAz, SandboxBackupAz}, + datadogAgentQAEnv: {agentQAPrimaryAZ, agentQASecondaryAZ, agentQABackupAZ}, + sandboxEnv: {sandboxPrimaryAz, sandboxSecondaryAz, sandboxBackupAz}, } -//nolint:revive // TODO(EBPF) Fix revive linter -type SystemProbeEnvOpts struct { +// EnvOpts are the options for the system-probe scenario +type EnvOpts struct { X86AmiID string 
ArmAmiID string SSHKeyPath string @@ -76,7 +66,7 @@ type SystemProbeEnvOpts struct { AgentVersion string } -//nolint:revive // TODO(EBPF) Fix revive linter +// TestEnv represents options for a particular test environment type TestEnv struct { context context.Context name string @@ -87,17 +77,13 @@ type TestEnv struct { } var ( - //nolint:revive // TODO(EBPF) Fix revive linter - MicroVMsDependenciesPath = filepath.Join("/", "opt", "kernel-version-testing", "dependencies-%s.tar.gz") - //nolint:revive // TODO(EBPF) Fix revive linter - CustomAMIWorkingDir = filepath.Join("/", "home", "kernel-version-testing") + customAMIWorkingDir = filepath.Join("/", "home", "kernel-version-testing") - //nolint:revive // TODO(EBPF) Fix revive linter - CI_PROJECT_DIR = GetEnv("CI_PROJECT_DIR", "/tmp") - sshKeyX86 = GetEnv("LibvirtSSHKeyX86", "/tmp/libvirt_rsa-x86_64") - sshKeyArm = GetEnv("LibvirtSSHKeyARM", "/tmp/libvirt_rsa-arm64") + ciProjectDir = getEnv("CI_PROJECT_DIR", "/tmp") + sshKeyX86 = getEnv("LibvirtSSHKeyX86", "/tmp/libvirt_rsa-x86_64") + sshKeyArm = getEnv("LibvirtSSHKeyARM", "/tmp/libvirt_rsa-arm64") - stackOutputs = filepath.Join(CI_PROJECT_DIR, "stack.output") + stackOutputs = filepath.Join(ciProjectDir, "stack.output") ) func outputsToFile(output auto.OutputMap) error { @@ -119,8 +105,7 @@ func outputsToFile(output auto.OutputMap) error { return f.Sync() } -//nolint:revive // TODO(EBPF) Fix revive linter -func GetEnv(key, fallback string) string { +func getEnv(key, fallback string) string { if value, ok := os.LookupEnv(key); ok { return value } @@ -157,8 +142,8 @@ func getAvailabilityZone(env string, azIndx int) string { return "" } -//nolint:revive // TODO(EBPF) Fix revive linter -func NewTestEnv(name, x86InstanceType, armInstanceType string, opts *SystemProbeEnvOpts) (*TestEnv, error) { +// NewTestEnv creates a new test environment +func NewTestEnv(name, x86InstanceType, armInstanceType string, opts *EnvOpts) (*TestEnv, error) { var err error var sudoPassword string 
@@ -178,7 +163,7 @@ func NewTestEnv(name, x86InstanceType, armInstanceType string, opts *SystemProbe sudoPassword = "" } - apiKey := GetEnv("DD_API_KEY", "") + apiKey := getEnv("DD_API_KEY", "") if opts.RunAgent && apiKey == "" { return nil, fmt.Errorf("No API Key for datadog-agent provided") } @@ -200,7 +185,7 @@ func NewTestEnv(name, x86InstanceType, armInstanceType string, opts *SystemProbe "microvm:provision": auto.ConfigValue{Value: strconv.FormatBool(opts.Provision)}, "microvm:x86AmiID": auto.ConfigValue{Value: opts.X86AmiID}, "microvm:arm64AmiID": auto.ConfigValue{Value: opts.ArmAmiID}, - "microvm:workingDir": auto.ConfigValue{Value: CustomAMIWorkingDir}, + "microvm:workingDir": auto.ConfigValue{Value: customAMIWorkingDir}, "ddagent:deploy": auto.ConfigValue{Value: strconv.FormatBool(opts.RunAgent)}, "ddagent:apiKey": auto.ConfigValue{Value: apiKey, Secret: true}, } @@ -221,7 +206,7 @@ func NewTestEnv(name, x86InstanceType, armInstanceType string, opts *SystemProbe config["ddagent:version"] = auto.ConfigValue{Value: opts.AgentVersion} } - if envVars := GetEnv(EC2TagsEnvVar, ""); envVars != "" { + if envVars := getEnv(ec2TagsEnvVar, ""); envVars != "" { config["ddinfra:extraResourcesTags"] = auto.ConfigValue{Value: envVars} } @@ -268,12 +253,12 @@ func NewTestEnv(name, x86InstanceType, armInstanceType string, opts *SystemProbe return systemProbeTestEnv, nil } -//nolint:revive // TODO(EBPF) Fix revive linter +// Destroy deletes the stack with the provided name func Destroy(name string) error { return infra.GetStackManager().DeleteStack(context.Background(), name) } -//nolint:revive // TODO(EBPF) Fix revive linter +// RemoveStack removes the stack configuration with the provided name func (env *TestEnv) RemoveStack() error { return infra.GetStackManager().ForceRemoveStackConfiguration(env.context, env.name) } diff --git a/test/new-e2e/system-probe/test-json-review/main.go b/test/new-e2e/system-probe/test-json-review/main.go index 4f884742443b81..afe80ded2aecf2 
100644 --- a/test/new-e2e/system-probe/test-json-review/main.go +++ b/test/new-e2e/system-probe/test-json-review/main.go @@ -5,7 +5,7 @@ //go:build linux -//nolint:revive // TODO(EBPF) Fix revive linter +// Package main is the test-json-review tool which reports all failed tests from the test JSON output package main import ( diff --git a/test/new-e2e/system-probe/test-runner/main.go b/test/new-e2e/system-probe/test-runner/main.go index 4b098dddc30316..6099758cb12e68 100644 --- a/test/new-e2e/system-probe/test-runner/main.go +++ b/test/new-e2e/system-probe/test-runner/main.go @@ -5,7 +5,7 @@ //go:build linux -//nolint:revive // TODO(EBPF) Fix revive linter +// Package main is the test-runner tool which runs the system-probe tests package main import ( @@ -136,16 +136,15 @@ func buildCommandArgs(pkg string, xmlpath string, jsonpath string, file string, // concatenateJsons combines all the test json output files into a single file. func concatenateJsons(indir, outdir string) error { - //nolint:revive // TODO(EBPF) Fix revive linter - testJsonFile := filepath.Join(outdir, "out.json") + testJSONFile := filepath.Join(outdir, "out.json") matches, err := glob(indir, `.*\.json`, func(path string) bool { return true }) if err != nil { return fmt.Errorf("json glob: %s", err) } - f, err := os.OpenFile(testJsonFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o666) + f, err := os.OpenFile(testJSONFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o666) if err != nil { - return fmt.Errorf("open %s: %s", testJsonFile, err) + return fmt.Errorf("open %s: %s", testJSONFile, err) } defer f.Close() From 8afcd59e794ef67e6c3309a10a9ef658a6b43ae9 Mon Sep 17 00:00:00 2001 From: Nenad Noveljic <18366081+nenadnoveljic@users.noreply.github.com> Date: Fri, 15 Dec 2023 22:31:48 +0100 Subject: [PATCH 22/66] obfuscation error false positive (#21594) --- pkg/collector/corechecks/oracle-dbm/statements.go | 5 +++-- ...racle-obuscation-error-false-207d782244887115.yaml | 11 +++++++++++ 2 files changed, 14 
insertions(+), 2 deletions(-) create mode 100644 releasenotes/notes/oracle-obuscation-error-false-207d782244887115.yaml diff --git a/pkg/collector/corechecks/oracle-dbm/statements.go b/pkg/collector/corechecks/oracle-dbm/statements.go index 21b8678f08b710..cfc1cc6b7ad578 100644 --- a/pkg/collector/corechecks/oracle-dbm/statements.go +++ b/pkg/collector/corechecks/oracle-dbm/statements.go @@ -337,12 +337,13 @@ func (c *Check) copyToPreviousMap(newMap map[StatementMetricsKeyDB]StatementMetr } func handlePredicate(predicateType string, dbValue sql.NullString, payloadValue *string, statement StatementMetricsDB, c *Check, o *obfuscate.Obfuscator) { - if dbValue.Valid { + if dbValue.Valid && dbValue.String != "" { obfuscated, err := o.ObfuscateSQLString(dbValue.String) if err == nil { *payloadValue = obfuscated.Query } else { - *payloadValue = fmt.Sprintf("%s obfuscation error", predicateType) + *payloadValue = fmt.Sprintf("%s obfuscation error %d", predicateType, len(dbValue.String)) + //*payloadValue = dbValue.String logEntry := fmt.Sprintf("%s %s for sql_id: %s, plan_hash_value: %d", c.logPrompt, *payloadValue, statement.SQLID, statement.PlanHashValue) if c.config.ExecutionPlans.LogUnobfuscatedPlans { logEntry = fmt.Sprintf("%s unobfuscated filter: %s", logEntry, dbValue.String) diff --git a/releasenotes/notes/oracle-obuscation-error-false-207d782244887115.yaml b/releasenotes/notes/oracle-obuscation-error-false-207d782244887115.yaml new file mode 100644 index 00000000000000..ab9d5980b0b516 --- /dev/null +++ b/releasenotes/notes/oracle-obuscation-error-false-207d782244887115.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. 
+--- +fixes: + - | + Fixed obfuscation error false positive when the access or filter predicates are empty. From f425dfa882dd9ca8533172c246ea047be1a40799 Mon Sep 17 00:00:00 2001 From: Gustavo Caso Date: Fri, 15 Dec 2023 22:41:10 +0100 Subject: [PATCH 23/66] core/status component implementation (#21536) [ASCII-976] Status component implementation --- comp/README.md | 4 + comp/core/status/component.go | 77 ++ comp/core/status/component_mock.go | 13 + comp/core/status/render_helpers.go | 286 ++++++++ .../statusimpl/common_header_provider.go | 106 +++ .../statusimpl/common_header_provider_test.go | 148 ++++ comp/core/status/statusimpl/status.go | 338 +++++++++ comp/core/status/statusimpl/status_mock.go | 38 + comp/core/status/statusimpl/status_test.go | 671 ++++++++++++++++++ .../status/statusimpl/templates/errors.tmpl | 6 + .../status/statusimpl/templates/html.tmpl | 32 + .../status/statusimpl/templates/text.tmpl | 13 + docs/components/README.md | 2 +- docs/components/defining-components.md | 2 +- pkg/util/flavor/flavor.go | 23 + pkg/util/flavor/flavor_test.go | 29 + 16 files changed, 1786 insertions(+), 2 deletions(-) create mode 100644 comp/core/status/component.go create mode 100644 comp/core/status/component_mock.go create mode 100644 comp/core/status/render_helpers.go create mode 100644 comp/core/status/statusimpl/common_header_provider.go create mode 100644 comp/core/status/statusimpl/common_header_provider_test.go create mode 100644 comp/core/status/statusimpl/status.go create mode 100644 comp/core/status/statusimpl/status_mock.go create mode 100644 comp/core/status/statusimpl/status_test.go create mode 100644 comp/core/status/statusimpl/templates/errors.tmpl create mode 100644 comp/core/status/statusimpl/templates/html.tmpl create mode 100644 comp/core/status/statusimpl/templates/text.tmpl create mode 100644 pkg/util/flavor/flavor_test.go diff --git a/comp/README.md b/comp/README.md index 2b6f5e7f1970ea..7d642373c8e336 100644 --- a/comp/README.md +++ 
b/comp/README.md @@ -84,6 +84,10 @@ Package log implements a component to handle logging internal to the agent. Package secrets decodes secret values by invoking the configured executable command +### [comp/core/status](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/core/status) + +Package status displays information about the agent. + ### [comp/core/sysprobeconfig](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/core/sysprobeconfig) *Datadog Team*: ebpf-platform diff --git a/comp/core/status/component.go b/comp/core/status/component.go new file mode 100644 index 00000000000000..0a28f5ec4a4247 --- /dev/null +++ b/comp/core/status/component.go @@ -0,0 +1,77 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2023-present Datadog, Inc. + +// Package status displays information about the agent. +package status + +import ( + "io" + + "go.uber.org/fx" +) + +// team: agent-shared-components + +// CollectorSection stores the collector section name +const CollectorSection string = "collector" + +// Component interface to access the agent status. +type Component interface { + // Returns all the agent status information for the format type + GetStatus(format string, verbose bool) ([]byte, error) + // Returns only the agent status for the specified section and format type + GetStatusBySection(section string, format string, verbose bool) ([]byte, error) +} + +// Provider interface +type Provider interface { + // Name is used to sort the status providers alphabetically. + Name() string + // Section is used to group the status providers. 
+ // When displaying the Text output the section is rendered as a header + Section() string + JSON(stats map[string]interface{}) error + Text(buffer io.Writer) error + HTML(buffer io.Writer) error +} + +// HeaderProvider interface +type HeaderProvider interface { + // Index is used to choose the order in which the header information is displayed. + Index() int + // When displaying the Text output the name is rendered as a header + Name() string + JSON(stats map[string]interface{}) error + Text(buffer io.Writer) error + HTML(buffer io.Writer) error +} + +// InformationProvider stores the Provider instance +type InformationProvider struct { + fx.Out + + Provider Provider `group:"status"` +} + +// HeaderInformationProvider stores the HeaderProvider instance +type HeaderInformationProvider struct { + fx.Out + + Provider HeaderProvider `group:"header_status"` +} + +// NewInformationProvider returns an InformationProvider to be called when generating the agent status +func NewInformationProvider(provider Provider) InformationProvider { + return InformationProvider{ + Provider: provider, + } +} + +// NewHeaderInformationProvider returns a new HeaderInformationProvider to be called when generating the agent status +func NewHeaderInformationProvider(provider HeaderProvider) HeaderInformationProvider { + return HeaderInformationProvider{ + Provider: provider, + } +} diff --git a/comp/core/status/component_mock.go b/comp/core/status/component_mock.go new file mode 100644 index 00000000000000..07fc21adf09f1e --- /dev/null +++ b/comp/core/status/component_mock.go @@ -0,0 +1,13 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2023-present Datadog, Inc. + +//go:build test + +package status + +// Mock implements mock-specific methods. 
+type Mock interface { + Component +} diff --git a/comp/core/status/render_helpers.go b/comp/core/status/render_helpers.go new file mode 100644 index 00000000000000..ed398a0a62c656 --- /dev/null +++ b/comp/core/status/render_helpers.go @@ -0,0 +1,286 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package status + +import ( + "encoding/json" + "fmt" + htemplate "html/template" + "strconv" + "strings" + "sync" + ttemplate "text/template" + "time" + "unicode" + + "github.com/dustin/go-humanize" + "github.com/fatih/color" + + "golang.org/x/text/cases" + "golang.org/x/text/language" + "golang.org/x/text/unicode/norm" +) + +var ( + htmlFuncOnce sync.Once + htmlFuncMap htemplate.FuncMap + textFuncOnce sync.Once + textFuncMap ttemplate.FuncMap +) + +// HTMLFmap return a map of utility functions for HTML templating +func HTMLFmap() htemplate.FuncMap { + htmlFuncOnce.Do(func() { + htmlFuncMap = htemplate.FuncMap{ + "doNotEscape": doNotEscape, + "lastError": lastError, + "lastErrorTraceback": func(s string) htemplate.HTML { return doNotEscape(lastErrorTraceback(s)) }, + "lastErrorMessage": func(s string) htemplate.HTML { return doNotEscape(lastErrorMessage(s)) }, + "configError": configError, + "printDashes": PrintDashes, + "formatUnixTime": formatUnixTime, + "humanize": mkHuman, + "humanizeDuration": mkHumanDuration, + "toUnsortedList": toUnsortedList, + "formatTitle": formatTitle, + "add": add, + "status": status, + "redText": redText, + "yellowText": yellowText, + "greenText": greenText, + "ntpWarning": ntpWarning, + "version": getVersion, + "percent": func(v float64) string { return fmt.Sprintf("%02.1f", v*100) }, + "complianceResult": complianceResult, + } + }) + return htmlFuncMap +} + +// TextFmap map of utility functions for text templating +func TextFmap() 
ttemplate.FuncMap { + textFuncOnce.Do(func() { + textFuncMap = ttemplate.FuncMap{ + "lastErrorTraceback": lastErrorTraceback, + "lastErrorMessage": lastErrorMessage, + "printDashes": PrintDashes, + "formatUnixTime": formatUnixTime, + "humanize": mkHuman, + "humanizeDuration": mkHumanDuration, + "toUnsortedList": toUnsortedList, + "formatTitle": formatTitle, + "add": add, + "status": status, + "redText": redText, + "yellowText": yellowText, + "greenText": greenText, + "ntpWarning": ntpWarning, + "version": getVersion, + "percent": func(v float64) string { return fmt.Sprintf("%02.1f", v*100) }, + "complianceResult": complianceResult, + } + }) + + return textFuncMap +} + +const timeFormat = "2006-01-02 15:04:05.999 MST" + +func doNotEscape(value string) htemplate.HTML { + return htemplate.HTML(value) +} + +func configError(value string) htemplate.HTML { + return htemplate.HTML(value + "\n") +} + +func lastError(value string) htemplate.HTML { + return htemplate.HTML(value) +} + +func lastErrorTraceback(value string) string { + var lastErrorArray []map[string]string + + err := json.Unmarshal([]byte(value), &lastErrorArray) + if err != nil || len(lastErrorArray) == 0 { + return "No traceback" + } + lastErrorArray[0]["traceback"] = strings.Replace(lastErrorArray[0]["traceback"], "\n", "\n ", -1) + lastErrorArray[0]["traceback"] = strings.TrimRight(lastErrorArray[0]["traceback"], "\n\t ") + return lastErrorArray[0]["traceback"] +} + +// lastErrorMessage converts the last error message to html +func lastErrorMessage(value string) string { + var lastErrorArray []map[string]string + err := json.Unmarshal([]byte(value), &lastErrorArray) + if err == nil && len(lastErrorArray) > 0 { + if msg, ok := lastErrorArray[0]["message"]; ok { + return msg + } + } + return value +} + +// formatUnixTime formats the unix time to make it more readable +func formatUnixTime(unixTime int64) string { + // Initially treat given unixTime is in nanoseconds + t := time.Unix(0, int64(unixTime)) + // 
If year returned 1970, assume unixTime actually in seconds + if t.Year() == time.Unix(0, 0).Year() { + t = time.Unix(int64(unixTime), 0) + } + + _, tzoffset := t.Zone() + result := t.Format(timeFormat) + if tzoffset != 0 { + result += " / " + t.UTC().Format(timeFormat) + } + msec := t.UnixNano() / int64(time.Millisecond) + result += " (" + strconv.Itoa(int(msec)) + ")" + return result +} + +// PrintDashes repeats the pattern (dash) for the length of s +func PrintDashes(s string, dash string) string { + return strings.Repeat(dash, stringLength(s)) +} + +func toUnsortedList(s map[string]interface{}) string { + res := make([]string, 0, len(s)) + for key := range s { + res = append(res, key) + } + return fmt.Sprintf("%s", res) +} + +// mkHuman adds commas to large numbers to assist readability in status outputs +func mkHuman(f float64) string { + return humanize.Commaf(f) +} + +// mkHumanDuration makes time values more readable +func mkHumanDuration(f float64, unit string) string { + var duration time.Duration + if unit != "" { + duration, _ = time.ParseDuration(fmt.Sprintf("%f%s", f, unit)) + } else { + duration = time.Duration(int64(f)) * time.Second + } + + return duration.String() +} + +func stringLength(s string) int { + /* + len(string) is wrong if the string has unicode characters in it, + for example, something like 'Agent (v6.0.0+Χελωνη)' has len(s) == 27. 
+ This is a better way of counting a string length + (credit goes to https://stackoverflow.com/a/12668840) + */ + var ia norm.Iter + ia.InitString(norm.NFKD, s) + nc := 0 + for !ia.Done() { + nc = nc + 1 + ia.Next() + } + return nc +} + +// add two integer together +func add(x, y int) int { + return x + y +} + +// formatTitle split a camel case string into space-separated words +func formatTitle(title string) string { + if title == "os" { + return "OS" + } + + // Split camel case words + var words []string + var l int + + for s := title; s != ""; s = s[l:] { + l = strings.IndexFunc(s[1:], unicode.IsUpper) + 1 + if l <= 0 { + l = len(s) + } + words = append(words, s[:l]) + } + title = strings.Join(words, " ") + + // Capitalize the first letter + return cases.Title(language.English, cases.NoLower).String(title) +} + +func status(check map[string]interface{}) string { + if check["LastError"].(string) != "" { + return fmt.Sprintf("[%s]", color.RedString("ERROR")) + } + if len(check["LastWarnings"].([]interface{})) != 0 { + return fmt.Sprintf("[%s]", color.YellowString("WARNING")) + } + return fmt.Sprintf("[%s]", color.GreenString("OK")) +} + +func complianceResult(result string) string { + switch result { + case "error": + return fmt.Sprintf("[%s]", color.RedString("ERROR")) + case "failed": + return fmt.Sprintf("[%s]", color.RedString("FAILED")) + case "passed": + return fmt.Sprintf("[%s]", color.GreenString("PASSED")) + default: + return fmt.Sprintf("[%s]", color.YellowString("UNKNOWN")) + } +} + +// Renders the message in a red color +func redText(message string) string { + return color.RedString(message) +} + +// Renders the message in a yellow color +func yellowText(message string) string { + return color.YellowString(message) +} + +// Renders the message in a green color +func greenText(message string) string { + return color.GreenString(message) +} + +// Tells if the ntp offset may be too large, resulting in metrics +// from the agent being dropped by 
metrics-intake +func ntpWarning(ntpOffset float64) bool { + // Negative offset => clock is in the future, 10 minutes (600s) allowed + // Positive offset => clock is in the past, 60 minutes (3600s) allowed + // According to https://docs.datadoghq.com/developers/metrics/#submitting-metrics + return ntpOffset <= -600 || ntpOffset >= 3600 +} + +func getVersion(instances map[string]interface{}) string { + if len(instances) == 0 { + return "" + } + for _, instance := range instances { + instanceMap := instance.(map[string]interface{}) + version, ok := instanceMap["CheckVersion"] + if !ok { + return "" + } + str, ok := version.(string) + if !ok { + return "" + } + return str + } + return "" +} diff --git a/comp/core/status/statusimpl/common_header_provider.go b/comp/core/status/statusimpl/common_header_provider.go new file mode 100644 index 00000000000000..5094476299d5f5 --- /dev/null +++ b/comp/core/status/statusimpl/common_header_provider.go @@ -0,0 +1,106 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2023-present Datadog, Inc. 
+ +package statusimpl + +import ( + "fmt" + htmlTemplate "html/template" + "io" + "os" + "path" + "runtime" + "strings" + textTemplate "text/template" + "time" + + "github.com/DataDog/datadog-agent/comp/core/config" + "github.com/DataDog/datadog-agent/comp/core/status" + "github.com/DataDog/datadog-agent/pkg/collector/python" + pkgConfig "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/util/flavor" + "github.com/DataDog/datadog-agent/pkg/version" +) + +var nowFunc = time.Now +var startTimeProvider = pkgConfig.StartTime + +type headerProvider struct { + data map[string]interface{} + name string + textTemplatesFunctions textTemplate.FuncMap + htmlTemplatesFunctions htmlTemplate.FuncMap +} + +func (h *headerProvider) Index() int { + return 0 +} + +func (h *headerProvider) Name() string { + return h.name +} + +func (h *headerProvider) JSON(stats map[string]interface{}) error { + for k, v := range h.data { + stats[k] = v + } + + return nil +} + +func (h *headerProvider) Text(buffer io.Writer) error { + tmpl, tmplErr := templatesFS.ReadFile(path.Join("templates", "text.tmpl")) + if tmplErr != nil { + return tmplErr + } + t := textTemplate.Must(textTemplate.New("header").Funcs(h.textTemplatesFunctions).Parse(string(tmpl))) + return t.Execute(buffer, h.data) +} + +func (h *headerProvider) HTML(buffer io.Writer) error { + tmpl, tmplErr := templatesFS.ReadFile(path.Join("templates", "html.tmpl")) + if tmplErr != nil { + return tmplErr + } + t := htmlTemplate.Must(htmlTemplate.New("header").Funcs(h.htmlTemplatesFunctions).Parse(string(tmpl))) + return t.Execute(buffer, h.data) +} + +func newCommonHeaderProvider(config config.Component) status.HeaderProvider { + + data := map[string]interface{}{} + data["version"] = version.AgentVersion + data["flavor"] = flavor.GetFlavor() + data["conf_file"] = config.ConfigFileUsed() + data["pid"] = os.Getpid() + data["go_version"] = runtime.Version() + data["agent_start_nano"] = 
startTimeProvider.UnixNano() + pythonVersion := python.GetPythonVersion() + data["python_version"] = strings.Split(pythonVersion, " ")[0] + data["build_arch"] = runtime.GOARCH + data["time_nano"] = nowFunc().UnixNano() + data["config"] = populateConfig(config) + + return &headerProvider{ + data: data, + name: fmt.Sprintf("%s (v%s)", flavor.GetHumanReadableFlavor(), data["version"]), + textTemplatesFunctions: status.TextFmap(), + htmlTemplatesFunctions: status.HTMLFmap(), + } +} + +func populateConfig(config config.Component) map[string]string { + conf := make(map[string]string) + conf["log_file"] = config.GetString("log_file") + conf["log_level"] = config.GetString("log_level") + conf["confd_path"] = config.GetString("confd_path") + conf["additional_checksd"] = config.GetString("additional_checksd") + + conf["fips_enabled"] = config.GetString("fips.enabled") + conf["fips_local_address"] = config.GetString("fips.local_address") + conf["fips_port_range_start"] = config.GetString("fips.port_range_start") + + return conf +} diff --git a/comp/core/status/statusimpl/common_header_provider_test.go b/comp/core/status/statusimpl/common_header_provider_test.go new file mode 100644 index 00000000000000..dfd7d616047f0e --- /dev/null +++ b/comp/core/status/statusimpl/common_header_provider_test.go @@ -0,0 +1,148 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package statusimpl + +import ( + "bytes" + "fmt" + "os" + "strings" + "testing" + "time" + + "github.com/DataDog/datadog-agent/comp/core/config" + pkgConfig "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/util/fxutil" + "github.com/DataDog/datadog-agent/pkg/version" + "github.com/stretchr/testify/assert" +) + +func TestCommonHeaderProviderIndex(t *testing.T) { + config := fxutil.Test[config.Component](t, config.MockModule()) + + provider := newCommonHeaderProvider(config) + + assert.Equal(t, 0, provider.Index()) +} + +func TestCommonHeaderProviderJSON(t *testing.T) { + nowFunc = func() time.Time { return time.Unix(1515151515, 0) } + startTimeProvider = time.Unix(1515151515, 0) + originalTZ := os.Getenv("TZ") + os.Setenv("TZ", "UTC") + + defer func() { + nowFunc = time.Now + startTimeProvider = pkgConfig.StartTime + os.Setenv("TZ", originalTZ) + }() + + config := fxutil.Test[config.Component](t, config.MockModule()) + + provider := newCommonHeaderProvider(config) + stats := map[string]interface{}{} + provider.JSON(stats) + + assert.Equal(t, version.AgentVersion, stats["version"]) + assert.Equal(t, agentFlavor, stats["flavor"]) + assert.Equal(t, config.ConfigFileUsed(), stats["conf_file"]) + assert.Equal(t, pid, stats["pid"]) + assert.Equal(t, goVersion, stats["go_version"]) + assert.Equal(t, startTimeProvider.UnixNano(), stats["agent_start_nano"]) + assert.Equal(t, "n/a", stats["python_version"]) + assert.Equal(t, arch, stats["build_arch"]) + assert.Equal(t, nowFunc().UnixNano(), stats["time_nano"]) + assert.NotEqual(t, "", stats["title"]) +} + +var expectedTextOutput = fmt.Sprintf(` Status date: 2018-01-05 11:25:15 UTC (1515151515000) + Agent start: 2018-01-05 11:25:15 UTC (1515151515000) + Pid: %d + Go Version: %s + Python Version: n/a + Build arch: %s + Agent flavor: %s + Log Level: info +`, pid, goVersion, arch, agentFlavor) + +func TestCommonHeaderProviderText(t *testing.T) { + nowFunc = func() time.Time { return 
 time.Unix(1515151515, 0) } + startTimeProvider = time.Unix(1515151515, 0) + + defer func() { + nowFunc = time.Now + startTimeProvider = pkgConfig.StartTime + }() + + config := fxutil.Test[config.Component](t, config.MockModule()) + + provider := newCommonHeaderProvider(config) + + buffer := new(bytes.Buffer) + provider.Text(buffer) + + // We replace windows line break by linux so the tests pass on every OS + expectedResult := strings.Replace(expectedTextOutput, "\r\n", "\n", -1) + output := strings.Replace(buffer.String(), "\r\n", "\n", -1) + + assert.Equal(t, expectedResult, output) +} + +func TestCommonHeaderProviderHTML(t *testing.T) { + nowFunc = func() time.Time { return time.Unix(1515151515, 0) } + startTimeProvider = time.Unix(1515151515, 0) + originalTZ := os.Getenv("TZ") + os.Setenv("TZ", "UTC") + + defer func() { + nowFunc = time.Now + startTimeProvider = pkgConfig.StartTime + os.Setenv("TZ", originalTZ) + }() + + config := fxutil.Test[config.Component](t, config.MockModule()) + + provider := newCommonHeaderProvider(config) + + buffer := new(bytes.Buffer) + provider.HTML(buffer) + + // We have to do this strings replacement because html/template escapes the `+` sign + // https://github.com/golang/go/issues/42506 + result := buffer.String() + unescapedResult := strings.Replace(result, "+", "+", -1) + + expectedHTMLOutput := fmt.Sprintf(`
+ Agent Info + + Version: %s +
Flavor: %s +
PID: %d +
Agent start: 2018-01-05 11:25:15 UTC (1515151515000) +
Log Level: info +
Config File: There is no config file +
Conf.d Path: %s +
Checks.d Path: %s +
+
+ +
+ System Info + + System time: 2018-01-05 11:25:15 UTC (1515151515000) +
Go Version: %s +
Python Version: n/a +
Build arch: %s +
+
+`, version.AgentVersion, agentFlavor, pid, config.GetString("confd_path"), config.GetString("additional_checksd"), goVersion, arch) + + // We replace windows line break by linux so the tests pass on every OS + expectedResult := strings.Replace(expectedHTMLOutput, "\r\n", "\n", -1) + output := strings.Replace(unescapedResult, "\r\n", "\n", -1) + + assert.Equal(t, expectedResult, output) +} diff --git a/comp/core/status/statusimpl/status.go b/comp/core/status/statusimpl/status.go new file mode 100644 index 00000000000000..56072ba7e732dd --- /dev/null +++ b/comp/core/status/statusimpl/status.go @@ -0,0 +1,338 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2023-present Datadog, Inc. + +// Package statusimpl implements the status component interface +package statusimpl + +import ( + "bytes" + "embed" + "encoding/json" + "io" + "path" + "sort" + "text/template" + "unicode" + + "go.uber.org/fx" + "golang.org/x/text/cases" + "golang.org/x/text/language" + + "github.com/DataDog/datadog-agent/comp/core/config" + "github.com/DataDog/datadog-agent/comp/core/status" + "github.com/DataDog/datadog-agent/pkg/util/fxutil" +) + +//go:embed templates +var templatesFS embed.FS + +type dependencies struct { + fx.In + Config config.Component + + Providers []status.Provider `group:"status"` + HeaderProviders []status.HeaderProvider `group:"header_status"` +} + +type statusImplementation struct { + sortedHeaderProviders []status.HeaderProvider + sortedSectionNames []string + sortedProvidersBySection map[string][]status.Provider +} + +// Module defines the fx options for this component. 
+func Module() fxutil.Module { + return fxutil.Component( + fx.Provide(newStatus), + ) +} + +func sortByName(providers []status.Provider) []status.Provider { + sort.SliceStable(providers, func(i, j int) bool { + return providers[i].Name() < providers[j].Name() + }) + + return providers +} + +func newStatus(deps dependencies) (status.Component, error) { + // Sections are sorted by name + // The exception is the collector section. We want that to be the first section to be displayed + // We manually insert the collector section in the first place after sorting them alphabetically + sortedSectionNames := []string{} + for _, provider := range deps.Providers { + if !present(provider.Section(), sortedSectionNames) && provider.Section() != status.CollectorSection { + sortedSectionNames = append(sortedSectionNames, provider.Section()) + } + } + sort.Strings(sortedSectionNames) + sortedSectionNames = append([]string{status.CollectorSection}, sortedSectionNames...) + + // Providers of each section are sorted alphabetically by name + sortedProvidersBySection := map[string][]status.Provider{} + for _, provider := range deps.Providers { + providers := sortedProvidersBySection[provider.Section()] + sortedProvidersBySection[provider.Section()] = append(providers, provider) + } + for section, providers := range sortedProvidersBySection { + sortedProvidersBySection[section] = sortByName(providers) + } + + // Header providers are sorted by index + // We manually insert the common header provider in the first place after sorting is done + sortedHeaderProviders := deps.HeaderProviders + sort.SliceStable(sortedHeaderProviders, func(i, j int) bool { + return sortedHeaderProviders[i].Index() < sortedHeaderProviders[j].Index() + }) + + sortedHeaderProviders = append([]status.HeaderProvider{newCommonHeaderProvider(deps.Config)}, sortedHeaderProviders...) 
+ + return &statusImplementation{ + sortedSectionNames: sortedSectionNames, + sortedProvidersBySection: sortedProvidersBySection, + sortedHeaderProviders: sortedHeaderProviders, + }, nil +} + +func (s *statusImplementation) GetStatus(format string, _ bool) ([]byte, error) { + var errors []error + + switch format { + case "json": + stats := make(map[string]interface{}) + for _, sc := range s.sortedHeaderProviders { + if err := sc.JSON(stats); err != nil { + errors = append(errors, err) + } + } + + for _, providers := range s.sortedProvidersBySection { + for _, provider := range providers { + if err := provider.JSON(stats); err != nil { + errors = append(errors, err) + } + } + } + + if len(errors) > 0 { + errorsInfo := []string{} + for _, error := range errors { + errorsInfo = append(errorsInfo, error.Error()) + } + stats["errors"] = errorsInfo + } + + return json.Marshal(stats) + case "text": + var b = new(bytes.Buffer) + + for _, sc := range s.sortedHeaderProviders { + printHeader(b, sc.Name()) + newLine(b) + + if err := sc.Text(b); err != nil { + errors = append(errors, err) + } + + newLine(b) + } + + for _, section := range s.sortedSectionNames { + + printHeader(b, section) + newLine(b) + + for _, provider := range s.sortedProvidersBySection[section] { + if err := provider.Text(b); err != nil { + errors = append(errors, err) + } + } + + newLine(b) + } + if len(errors) > 0 { + if err := renderErrors(b, errors); err != nil { + return []byte{}, err + } + + return b.Bytes(), nil + } + + return b.Bytes(), nil + case "html": + var b = new(bytes.Buffer) + + for _, sc := range s.sortedHeaderProviders { + err := sc.HTML(b) + if err != nil { + return b.Bytes(), err + } + } + + for _, section := range s.sortedSectionNames { + for _, provider := range s.sortedProvidersBySection[section] { + err := provider.HTML(b) + if err != nil { + return b.Bytes(), err + } + } + } + return b.Bytes(), nil + default: + return []byte{}, nil + } +} + +func (s *statusImplementation) 
GetStatusBySection(section string, format string, _ bool) ([]byte, error) { + var errors []error + + switch section { + case "header": + providers := s.sortedHeaderProviders + switch format { + case "json": + stats := make(map[string]interface{}) + + for _, sc := range providers { + if err := sc.JSON(stats); err != nil { + errors = append(errors, err) + } + } + + if len(errors) > 0 { + errorsInfo := []string{} + for _, error := range errors { + errorsInfo = append(errorsInfo, error.Error()) + } + stats["errors"] = errorsInfo + } + + return json.Marshal(stats) + case "text": + var b = new(bytes.Buffer) + + for i, sc := range providers { + if i == 0 { + printHeader(b, sc.Name()) + newLine(b) + } + + err := sc.Text(b) + if err != nil { + return b.Bytes(), err + } + } + + return b.Bytes(), nil + case "html": + var b = new(bytes.Buffer) + + for _, sc := range providers { + err := sc.HTML(b) + if err != nil { + return b.Bytes(), err + } + } + return b.Bytes(), nil + default: + return []byte{}, nil + } + default: + providers := s.sortedProvidersBySection[section] + switch format { + case "json": + stats := make(map[string]interface{}) + + for _, sc := range providers { + if err := sc.JSON(stats); err != nil { + errors = append(errors, err) + } + } + + if len(errors) > 0 { + errorsInfo := []string{} + for _, error := range errors { + errorsInfo = append(errorsInfo, error.Error()) + } + stats["errors"] = errorsInfo + } + + return json.Marshal(stats) + case "text": + var b = new(bytes.Buffer) + + for i, sc := range providers { + if i == 0 { + printHeader(b, section) + newLine(b) + } + + if err := sc.Text(b); err != nil { + errors = append(errors, err) + } + } + + if len(errors) > 0 { + if err := renderErrors(b, errors); err != nil { + return []byte{}, err + } + + return b.Bytes(), nil + } + + return b.Bytes(), nil + case "html": + var b = new(bytes.Buffer) + + for _, sc := range providers { + err := sc.HTML(b) + if err != nil { + return b.Bytes(), err + } + } + return 
b.Bytes(), nil + default: + return []byte{}, nil + } + } +} + +func present(value string, container []string) bool { + for _, v := range container { + if v == value { + return true + } + } + + return false +} + +func printHeader(buffer *bytes.Buffer, section string) { + dashes := []byte(status.PrintDashes(section, "=")) + buffer.Write(dashes) + newLine(buffer) + + runes := []rune(section) + if unicode.IsUpper(runes[0]) { + buffer.Write([]byte(section)) + } else { + buffer.Write([]byte(cases.Title(language.Und).String(section))) + } + newLine(buffer) + buffer.Write(dashes) +} + +func newLine(buffer *bytes.Buffer) { + buffer.Write([]byte("\n")) +} + +func renderErrors(w io.Writer, errs []error) error { + tmpl, tmplErr := templatesFS.ReadFile(path.Join("templates", "errors.tmpl")) + if tmplErr != nil { + return tmplErr + } + t := template.Must(template.New("errors").Parse(string(tmpl))) + return t.Execute(w, errs) +} diff --git a/comp/core/status/statusimpl/status_mock.go b/comp/core/status/statusimpl/status_mock.go new file mode 100644 index 00000000000000..bc404c7b0bbfed --- /dev/null +++ b/comp/core/status/statusimpl/status_mock.go @@ -0,0 +1,38 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2023-present Datadog, Inc. + +//go:build test + +package statusimpl + +import ( + "go.uber.org/fx" + + "github.com/DataDog/datadog-agent/comp/core/status" + "github.com/DataDog/datadog-agent/pkg/util/fxutil" +) + +type statusMock struct { +} + +func (s *statusMock) GetStatus(string, bool) ([]byte, error) { + return []byte{}, nil +} + +func (s *statusMock) GetStatusBySection(string, string, bool) ([]byte, error) { + return []byte{}, nil +} + +// newMock returns a status Mock +func newMock() status.Mock { + return &statusMock{} +} + +// MockModule defines the fx options for the mock component. 
+func MockModule() fxutil.Module { + return fxutil.Component( + fx.Provide(newMock), + ) +} diff --git a/comp/core/status/statusimpl/status_test.go b/comp/core/status/statusimpl/status_test.go new file mode 100644 index 00000000000000..47aaa3e2ee8335 --- /dev/null +++ b/comp/core/status/statusimpl/status_test.go @@ -0,0 +1,671 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package statusimpl + +import ( + "encoding/json" + "fmt" + "io" + "os" + "runtime" + "strings" + "testing" + "time" + + "github.com/DataDog/datadog-agent/comp/core/config" + "github.com/DataDog/datadog-agent/comp/core/status" + pkgConfig "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/util/flavor" + "github.com/DataDog/datadog-agent/pkg/util/fxutil" + "github.com/DataDog/datadog-agent/pkg/version" + "github.com/stretchr/testify/assert" + "go.uber.org/fx" +) + +type mockProvider struct { + data map[string]interface{} + text string + html string + name string + section string +} + +func (m mockProvider) Name() string { + return m.name +} + +func (m mockProvider) Section() string { + return m.section +} + +func (m mockProvider) JSON(stats map[string]interface{}) error { + for key, value := range m.data { + stats[key] = value + } + + return nil +} + +func (m mockProvider) Text(buffer io.Writer) error { + _, err := buffer.Write([]byte(m.text)) + return err +} + +func (m mockProvider) HTML(buffer io.Writer) error { + _, err := buffer.Write([]byte(m.html)) + return err +} + +type mockHeaderProvider struct { + data map[string]interface{} + text string + html string + index int + name string +} + +func (m mockHeaderProvider) Index() int { + return m.index +} + +func (m mockHeaderProvider) Name() string { + return m.name +} + +func (m mockHeaderProvider) 
JSON(stats map[string]interface{}) error { + for key, value := range m.data { + stats[key] = value + } + + return nil +} + +func (m mockHeaderProvider) Text(buffer io.Writer) error { + _, err := buffer.Write([]byte(m.text)) + return err +} + +func (m mockHeaderProvider) HTML(buffer io.Writer) error { + _, err := buffer.Write([]byte(m.html)) + return err +} + +type errorMockProvider struct{} + +func (m errorMockProvider) Name() string { + return "error mock" +} + +func (m errorMockProvider) Section() string { + return "error section" +} + +func (m errorMockProvider) JSON(map[string]interface{}) error { + return fmt.Errorf("testing JSON errors") +} + +func (m errorMockProvider) Text(io.Writer) error { + return fmt.Errorf("testing Text errors") +} + +func (m errorMockProvider) HTML(io.Writer) error { + return fmt.Errorf("testing HTML errors") +} + +var ( + humanReadbaleFlavor = flavor.GetHumanReadableFlavor() + agentVersion = version.AgentVersion + pid = os.Getpid() + goVersion = runtime.Version() + arch = runtime.GOARCH + agentFlavor = flavor.GetFlavor() + testTitle = fmt.Sprintf("%s (v%s)", humanReadbaleFlavor, agentVersion) +) + +var testTextHeader = fmt.Sprintf(`%s +%s +%s`, status.PrintDashes(testTitle, "="), testTitle, status.PrintDashes(testTitle, "=")) + +var expectedStatusTextOutput = fmt.Sprintf(`%s + Status date: 2018-01-05 11:25:15 UTC (1515151515000) + Agent start: 2018-01-05 11:25:15 UTC (1515151515000) + Pid: %d + Go Version: %s + Python Version: n/a + Build arch: %s + Agent flavor: %s + Log Level: info + +========== +Header Foo +========== + header foo: header bar + header foo2: header bar 2 + +========= +Collector +========= + text from a + text from b + +========= +A Section +========= + text from a + +========= +X Section +========= + text from a + text from x + +`, testTextHeader, pid, goVersion, arch, agentFlavor) + +func TestGetStatus(t *testing.T) { + nowFunc = func() time.Time { return time.Unix(1515151515, 0) } + startTimeProvider = 
time.Unix(1515151515, 0) + originalTZ := os.Getenv("TZ") + os.Setenv("TZ", "UTC") + + defer func() { + nowFunc = time.Now + startTimeProvider = pkgConfig.StartTime + os.Setenv("TZ", originalTZ) + }() + + deps := fxutil.Test[dependencies](t, fx.Options( + config.MockModule(), + fx.Supply( + status.NewInformationProvider(mockProvider{ + data: map[string]interface{}{ + "foo": "bar", + }, + name: "a", + text: " text from a\n", + html: `
+ Foo + +
Bar: bar +
+
+`, + section: status.CollectorSection, + }), + status.NewInformationProvider(mockProvider{ + data: map[string]interface{}{ + "foo2": "bar2", + }, + name: "b", + text: " text from b\n", + section: status.CollectorSection, + }), + status.NewInformationProvider(mockProvider{ + data: map[string]interface{}{ + "foo3": "bar3", + }, + name: "x", + text: " text from x\n", + section: "x section", + }), + status.NewInformationProvider(mockProvider{ + data: map[string]interface{}{ + "foo3": "bar3", + }, + name: "a", + text: " text from a\n", + section: "a section", + }), + status.NewInformationProvider(mockProvider{ + data: map[string]interface{}{ + "foo3": "bar3", + }, + name: "a", + text: " text from a\n", + section: "x section", + }), + status.NewHeaderInformationProvider(mockHeaderProvider{ + name: "header foo", + data: map[string]interface{}{ + "header_foo": "header_bar", + }, + text: ` header foo: header bar + header foo2: header bar 2 +`, + html: `
+ Header Foo + +
Header Bar: bar +
+
+`, + index: 2, + }), + ), + )) + + statusComponent, err := newStatus(deps) + + assert.NoError(t, err) + + testCases := []struct { + name string + format string + assertFunc func(*testing.T, []byte) + }{ + { + name: "JSON", + format: "json", + assertFunc: func(t *testing.T, bytes []byte) { + result := map[string]interface{}{} + err = json.Unmarshal(bytes, &result) + + assert.NoError(t, err) + + assert.Equal(t, "bar", result["foo"]) + assert.Equal(t, "header_bar", result["header_foo"]) + }, + }, + { + name: "Text", + format: "text", + assertFunc: func(t *testing.T, bytes []byte) { + // We replace windows line break by linux so the tests pass on every OS + expectedResult := strings.Replace(expectedStatusTextOutput, "\r\n", "\n", -1) + output := strings.Replace(string(bytes), "\r\n", "\n", -1) + + assert.Equal(t, expectedResult, output) + }, + }, + { + name: "HTML", + format: "html", + assertFunc: func(t *testing.T, bytes []byte) { + // We have to do this strings replacement because html/temaplte escapes the `+` sign + // https://github.com/golang/go/issues/42506 + result := string(bytes) + unescapedResult := strings.Replace(result, "+", "+", -1) + + expectedStatusHTMLOutput := fmt.Sprintf(`
+ Agent Info + + Version: %s +
Flavor: %s +
PID: %d +
Agent start: 2018-01-05 11:25:15 UTC (1515151515000) +
Log Level: info +
Config File: There is no config file +
Conf.d Path: %s +
Checks.d Path: %s +
+
+ +
+ System Info + + System time: 2018-01-05 11:25:15 UTC (1515151515000) +
Go Version: %s +
Python Version: n/a +
Build arch: %s +
+
+
+ Header Foo + +
Header Bar: bar +
+
+
+ Foo + +
Bar: bar +
+
+`, agentVersion, agentFlavor, pid, deps.Config.GetString("confd_path"), deps.Config.GetString("additional_checksd"), goVersion, arch) + + // We replace windows line break by linux so the tests pass on every OS + expectedResult := strings.Replace(expectedStatusHTMLOutput, "\r\n", "\n", -1) + output := strings.Replace(unescapedResult, "\r\n", "\n", -1) + + assert.Equal(t, expectedResult, output) + }, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + bytesResult, err := statusComponent.GetStatus(testCase.format, false) + + assert.NoError(t, err) + + testCase.assertFunc(t, bytesResult) + }) + } +} + +var expectedStatusTextErrorOutput = fmt.Sprintf(`%s + Status date: 2018-01-05 11:25:15 UTC (1515151515000) + Agent start: 2018-01-05 11:25:15 UTC (1515151515000) + Pid: %d + Go Version: %s + Python Version: n/a + Build arch: %s + Agent flavor: agent + Log Level: info + +========= +Collector +========= + text from b + +============= +Error Section +============= + +==================== +Status render errors +==================== + - testing Text errors + +`, testTextHeader, pid, goVersion, arch) + +func TestGetStatusWithErrors(t *testing.T) { + nowFunc = func() time.Time { return time.Unix(1515151515, 0) } + startTimeProvider = time.Unix(1515151515, 0) + originalTZ := os.Getenv("TZ") + os.Setenv("TZ", "UTC") + + defer func() { + nowFunc = time.Now + startTimeProvider = pkgConfig.StartTime + os.Setenv("TZ", originalTZ) + }() + + deps := fxutil.Test[dependencies](t, fx.Options( + config.MockModule(), + fx.Supply( + status.NewInformationProvider(errorMockProvider{}), + status.NewInformationProvider(mockProvider{ + data: map[string]interface{}{ + "foo2": "bar2", + }, + name: "b", + text: " text from b\n", + section: status.CollectorSection, + }), + ), + )) + + statusComponent, err := newStatus(deps) + + assert.NoError(t, err) + + testCases := []struct { + name string + format string + assertFunc func(*testing.T, []byte) + }{ + { + 
name: "JSON", + format: "json", + assertFunc: func(t *testing.T, bytes []byte) { + result := map[string]interface{}{} + err = json.Unmarshal(bytes, &result) + + assert.NoError(t, err) + + assert.Equal(t, "testing JSON errors", result["errors"].([]interface{})[0].(string)) + }, + }, + { + name: "Text", + format: "text", + assertFunc: func(t *testing.T, bytes []byte) { + // We replace windows line break by linux so the tests pass on every OS + expectedResult := strings.Replace(expectedStatusTextErrorOutput, "\r\n", "\n", -1) + output := strings.Replace(string(bytes), "\r\n", "\n", -1) + + assert.Equal(t, expectedResult, output) + }, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + bytesResult, err := statusComponent.GetStatus(testCase.format, false) + + assert.NoError(t, err) + + testCase.assertFunc(t, bytesResult) + }) + } +} + +func TestGetStatusBySection(t *testing.T) { + deps := fxutil.Test[dependencies](t, fx.Options( + config.MockModule(), + fx.Supply( + status.NewInformationProvider(mockProvider{ + data: map[string]interface{}{ + "foo": "bar", + }, + name: "a", + text: " text from a\n", + html: `
+ Foo + +
Bar: bar +
+
+`, + section: status.CollectorSection, + }), + status.NewInformationProvider(mockProvider{ + data: map[string]interface{}{ + "foo2": "bar2", + }, + name: "b", + text: " text from b\n", + section: status.CollectorSection, + }), + status.NewInformationProvider(mockProvider{ + data: map[string]interface{}{ + "foo3": "bar3", + }, + name: "x", + text: " text from x\n", + section: "x section", + }), + status.NewInformationProvider(mockProvider{ + data: map[string]interface{}{ + "foo3": "bar3", + }, + name: "a", + text: " text from a\n", + section: "a section", + }), + status.NewInformationProvider(mockProvider{ + data: map[string]interface{}{ + "foo3": "bar3", + }, + name: "a", + text: " text from a\n", + section: "x section", + }), + status.NewHeaderInformationProvider(mockHeaderProvider{ + data: map[string]interface{}{ + "header_foo": "header_bar", + }, + text: ` header foo: header bar + header foo2: header bar 2 +`, + html: `
+ Header Foo + +
Header Bar: bar +
+
+`, + index: 2, + }), + ), + )) + + statusComponent, err := newStatus(deps) + + assert.NoError(t, err) + + testCases := []struct { + name string + section string + format string + assertFunc func(*testing.T, []byte) + }{ + { + name: "JSON", + section: "header", + format: "json", + assertFunc: func(t *testing.T, bytes []byte) { + result := map[string]interface{}{} + err = json.Unmarshal(bytes, &result) + + assert.NoError(t, err) + + assert.Nil(t, result["foo"]) + assert.Equal(t, "header_bar", result["header_foo"]) + }, + }, + { + name: "Text", + format: "text", + section: "x section", + assertFunc: func(t *testing.T, bytes []byte) { + result := `========= +X Section +========= + text from a + text from x +` + + // We replace windows line break by linux so the tests pass on every OS + expectedResult := strings.Replace(result, "\r\n", "\n", -1) + output := strings.Replace(string(bytes), "\r\n", "\n", -1) + + assert.Equal(t, expectedResult, output) + }, + }, + { + name: "HTML", + section: "collector", + format: "html", + assertFunc: func(t *testing.T, bytes []byte) { + result := `
+ Foo + +
Bar: bar +
+
+` + // We replace windows line break by linux so the tests pass on every OS + expectedResult := strings.Replace(result, "\r\n", "\n", -1) + output := strings.Replace(string(bytes), "\r\n", "\n", -1) + + assert.Equal(t, expectedResult, output) + }, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + bytesResult, err := statusComponent.GetStatusBySection(testCase.section, testCase.format, false) + + assert.NoError(t, err) + + testCase.assertFunc(t, bytesResult) + }) + } +} + +func TestGetStatusBySectionsWithErrors(t *testing.T) { + nowFunc = func() time.Time { return time.Unix(1515151515, 0) } + startTimeProvider = time.Unix(1515151515, 0) + originalTZ := os.Getenv("TZ") + os.Setenv("TZ", "UTC") + + defer func() { + nowFunc = time.Now + startTimeProvider = pkgConfig.StartTime + os.Setenv("TZ", originalTZ) + }() + + deps := fxutil.Test[dependencies](t, fx.Options( + config.MockModule(), + fx.Supply( + status.NewInformationProvider(errorMockProvider{}), + status.NewInformationProvider(mockProvider{ + data: map[string]interface{}{ + "foo2": "bar2", + }, + name: "b", + text: " text from b\n", + section: status.CollectorSection, + }), + ), + )) + + statusComponent, err := newStatus(deps) + + assert.NoError(t, err) + + testCases := []struct { + name string + format string + assertFunc func(*testing.T, []byte) + }{ + { + name: "JSON", + format: "json", + assertFunc: func(t *testing.T, bytes []byte) { + result := map[string]interface{}{} + err = json.Unmarshal(bytes, &result) + + assert.NoError(t, err) + + assert.Equal(t, "testing JSON errors", result["errors"].([]interface{})[0].(string)) + }, + }, + { + name: "Text", + format: "text", + assertFunc: func(t *testing.T, bytes []byte) { + expected := `============= +Error Section +============= +==================== +Status render errors +==================== + - testing Text errors + +` + + // We replace windows line break by linux so the tests pass on every OS + expectedResult := 
strings.Replace(expected, "\r\n", "\n", -1) + output := strings.Replace(string(bytes), "\r\n", "\n", -1) + + assert.Equal(t, expectedResult, output) + }, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + bytesResult, err := statusComponent.GetStatusBySection("error section", testCase.format, false) + + assert.NoError(t, err) + + testCase.assertFunc(t, bytesResult) + }) + } +} diff --git a/comp/core/status/statusimpl/templates/errors.tmpl b/comp/core/status/statusimpl/templates/errors.tmpl new file mode 100644 index 00000000000000..2a89c05b9e218c --- /dev/null +++ b/comp/core/status/statusimpl/templates/errors.tmpl @@ -0,0 +1,6 @@ +==================== +Status render errors +==================== +{{- range $err := . }} + - {{ $err }} +{{ end }} diff --git a/comp/core/status/statusimpl/templates/html.tmpl b/comp/core/status/statusimpl/templates/html.tmpl new file mode 100644 index 00000000000000..e42180ed6bed29 --- /dev/null +++ b/comp/core/status/statusimpl/templates/html.tmpl @@ -0,0 +1,32 @@ +
+ Agent Info + + Version: {{.version}} +
Flavor: {{.flavor}} +
PID: {{.pid}} +
Agent start: {{ formatUnixTime .agent_start_nano }} + {{- if .config.log_file}} +
Log File: {{.config.log_file}} + {{end}} +
Log Level: {{.config.log_level}} +
Config File: {{if .conf_file}}{{.conf_file}}{{else}}There is no config file{{end}} +
Conf.d Path: {{.config.confd_path}} +
Checks.d Path: {{.config.additional_checksd}} +
+
+ +
+ System Info + + System time: {{ formatUnixTime .time_nano }} + {{- if .ntpOffset}} +
NTP Offset: {{ humanizeDuration .ntpOffset "s"}} + {{- if ntpWarning .ntpOffset}} +
NTP Offset is high. Datadog may ignore metrics sent by this Agent. + {{- end}} + {{end}} +
Go Version: {{.go_version}} +
Python Version: {{.python_version}} +
Build arch: {{.build_arch}} +
+
diff --git a/comp/core/status/statusimpl/templates/text.tmpl b/comp/core/status/statusimpl/templates/text.tmpl new file mode 100644 index 00000000000000..acb0168ab750b7 --- /dev/null +++ b/comp/core/status/statusimpl/templates/text.tmpl @@ -0,0 +1,13 @@ + Status date: {{ formatUnixTime .time_nano }} + Agent start: {{ formatUnixTime .agent_start_nano }} + Pid: {{.pid}} + Go Version: {{.go_version}} + {{- if .python_version }} + Python Version: {{.python_version}} + {{- end }} + Build arch: {{.build_arch}} + Agent flavor: {{.flavor}} + {{- if .config.log_file}} + Log File: {{.config.log_file}} + {{- end }} + Log Level: {{.config.log_level}} diff --git a/docs/components/README.md b/docs/components/README.md index 280d86a4b88711..a21c1df34a3025 100644 --- a/docs/components/README.md +++ b/docs/components/README.md @@ -4,7 +4,7 @@ * [Overview of Components](./components.md) * [Guidelines](./guidelines.md) * [Defining Components](./defining-components.md) - * [Using Components](./using.md) + * [Using Components](./usage.md) * [Defining Bundles](./defining-bundles.md) * [Defining Apps and Binaries](./defining-apps.md) * [Registrations](./registrations.md) diff --git a/docs/components/defining-components.md b/docs/components/defining-components.md index 155523e2792791..ec3a4a833880ff 100644 --- a/docs/components/defining-components.md +++ b/docs/components/defining-components.md @@ -1,6 +1,6 @@ # Defining Components -You can use the invoke task `inv new-component comp//` to generate a scaffold for your new component. +You can use the invoke task `inv components.new-component comp//` to generate a scaffold for your new component. Below is a description of the different folders and files of your component. 
diff --git a/pkg/util/flavor/flavor.go b/pkg/util/flavor/flavor.go index a0348ce44f181b..b81dcb3d9e4931 100644 --- a/pkg/util/flavor/flavor.go +++ b/pkg/util/flavor/flavor.go @@ -29,6 +29,20 @@ const ( TraceAgent = "trace_agent" ) +var agentFlavors = map[string]string{ + DefaultAgent: "Agent", + IotAgent: "IoT Agent", + ClusterAgent: "Cluster Agent", + Dogstatsd: "DogStatsD", + SecurityAgent: "Security Agent", + ServerlessAgent: "Serverless Agent", + HerokuAgent: "Heroku Agent", + ProcessAgent: "Process Agent", + TraceAgent: "Trace Agent", +} + +const unknownAgent = "Unknown Agent" + var agentFlavor = DefaultAgent // SetFlavor sets the Agent flavor @@ -46,3 +60,12 @@ func SetFlavor(flavor string) { func GetFlavor() string { return agentFlavor } + +// GetHumanReadableFlavor gets the running Agent flavor in a human readable form +func GetHumanReadableFlavor() string { + if val, ok := agentFlavors[agentFlavor]; ok { + return val + } + + return unknownAgent +} diff --git a/pkg/util/flavor/flavor_test.go b/pkg/util/flavor/flavor_test.go new file mode 100644 index 00000000000000..b18d14f84170c1 --- /dev/null +++ b/pkg/util/flavor/flavor_test.go @@ -0,0 +1,29 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package flavor + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestGetHumanReadableFlavor(t *testing.T) { + for k, v := range agentFlavors { + t.Run(fmt.Sprintf("%s: %s", k, v), func(t *testing.T) { + SetFlavor(k) + + assert.Equal(t, v, GetHumanReadableFlavor()) + }) + } + + t.Run("Unknown Agent", func(t *testing.T) { + SetFlavor("foo") + + assert.Equal(t, "Unknown Agent", GetHumanReadableFlavor()) + }) +} From 2b6a9c17d5c18b258ffdb5b9d479c7ce8dfba63f Mon Sep 17 00:00:00 2001 From: Kevin Fairise <132568982+KevinFairise2@users.noreply.github.com> Date: Mon, 18 Dec 2023 10:45:09 +0100 Subject: [PATCH 24/66] Change Pulumi logger to write to t.Log (#21580) Change Pulumi logger to write to t.Log --- test/new-e2e/pkg/utils/e2e/e2e.go | 14 +++++-- test/new-e2e/pkg/utils/infra/stack_manager.go | 42 +++++++++++++------ .../system-probe/system-probe-test-env.go | 4 +- .../install-script/install_script_test.go | 4 +- test/new-e2e/tests/containers/eks_test.go | 4 +- test/new-e2e/tests/containers/kindvm_test.go | 4 +- test/new-e2e/tests/ndm/snmp/snmpTestEnv.go | 2 +- 7 files changed, 50 insertions(+), 24 deletions(-) diff --git a/test/new-e2e/pkg/utils/e2e/e2e.go b/test/new-e2e/pkg/utils/e2e/e2e.go index 9fd41e981c1cc9..bbd491d3972ab8 100644 --- a/test/new-e2e/pkg/utils/e2e/e2e.go +++ b/test/new-e2e/pkg/utils/e2e/e2e.go @@ -370,6 +370,15 @@ const ( deleteTimeout = 30 * time.Minute ) +type testWriter struct { + t *testing.T +} + +func (tw testWriter) Write(p []byte) (n int, err error) { + tw.t.Log(string(p)) + return len(p), nil +} + // Suite manages the environment creation and runs E2E tests. 
type Suite[Env any] struct { suite.Suite @@ -510,7 +519,7 @@ func (suite *Suite[Env]) TearDownSuite() { // TODO: Implement retry on delete ctx, cancel := context.WithTimeout(context.Background(), deleteTimeout) defer cancel() - err := infra.GetStackManager().DeleteStack(ctx, suite.params.StackName) + err := infra.GetStackManager().DeleteStack(ctx, suite.params.StackName, testWriter{t: suite.T()}) if err != nil { suite.T().Errorf("unable to delete stack: %s, err :%v", suite.params.StackName, err) suite.T().Fail() @@ -520,7 +529,6 @@ func (suite *Suite[Env]) TearDownSuite() { func createEnv[Env any](suite *Suite[Env], stackDef *StackDefinition[Env]) (*Env, auto.UpResult, error) { var env *Env ctx := context.Background() - _, stackOutput, err := infra.GetStackManager().GetStackNoDeleteOnFailure( ctx, suite.params.StackName, @@ -529,7 +537,7 @@ func createEnv[Env any](suite *Suite[Env], stackDef *StackDefinition[Env]) (*Env var err error env, err = stackDef.envFactory(ctx) return err - }, false) + }, false, testWriter{t: suite.T()}) return env, stackOutput, err } diff --git a/test/new-e2e/pkg/utils/infra/stack_manager.go b/test/new-e2e/pkg/utils/infra/stack_manager.go index 8f12645b53e53d..e0baed5700d2d2 100644 --- a/test/new-e2e/pkg/utils/infra/stack_manager.go +++ b/test/new-e2e/pkg/utils/infra/stack_manager.go @@ -10,6 +10,7 @@ import ( "context" "errors" "fmt" + "io" "os" "runtime" "strings" @@ -102,10 +103,10 @@ func newStackManager() (*StackManager, error) { // GetStack creates or return a stack based on stack name and config, if error occurs during stack creation it destroy all the resources created func (sm *StackManager) GetStack(ctx context.Context, name string, config runner.ConfigMap, deployFunc pulumi.RunFunc, failOnMissing bool) (*auto.Stack, auto.UpResult, error) { - stack, upResult, err := sm.getStack(ctx, name, config, deployFunc, failOnMissing) + stack, upResult, err := sm.getStack(ctx, name, config, deployFunc, failOnMissing, nil) if err != nil { - 
errDestroy := sm.deleteStack(ctx, name, stack) + errDestroy := sm.deleteStack(ctx, name, stack, nil) if errDestroy != nil { return stack, upResult, errors.Join(err, errDestroy) } @@ -115,13 +116,13 @@ func (sm *StackManager) GetStack(ctx context.Context, name string, config runner } // GetStackNoDeleteOnFailure creates or return a stack based on stack name and config, if error occurs during stack creation, it will not destroy the created resources. Using this can lead to resource leaks. -func (sm *StackManager) GetStackNoDeleteOnFailure(ctx context.Context, name string, config runner.ConfigMap, deployFunc pulumi.RunFunc, failOnMissing bool) (*auto.Stack, auto.UpResult, error) { +func (sm *StackManager) GetStackNoDeleteOnFailure(ctx context.Context, name string, config runner.ConfigMap, deployFunc pulumi.RunFunc, failOnMissing bool, logWriter io.Writer) (*auto.Stack, auto.UpResult, error) { - return sm.getStack(ctx, name, config, deployFunc, failOnMissing) + return sm.getStack(ctx, name, config, deployFunc, failOnMissing, logWriter) } // DeleteStack safely deletes a stack -func (sm *StackManager) DeleteStack(ctx context.Context, name string) error { +func (sm *StackManager) DeleteStack(ctx context.Context, name string, logWriter io.Writer) error { stack, ok := sm.stacks.Get(name) if !ok { @@ -141,7 +142,7 @@ func (sm *StackManager) DeleteStack(ctx context.Context, name string) error { stack = &newStack } - return sm.deleteStack(ctx, name, stack) + return sm.deleteStack(ctx, name, stack, logWriter) } // ForceRemoveStackConfiguration removes the configuration files pulumi creates for managing a stack. 
@@ -164,7 +165,7 @@ func (sm *StackManager) Cleanup(ctx context.Context) []error { var errors []error sm.stacks.Range(func(stackID string, stack *auto.Stack) { - err := sm.deleteStack(ctx, stackID, stack) + err := sm.deleteStack(ctx, stackID, stack, nil) if err != nil { errors = append(errors, err) } @@ -173,13 +174,23 @@ func (sm *StackManager) Cleanup(ctx context.Context) []error { return errors } -func (sm *StackManager) deleteStack(ctx context.Context, stackID string, stack *auto.Stack) error { +func (sm *StackManager) deleteStack(ctx context.Context, stackID string, stack *auto.Stack, logWriter io.Writer) error { if stack == nil { return fmt.Errorf("unable to find stack, skipping deletion of: %s", stackID) } destroyContext, cancel := context.WithTimeout(ctx, stackDestroyTimeout) - _, err := stack.Destroy(destroyContext, optdestroy.ProgressStreams(os.Stdout)) + + var logger io.Writer + + if logWriter == nil { + logger = os.Stdout + } else { + logger = logWriter + } + _, err := stack.Destroy(destroyContext, optdestroy.ProgressStreams(logger), optdestroy.DebugLogging(debug.LoggingOptions{ + FlowToPlugins: true, + })) cancel() if err != nil { return err @@ -191,7 +202,7 @@ func (sm *StackManager) deleteStack(ctx context.Context, stackID string, stack * return err } -func (sm *StackManager) getStack(ctx context.Context, name string, config runner.ConfigMap, deployFunc pulumi.RunFunc, failOnMissing bool) (*auto.Stack, auto.UpResult, error) { +func (sm *StackManager) getStack(ctx context.Context, name string, config runner.ConfigMap, deployFunc pulumi.RunFunc, failOnMissing bool, logWriter io.Writer) (*auto.Stack, auto.UpResult, error) { // Build configuration from profile profile := runner.GetProfile() stackName := buildStackName(profile.NamePrefix(), name) @@ -231,8 +242,15 @@ func (sm *StackManager) getStack(ctx context.Context, name string, config runner upCtx, cancel := context.WithTimeout(ctx, stackUpTimeout) var loglevel uint = 1 defer cancel() - upResult, err 
:= stack.Up(upCtx, optup.ProgressStreams(os.Stderr), optup.DebugLogging(debug.LoggingOptions{ - LogToStdErr: true, + var logger io.Writer + + if logWriter == nil { + logger = os.Stderr + } else { + logger = logWriter + } + + upResult, err := stack.Up(upCtx, optup.ProgressStreams(logger), optup.DebugLogging(debug.LoggingOptions{ FlowToPlugins: true, LogLevel: &loglevel, })) diff --git a/test/new-e2e/system-probe/system-probe-test-env.go b/test/new-e2e/system-probe/system-probe-test-env.go index e19cbb749626e4..191c14ea413b3d 100644 --- a/test/new-e2e/system-probe/system-probe-test-env.go +++ b/test/new-e2e/system-probe/system-probe-test-env.go @@ -227,7 +227,7 @@ func NewTestEnv(name, x86InstanceType, armInstanceType string, opts *EnvOpts) (* return fmt.Errorf("setup micro-vms in remote instance: %w", err) } return nil - }, opts.FailOnMissing) + }, opts.FailOnMissing, nil) if err != nil { return handleScenarioFailure(err, func(possibleError handledError) { // handle the following errors by trying in a different availability zone @@ -255,7 +255,7 @@ func NewTestEnv(name, x86InstanceType, armInstanceType string, opts *EnvOpts) (* // Destroy deletes the stack with the provided name func Destroy(name string) error { - return infra.GetStackManager().DeleteStack(context.Background(), name) + return infra.GetStackManager().DeleteStack(context.Background(), name, nil) } // RemoveStack removes the stack configuration with the provided name diff --git a/test/new-e2e/tests/agent-platform/install-script/install_script_test.go b/test/new-e2e/tests/agent-platform/install-script/install_script_test.go index 0867bdefc2b458..e4dd94c61a6f0c 100644 --- a/test/new-e2e/tests/agent-platform/install-script/install_script_test.go +++ b/test/new-e2e/tests/agent-platform/install-script/install_script_test.go @@ -69,7 +69,7 @@ func TestInstallScript(t *testing.T) { osVersions := strings.Split(*osVersion, ",") cwsSupportedOsVersionList := strings.Split(*cwsSupportedOsVersion, ",") - 
fmt.Println("Parsed platform json file: ", platformJSON) + t.Log("Parsed platform json file: ", platformJSON) for _, osVers := range osVersions { vmOpts := []ec2params.Option{} @@ -99,7 +99,7 @@ func TestInstallScript(t *testing.T) { } t.Run(fmt.Sprintf("test install script on %s %s %s agent %s", osVers, *architecture, *flavor, *majorVersion), func(tt *testing.T) { tt.Parallel() - fmt.Printf("Testing %s", osVers) + tt.Logf("Testing %s", osVers) e2e.Run(tt, &installScriptSuite{cwsSupported: cwsSupported}, e2e.EC2VMStackDef(vmOpts...), params.WithStackName(fmt.Sprintf("install-script-test-%v-%v-%s-%s-%v", os.Getenv("CI_PIPELINE_ID"), osVers, *architecture, *flavor, *majorVersion))) }) } diff --git a/test/new-e2e/tests/containers/eks_test.go b/test/new-e2e/tests/containers/eks_test.go index b0e5328e7bd67c..20a9dd3d5d68a4 100644 --- a/test/new-e2e/tests/containers/eks_test.go +++ b/test/new-e2e/tests/containers/eks_test.go @@ -42,13 +42,13 @@ func (suite *eksSuite) SetupSuite() { "dddogstatsd:deploy": auto.ConfigValue{Value: "true"}, } - _, stackOutput, err := infra.GetStackManager().GetStackNoDeleteOnFailure(ctx, "eks-cluster", stackConfig, eks.Run, false) + _, stackOutput, err := infra.GetStackManager().GetStackNoDeleteOnFailure(ctx, "eks-cluster", stackConfig, eks.Run, false, nil) if !suite.Assert().NoError(err) { stackName, err := infra.GetStackManager().GetPulumiStackName("eks-cluster") suite.Require().NoError(err) suite.T().Log(dumpEKSClusterState(ctx, stackName)) if !runner.GetProfile().AllowDevMode() || !*keepStacks { - infra.GetStackManager().DeleteStack(ctx, "eks-cluster") + infra.GetStackManager().DeleteStack(ctx, "eks-cluster", nil) } suite.T().FailNow() } diff --git a/test/new-e2e/tests/containers/kindvm_test.go b/test/new-e2e/tests/containers/kindvm_test.go index 624b459ed22398..322eabfd0ca4df 100644 --- a/test/new-e2e/tests/containers/kindvm_test.go +++ b/test/new-e2e/tests/containers/kindvm_test.go @@ -41,13 +41,13 @@ func (suite *kindSuite) 
SetupSuite() { "dddogstatsd:deploy": auto.ConfigValue{Value: "true"}, } - _, stackOutput, err := infra.GetStackManager().GetStackNoDeleteOnFailure(ctx, "kind-cluster", stackConfig, kindvm.Run, false) + _, stackOutput, err := infra.GetStackManager().GetStackNoDeleteOnFailure(ctx, "kind-cluster", stackConfig, kindvm.Run, false, nil) if !suite.Assert().NoError(err) { stackName, err := infra.GetStackManager().GetPulumiStackName("kind-cluster") suite.Require().NoError(err) suite.T().Log(dumpKindClusterState(ctx, stackName)) if !runner.GetProfile().AllowDevMode() || !*keepStacks { - infra.GetStackManager().DeleteStack(ctx, "kind-cluster") + infra.GetStackManager().DeleteStack(ctx, "kind-cluster", nil) } suite.T().FailNow() } diff --git a/test/new-e2e/tests/ndm/snmp/snmpTestEnv.go b/test/new-e2e/tests/ndm/snmp/snmpTestEnv.go index 4b1c2ce87b3028..c18dc830ddbaed 100644 --- a/test/new-e2e/tests/ndm/snmp/snmpTestEnv.go +++ b/test/new-e2e/tests/ndm/snmp/snmpTestEnv.go @@ -131,7 +131,7 @@ func NewTestEnv() (*TestEnv, error) { // Destroy delete the NDM stack. Deprecated, should port to NDM func (testEnv *TestEnv) Destroy() error { - return infra.GetStackManager().DeleteStack(testEnv.context, testEnv.name) + return infra.GetStackManager().DeleteStack(testEnv.context, testEnv.name, nil) } //go:embed compose/data From bff8530633834a17ca68dab0c6a1792dc2444d79 Mon Sep 17 00:00:00 2001 From: Florent Clarret Date: Mon, 18 Dec 2023 12:58:14 +0100 Subject: [PATCH 25/66] Cryptography build (#21463) * test * test * test * update branch * revert to main * Revert "test" This reverts commit 591b10b0e50899bdb078b79080e6b220c18b8534. 
--- omnibus/config/software/datadog-agent-integrations-py3.rb | 2 ++ 1 file changed, 2 insertions(+) diff --git a/omnibus/config/software/datadog-agent-integrations-py3.rb b/omnibus/config/software/datadog-agent-integrations-py3.rb index 82d6063e6e2149..08ed137187222b 100644 --- a/omnibus/config/software/datadog-agent-integrations-py3.rb +++ b/omnibus/config/software/datadog-agent-integrations-py3.rb @@ -174,6 +174,8 @@ { "RUSTFLAGS" => "-C link-arg=-Wl,-rpath,#{install_dir}/embedded/lib", "OPENSSL_DIR" => "#{install_dir}/embedded/", + "PIP_NO_CACHE_DIR" => "off", + "PIP_FORCE_REINSTALL" => "1", } ) end From a87ec42695734e9f6129a7c05c0faf05ec3d12ce Mon Sep 17 00:00:00 2001 From: Marethyu <45374460+Pythyu@users.noreply.github.com> Date: Mon, 18 Dec 2023 13:06:14 +0100 Subject: [PATCH 26/66] Updates current tests with fakeintake loadbalancer's new default state (#21535) Updates current tests with fakeintake loadbalancer's new default state --- .gitlab-ci.yml | 2 +- test/new-e2e/examples/agentenv_metrics_test.go | 2 -- .../agent-shared-components/forwarder/nss_failover_test.go | 4 ++-- test/new-e2e/tests/agent-subcommands/diagnose_test.go | 3 +-- test/new-e2e/tests/agent-subcommands/flare/flare_nix_test.go | 3 +-- test/new-e2e/tests/agent-subcommands/flare/flare_win_test.go | 3 +-- test/new-e2e/tests/agent-subcommands/subcommands_test.go | 4 +--- test/new-e2e/tests/ndm/snmp/snmpTestEnv.go | 3 +-- test/new-e2e/tests/process/linux_test.go | 2 -- 9 files changed, 8 insertions(+), 18 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 6b423087045757..9313b5d4a64001 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -176,7 +176,7 @@ variables: # To use images from test-infra-definitions dev branches, set the SUFFIX variable to -dev # and check the job creating the image to make sure you have the right SHA prefix TEST_INFRA_DEFINITIONS_BUILDIMAGES_SUFFIX: "" - TEST_INFRA_DEFINITIONS_BUILDIMAGES: 281b2a324002 + TEST_INFRA_DEFINITIONS_BUILDIMAGES: 67d2009bcd81 
DATADOG_AGENT_BUILDERS: v22276738-b36b132 DATADOG_AGENT_EMBEDDED_PATH: /opt/datadog-agent/embedded diff --git a/test/new-e2e/examples/agentenv_metrics_test.go b/test/new-e2e/examples/agentenv_metrics_test.go index 5370c184d06e66..d181e021d80c0b 100644 --- a/test/new-e2e/examples/agentenv_metrics_test.go +++ b/test/new-e2e/examples/agentenv_metrics_test.go @@ -11,7 +11,6 @@ import ( "github.com/DataDog/datadog-agent/test/fakeintake/client" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e" - "github.com/DataDog/test-infra-definitions/scenarios/aws/fakeintake/fakeintakeparams" "github.com/DataDog/test-infra-definitions/scenarios/aws/vm/ec2os" "github.com/DataDog/test-infra-definitions/scenarios/aws/vm/ec2params" "github.com/stretchr/testify/assert" @@ -24,7 +23,6 @@ type fakeintakeSuiteMetrics struct { func TestVMSuiteEx5(t *testing.T) { e2e.Run(t, &fakeintakeSuiteMetrics{}, e2e.FakeIntakeStackDef( - e2e.WithFakeIntakeParams(fakeintakeparams.WithoutLoadBalancer()), e2e.WithVMParams(ec2params.WithOS(ec2os.CentOS)), )) } diff --git a/test/new-e2e/tests/agent-shared-components/forwarder/nss_failover_test.go b/test/new-e2e/tests/agent-shared-components/forwarder/nss_failover_test.go index 225439084adbc4..df97fec97f4610 100644 --- a/test/new-e2e/tests/agent-shared-components/forwarder/nss_failover_test.go +++ b/test/new-e2e/tests/agent-shared-components/forwarder/nss_failover_test.go @@ -80,12 +80,12 @@ func multiFakeintakeStackDef(agentOptions ...agentparams.Option) *e2e.StackDefin return nil, err } - fiExporter1, err := aws.NewEcsFakeintake(awsEnv, fakeintakeparams.WithName(fakeintake1Name), fakeintakeparams.WithoutLoadBalancer()) + fiExporter1, err := aws.NewEcsFakeintake(awsEnv, fakeintakeparams.WithName(fakeintake1Name)) if err != nil { return nil, err } - fiExporter2, err := aws.NewEcsFakeintake(awsEnv, fakeintakeparams.WithName(fakeintake2Name), fakeintakeparams.WithoutLoadBalancer()) + fiExporter2, err := aws.NewEcsFakeintake(awsEnv, 
fakeintakeparams.WithName(fakeintake2Name)) if err != nil { return nil, err } diff --git a/test/new-e2e/tests/agent-subcommands/diagnose_test.go b/test/new-e2e/tests/agent-subcommands/diagnose_test.go index ee4e8647aa302a..a7896f64b4f901 100644 --- a/test/new-e2e/tests/agent-subcommands/diagnose_test.go +++ b/test/new-e2e/tests/agent-subcommands/diagnose_test.go @@ -17,7 +17,6 @@ import ( "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client" svcmanager "github.com/DataDog/datadog-agent/test/new-e2e/tests/agent-platform/common/svc-manager" "github.com/DataDog/test-infra-definitions/components/datadog/agentparams" - "github.com/DataDog/test-infra-definitions/scenarios/aws/fakeintake/fakeintakeparams" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -34,7 +33,7 @@ var allSuites = []string{ } func TestAgentDiagnoseEC2Suite(t *testing.T) { - e2e.Run(t, &agentDiagnoseSuite{}, e2e.FakeIntakeStackDef(e2e.WithFakeIntakeParams(fakeintakeparams.WithoutLoadBalancer()))) + e2e.Run(t, &agentDiagnoseSuite{}, e2e.FakeIntakeStackDef()) } // type summary represents the number of success, fail, warnings and errors of a diagnose command diff --git a/test/new-e2e/tests/agent-subcommands/flare/flare_nix_test.go b/test/new-e2e/tests/agent-subcommands/flare/flare_nix_test.go index b639913ab2fd05..9ec2a45b830a20 100644 --- a/test/new-e2e/tests/agent-subcommands/flare/flare_nix_test.go +++ b/test/new-e2e/tests/agent-subcommands/flare/flare_nix_test.go @@ -14,7 +14,6 @@ import ( "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client" "github.com/DataDog/test-infra-definitions/components/datadog/agentparams" - "github.com/DataDog/test-infra-definitions/scenarios/aws/fakeintake/fakeintakeparams" "github.com/DataDog/test-infra-definitions/scenarios/aws/vm/ec2os" "github.com/DataDog/test-infra-definitions/scenarios/aws/vm/ec2params" ) @@ -34,7 +33,7 @@ type linuxFlareSuite 
struct { func TestLinuxFlareSuite(t *testing.T) { t.Parallel() - e2e.Run(t, &linuxFlareSuite{}, e2e.FakeIntakeStackDef(e2e.WithVMParams(ec2params.WithOS(ec2os.UbuntuOS)), e2e.WithFakeIntakeParams(fakeintakeparams.WithoutLoadBalancer()))) + e2e.Run(t, &linuxFlareSuite{}, e2e.FakeIntakeStackDef(e2e.WithVMParams(ec2params.WithOS(ec2os.UbuntuOS)))) } func (v *linuxFlareSuite) TestFlareWithAllConfiguration() { diff --git a/test/new-e2e/tests/agent-subcommands/flare/flare_win_test.go b/test/new-e2e/tests/agent-subcommands/flare/flare_win_test.go index 3379d8c899ab4b..add1882fc2cf5e 100644 --- a/test/new-e2e/tests/agent-subcommands/flare/flare_win_test.go +++ b/test/new-e2e/tests/agent-subcommands/flare/flare_win_test.go @@ -11,7 +11,6 @@ import ( "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client" - "github.com/DataDog/test-infra-definitions/scenarios/aws/fakeintake/fakeintakeparams" "github.com/DataDog/test-infra-definitions/scenarios/aws/vm/ec2os" "github.com/DataDog/test-infra-definitions/scenarios/aws/vm/ec2params" "github.com/stretchr/testify/assert" @@ -23,7 +22,7 @@ type windowsFlareSuite struct { func TestWindowsFlareSuite(t *testing.T) { t.Parallel() - e2e.Run(t, &windowsFlareSuite{}, e2e.FakeIntakeStackDef(e2e.WithVMParams(ec2params.WithOS(ec2os.WindowsOS)), e2e.WithFakeIntakeParams(fakeintakeparams.WithoutLoadBalancer()))) + e2e.Run(t, &windowsFlareSuite{}, e2e.FakeIntakeStackDef(e2e.WithVMParams(ec2params.WithOS(ec2os.WindowsOS)))) } func (v *windowsFlareSuite) TestFlareWindows() { diff --git a/test/new-e2e/tests/agent-subcommands/subcommands_test.go b/test/new-e2e/tests/agent-subcommands/subcommands_test.go index 6f7457ddc788ec..1891fa3ec59839 100644 --- a/test/new-e2e/tests/agent-subcommands/subcommands_test.go +++ b/test/new-e2e/tests/agent-subcommands/subcommands_test.go @@ -14,8 +14,6 @@ import ( "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e" 
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client" "github.com/DataDog/test-infra-definitions/components/datadog/agentparams" - "github.com/DataDog/test-infra-definitions/scenarios/aws/fakeintake/fakeintakeparams" - "github.com/cenkalti/backoff" "github.com/stretchr/testify/assert" ) @@ -30,7 +28,7 @@ type subcommandWithFakeIntakeSuite struct { func TestSubcommandSuite(t *testing.T) { e2e.Run(t, &subcommandSuite{}, e2e.AgentStackDef()) - e2e.Run(t, &subcommandWithFakeIntakeSuite{}, e2e.FakeIntakeStackDef(e2e.WithFakeIntakeParams(fakeintakeparams.WithoutLoadBalancer()))) + e2e.Run(t, &subcommandWithFakeIntakeSuite{}, e2e.FakeIntakeStackDef()) } // section contains the content status of a specific section (e.g. Forwarder) diff --git a/test/new-e2e/tests/ndm/snmp/snmpTestEnv.go b/test/new-e2e/tests/ndm/snmp/snmpTestEnv.go index c18dc830ddbaed..449b3decb9dcc2 100644 --- a/test/new-e2e/tests/ndm/snmp/snmpTestEnv.go +++ b/test/new-e2e/tests/ndm/snmp/snmpTestEnv.go @@ -16,7 +16,6 @@ import ( "github.com/DataDog/test-infra-definitions/components/datadog/agent" "github.com/DataDog/test-infra-definitions/components/datadog/dockeragentparams" "github.com/DataDog/test-infra-definitions/scenarios/aws" - "github.com/DataDog/test-infra-definitions/scenarios/aws/fakeintake/fakeintakeparams" "github.com/DataDog/test-infra-definitions/scenarios/aws/vm/ec2vm" "github.com/pulumi/pulumi/sdk/v3/go/auto" @@ -58,7 +57,7 @@ func NewTestEnv() (*TestEnv, error) { return err } - fakeintakeExporter, err := aws.NewEcsFakeintake(vm.GetAwsEnvironment(), fakeintakeparams.WithoutLoadBalancer()) + fakeintakeExporter, err := aws.NewEcsFakeintake(vm.GetAwsEnvironment()) if err != nil { return err } diff --git a/test/new-e2e/tests/process/linux_test.go b/test/new-e2e/tests/process/linux_test.go index 369abf2d2827e2..1eb1b9e6b87579 100644 --- a/test/new-e2e/tests/process/linux_test.go +++ b/test/new-e2e/tests/process/linux_test.go @@ -10,7 +10,6 @@ import ( "time" 
"github.com/DataDog/test-infra-definitions/components/datadog/agentparams" - "github.com/DataDog/test-infra-definitions/scenarios/aws/fakeintake/fakeintakeparams" "github.com/stretchr/testify/assert" "github.com/DataDog/datadog-agent/test/fakeintake/aggregator" @@ -24,7 +23,6 @@ type linuxTestSuite struct { func TestLinuxTestSuite(t *testing.T) { e2e.Run(t, &linuxTestSuite{}, e2e.FakeIntakeStackDef( - e2e.WithFakeIntakeParams(fakeintakeparams.WithoutLoadBalancer()), e2e.WithAgentParams(agentparams.WithAgentConfig(processCheckConfigStr)), )) } From 39cf73177a2d2ff57d7df65714aabe137cf64ccd Mon Sep 17 00:00:00 2001 From: Nicolas Schweitzer Date: Mon, 18 Dec 2023 13:22:40 +0100 Subject: [PATCH 27/66] Add definition of new labels (#21566) Co-authored-by: Brett Blue <84536271+brett0000FF@users.noreply.github.com> Co-authored-by: Bryce Kahle --- .circleci/config.yml | 20 +++++- .ddqa/config.toml | 2 + .github/PULL_REQUEST_TEMPLATE.md | 2 +- .github/dependabot.yaml | 7 ++ .github/workflows/cws-btfhub-sync.yml | 2 +- docs/dev/contributing.md | 6 +- tasks/__init__.py | 6 +- tasks/{test.py => go_test.py} | 38 ++++++++++- tasks/new_e2e_tests.py | 2 +- tasks/release.py | 1 + tasks/security_agent.py | 2 +- tasks/system_probe.py | 2 +- tasks/unit-tests/linters_tests.py | 97 +++++++++++++++++++++++++++ 13 files changed, 174 insertions(+), 13 deletions(-) rename tasks/{test.py => go_test.py} (96%) create mode 100644 tasks/unit-tests/linters_tests.py diff --git a/.circleci/config.yml b/.circleci/config.yml index e44f826a9b224f..8cab47ee58b394 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -135,6 +135,15 @@ jobs: command: inv -e lint-teamassignment name: run PR check for team assignment labels + skip_qa: + <<: *job_template + steps: + - restore_cache: *restore_source + - restore_cache: *restore_deps + - run: + command: inv -e lint-skip-qa + name: run PR check for skip-qa labels + milestone: <<: *job_template steps: @@ -208,8 +217,8 @@ jobs: - run: name: setting env 
vars for click command: | - echo 'export LC_ALL="C.UTF-8"' >> $BASH_ENV - echo 'export LANG="C.UTF-8"' >> $BASH_ENV + echo 'export LC_ALL="C.UTF-8"' >> $BASH_ENV + echo 'export LANG="C.UTF-8"' >> $BASH_ENV - run: name: lint python files command: inv -e lint-python @@ -307,6 +316,13 @@ workflows: - main requires: - dependencies + - skip_qa: + filters: + branches: + ignore: + - main + requires: + - dependencies - milestone: filters: branches: diff --git a/.ddqa/config.toml b/.ddqa/config.toml index 63bca53c98c08f..d408e718a1cd42 100644 --- a/.ddqa/config.toml +++ b/.ddqa/config.toml @@ -6,6 +6,8 @@ qa_statuses = [ ] ignored_labels = [ "qa/skip-qa", + "qa/done", + "qa/no-code-change", ] [teams."Agent Metrics Logs"] diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 1d0e2ce7226fe6..4faeaff8349ba9 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -57,7 +57,7 @@ Note: Adding GitHub labels is only possible for contributors with write access. - [ ] Use the `major_change` label if your change either has a major impact on the code base, is impacting multiple teams or is changing important well-established internals of the Agent. This label will be use during QA to make sure each team pay extra attention to the changed behavior. For any customer facing change use a releasenote. - [ ] A [release note](https://github.com/DataDog/datadog-agent/blob/main/docs/dev/contributing.md#reno) has been added or the `changelog/no-changelog` label has been applied. - [ ] Changed code has automated tests for its functionality. -- [ ] Adequate QA/testing plan information is provided if the `qa/skip-qa` label is not applied. +- [ ] Adequate QA/testing plan information is provided. Except if the `qa/skip-qa` label, with required either `qa/done` or `qa/no-code-change` labels, are applied. - [ ] At least one `team/..` label has been applied, indicating the team(s) that should QA this change. 
- [ ] If applicable, docs team has been notified or [an issue has been opened on the documentation repo](https://github.com/DataDog/documentation/issues/new). - [ ] If applicable, the `need-change/operator` and `need-change/helm` labels have been applied. diff --git a/.github/dependabot.yaml b/.github/dependabot.yaml index b8c0d9f11b92ea..ce32c4ef15ae13 100644 --- a/.github/dependabot.yaml +++ b/.github/dependabot.yaml @@ -113,6 +113,7 @@ updates: - team/agent-platform - changelog/no-changelog - qa/skip-qa + - qa/no-code-change milestone: 22 schedule: interval: monthly @@ -139,6 +140,7 @@ updates: - team/agent-e2e-test - changelog/no-changelog - qa/skip-qa + - qa/no-code-change - dev/testing milestone: 22 ignore: @@ -157,6 +159,7 @@ updates: - team/agent-e2e-test - changelog/no-changelog - qa/skip-qa + - qa/no-code-change - dev/testing milestone: 22 schedule: @@ -170,6 +173,7 @@ updates: - team/agent-platform - changelog/no-changelog - qa/skip-qa + - qa/no-code-change - dev/tooling milestone: 22 schedule: @@ -183,6 +187,7 @@ updates: - team/agent-security - changelog/no-changelog - qa/skip-qa + - qa/no-code-change - dev/tooling milestone: 22 schedule: @@ -195,6 +200,7 @@ updates: - team/agent-e2e-test - changelog/no-changelog - qa/skip-qa + - qa/no-code-change - dev/testing milestone: 22 schedule: @@ -208,6 +214,7 @@ updates: - team/agent-platform - changelog/no-changelog - qa/skip-qa + - qa/no-code-change - dev/tooling milestone: 22 schedule: diff --git a/.github/workflows/cws-btfhub-sync.yml b/.github/workflows/cws-btfhub-sync.yml index 0aee1b35c2265e..4e9a6ed1e86e88 100644 --- a/.github/workflows/cws-btfhub-sync.yml +++ b/.github/workflows/cws-btfhub-sync.yml @@ -85,5 +85,5 @@ jobs: owner, repo, issue_number: result.data.number, - labels: ['changelog/no-changelog', 'qa/skip-qa', 'team/agent-security'] + labels: ['changelog/no-changelog', 'qa/skip-qa', 'qa/no-code-change', 'team/agent-security'] }); diff --git a/docs/dev/contributing.md b/docs/dev/contributing.md 
index 5225511793b508..8a23d7ad4eb336 100644 --- a/docs/dev/contributing.md +++ b/docs/dev/contributing.md @@ -258,8 +258,10 @@ labels that can be use: - `community`: for community PRs. - `changelog/no-changelog`: for PRs that don't require a reno releasenote (useful for PRs only changing documentation or tests). -- `qa/skip-qa`: this will skip creating a QA card for the PR during the release - process (example: for a documentation only PRs). +- `qa/skip-qa`, `qa/done`, `qa/no-code-change`: if the `qa/skip-qa` label is set with + an additional required `qa/done` or `qa/no-code-change`, it will skip the creation + of a QA card related to this PR during next release process (example: + documentation-only PRs). - `major_change`: to flag the PR as a major change impacting many/all teams working on the agent and will require deeper QA (example: when we change the Python version shipped in the agent). diff --git a/tasks/__init__.py b/tasks/__init__.py index f07f7698ce384d..a7d50df7a79ca2 100644 --- a/tasks/__init__.py +++ b/tasks/__init__.py @@ -50,8 +50,7 @@ reset, tidy_all, ) -from .show_linters_issues import show_linters_issues -from .test import ( +from .go_test import ( codecov, download_tools, e2e_tests, @@ -68,10 +67,12 @@ lint_milestone, lint_python, lint_releasenote, + lint_skip_qa, lint_teamassignment, send_unit_tests_stats, test, ) +from .show_linters_issues import show_linters_issues from .update_go import go_version, update_go from .utils import generate_config from .windows_resources import build_messagetable @@ -95,6 +96,7 @@ ns.add_task(lint_copyrights), ns.add_task(lint_teamassignment) ns.add_task(lint_releasenote) +ns.add_task(lint_skip_qa) ns.add_task(lint_milestone) ns.add_task(lint_filenames) ns.add_task(lint_python) diff --git a/tasks/test.py b/tasks/go_test.py similarity index 96% rename from tasks/test.py rename to tasks/go_test.py index 66fcbaab7f119d..1c3e50b9e3e706 100644 --- a/tasks/test.py +++ b/tasks/go_test.py @@ -854,8 +854,9 @@ def 
lint_teamassignment(_): issue = res.json() labels = {l['name'] for l in issue.get('labels', [])} - if "qa/skip-qa" in labels: - print("qa/skip-qa label set -- no need for team assignment") + skip_qa_labels = ["qa/skip-qa", "qa/done", "qa/no-code-change"] + if any(skip_label in labels for skip_label in skip_qa_labels): + print("A label to skip QA is set -- no need for team assignment") return for label in labels: @@ -874,6 +875,39 @@ def lint_teamassignment(_): print("PR not found, skipping check for team assignment.") +@task +def lint_skip_qa(_): + """ + Ensure that when qa/skip-qa is used, we have one of [qa/done , qa/no-code-change]. Error if not valid. + """ + branch = os.environ.get("CIRCLE_BRANCH") + pr_url = os.environ.get("CIRCLE_PULL_REQUEST") + + if branch == DEFAULT_BRANCH: + print(f"Running on {DEFAULT_BRANCH}, skipping check for skip-qa label.") + elif pr_url: + import requests + + pr_id = pr_url.rsplit('/')[-1] + + res = requests.get(f"https://api.github.com/repos/DataDog/datadog-agent/issues/{pr_id}") + issue = res.json() + + labels = {l['name'] for l in issue.get('labels', [])} + skip_qa = "qa/skip-qa" + new_qa_labels = ["qa/done", "qa/no-code-change"] + if skip_qa in labels and not any(skip_label in labels for skip_label in new_qa_labels): + print( + f"PR {pr_url} request to skip QA without justification. Requires an additional `qa/done` or `qa/no-code-change`." + ) + raise Exit(code=1) + return + # No PR is associated with this build: given that we have the "run only on PRs" setting activated, + # this can only happen when we're building on a tag. We don't need to check for skip-qa. 
+ else: + print("PR not found, skipping check for skip-qa.") + + @task def lint_milestone(_): """ diff --git a/tasks/new_e2e_tests.py b/tasks/new_e2e_tests.py index e2ce8bcbebb1ea..69503338bf77e3 100644 --- a/tasks/new_e2e_tests.py +++ b/tasks/new_e2e_tests.py @@ -15,9 +15,9 @@ from invoke.tasks import task from .flavor import AgentFlavor +from .go_test import test_flavor from .libs.junit_upload import produce_junit_tar from .modules import DEFAULT_MODULES -from .test import test_flavor from .utils import REPO_PATH, get_git_commit diff --git a/tasks/release.py b/tasks/release.py index 075c2b9d6002dc..3ff5ec0acc235d 100644 --- a/tasks/release.py +++ b/tasks/release.py @@ -1135,6 +1135,7 @@ def create_rc(ctx, major_versions="6,7", patch_version=False, upstream="origin") labels=[ "changelog/no-changelog", "qa/skip-qa", + "qa/no-code-change", "team/agent-platform", "team/agent-release-management", "category/release_operations", diff --git a/tasks/security_agent.py b/tasks/security_agent.py index a573ac657b19b0..c18673a684d42a 100644 --- a/tasks/security_agent.py +++ b/tasks/security_agent.py @@ -13,6 +13,7 @@ from .build_tags import get_default_build_tags from .go import run_golangci_lint +from .go_test import environ from .libs.ninja_syntax import NinjaWriter from .process_agent import TempDir from .system_probe import ( @@ -22,7 +23,6 @@ ninja_define_ebpf_compiler, ninja_define_exe_compiler, ) -from .test import environ from .utils import ( REPO_PATH, bin_name, diff --git a/tasks/system_probe.py b/tasks/system_probe.py index 32a600de46799d..40f8fd255ef5a4 100644 --- a/tasks/system_probe.py +++ b/tasks/system_probe.py @@ -17,9 +17,9 @@ from invoke.exceptions import Exit from .build_tags import UNIT_TEST_TAGS, get_default_build_tags +from .go_test import environ from .libs.common.color import color_message from .libs.ninja_syntax import NinjaWriter -from .test import environ from .utils import REPO_PATH, bin_name, get_build_flags, get_gobin, get_version_numeric_only 
from .windows_resources import MESSAGESTRINGS_MC_PATH, arch_to_windres_target diff --git a/tasks/unit-tests/linters_tests.py b/tasks/unit-tests/linters_tests.py new file mode 100644 index 00000000000000..97a36c7e4a5347 --- /dev/null +++ b/tasks/unit-tests/linters_tests.py @@ -0,0 +1,97 @@ +import os +import unittest +from unittest.mock import MagicMock, patch + +from invoke import MockContext +from invoke.exceptions import Exit + +from .. import go_test + + +class TestLintSkipQA(unittest.TestCase): + @patch('builtins.print') + def test_on_default(self, mock_print): + os.environ["CIRCLE_BRANCH"] = "main" + os.environ["CIRCLE_PULL_REQUEST"] = "42" + go_test.lint_skip_qa(MockContext()) + mock_print.assert_called_with(f"Running on {go_test.DEFAULT_BRANCH}, skipping check for skip-qa label.") + + @patch('builtins.print') + def test_no_pr(self, mock_print): + os.environ["CIRCLE_BRANCH"] = "pied" + go_test.lint_skip_qa(MockContext()) + mock_print.assert_called_with("PR not found, skipping check for skip-qa.") + + @patch('builtins.print') + @patch('requests.get') + def test_no_skip_qa(self, mock_requests_get, mock_print): + os.environ["CIRCLE_BRANCH"] = "oak" + os.environ["CIRCLE_PULL_REQUEST"] = "51" + issue = {'labels': [{'name': 'de_cadix_a_des_yeux_de_velours'}]} + mock_response = MagicMock() + mock_response.json.return_value = issue + mock_requests_get.return_value = mock_response + go_test.lint_skip_qa(MockContext()) + mock_print.assert_not_called() + + @patch('requests.get') + def test_skip_qa_alone(self, mock_requests_get): + os.environ["CIRCLE_BRANCH"] = "mapple" + os.environ["CIRCLE_PULL_REQUEST"] = "69" + issue = {'labels': [{'name': 'qa/skip-qa'}]} + mock_response = MagicMock() + mock_response.json.return_value = issue + mock_requests_get.return_value = mock_response + with self.assertRaises(Exit): + go_test.lint_skip_qa(MockContext()) + + @patch('requests.get') + def test_skip_qa_bad_label(self, mock_requests_get): + os.environ["CIRCLE_BRANCH"] = "ash" + 
os.environ["CIRCLE_PULL_REQUEST"] = "666" + issue = {'labels': [{'name': 'qa/skip-qa'}, {"name": "qa/lity-streets"}]} + mock_response = MagicMock() + mock_response.json.return_value = issue + mock_requests_get.return_value = mock_response + with self.assertRaises(Exit): + go_test.lint_skip_qa(MockContext()) + + @patch('builtins.print') + @patch('requests.get') + def test_skip_qa_done(self, mock_requests_get, mock_print): + os.environ["CIRCLE_BRANCH"] = "gingko" + os.environ["CIRCLE_PULL_REQUEST"] = "1337" + issue = {'labels': [{'name': 'qa/skip-qa'}, {'name': 'qa/done'}]} + mock_response = MagicMock() + mock_response.json.return_value = issue + mock_requests_get.return_value = mock_response + go_test.lint_skip_qa(MockContext()) + mock_print.assert_not_called() + + @patch('builtins.print') + @patch('requests.get') + def test_skip_qa_done_alone(self, mock_requests_get, mock_print): + os.environ["CIRCLE_BRANCH"] = "beech" + os.environ["CIRCLE_PULL_REQUEST"] = "1515" + issue = {'labels': [{'name': 'qa/done'}]} + mock_response = MagicMock() + mock_response.json.return_value = issue + mock_requests_get.return_value = mock_response + go_test.lint_skip_qa(MockContext()) + mock_print.assert_not_called() + + @patch('builtins.print') + @patch('requests.get') + def test_skip_qa_no_code(self, mock_requests_get, mock_print): + os.environ["CIRCLE_BRANCH"] = "sequoia" + os.environ["CIRCLE_PULL_REQUEST"] = "1664" + issue = {'labels': [{'name': 'qa/skip-qa'}, {'name': 'qa/no-code-change'}]} + mock_response = MagicMock() + mock_response.json.return_value = issue + mock_requests_get.return_value = mock_response + go_test.lint_skip_qa(MockContext()) + mock_print.assert_not_called() + + +if __name__ == "__main__": + unittest.main() From 6bb4dd62a3a399417cccfe28fd7559557f3680e4 Mon Sep 17 00:00:00 2001 From: Nicolas Guerguadj <35628945+Kaderinho@users.noreply.github.com> Date: Mon, 18 Dec 2023 14:21:44 +0100 Subject: [PATCH 28/66] fix: add dsd replay, metadata host and inventoryhost back 
to the run() signature (#21605) Add dsd replay, metadata host and inventoryhost back to the run() signature --- cmd/agent/subcommands/run/command.go | 6 ++++++ cmd/agent/subcommands/run/command_windows.go | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/cmd/agent/subcommands/run/command.go b/cmd/agent/subcommands/run/command.go index 07b4eadb3a7c7f..78a0082cf21e70 100644 --- a/cmd/agent/subcommands/run/command.go +++ b/cmd/agent/subcommands/run/command.go @@ -53,6 +53,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/workloadmeta" "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors" "github.com/DataDog/datadog-agent/comp/dogstatsd" + "github.com/DataDog/datadog-agent/comp/dogstatsd/replay" dogstatsdServer "github.com/DataDog/datadog-agent/comp/dogstatsd/server" dogstatsddebug "github.com/DataDog/datadog-agent/comp/dogstatsd/serverDebug" "github.com/DataDog/datadog-agent/comp/forwarder" @@ -66,8 +67,10 @@ import ( "github.com/DataDog/datadog-agent/comp/logs" logsAgent "github.com/DataDog/datadog-agent/comp/logs/agent" "github.com/DataDog/datadog-agent/comp/metadata" + "github.com/DataDog/datadog-agent/comp/metadata/host" "github.com/DataDog/datadog-agent/comp/metadata/inventoryagent" "github.com/DataDog/datadog-agent/comp/metadata/inventorychecks" + "github.com/DataDog/datadog-agent/comp/metadata/inventoryhost" "github.com/DataDog/datadog-agent/comp/metadata/runner" "github.com/DataDog/datadog-agent/comp/ndmtmp" "github.com/DataDog/datadog-agent/comp/netflow" @@ -198,6 +201,7 @@ func run(log log.Component, telemetry telemetry.Component, sysprobeconfig sysprobeconfig.Component, server dogstatsdServer.Component, + _ replay.Component, serverDebug dogstatsddebug.Component, forwarder defaultforwarder.Component, wmeta workloadmeta.Component, @@ -209,7 +213,9 @@ func run(log log.Component, cliParams *cliParams, logsAgent optional.Option[logsAgent.Component], otelcollector otelcollector.Component, + _ host.Component, invAgent 
inventoryagent.Component, + _ inventoryhost.Component, _ secrets.Component, invChecks inventorychecks.Component, _ netflowServer.Component, diff --git a/cmd/agent/subcommands/run/command_windows.go b/cmd/agent/subcommands/run/command_windows.go index 6b928d17e0c8bf..26a36d9ac7e89c 100644 --- a/cmd/agent/subcommands/run/command_windows.go +++ b/cmd/agent/subcommands/run/command_windows.go @@ -44,12 +44,15 @@ import ( "github.com/DataDog/datadog-agent/comp/core/sysprobeconfig/sysprobeconfigimpl" "github.com/DataDog/datadog-agent/comp/core/telemetry" "github.com/DataDog/datadog-agent/comp/core/workloadmeta" + "github.com/DataDog/datadog-agent/comp/dogstatsd/replay" dogstatsdServer "github.com/DataDog/datadog-agent/comp/dogstatsd/server" dogstatsddebug "github.com/DataDog/datadog-agent/comp/dogstatsd/serverDebug" "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder" logsAgent "github.com/DataDog/datadog-agent/comp/logs/agent" + "github.com/DataDog/datadog-agent/comp/metadata/host" "github.com/DataDog/datadog-agent/comp/metadata/inventoryagent" "github.com/DataDog/datadog-agent/comp/metadata/inventorychecks" + "github.com/DataDog/datadog-agent/comp/metadata/inventoryhost" "github.com/DataDog/datadog-agent/comp/metadata/runner" netflowServer "github.com/DataDog/datadog-agent/comp/netflow/server" otelcollector "github.com/DataDog/datadog-agent/comp/otelcol/collector" @@ -80,6 +83,7 @@ func StartAgentWithDefaults(ctxChan <-chan context.Context) (<-chan error, error telemetry telemetry.Component, sysprobeconfig sysprobeconfig.Component, server dogstatsdServer.Component, + _ replay.Component, serverDebug dogstatsddebug.Component, wmeta workloadmeta.Component, rcclient rcclient.Component, @@ -89,7 +93,9 @@ func StartAgentWithDefaults(ctxChan <-chan context.Context) (<-chan error, error sharedSerializer serializer.MetricSerializer, otelcollector otelcollector.Component, demultiplexer demultiplexer.Component, + _ host.Component, invAgent inventoryagent.Component, + 
_ inventoryhost.Component, _ secrets.Component, invChecks inventorychecks.Component, _ netflowServer.Component, From 60d6ca1b26deaecb30ec56e39b0cec20640aa11a Mon Sep 17 00:00:00 2001 From: Zhengda Lu Date: Mon, 18 Dec 2023 09:00:51 -0500 Subject: [PATCH 29/66] [DBMON-3271] Add new normalization options to sql obfuscator (#21568) --- go.mod | 2 +- go.sum | 4 +-- .../corechecks/oracle-dbm/config/config.go | 1 + pkg/collector/python/datadog_agent.go | 12 +++++++ pkg/obfuscate/go.mod | 2 +- pkg/obfuscate/go.sum | 4 +-- pkg/obfuscate/obfuscate.go | 10 ++++++ pkg/obfuscate/sql.go | 2 ++ pkg/obfuscate/sql_test.go | 34 +++++++++++++++++++ pkg/trace/go.mod | 2 +- pkg/trace/go.sum | 4 +-- ...-obfuscation-options-ca13fcbeb4c9b299.yaml | 6 ++++ 12 files changed, 74 insertions(+), 9 deletions(-) create mode 100644 releasenotes/notes/bump-go-sqllexer-with-more-obfuscation-options-ca13fcbeb4c9b299.yaml diff --git a/go.mod b/go.mod index 5eb2eca9736e18..0038882b10b4b0 100644 --- a/go.mod +++ b/go.mod @@ -650,7 +650,7 @@ require ( github.com/DataDog/datadog-agent/pkg/util/buf v0.50.0-rc.4 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.50.0-rc.4 // indirect github.com/DataDog/datadog-api-client-go/v2 v2.13.0 // indirect - github.com/DataDog/go-sqllexer v0.0.8 // indirect + github.com/DataDog/go-sqllexer v0.0.9 // indirect github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.3 // indirect github.com/bahlo/generic-list-go v0.2.0 // indirect diff --git a/go.sum b/go.sum index 245345694f5d10..e3cdab1b9d916f 100644 --- a/go.sum +++ b/go.sum @@ -142,8 +142,8 @@ github.com/DataDog/go-grpc-bidirectional-streaming-example v0.0.0-20221024060302 github.com/DataDog/go-grpc-bidirectional-streaming-example v0.0.0-20221024060302-b9cf785c02fe/go.mod h1:90sqV0j7E8wYCyqIp5d9HmYWLTFQttqPFFtNYDyAybQ= github.com/DataDog/go-libddwaf/v2 v2.2.2 h1:WS0l3qcPju2U4Ot+vr02f525YfW9RcoQfvpoV1410ac= 
github.com/DataDog/go-libddwaf/v2 v2.2.2/go.mod h1:UH7CLwSL++Ij9U7LmdZRH+71hzD+AfH28lF7pTTpWhs= -github.com/DataDog/go-sqllexer v0.0.8 h1:vfC8R9PhmJfeOKcFYAX9UOd890A3wu3KrjU9Kr7nM0E= -github.com/DataDog/go-sqllexer v0.0.8/go.mod h1:nB4Ea2YNsqMwtbWMc4Fm/oP98IIrSPapqwOwPioMspY= +github.com/DataDog/go-sqllexer v0.0.9 h1:Cx2Cu1S0hfj4coCCA8hzjM9+UNFRkcu1avIV//RU5Qw= +github.com/DataDog/go-sqllexer v0.0.9/go.mod h1:nB4Ea2YNsqMwtbWMc4Fm/oP98IIrSPapqwOwPioMspY= github.com/DataDog/go-tuf v1.0.2-0.5.2 h1:EeZr937eKAWPxJ26IykAdWA4A0jQXJgkhUjqEI/w7+I= github.com/DataDog/go-tuf v1.0.2-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0= github.com/DataDog/gopsutil v1.2.2 h1:8lmthwyyCXa1NKiYcHlrtl9AAFdfbNI2gPcioCJcBPU= diff --git a/pkg/collector/corechecks/oracle-dbm/config/config.go b/pkg/collector/corechecks/oracle-dbm/config/config.go index 2eedb6dc368296..e58c648d6c0a03 100644 --- a/pkg/collector/corechecks/oracle-dbm/config/config.go +++ b/pkg/collector/corechecks/oracle-dbm/config/config.go @@ -167,6 +167,7 @@ func GetDefaultObfuscatorOptions() obfuscate.SQLConfig { ObfuscationMode: obfuscate.ObfuscateAndNormalize, RemoveSpaceBetweenParentheses: true, KeepNull: true, + KeepTrailingSemicolon: true, } } diff --git a/pkg/collector/python/datadog_agent.go b/pkg/collector/python/datadog_agent.go index 7658a8b7ede42b..3ac438c379ac48 100644 --- a/pkg/collector/python/datadog_agent.go +++ b/pkg/collector/python/datadog_agent.go @@ -274,6 +274,16 @@ type sqlConfig struct { // KeepPositionalParameter specifies whether to disable obfuscate positional parameter with ?. // This option is only valid when ObfuscationMode is "obfuscate_only" or "obfuscate_and_normalize". KeepPositionalParameter bool `json:"keep_positional_parameter"` + + // KeepTrailingSemicolon specifies whether to keep trailing semicolon. + // By default, trailing semicolon is removed during normalization. + // This option is only valid when ObfuscationMode is "obfuscate_only" or "obfuscate_and_normalize". 
+ KeepTrailingSemicolon bool `json:"keep_trailing_semicolon"` + + // KeepIdentifierQuotation specifies whether to keep identifier quotation, e.g. "my_table" or [my_table]. + // By default, identifier quotation is removed during normalization. + // This option is only valid when ObfuscationMode is "obfuscate_only" or "obfuscate_and_normalize". + KeepIdentifierQuotation bool `json:"keep_identifier_quotation"` } // ObfuscateSQL obfuscates & normalizes the provided SQL query, writing the error into errResult if the operation @@ -306,6 +316,8 @@ func ObfuscateSQL(rawQuery, opts *C.char, errResult **C.char) *C.char { KeepNull: sqlOpts.KeepNull, KeepBoolean: sqlOpts.KeepBoolean, KeepPositionalParameter: sqlOpts.KeepPositionalParameter, + KeepTrailingSemicolon: sqlOpts.KeepTrailingSemicolon, + KeepIdentifierQuotation: sqlOpts.KeepIdentifierQuotation, }) if err != nil { // memory will be freed by caller diff --git a/pkg/obfuscate/go.mod b/pkg/obfuscate/go.mod index 336d14fab237a1..474ef98fce7d5f 100644 --- a/pkg/obfuscate/go.mod +++ b/pkg/obfuscate/go.mod @@ -4,7 +4,7 @@ go 1.20 require ( github.com/DataDog/datadog-go/v5 v5.1.1 - github.com/DataDog/go-sqllexer v0.0.8 + github.com/DataDog/go-sqllexer v0.0.9 github.com/outcaste-io/ristretto v0.2.1 github.com/stretchr/testify v1.8.4 go.uber.org/atomic v1.10.0 diff --git a/pkg/obfuscate/go.sum b/pkg/obfuscate/go.sum index ecaa8334abdb91..8f60e065253ad2 100644 --- a/pkg/obfuscate/go.sum +++ b/pkg/obfuscate/go.sum @@ -1,7 +1,7 @@ github.com/DataDog/datadog-go/v5 v5.1.1 h1:JLZ6s2K1pG2h9GkvEvMdEGqMDyVLEAccdX5TltWcLMU= github.com/DataDog/datadog-go/v5 v5.1.1/go.mod h1:KhiYb2Badlv9/rofz+OznKoEF5XKTonWyhx5K83AP8E= -github.com/DataDog/go-sqllexer v0.0.8 h1:vfC8R9PhmJfeOKcFYAX9UOd890A3wu3KrjU9Kr7nM0E= -github.com/DataDog/go-sqllexer v0.0.8/go.mod h1:nB4Ea2YNsqMwtbWMc4Fm/oP98IIrSPapqwOwPioMspY= +github.com/DataDog/go-sqllexer v0.0.9 h1:Cx2Cu1S0hfj4coCCA8hzjM9+UNFRkcu1avIV//RU5Qw= +github.com/DataDog/go-sqllexer v0.0.9/go.mod 
h1:nB4Ea2YNsqMwtbWMc4Fm/oP98IIrSPapqwOwPioMspY= github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY= github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= diff --git a/pkg/obfuscate/obfuscate.go b/pkg/obfuscate/obfuscate.go index f0789eb04a075e..ed30de603b4941 100644 --- a/pkg/obfuscate/obfuscate.go +++ b/pkg/obfuscate/obfuscate.go @@ -165,6 +165,16 @@ type SQLConfig struct { // This option is only valid when ObfuscationMode is "obfuscate_only" or "obfuscate_and_normalize". KeepPositionalParameter bool `json:"keep_positional_parameter" yaml:"keep_positional_parameter"` + // KeepTrailingSemicolon specifies whether to keep trailing semicolon. + // By default, trailing semicolon is removed during normalization. + // This option is only valid when ObfuscationMode is "obfuscate_only" or "obfuscate_and_normalize". + KeepTrailingSemicolon bool `json:"keep_trailing_semicolon" yaml:"keep_trailing_semicolon"` + + // KeepIdentifierQuotation specifies whether to keep identifier quotation, e.g. "my_table" or [my_table]. + // By default, identifier quotation is removed during normalization. + // This option is only valid when ObfuscationMode is "obfuscate_only" or "obfuscate_and_normalize". + KeepIdentifierQuotation bool `json:"keep_identifier_quotation" yaml:"keep_identifier_quotation"` + // Cache reports whether the obfuscator should use a LRU look-up cache for SQL obfuscations. 
Cache bool } diff --git a/pkg/obfuscate/sql.go b/pkg/obfuscate/sql.go index cf30343abedfef..807c9fb5f6a89c 100644 --- a/pkg/obfuscate/sql.go +++ b/pkg/obfuscate/sql.go @@ -458,6 +458,8 @@ func (o *Obfuscator) ObfuscateWithSQLLexer(in string, opts *SQLConfig) (*Obfusca sqllexer.WithCollectProcedures(opts.CollectProcedures), sqllexer.WithKeepSQLAlias(opts.KeepSQLAlias), sqllexer.WithRemoveSpaceBetweenParentheses(opts.RemoveSpaceBetweenParentheses), + sqllexer.WithKeepTrailingSemicolon(opts.KeepTrailingSemicolon), + sqllexer.WithKeepIdentifierQuotation(opts.KeepIdentifierQuotation), ) out, statementMetadata, err := sqllexer.ObfuscateAndNormalize( in, diff --git a/pkg/obfuscate/sql_test.go b/pkg/obfuscate/sql_test.go index d8aa1a15217038..0ca3b39ff63e97 100644 --- a/pkg/obfuscate/sql_test.go +++ b/pkg/obfuscate/sql_test.go @@ -2135,6 +2135,8 @@ func TestSQLLexerObfuscationAndNormalization(t *testing.T) { keepNull bool keepBoolean bool keepPositionalParameter bool + keepTrailingSemicolon bool + keepIdentifierQuotation bool metadata SQLMetadata }{ { @@ -2344,6 +2346,36 @@ func TestSQLLexerObfuscationAndNormalization(t *testing.T) { Procedures: []string{}, }, }, + { + name: "normalization with keep trailing semicolon", + query: "SELECT * FROM users WHERE id = 1 AND name = 'test';", + expected: "SELECT * FROM users WHERE id = ? AND name = ?;", + keepTrailingSemicolon: true, + metadata: SQLMetadata{ + Size: 11, + TablesCSV: "users", + Commands: []string{ + "SELECT", + }, + Comments: []string{}, + Procedures: []string{}, + }, + }, + { + name: "normalization with keep identifier quotation", + query: `SELECT * FROM "users" WHERE id = 1 AND name = 'test'`, + expected: `SELECT * FROM "users" WHERE id = ? 
AND name = ?`, + keepIdentifierQuotation: true, + metadata: SQLMetadata{ + Size: 11, + TablesCSV: "users", + Commands: []string{ + "SELECT", + }, + Comments: []string{}, + Procedures: []string{}, + }, + }, } for _, tt := range tests { @@ -2362,6 +2394,8 @@ func TestSQLLexerObfuscationAndNormalization(t *testing.T) { KeepBoolean: tt.keepBoolean, KeepPositionalParameter: tt.keepPositionalParameter, RemoveSpaceBetweenParentheses: tt.removeSpaceBetweenParentheses, + KeepTrailingSemicolon: tt.keepTrailingSemicolon, + KeepIdentifierQuotation: tt.keepIdentifierQuotation, }, }).ObfuscateSQLString(tt.query) require.NoError(t, err) diff --git a/pkg/trace/go.mod b/pkg/trace/go.mod index f451f80b421c2a..f5840b6093d695 100644 --- a/pkg/trace/go.mod +++ b/pkg/trace/go.mod @@ -42,7 +42,7 @@ require ( ) require ( - github.com/DataDog/go-sqllexer v0.0.8 // indirect + github.com/DataDog/go-sqllexer v0.0.9 // indirect github.com/DataDog/go-tuf v1.0.2-0.5.2 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/containerd/cgroups/v3 v3.0.2 // indirect diff --git a/pkg/trace/go.sum b/pkg/trace/go.sum index 9ba09377d90330..8954bf7ec6a014 100644 --- a/pkg/trace/go.sum +++ b/pkg/trace/go.sum @@ -1,7 +1,7 @@ github.com/DataDog/datadog-go/v5 v5.1.1 h1:JLZ6s2K1pG2h9GkvEvMdEGqMDyVLEAccdX5TltWcLMU= github.com/DataDog/datadog-go/v5 v5.1.1/go.mod h1:KhiYb2Badlv9/rofz+OznKoEF5XKTonWyhx5K83AP8E= -github.com/DataDog/go-sqllexer v0.0.8 h1:vfC8R9PhmJfeOKcFYAX9UOd890A3wu3KrjU9Kr7nM0E= -github.com/DataDog/go-sqllexer v0.0.8/go.mod h1:nB4Ea2YNsqMwtbWMc4Fm/oP98IIrSPapqwOwPioMspY= +github.com/DataDog/go-sqllexer v0.0.9 h1:Cx2Cu1S0hfj4coCCA8hzjM9+UNFRkcu1avIV//RU5Qw= +github.com/DataDog/go-sqllexer v0.0.9/go.mod h1:nB4Ea2YNsqMwtbWMc4Fm/oP98IIrSPapqwOwPioMspY= github.com/DataDog/go-tuf v1.0.2-0.5.2 h1:EeZr937eKAWPxJ26IykAdWA4A0jQXJgkhUjqEI/w7+I= github.com/DataDog/go-tuf v1.0.2-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0= 
github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.8.1 h1:ly/egks++IqejMVPcp0OWV1fcL+Nsq4EHF48AAQPKu4= diff --git a/releasenotes/notes/bump-go-sqllexer-with-more-obfuscation-options-ca13fcbeb4c9b299.yaml b/releasenotes/notes/bump-go-sqllexer-with-more-obfuscation-options-ca13fcbeb4c9b299.yaml new file mode 100644 index 00000000000000..de69a7ffb64114 --- /dev/null +++ b/releasenotes/notes/bump-go-sqllexer-with-more-obfuscation-options-ca13fcbeb4c9b299.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + DBM: Add configuration options to SQL obfuscator to customize the normalization of SQL statements: + - ``KeepTrailingSemicolon`` - disable removing trailing semicolon. This option is only valid when ``ObfuscationMode`` is ``obfuscate_and_normalize``. + - ``KeepIdentifierQuotation`` - disable removing quotation marks around identifiers. This option is only valid when ``ObfuscationMode`` is ``obfuscate_and_normalize``. From d4dd3b48abd0d6b487aa06a9fee153aa0967b307 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Dec 2023 14:05:28 +0000 Subject: [PATCH 30/66] Bump golang.org/x/tools from 0.16.0 to 0.16.1 in /pkg/security/secl (#21610) Bump golang.org/x/tools from 0.16.0 to 0.16.1 in /pkg/security/secl --- pkg/security/secl/go.mod | 2 +- pkg/security/secl/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/security/secl/go.mod b/pkg/security/secl/go.mod index 53004a73f45ae7..58ecd393a3d21d 100644 --- a/pkg/security/secl/go.mod +++ b/pkg/security/secl/go.mod @@ -18,7 +18,7 @@ require ( golang.org/x/exp v0.0.0-20221114191408-850992195362 golang.org/x/sys v0.15.0 golang.org/x/text v0.14.0 - golang.org/x/tools v0.16.0 + golang.org/x/tools v0.16.1 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 ) diff --git a/pkg/security/secl/go.sum b/pkg/security/secl/go.sum index 47b6a9f3ebad65..220a149bab2bf4 100644 --- a/pkg/security/secl/go.sum +++ b/pkg/security/secl/go.sum @@ -96,8 
+96,8 @@ golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.16.0 h1:GO788SKMRunPIBCXiQyo2AaexLstOrVhuAL5YwsckQM= -golang.org/x/tools v0.16.0/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= +golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= +golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= From cfe19ec3ac5354db03f7577614c0acdd97e99559 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Dec 2023 14:52:15 +0000 Subject: [PATCH 31/66] Bump emoji from 2.8.0 to 2.9.0 in /test/e2e/cws-tests (#21611) Bump emoji from 2.8.0 to 2.9.0 in /test/e2e/cws-tests --- test/e2e/cws-tests/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/cws-tests/requirements.txt b/test/e2e/cws-tests/requirements.txt index 44f59e24b6dc57..0434b5f324f56c 100644 --- a/test/e2e/cws-tests/requirements.txt +++ b/test/e2e/cws-tests/requirements.txt @@ -3,6 +3,6 @@ datadog-api-client==2.19.0 pyaml==23.9.7 docker==6.1.3 retry==0.9.2 -emoji==2.8.0 +emoji==2.9.0 requests==2.31.0 jsonschema==4.20.0 \ No newline at end of file From 4b587f84b6c90df6d2a8957443f881e6e9542323 Mon Sep 17 00:00:00 2001 From: Sylvain Afchain Date: Mon, 18 Dec 2023 15:58:30 +0100 Subject: [PATCH 32/66] [CWS] 
ebpfless handle retval (#21607) --- pkg/security/probe/probe_epbfless.go | 1 + pkg/security/proto/ebpfless/msg.go | 1 + pkg/security/ptracer/cws.go | 7 ++++++- 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/pkg/security/probe/probe_epbfless.go b/pkg/security/probe/probe_epbfless.go index 3bec17a96917e1..2bca7a9ef4a936 100644 --- a/pkg/security/probe/probe_epbfless.go +++ b/pkg/security/probe/probe_epbfless.go @@ -96,6 +96,7 @@ func (p *EBPFLessProbe) handleClientMsg(msg *clientMsg) { p.Resolvers.ProcessResolver.AddForkEntry(process.CacheResolverKey{Pid: syscallMsg.PID, NSID: syscallMsg.NSID}, syscallMsg.Fork.PPID) case ebpfless.SyscallTypeOpen: event.Type = uint32(model.FileOpenEventType) + event.Open.Retval = syscallMsg.Retval event.Open.File.PathnameStr = syscallMsg.Open.Filename event.Open.File.BasenameStr = filepath.Base(syscallMsg.Open.Filename) event.Open.Flags = syscallMsg.Open.Flags diff --git a/pkg/security/proto/ebpfless/msg.go b/pkg/security/proto/ebpfless/msg.go index 37f7c2c990135d..f4acbada21c1ed 100644 --- a/pkg/security/proto/ebpfless/msg.go +++ b/pkg/security/proto/ebpfless/msg.go @@ -103,6 +103,7 @@ type SyscallMsg struct { NSID uint64 Type SyscallType PID uint32 + Retval int64 ContainerContext *ContainerContext Exec *ExecSyscallMsg Open *OpenSyscallMsg diff --git a/pkg/security/ptracer/cws.go b/pkg/security/ptracer/cws.go index 134314a8463fce..e7831301a5044c 100644 --- a/pkg/security/ptracer/cws.go +++ b/pkg/security/ptracer/cws.go @@ -402,6 +402,10 @@ func checkEntryPoint(path string) (string, error) { return name, nil } +func isAcceptedRetval(retval int64) bool { + return retval < 0 && retval != -int64(syscall.EACCES) && retval != -int64(syscall.EPERM) +} + // StartCWSPtracer start the ptracer func StartCWSPtracer(args []string, probeAddr string, creds Creds, verbose bool) error { entry, err := checkEntryPoint(args[0]) @@ -632,11 +636,12 @@ func StartCWSPtracer(args []string, probeAddr string, creds Creds, verbose bool) case 
ExecveNr, ExecveatNr: send(process.Nr[nr]) case OpenNr, OpenatNr: - if ret := tracer.ReadRet(regs); ret >= 0 { + if ret := tracer.ReadRet(regs); !isAcceptedRetval(ret) { msg, exists := process.Nr[nr] if !exists { return } + msg.Retval = ret send(msg) From 10907e0430601006ed896982a9aeacbff1ffdf8e Mon Sep 17 00:00:00 2001 From: Vincent Boulineau <58430298+vboulineau@users.noreply.github.com> Date: Mon, 18 Dec 2023 16:26:34 +0100 Subject: [PATCH 33/66] Allow configuring check cancel timeout (#21543) --- cmd/agent/common/loader.go | 7 ++----- pkg/collector/collector.go | 21 +++++++++++---------- pkg/collector/collector_demux_test.go | 2 +- pkg/collector/collector_test.go | 3 ++- pkg/config/config.go | 1 + 5 files changed, 17 insertions(+), 17 deletions(-) diff --git a/cmd/agent/common/loader.go b/cmd/agent/common/loader.go index daa4867e1892a9..7112f70a46cf17 100644 --- a/cmd/agent/common/loader.go +++ b/cmd/agent/common/loader.go @@ -65,9 +65,7 @@ func GetWorkloadmetaInit() workloadmeta.InitHelper { }) } -var ( - collectorOnce sync.Once -) +var collectorOnce sync.Once // LoadCollector instantiate the collector and init the global state 'Coll'. // @@ -77,7 +75,7 @@ func LoadCollector(senderManager sender.SenderManager) collector.Collector { collectorOnce.Do(func() { // create the Collector instance and start all the components // NOTICE: this will also setup the Python environment, if available - Coll = collector.NewCollector(senderManager, GetPythonPaths()...) + Coll = collector.NewCollector(senderManager, config.Datadog.GetDuration("check_cancel_timeout"), GetPythonPaths()...) 
}) return Coll } @@ -85,7 +83,6 @@ func LoadCollector(senderManager sender.SenderManager) collector.Collector { // LoadComponents configures several common Agent components: // tagger, collector, scheduler and autodiscovery func LoadComponents(senderManager sender.SenderManager, secretResolver secrets.Component, confdPath string) { - confSearchPaths := []string{ confdPath, filepath.Join(path.GetDistPath(), "conf.d"), diff --git a/pkg/collector/collector.go b/pkg/collector/collector.go index f48da965d9c3fb..3102947962c41a 100644 --- a/pkg/collector/collector.go +++ b/pkg/collector/collector.go @@ -28,8 +28,6 @@ const ( started ) -const cancelCheckTimeout time.Duration = 500 * time.Millisecond - // EventType represents the type of events emitted by the collector type EventType uint32 @@ -77,16 +75,19 @@ type collector struct { checks map[checkid.ID]*middleware.CheckWrapper eventReceivers []EventReceiver + cancelCheckTimeout time.Duration + m sync.RWMutex } // NewCollector create a Collector instance and sets up the Python Environment -func NewCollector(senderManager sender.SenderManager, paths ...string) Collector { +func NewCollector(senderManager sender.SenderManager, cancelCheckTimeout time.Duration, paths ...string) Collector { c := &collector{ - senderManager: senderManager, - checks: make(map[checkid.ID]*middleware.CheckWrapper), - state: atomic.NewUint32(stopped), - checkInstances: int64(0), + senderManager: senderManager, + checks: make(map[checkid.ID]*middleware.CheckWrapper), + state: atomic.NewUint32(stopped), + checkInstances: int64(0), + cancelCheckTimeout: cancelCheckTimeout, } pyVer, pyHome, pyPath := pySetup(paths...) 
@@ -220,11 +221,11 @@ func (c *collector) StopCheck(id checkid.ID) error { err = c.runner.StopCheck(id) if err != nil { // still attempt to cancel the check before returning the error - _ = c.cancelCheck(ch, cancelCheckTimeout) + _ = c.cancelCheck(ch, c.cancelCheckTimeout) return fmt.Errorf("an error occurred while stopping the check: %s", err) } - err = c.cancelCheck(ch, cancelCheckTimeout) + err = c.cancelCheck(ch, c.cancelCheckTimeout) if err != nil { return fmt.Errorf("an error occurred while calling check.Cancel(): %s", err) } @@ -251,7 +252,7 @@ func (c *collector) cancelCheck(ch check.Check, timeout time.Duration) error { case <-done: return nil case <-time.After(timeout): - return fmt.Errorf("timeout while calling check.Cancel() on check ID %s", ch.ID()) + return fmt.Errorf("timeout while calling check.Cancel() on check ID %s, timeout: %s", ch.ID(), timeout) } } diff --git a/pkg/collector/collector_demux_test.go b/pkg/collector/collector_demux_test.go index e49a6e8bb9e867..7ea01c030560d9 100644 --- a/pkg/collector/collector_demux_test.go +++ b/pkg/collector/collector_demux_test.go @@ -33,7 +33,7 @@ type CollectorDemuxTestSuite struct { func (suite *CollectorDemuxTestSuite) SetupTest() { log := fxutil.Test[log.Component](suite.T(), logimpl.MockModule()) suite.demux = aggregator.InitTestAgentDemultiplexerWithFlushInterval(log, 100*time.Hour) - suite.c = NewCollector(suite.demux).(*collector) + suite.c = NewCollector(suite.demux, 500*time.Millisecond).(*collector) suite.c.Start() } diff --git a/pkg/collector/collector_test.go b/pkg/collector/collector_test.go index 4cc63e5b30dcb2..ea0c0d8addf445 100644 --- a/pkg/collector/collector_test.go +++ b/pkg/collector/collector_test.go @@ -41,6 +41,7 @@ func (c *TestCheck) ID() checkid.ID { } return checkid.ID(c.String()) } + func (c *TestCheck) String() string { if c.name != "" { return c.name @@ -84,7 +85,7 @@ type CollectorTestSuite struct { } func (suite *CollectorTestSuite) SetupTest() { - suite.c = 
NewCollector(aggregator.NewNoOpSenderManager()).(*collector) + suite.c = NewCollector(aggregator.NewNoOpSenderManager(), 500*time.Millisecond).(*collector) suite.c.Start() } diff --git a/pkg/config/config.go b/pkg/config/config.go index 74dca35f649dcf..eb4e8b8b8c5b8b 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -257,6 +257,7 @@ func InitConfig(config Config) { config.BindEnvAndSetDefault("enable_metadata_collection", true) config.BindEnvAndSetDefault("enable_gohai", true) config.BindEnvAndSetDefault("check_runners", int64(4)) + config.BindEnvAndSetDefault("check_cancel_timeout", 500*time.Millisecond) config.BindEnvAndSetDefault("auth_token_file_path", "") config.BindEnv("bind_host") config.BindEnvAndSetDefault("health_port", int64(0)) From 7b3653a1c7197077c3878d0f1ecb5bc598c8b7b3 Mon Sep 17 00:00:00 2001 From: pducolin <45568537+pducolin@users.noreply.github.com> Date: Mon, 18 Dec 2023 17:26:58 +0100 Subject: [PATCH 34/66] [fakeintake] make client thread safe (#21507) [fakeintake] make client thread safe --- test/fakeintake/aggregator/common.go | 56 +++++++++++++++++------ test/fakeintake/aggregator/common_test.go | 47 +++++++++++++++---- 2 files changed, 80 insertions(+), 23 deletions(-) diff --git a/test/fakeintake/aggregator/common.go b/test/fakeintake/aggregator/common.go index 7e0bc589055de5..50500b2023118d 100644 --- a/test/fakeintake/aggregator/common.go +++ b/test/fakeintake/aggregator/common.go @@ -11,6 +11,7 @@ import ( "compress/zlib" "io" "sort" + "sync" "time" "github.com/DataDog/datadog-agent/test/fakeintake/api" @@ -29,6 +30,8 @@ type parseFunc[P PayloadItem] func(payload api.Payload) (items []P, err error) type Aggregator[P PayloadItem] struct { payloadsByName map[string][]P parse parseFunc[P] + + mutex sync.RWMutex } const ( @@ -43,42 +46,40 @@ func newAggregator[P PayloadItem](parse parseFunc[P]) Aggregator[P] { return Aggregator[P]{ payloadsByName: map[string][]P{}, parse: parse, + mutex: sync.RWMutex{}, } } // 
UnmarshallPayloads aggregate the payloads func (agg *Aggregator[P]) UnmarshallPayloads(payloads []api.Payload) error { - // reset map - agg.Reset() - // build map + // build new map + payloadsByName := map[string][]P{} for _, p := range payloads { payloads, err := agg.parse(p) if err != nil { return err } + for _, item := range payloads { - if _, found := agg.payloadsByName[item.name()]; !found { - agg.payloadsByName[item.name()] = []P{} + if _, found := payloadsByName[item.name()]; !found { + payloadsByName[item.name()] = []P{} } - agg.payloadsByName[item.name()] = append(agg.payloadsByName[item.name()], item) + payloadsByName[item.name()] = append(payloadsByName[item.name()], item) } } + agg.replace(payloadsByName) return nil } // ContainsPayloadName return true if name match one of the payloads func (agg *Aggregator[P]) ContainsPayloadName(name string) bool { - _, found := agg.payloadsByName[name] - return found + return len(agg.GetPayloadsByName(name)) != 0 } // ContainsPayloadNameAndTags return true if the payload name exist and on of the payloads contains all the tags func (agg *Aggregator[P]) ContainsPayloadNameAndTags(name string, tags []string) bool { - payloads, found := agg.payloadsByName[name] - if !found { - return false - } + payloads := agg.GetPayloadsByName(name) for _, payloadItem := range payloads { if AreTagsSubsetOfOtherTags(tags, payloadItem.GetTags()) { @@ -91,11 +92,18 @@ func (agg *Aggregator[P]) ContainsPayloadNameAndTags(name string, tags []string) // GetNames return the names of the payloads func (agg *Aggregator[P]) GetNames() []string { - names := []string{} + names := agg.getNamesUnsorted() + sort.Strings(names) + return names +} + +func (agg *Aggregator[P]) getNamesUnsorted() []string { + agg.mutex.RLock() + defer agg.mutex.RUnlock() + names := make([]string, 0, len(agg.payloadsByName)) for name := range agg.payloadsByName { names = append(names, name) } - sort.Strings(names) return names } @@ -126,14 +134,32 @@ func 
getReadCloserForEncoding(payload []byte, encoding string) (rc io.ReadCloser // GetPayloadsByName return the payloads for the resource name func (agg *Aggregator[P]) GetPayloadsByName(name string) []P { - return agg.payloadsByName[name] + agg.mutex.RLock() + defer agg.mutex.RUnlock() + payloads := agg.payloadsByName[name] + return payloads } // Reset the aggregation func (agg *Aggregator[P]) Reset() { + agg.mutex.Lock() + defer agg.mutex.Unlock() + agg.unsafeReset() +} + +func (agg *Aggregator[P]) unsafeReset() { agg.payloadsByName = map[string][]P{} } +func (agg *Aggregator[P]) replace(payloadsByName map[string][]P) { + agg.mutex.Lock() + defer agg.mutex.Unlock() + agg.unsafeReset() + for name, payloads := range payloadsByName { + agg.payloadsByName[name] = payloads + } +} + // FilterByTags return the payloads that match all the tags func FilterByTags[P PayloadItem](payloads []P, tags []string) []P { ret := []P{} diff --git a/test/fakeintake/aggregator/common_test.go b/test/fakeintake/aggregator/common_test.go index 0caee382a5b9f3..2deff16c3799a7 100644 --- a/test/fakeintake/aggregator/common_test.go +++ b/test/fakeintake/aggregator/common_test.go @@ -7,6 +7,7 @@ package aggregator import ( "encoding/json" "runtime" + "sync" "testing" "time" @@ -66,7 +67,7 @@ func generateTestData() (data []api.Payload, err error) { }, nil } -func validateCollectionTime(t *testing.T, agg Aggregator[*mockPayloadItem]) { +func validateCollectionTime(t *testing.T, agg *Aggregator[*mockPayloadItem]) { if runtime.GOOS != "linux" { t.Logf("validateCollectionTime test skip on %s", runtime.GOOS) return @@ -80,26 +81,28 @@ func validateCollectionTime(t *testing.T, agg Aggregator[*mockPayloadItem]) { func TestCommonAggregator(t *testing.T) { t.Run("ContainsPayloadName", func(t *testing.T) { + agg := newAggregator(parseMockPayloadItem) + assert.False(t, agg.ContainsPayloadName("totoro")) data, err := generateTestData() require.NoError(t, err) - agg := newAggregator(parseMockPayloadItem) err = 
agg.UnmarshallPayloads(data) assert.NoError(t, err) assert.True(t, agg.ContainsPayloadName("totoro")) assert.False(t, agg.ContainsPayloadName("ponyo")) - validateCollectionTime(t, agg) + validateCollectionTime(t, &agg) }) t.Run("ContainsPayloadNameAndTags", func(t *testing.T) { + agg := newAggregator(parseMockPayloadItem) + assert.False(t, agg.ContainsPayloadNameAndTags("totoro", []string{"age:123"})) data, err := generateTestData() require.NoError(t, err) - agg := newAggregator(parseMockPayloadItem) err = agg.UnmarshallPayloads(data) assert.NoError(t, err) assert.True(t, agg.ContainsPayloadNameAndTags("totoro", []string{"age:123"})) assert.False(t, agg.ContainsPayloadNameAndTags("porco rosso", []string{"country:it", "role:king"})) assert.True(t, agg.ContainsPayloadNameAndTags("porco rosso", []string{"country:it", "role:pilot"})) - validateCollectionTime(t, agg) + validateCollectionTime(t, &agg) }) t.Run("AreTagsSubsetOfOtherTags", func(t *testing.T) { @@ -127,11 +130,39 @@ func TestCommonAggregator(t *testing.T) { }) t.Run("Reset", func(t *testing.T) { - _, err := generateTestData() + data, err := generateTestData() require.NoError(t, err) agg := newAggregator(parseMockPayloadItem) + err = agg.UnmarshallPayloads(data) + require.NoError(t, err) + assert.NotEmpty(t, agg.payloadsByName) agg.Reset() - assert.Equal(t, 0, len(agg.payloadsByName)) - validateCollectionTime(t, agg) + assert.Empty(t, agg.payloadsByName) + }) + + t.Run("Thread safe", func(t *testing.T) { + var wg sync.WaitGroup + data, err := generateTestData() + require.NoError(t, err) + agg := newAggregator(parseMockPayloadItem) + // add some data to ensure we have names + err = agg.UnmarshallPayloads(data) + assert.NoError(t, err) + wg.Add(2) + go func() { + defer wg.Done() + for i := 0; i < 100; i++ { + err := agg.UnmarshallPayloads(data) + assert.NoError(t, err) + } + }() + go func() { + defer wg.Done() + for i := 0; i < 100; i++ { + names := agg.GetNames() + assert.NotEmpty(t, names) + } + }() + 
wg.Wait() }) } From 42d0697db1a28589d4fd9e40da9f15c1b765bfac Mon Sep 17 00:00:00 2001 From: Nicolas Guerguadj <35628945+Kaderinho@users.noreply.github.com> Date: Mon, 18 Dec 2023 18:08:41 +0100 Subject: [PATCH 35/66] Split e2e tests in multiple jobs (#21582) Split e2e tests in multiple jobs --- .gitlab/e2e.yml | 18 ++++++++++++++++++ .../agent-subcommands/flare/flare_nix_test.go | 1 - .../agent-subcommands/flare/flare_win_test.go | 1 - .../secret/secret_nix_test.go | 1 - .../secret/secret_win_test.go | 1 - 5 files changed, 18 insertions(+), 4 deletions(-) diff --git a/.gitlab/e2e.yml b/.gitlab/e2e.yml index b0c99cc1f88db4..d5c2c162bbfa8c 100644 --- a/.gitlab/e2e.yml +++ b/.gitlab/e2e.yml @@ -207,6 +207,20 @@ new-e2e-agent-shared-components-main: # Temporary, until we manage to stabilize those tests. allow_failure: true +.agent-subcommands-tests-matrix: &agent-subcommands-tests-matrix + parallel: + matrix: + - EXTRA_PARAMS: --run TestSubcommandSuite + - EXTRA_PARAMS: --run TestAgentSecretSuite + - EXTRA_PARAMS: --run TestAgentConfigSuite + - EXTRA_PARAMS: --run TestAgentHostnameEC2Suite + - EXTRA_PARAMS: --run TestAgentDiagnoseEC2Suite + - EXTRA_PARAMS: --run TestAgentConfigCheckSuite + - EXTRA_PARAMS: --run TestLinuxFlareSuite + - EXTRA_PARAMS: --run TestWindowsFlareSuite + - EXTRA_PARAMS: --run TestLinuxSecretSuite + - EXTRA_PARAMS: --run TestWindowsSecretSuite + new-e2e-agent-subcommands-dev: extends: .new_e2e_template rules: !reference [.on_dev_branch_manual] @@ -214,6 +228,7 @@ new-e2e-agent-subcommands-dev: variables: TARGETS: ./tests/agent-subcommands TEAM: agent-shared-components + <<: *agent-subcommands-tests-matrix new-e2e-agent-subcommands-main: extends: .new_e2e_template @@ -224,6 +239,9 @@ new-e2e-agent-subcommands-main: TEAM: agent-shared-components # Temporary, until we manage to stabilize those tests. 
allow_failure: true + <<: *agent-subcommands-tests-matrix + + new-e2e-language-detection-dev: extends: .new_e2e_template diff --git a/test/new-e2e/tests/agent-subcommands/flare/flare_nix_test.go b/test/new-e2e/tests/agent-subcommands/flare/flare_nix_test.go index 9ec2a45b830a20..e75848d3f32616 100644 --- a/test/new-e2e/tests/agent-subcommands/flare/flare_nix_test.go +++ b/test/new-e2e/tests/agent-subcommands/flare/flare_nix_test.go @@ -32,7 +32,6 @@ type linuxFlareSuite struct { } func TestLinuxFlareSuite(t *testing.T) { - t.Parallel() e2e.Run(t, &linuxFlareSuite{}, e2e.FakeIntakeStackDef(e2e.WithVMParams(ec2params.WithOS(ec2os.UbuntuOS)))) } diff --git a/test/new-e2e/tests/agent-subcommands/flare/flare_win_test.go b/test/new-e2e/tests/agent-subcommands/flare/flare_win_test.go index add1882fc2cf5e..6b30c80223d5dc 100644 --- a/test/new-e2e/tests/agent-subcommands/flare/flare_win_test.go +++ b/test/new-e2e/tests/agent-subcommands/flare/flare_win_test.go @@ -21,7 +21,6 @@ type windowsFlareSuite struct { } func TestWindowsFlareSuite(t *testing.T) { - t.Parallel() e2e.Run(t, &windowsFlareSuite{}, e2e.FakeIntakeStackDef(e2e.WithVMParams(ec2params.WithOS(ec2os.WindowsOS)))) } diff --git a/test/new-e2e/tests/agent-subcommands/secret/secret_nix_test.go b/test/new-e2e/tests/agent-subcommands/secret/secret_nix_test.go index f2c5e5f662620a..8933a30db1dec4 100644 --- a/test/new-e2e/tests/agent-subcommands/secret/secret_nix_test.go +++ b/test/new-e2e/tests/agent-subcommands/secret/secret_nix_test.go @@ -20,7 +20,6 @@ type linuxSecretSuite struct { } func TestLinuxSecretSuite(t *testing.T) { - t.Parallel() e2e.Run(t, &linuxSecretSuite{}, e2e.AgentStackDef(e2e.WithVMParams(ec2params.WithOS(ec2os.UbuntuOS)))) } diff --git a/test/new-e2e/tests/agent-subcommands/secret/secret_win_test.go b/test/new-e2e/tests/agent-subcommands/secret/secret_win_test.go index d77d94ceaf527e..21746c7824a1a3 100644 --- a/test/new-e2e/tests/agent-subcommands/secret/secret_win_test.go +++ 
b/test/new-e2e/tests/agent-subcommands/secret/secret_win_test.go @@ -21,7 +21,6 @@ type windowsSecretSuite struct { } func TestWindowsSecretSuite(t *testing.T) { - t.Parallel() e2e.Run(t, &windowsSecretSuite{}, e2e.AgentStackDef(e2e.WithVMParams(ec2params.WithOS(ec2os.WindowsOS)))) } From 57c758bf428462d2d74149ed0f9311f812de9d36 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Dec 2023 17:57:26 +0000 Subject: [PATCH 36/66] Bump datadog-api-client from 2.19.0 to 2.20.0 in /test/e2e/cws-tests (#21609) Bump datadog-api-client from 2.19.0 to 2.20.0 in /test/e2e/cws-tests --- test/e2e/cws-tests/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/cws-tests/requirements.txt b/test/e2e/cws-tests/requirements.txt index 0434b5f324f56c..88fb710f101673 100644 --- a/test/e2e/cws-tests/requirements.txt +++ b/test/e2e/cws-tests/requirements.txt @@ -1,5 +1,5 @@ kubernetes==28.1.0 -datadog-api-client==2.19.0 +datadog-api-client==2.20.0 pyaml==23.9.7 docker==6.1.3 retry==0.9.2 From ed531ba3784d68d487f655890df4f214ac8126ce Mon Sep 17 00:00:00 2001 From: Rey Abolofia Date: Mon, 18 Dec 2023 10:02:34 -0800 Subject: [PATCH 37/66] [Serverless] Use custom aws event types to speed up json unmarshalling. (#21520) * Custom APIGatewayProxyRequest type. * Custom APIGatewayV2HTTPRequest type * Custom APIGatewayWebsocketProxyRequest type * Custom APIGatewayCustomAuthorizerRequest type. * Custom APIGatewayCustomAuthorizerRequestTypeRequest type. * Custom ALBTargetGroupRequest type. * Custom CloudWatchEvent type. * Custom CloudwatchLogsEvent type. * Custom DynamoDBEvent type. * Custom KinesisEvent type. * Move EventBridgeEvent custom type. * Custom S3Event type. * Custom SNSEvent type. * Custom SQSEvent type. * Custom LambdaFunctionURLRequest type. * Custom S3Entity type. * Switch import from ddevents to events. * Organize imports. * Remove unneeded json decoding keys. 
* Copyright and package comment. --- pkg/serverless/invocationlifecycle/init.go | 6 +- .../invocationlifecycle/lifecycle.go | 5 +- .../invocationlifecycle/trace_test.go | 2 +- .../trace/inferredspan/constants.go | 8 - .../trace/inferredspan/span_enrichment.go | 4 +- .../inferredspan/span_enrichment_test.go | 9 +- pkg/serverless/trace/propagation/carriers.go | 2 +- .../trace/propagation/carriers_test.go | 2 +- pkg/serverless/trace/propagation/extractor.go | 2 +- .../trace/propagation/extractor_test.go | 2 +- pkg/serverless/trigger/events/events.go | 335 ++++++++++++++++++ pkg/serverless/trigger/extractor.go | 3 +- pkg/serverless/trigger/extractor_test.go | 3 +- 13 files changed, 355 insertions(+), 28 deletions(-) create mode 100644 pkg/serverless/trigger/events/events.go diff --git a/pkg/serverless/invocationlifecycle/init.go b/pkg/serverless/invocationlifecycle/init.go index 00a5341650522a..9121052f871b36 100644 --- a/pkg/serverless/invocationlifecycle/init.go +++ b/pkg/serverless/invocationlifecycle/init.go @@ -13,10 +13,8 @@ import ( pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/serverless/trace/inferredspan" - - "github.com/aws/aws-lambda-go/events" - "github.com/DataDog/datadog-agent/pkg/serverless/trigger" + "github.com/DataDog/datadog-agent/pkg/serverless/trigger/events" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -106,7 +104,7 @@ func (lp *LifecycleProcessor) initFromDynamoDBStreamEvent(event events.DynamoDBE lp.addTag(tagFunctionTriggerEventSourceArn, trigger.ExtractDynamoDBStreamEventARN(event)) } -func (lp *LifecycleProcessor) initFromEventBridgeEvent(event inferredspan.EventBridgeEvent) { +func (lp *LifecycleProcessor) initFromEventBridgeEvent(event events.EventBridgeEvent) { lp.requestHandler.event = event lp.addTag(tagFunctionTriggerEventSource, eventBridge) lp.addTag(tagFunctionTriggerEventSourceArn, event.Source) diff --git a/pkg/serverless/invocationlifecycle/lifecycle.go 
b/pkg/serverless/invocationlifecycle/lifecycle.go index 8a2290cc5b9dcc..67bb7bc6721756 100644 --- a/pkg/serverless/invocationlifecycle/lifecycle.go +++ b/pkg/serverless/invocationlifecycle/lifecycle.go @@ -12,8 +12,6 @@ import ( "strings" "time" - "github.com/aws/aws-lambda-go/events" - "github.com/DataDog/datadog-agent/pkg/aggregator" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" serverlessLog "github.com/DataDog/datadog-agent/pkg/serverless/logs" @@ -21,6 +19,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/serverless/trace/inferredspan" "github.com/DataDog/datadog-agent/pkg/serverless/trace/propagation" "github.com/DataDog/datadog-agent/pkg/serverless/trigger" + "github.com/DataDog/datadog-agent/pkg/serverless/trigger/events" "github.com/DataDog/datadog-agent/pkg/trace/api" "github.com/DataDog/datadog-agent/pkg/trace/sampler" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -191,7 +190,7 @@ func (lp *LifecycleProcessor) OnInvokeStart(startDetails *InvocationStartDetails ev = event lp.initFromKinesisStreamEvent(event) case trigger.EventBridgeEvent: - var event inferredspan.EventBridgeEvent + var event events.EventBridgeEvent if err := json.Unmarshal(payloadBytes, &event); err != nil { log.Debugf("Failed to unmarshal %s event: %s", eventBridge, err) break diff --git a/pkg/serverless/invocationlifecycle/trace_test.go b/pkg/serverless/invocationlifecycle/trace_test.go index eb041158858c4e..0b925f9a25be6b 100644 --- a/pkg/serverless/invocationlifecycle/trace_test.go +++ b/pkg/serverless/invocationlifecycle/trace_test.go @@ -10,11 +10,11 @@ import ( "testing" "time" - "github.com/aws/aws-lambda-go/events" "github.com/stretchr/testify/assert" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/serverless/trace/inferredspan" + "github.com/DataDog/datadog-agent/pkg/serverless/trigger/events" "github.com/DataDog/datadog-agent/pkg/trace/sampler" ) diff --git a/pkg/serverless/trace/inferredspan/constants.go 
b/pkg/serverless/trace/inferredspan/constants.go index cc6ed400ed4330..d48e12c6ccc07f 100644 --- a/pkg/serverless/trace/inferredspan/constants.go +++ b/pkg/serverless/trace/inferredspan/constants.go @@ -55,11 +55,3 @@ const ( // in the payload headers invocationType = "X-Amz-Invocation-Type" ) - -// EventBridgeEvent is used for unmarshalling a EventBridge event. -// AWS Go libraries do not provide this type of event for deserialization. -type EventBridgeEvent struct { - DetailType string `json:"detail-type"` - Source string `json:"source"` - StartTime string `json:"time"` -} diff --git a/pkg/serverless/trace/inferredspan/span_enrichment.go b/pkg/serverless/trace/inferredspan/span_enrichment.go index c4afae205f47f4..c849dae23c09bf 100644 --- a/pkg/serverless/trace/inferredspan/span_enrichment.go +++ b/pkg/serverless/trace/inferredspan/span_enrichment.go @@ -12,8 +12,8 @@ import ( "time" "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/serverless/trigger/events" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/aws/aws-lambda-go/events" ) // Define and initialize serviceMapping as a global variable. @@ -287,7 +287,7 @@ func (inferredSpan *InferredSpan) EnrichInferredSpanWithSQSEvent(eventPayload ev // EnrichInferredSpanWithEventBridgeEvent uses the parsed event // payload to enrich the current inferred span. It applies a // specific set of data to the span expected from an EventBridge event. 
-func (inferredSpan *InferredSpan) EnrichInferredSpanWithEventBridgeEvent(eventPayload EventBridgeEvent) { +func (inferredSpan *InferredSpan) EnrichInferredSpanWithEventBridgeEvent(eventPayload events.EventBridgeEvent) { source := eventPayload.Source serviceName := DetermineServiceName(serviceMapping, source, "lambda_eventbridge", "eventbridge") inferredSpan.IsAsync = true diff --git a/pkg/serverless/trace/inferredspan/span_enrichment_test.go b/pkg/serverless/trace/inferredspan/span_enrichment_test.go index b1203a7f61862f..8379c6bcf717c3 100644 --- a/pkg/serverless/trace/inferredspan/span_enrichment_test.go +++ b/pkg/serverless/trace/inferredspan/span_enrichment_test.go @@ -12,10 +12,10 @@ import ( "testing" "time" - "github.com/aws/aws-lambda-go/events" "github.com/stretchr/testify/assert" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" + "github.com/DataDog/datadog-agent/pkg/serverless/trigger/events" ) const ( @@ -632,7 +632,7 @@ func TestRemapsSpecificInferredSpanServiceNamesFromS3Event(t *testing.T) { } func TestEnrichInferredSpanWithEventBridgeEvent(t *testing.T) { - var eventBridgeEvent EventBridgeEvent + var eventBridgeEvent events.EventBridgeEvent _ = json.Unmarshal(getEventFromFile("eventbridge-custom.json"), &eventBridgeEvent) inferredSpan := mockInferredSpan() inferredSpan.EnrichInferredSpanWithEventBridgeEvent(eventBridgeEvent) @@ -646,6 +646,7 @@ func TestEnrichInferredSpanWithEventBridgeEvent(t *testing.T) { assert.Equal(t, "web", span.Type) assert.Equal(t, "aws.eventbridge", span.Meta[operationName]) assert.Equal(t, "eventbridge.custom.event.sender", span.Meta[resourceNames]) + assert.Equal(t, "testdetail", span.Meta[detailType]) assert.True(t, inferredSpan.IsAsync) } @@ -663,7 +664,7 @@ func TestRemapsAllInferredSpanServiceNamesFromEventBridgeEvent(t *testing.T) { } SetServiceMapping(newServiceMapping) // Load the original event - var eventBridgeEvent EventBridgeEvent + var eventBridgeEvent events.EventBridgeEvent _ = 
json.Unmarshal(getEventFromFile("eventbridge-custom.json"), &eventBridgeEvent) inferredSpan := mockInferredSpan() @@ -699,7 +700,7 @@ func TestRemapsSpecificInferredSpanServiceNamesFromEventBridgeEvent(t *testing.T } SetServiceMapping(newServiceMapping) // Load the original event - var eventBridgeEvent EventBridgeEvent + var eventBridgeEvent events.EventBridgeEvent _ = json.Unmarshal(getEventFromFile("eventbridge-custom.json"), &eventBridgeEvent) inferredSpan := mockInferredSpan() diff --git a/pkg/serverless/trace/propagation/carriers.go b/pkg/serverless/trace/propagation/carriers.go index 562202922b8e19..331a3b0dbd1591 100644 --- a/pkg/serverless/trace/propagation/carriers.go +++ b/pkg/serverless/trace/propagation/carriers.go @@ -15,8 +15,8 @@ import ( "strconv" "strings" + "github.com/DataDog/datadog-agent/pkg/serverless/trigger/events" "github.com/DataDog/datadog-agent/pkg/trace/sampler" - "github.com/aws/aws-lambda-go/events" "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" ) diff --git a/pkg/serverless/trace/propagation/carriers_test.go b/pkg/serverless/trace/propagation/carriers_test.go index 102bb55f7c3a1b..107a40147e54be 100644 --- a/pkg/serverless/trace/propagation/carriers_test.go +++ b/pkg/serverless/trace/propagation/carriers_test.go @@ -10,8 +10,8 @@ import ( "errors" "testing" + "github.com/DataDog/datadog-agent/pkg/serverless/trigger/events" "github.com/DataDog/datadog-agent/pkg/trace/sampler" - "github.com/aws/aws-lambda-go/events" "github.com/aws/aws-sdk-go-v2/aws" "github.com/stretchr/testify/assert" "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" diff --git a/pkg/serverless/trace/propagation/extractor.go b/pkg/serverless/trace/propagation/extractor.go index 7c46920653aeaf..427ee1b4dfd3e5 100644 --- a/pkg/serverless/trace/propagation/extractor.go +++ b/pkg/serverless/trace/propagation/extractor.go @@ -11,9 +11,9 @@ import ( "net/http" "strconv" + "github.com/DataDog/datadog-agent/pkg/serverless/trigger/events" 
"github.com/DataDog/datadog-agent/pkg/trace/sampler" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/aws/aws-lambda-go/events" "gopkg.in/DataDog/dd-trace-go.v1/ddtrace" "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" ) diff --git a/pkg/serverless/trace/propagation/extractor_test.go b/pkg/serverless/trace/propagation/extractor_test.go index 7e224e60a51737..c435802a39edf8 100644 --- a/pkg/serverless/trace/propagation/extractor_test.go +++ b/pkg/serverless/trace/propagation/extractor_test.go @@ -12,8 +12,8 @@ import ( "net/http" "testing" + "github.com/DataDog/datadog-agent/pkg/serverless/trigger/events" "github.com/DataDog/datadog-agent/pkg/trace/sampler" - "github.com/aws/aws-lambda-go/events" "github.com/aws/aws-sdk-go-v2/aws" "github.com/stretchr/testify/assert" "gopkg.in/DataDog/dd-trace-go.v1/ddtrace" diff --git a/pkg/serverless/trigger/events/events.go b/pkg/serverless/trigger/events/events.go new file mode 100644 index 00000000000000..0056cb7503c766 --- /dev/null +++ b/pkg/serverless/trigger/events/events.go @@ -0,0 +1,335 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2022-present Datadog, Inc. + +// Package events provides a series of drop in replacements for +// "github.com/aws/aws-lambda-go/events". Using these types for json +// unmarshalling event payloads provides huge reduction in processing time. +// This means fewer map/slice allocations since only the fields which we will +// use will be unmarshalled. +package events + +import ( + "bytes" + "compress/gzip" + "encoding/base64" + "encoding/json" + "time" + + "github.com/aws/aws-lambda-go/events" +) + +// APIGatewayProxyRequest mirrors events.APIGatewayProxyRequest type, removing +// unused fields. 
+type APIGatewayProxyRequest struct { + Resource string + Path string + HTTPMethod string + Headers map[string]string + RequestContext APIGatewayProxyRequestContext +} + +// APIGatewayProxyRequestContext mirrors events.APIGatewayProxyRequestContext +// type, removing unused fields. +type APIGatewayProxyRequestContext struct { + Stage string + DomainName string + RequestID string + Path string + HTTPMethod string + RequestTimeEpoch int64 + APIID string +} + +// APIGatewayV2HTTPRequest mirrors events.APIGatewayV2HTTPRequest type, +// removing unused fields. +type APIGatewayV2HTTPRequest struct { + RouteKey string + Headers map[string]string + RequestContext APIGatewayV2HTTPRequestContext +} + +// APIGatewayV2HTTPRequestContext mirrors events.APIGatewayV2HTTPRequestContext +// type, removing unused fields. +type APIGatewayV2HTTPRequestContext struct { + Stage string + RequestID string + APIID string + DomainName string + TimeEpoch int64 + HTTP APIGatewayV2HTTPRequestContextHTTPDescription +} + +// APIGatewayV2HTTPRequestContextHTTPDescription mirrors +// events.APIGatewayV2HTTPRequestContextHTTPDescription type, removing unused +// fields. +type APIGatewayV2HTTPRequestContextHTTPDescription struct { + Method string + Path string + Protocol string + SourceIP string + UserAgent string +} + +// APIGatewayWebsocketProxyRequest mirrors +// events.APIGatewayWebsocketProxyRequest type, removing unused fields. +type APIGatewayWebsocketProxyRequest struct { + Headers map[string]string + RequestContext APIGatewayWebsocketProxyRequestContext +} + +// APIGatewayWebsocketProxyRequestContext mirrors +// events.APIGatewayWebsocketProxyRequestContext type, removing unused fields. 
+type APIGatewayWebsocketProxyRequestContext struct { + Stage string + RequestID string + APIID string + ConnectionID string + DomainName string + EventType string + MessageDirection string + RequestTimeEpoch int64 + RouteKey string +} + +// APIGatewayCustomAuthorizerRequest mirrors +// events.APIGatewayCustomAuthorizerRequest type, removing unused fields. +type APIGatewayCustomAuthorizerRequest struct { + Type string + AuthorizationToken string + MethodArn string +} + +// APIGatewayCustomAuthorizerRequestTypeRequest mirrors +// events.APIGatewayCustomAuthorizerRequestTypeRequest type, removing unused +// fields. +type APIGatewayCustomAuthorizerRequestTypeRequest struct { + MethodArn string + Resource string + HTTPMethod string + Headers map[string]string + RequestContext APIGatewayCustomAuthorizerRequestTypeRequestContext +} + +// APIGatewayCustomAuthorizerRequestTypeRequestContext mirrors +// events.APIGatewayCustomAuthorizerRequestTypeRequestContext type, removing +// unused fields. +type APIGatewayCustomAuthorizerRequestTypeRequestContext struct { + Path string +} + +// ALBTargetGroupRequest mirrors events.ALBTargetGroupRequest type, removing +// unused fields. +type ALBTargetGroupRequest struct { + HTTPMethod string + Path string + Headers map[string]string + RequestContext ALBTargetGroupRequestContext +} + +// ALBTargetGroupRequestContext mirrors events.ALBTargetGroupRequestContext +// type, removing unused fields. +type ALBTargetGroupRequestContext struct { + ELB ELBContext +} + +// ELBContext mirrors events.ELBContext type, removing unused fields. +type ELBContext struct { + TargetGroupArn string +} + +// CloudWatchEvent mirrors events.CloudWatchEvent type, removing unused fields. +type CloudWatchEvent struct { + Resources []string +} + +// CloudwatchLogsEvent mirrors events.CloudwatchLogsEvent type, removing unused +// fields. 
+type CloudwatchLogsEvent struct { + AWSLogs CloudwatchLogsRawData +} + +// CloudwatchLogsRawData mirrors events.CloudwatchLogsRawData type, removing +// unused fields. +type CloudwatchLogsRawData struct { + Data string +} + +// Parse returns a struct representing a usable CloudwatchLogs event +func (c CloudwatchLogsRawData) Parse() (d CloudwatchLogsData, err error) { + data, err := base64.StdEncoding.DecodeString(c.Data) + if err != nil { + return + } + + zr, err := gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + return + } + defer zr.Close() + + dec := json.NewDecoder(zr) + err = dec.Decode(&d) + + return +} + +// CloudwatchLogsData mirrors events.CloudwatchLogsData type, removing unused +// fields. +type CloudwatchLogsData struct { + LogGroup string +} + +// DynamoDBEvent mirrors events.DynamoDBEvent type, removing unused fields. +type DynamoDBEvent struct { + Records []DynamoDBEventRecord +} + +// DynamoDBEventRecord mirrors events.DynamoDBEventRecord type, removing unused +// fields. +type DynamoDBEventRecord struct { + Change DynamoDBStreamRecord `json:"dynamodb"` + EventID string + EventName string + EventVersion string + EventSourceArn string +} + +// DynamoDBStreamRecord mirrors events.DynamoDBStreamRecord type, removing +// unused fields. +type DynamoDBStreamRecord struct { + ApproximateCreationDateTime events.SecondsEpochTime + SizeBytes int64 + StreamViewType string +} + +// KinesisEvent mirrors events.KinesisEvent type, removing unused fields. +type KinesisEvent struct { + Records []KinesisEventRecord +} + +// KinesisEventRecord mirrors events.KinesisEventRecord type, removing unused +// fields. +type KinesisEventRecord struct { + EventID string + EventName string + EventSourceArn string + EventVersion string + Kinesis KinesisRecord +} + +// KinesisRecord mirrors events.KinesisRecord type, removing unused fields. 
+type KinesisRecord struct { + ApproximateArrivalTimestamp events.SecondsEpochTime + PartitionKey string +} + +// EventBridgeEvent is used for unmarshalling a EventBridge event. AWS Go +// libraries do not provide this type of event for deserialization. +type EventBridgeEvent struct { + DetailType string `json:"detail-type"` + Source string + StartTime string +} + +// S3Event mirrors events.S3Event type, removing unused fields. +type S3Event struct { + Records []S3EventRecord +} + +// S3EventRecord mirrors events.S3EventRecord type, removing unused fields. +type S3EventRecord struct { + EventSource string + EventTime time.Time + EventName string + S3 S3Entity +} + +// S3Entity mirrors events.S3Entity type, removing unused fields. +type S3Entity struct { + Bucket S3Bucket + Object S3Object +} + +// S3Bucket mirrors events.S3Bucket type, removing unused fields. +type S3Bucket struct { + Name string + Arn string +} + +// S3Object mirrors events.S3Object type, removing unused fields. +type S3Object struct { + Key string + Size int64 + ETag string +} + +// SNSEvent mirrors events.SNSEvent type, removing unused fields. +type SNSEvent struct { + Records []SNSEventRecord +} + +// SNSEventRecord mirrors events.SNSEventRecord type, removing unused fields. +type SNSEventRecord struct { + SNS SNSEntity +} + +// SNSEntity mirrors events.SNSEntity type, removing unused fields. +type SNSEntity struct { + MessageID string + Type string + TopicArn string + Timestamp time.Time + Subject string +} + +// SQSEvent mirrors events.SQSEvent type, removing unused fields. +type SQSEvent struct { + Records []SQSMessage +} + +// SQSMessage mirrors events.SQSMessage type, removing unused fields. +type SQSMessage struct { + ReceiptHandle string + Body string + Attributes map[string]string + MessageAttributes map[string]SQSMessageAttribute + EventSourceARN string +} + +// SQSMessageAttribute mirrors events.SQSMessageAttribute type, removing unused +// fields. 
+type SQSMessageAttribute struct { + StringValue *string + BinaryValue []byte + DataType string +} + +// LambdaFunctionURLRequest mirrors events.LambdaFunctionURLRequest type, +// removing unused fields. +type LambdaFunctionURLRequest struct { + Headers map[string]string + RequestContext LambdaFunctionURLRequestContext +} + +// LambdaFunctionURLRequestContext mirrors +// events.LambdaFunctionURLRequestContext type, removing unused fields. +type LambdaFunctionURLRequestContext struct { + RequestID string + APIID string + DomainName string + TimeEpoch int64 + HTTP LambdaFunctionURLRequestContextHTTPDescription +} + +// LambdaFunctionURLRequestContextHTTPDescription mirrors +// events.LambdaFunctionURLRequestContextHTTPDescription type, removing unused +// fields. +type LambdaFunctionURLRequestContextHTTPDescription struct { + Method string + Path string + Protocol string + SourceIP string + UserAgent string +} diff --git a/pkg/serverless/trigger/extractor.go b/pkg/serverless/trigger/extractor.go index 7ba2d90aa54afe..f1013e2bbe5ef5 100644 --- a/pkg/serverless/trigger/extractor.go +++ b/pkg/serverless/trigger/extractor.go @@ -11,8 +11,9 @@ import ( "strconv" "strings" - "github.com/aws/aws-lambda-go/events" "github.com/aws/aws-sdk-go-v2/aws/arn" + + "github.com/DataDog/datadog-agent/pkg/serverless/trigger/events" ) // GetAWSPartitionByRegion parses an AWS region and returns an AWS partition diff --git a/pkg/serverless/trigger/extractor_test.go b/pkg/serverless/trigger/extractor_test.go index 757d1c02d98eba..34cb6f4b3c816a 100644 --- a/pkg/serverless/trigger/extractor_test.go +++ b/pkg/serverless/trigger/extractor_test.go @@ -11,8 +11,9 @@ import ( "encoding/base64" "testing" - "github.com/aws/aws-lambda-go/events" "github.com/stretchr/testify/assert" + + "github.com/DataDog/datadog-agent/pkg/serverless/trigger/events" ) func TestGetAWSPartitionByRegion(t *testing.T) { From bba69241e2a3dff5687e7a7b8e26dd5cf779e994 Mon Sep 17 00:00:00 2001 From: Rey Abolofia Date: Mon, 
18 Dec 2023 10:22:44 -0800 Subject: [PATCH 38/66] [Serverless] Support trace context extraction from SNS events. (#21491) * Support trace context extraction from SNS events. * Create errors only once. * Update existing test with new propagation context. * Avoid allocs by only unmarshalling necessary fields. --- pkg/serverless/daemon/routes_test.go | 7 +- pkg/serverless/trace/propagation/carriers.go | 81 +++++-- .../trace/propagation/carriers_test.go | 153 +++++++++++- pkg/serverless/trace/propagation/extractor.go | 9 + .../trace/propagation/extractor_test.go | 228 +++++++++++++++++- 5 files changed, 449 insertions(+), 29 deletions(-) diff --git a/pkg/serverless/daemon/routes_test.go b/pkg/serverless/daemon/routes_test.go index ca7a54ea623576..ef617475cc1280 100644 --- a/pkg/serverless/daemon/routes_test.go +++ b/pkg/serverless/daemon/routes_test.go @@ -241,13 +241,12 @@ func TestStartEndInvocationSpanParenting(t *testing.T) { expPriority: 1, }, { - // NOTE: sns trace extraction not yet implemented name: "sns", payload: getEventFromFile("sns.json"), expInfSpans: 1, - expTraceID: 0, - expParentID: 0, - expPriority: -128, + expTraceID: 4948377316357291421, + expParentID: 6746998015037429512, + expPriority: 1, }, { name: "sns-sqs", diff --git a/pkg/serverless/trace/propagation/carriers.go b/pkg/serverless/trace/propagation/carriers.go index 331a3b0dbd1591..71b2f50ff9f5d7 100644 --- a/pkg/serverless/trace/propagation/carriers.go +++ b/pkg/serverless/trace/propagation/carriers.go @@ -34,12 +34,25 @@ const ( var rootRegex = regexp.MustCompile("Root=1-[0-9a-fA-F]{8}-00000000[0-9a-fA-F]{16}") +var ( + errorAWSTraceHeaderMismatch = errors.New("AWSTraceHeader does not match expected regex") + errorAWSTraceHeaderEmpty = errors.New("AWSTraceHeader does not contain trace ID and parent ID") + errorStringNotFound = errors.New("String value not found in _datadog payload") + errorUnsupportedDataType = errors.New("Unsupported DataType in _datadog payload") + errorNoDDContextFound = 
errors.New("No Datadog trace context found") + errorUnsupportedPayloadType = errors.New("Unsupported type for _datadog payload") + errorUnsupportedTypeType = errors.New("Unsupported type in _datadog payload") + errorUnsupportedValueType = errors.New("Unsupported value type in _datadog payload") + errorUnsupportedTypeValue = errors.New("Unsupported Type in _datadog payload") + errorCouldNotUnmarshal = errors.New("Could not unmarshal the invocation event payload") +) + // extractTraceContextfromAWSTraceHeader extracts trace context from the // AWSTraceHeader directly. Unlike the other carriers in this file, it should // not be passed to the tracer.Propagator, instead extracting context directly. func extractTraceContextfromAWSTraceHeader(value string) (*TraceContext, error) { if !rootRegex.MatchString(value) { - return nil, errors.New("AWSTraceHeader does not match expected regex") + return nil, errorAWSTraceHeaderMismatch } var ( startPart int @@ -86,7 +99,7 @@ func extractTraceContextfromAWSTraceHeader(value string) (*TraceContext, error) tc.SamplingPriority = sampler.PriorityAutoKeep } if tc.TraceID == 0 || tc.ParentID == 0 { - return nil, errors.New("AWSTraceHeader does not contain trace ID and parent ID") + return nil, errorAWSTraceHeaderEmpty } return tc, nil } @@ -108,7 +121,7 @@ func sqsMessageAttrCarrier(attr events.SQSMessageAttribute) (tracer.TextMapReade switch attr.DataType { case "String": if attr.StringValue == nil { - return nil, errors.New("String value not found in _datadog payload") + return nil, errorStringNotFound } bytes = []byte(*attr.StringValue) case "Binary": @@ -116,7 +129,7 @@ func sqsMessageAttrCarrier(attr events.SQSMessageAttribute) (tracer.TextMapReade // MESSAGE DELIVERY option bytes = attr.BinaryValue // No need to decode base64 because already decoded default: - return nil, errors.New("Unsupported DataType in _datadog payload") + return nil, errorUnsupportedDataType } var carrier tracer.TextMapCarrier @@ -126,32 +139,62 @@ func 
sqsMessageAttrCarrier(attr events.SQSMessageAttribute) (tracer.TextMapReade return carrier, nil } +// snsBody is used to unmarshal only required fields on events.SNSEntity +// types. +type snsBody struct { + MessageAttributes map[string]interface{} +} + // snsSqsMessageCarrier returns the tracer.TextMapReader used to extract trace // context from the body of an events.SQSMessage type. func snsSqsMessageCarrier(event events.SQSMessage) (tracer.TextMapReader, error) { - var body struct { - MessageAttributes map[string]struct { - Type string - Value string - } - } + var body snsBody err := json.Unmarshal([]byte(event.Body), &body) if err != nil { return nil, fmt.Errorf("Error unmarshaling message body: %w", err) } - msgAttrs, ok := body.MessageAttributes[datadogSQSHeader] + return snsEntityCarrier(events.SNSEntity{ + MessageAttributes: body.MessageAttributes, + }) +} + +// snsEntityCarrier returns the tracer.TextMapReader used to extract trace +// context from the attributes of an events.SNSEntity type. 
+func snsEntityCarrier(event events.SNSEntity) (tracer.TextMapReader, error) { + msgAttrs, ok := event.MessageAttributes[datadogSQSHeader] if !ok { - return nil, errors.New("No Datadog trace context found") + return nil, errorNoDDContextFound } - if msgAttrs.Type != "Binary" { - return nil, errors.New("Unsupported DataType in _datadog payload") + mapAttrs, ok := msgAttrs.(map[string]interface{}) + if !ok { + return nil, errorUnsupportedPayloadType } - attr, err := base64.StdEncoding.DecodeString(string(msgAttrs.Value)) - if err != nil { - return nil, fmt.Errorf("Error decoding binary: %w", err) + + typ, ok := mapAttrs["Type"].(string) + if !ok { + return nil, errorUnsupportedTypeType + } + val, ok := mapAttrs["Value"].(string) + if !ok { + return nil, errorUnsupportedValueType } + + var bytes []byte + var err error + switch typ { + case "Binary": + bytes, err = base64.StdEncoding.DecodeString(val) + if err != nil { + return nil, fmt.Errorf("Error decoding binary: %w", err) + } + case "String": + bytes = []byte(val) + default: + return nil, errorUnsupportedTypeValue + } + var carrier tracer.TextMapCarrier - if err = json.Unmarshal(attr, &carrier); err != nil { + if err = json.Unmarshal(bytes, &carrier); err != nil { return nil, fmt.Errorf("Error unmarshaling the decoded binary: %w", err) } return carrier, nil @@ -166,7 +209,7 @@ type invocationPayload struct { func rawPayloadCarrier(rawPayload []byte) (tracer.TextMapReader, error) { var payload invocationPayload if err := json.Unmarshal(rawPayload, &payload); err != nil { - return nil, errors.New("Could not unmarshal the invocation event payload") + return nil, errorCouldNotUnmarshal } return payload.Headers, nil } diff --git a/pkg/serverless/trace/propagation/carriers_test.go b/pkg/serverless/trace/propagation/carriers_test.go index 107a40147e54be..102646c9be40af 100644 --- a/pkg/serverless/trace/propagation/carriers_test.go +++ b/pkg/serverless/trace/propagation/carriers_test.go @@ -169,7 +169,7 @@ func 
TestSnsSqsMessageCarrier(t *testing.T) { }`, }, expMap: nil, - expErr: errors.New("Error unmarshaling message body: json: cannot unmarshal string into Go struct field .MessageAttributes of type map[string]struct { Type string; Value string }"), + expErr: errors.New("Error unmarshaling message body: json: cannot unmarshal string into Go struct field snsBody.MessageAttributes of type map[string]interface {}"), }, { name: "non-binary-type", @@ -177,14 +177,14 @@ func TestSnsSqsMessageCarrier(t *testing.T) { Body: `{ "MessageAttributes": { "_datadog": { - "Type": "String", + "Type": "Purple", "Value": "Value" } } }`, }, expMap: nil, - expErr: errors.New("Unsupported DataType in _datadog payload"), + expErr: errors.New("Unsupported Type in _datadog payload"), }, { name: "cannot-decode", @@ -243,6 +243,135 @@ func TestSnsSqsMessageCarrier(t *testing.T) { } } +func TestSnsEntityCarrier(t *testing.T) { + testcases := []struct { + name string + event events.SNSEntity + expMap map[string]string + expErr error + }{ + { + name: "no-msg-attrs", + event: events.SNSEntity{}, + expMap: nil, + expErr: errors.New("No Datadog trace context found"), + }, + { + name: "wrong-type-msg-attrs", + event: events.SNSEntity{ + MessageAttributes: map[string]interface{}{ + "_datadog": 12345, + }, + }, + expMap: nil, + expErr: errors.New("Unsupported type for _datadog payload"), + }, + { + name: "wrong-type-type", + event: events.SNSEntity{ + MessageAttributes: map[string]interface{}{ + "_datadog": map[string]interface{}{ + "Type": 12345, + "Value": "Value", + }, + }, + }, + expMap: nil, + expErr: errors.New("Unsupported type in _datadog payload"), + }, + { + name: "wrong-value-type", + event: events.SNSEntity{ + MessageAttributes: map[string]interface{}{ + "_datadog": map[string]interface{}{ + "Type": "Binary", + "Value": 12345, + }, + }, + }, + expMap: nil, + expErr: errors.New("Unsupported value type in _datadog payload"), + }, + { + name: "cannot-decode", + event: events.SNSEntity{ + 
MessageAttributes: map[string]interface{}{ + "_datadog": map[string]interface{}{ + "Type": "Binary", + "Value": "Value", + }, + }, + }, + expMap: nil, + expErr: errors.New("Error decoding binary: illegal base64 data at input byte 4"), + }, + { + name: "unknown-type", + event: events.SNSEntity{ + MessageAttributes: map[string]interface{}{ + "_datadog": map[string]interface{}{ + "Type": "Purple", + "Value": "Value", + }, + }, + }, + expMap: nil, + expErr: errors.New("Unsupported Type in _datadog payload"), + }, + { + name: "empty-string-encoded", + event: events.SNSEntity{ + MessageAttributes: map[string]interface{}{ + "_datadog": map[string]interface{}{ + "Type": "Binary", + "Value": base64.StdEncoding.EncodeToString([]byte(``)), + }, + }, + }, + expMap: nil, + expErr: errors.New("Error unmarshaling the decoded binary: unexpected end of JSON input"), + }, + { + name: "binary-type", + event: events.SNSEntity{ + MessageAttributes: map[string]interface{}{ + "_datadog": map[string]interface{}{ + "Type": "Binary", + "Value": base64.StdEncoding.EncodeToString([]byte(headersAll)), + }, + }, + }, + expMap: headersMapAll, + expErr: nil, + }, + { + name: "string-type", + event: events.SNSEntity{ + MessageAttributes: map[string]interface{}{ + "_datadog": map[string]interface{}{ + "Type": "String", + "Value": headersAll, + }, + }, + }, + expMap: headersMapAll, + expErr: nil, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + tm, err := snsEntityCarrier(tc.event) + t.Logf("snsEntityCarrier returned TextMapReader=%#v error=%#v", tm, err) + assert.Equal(t, tc.expErr != nil, err != nil) + if tc.expErr != nil && err != nil { + assert.Equal(t, tc.expErr.Error(), err.Error()) + } + assert.Equal(t, tc.expMap, getMapFromCarrier(tm)) + }) + } +} + func TestExtractTraceContextfromAWSTraceHeader(t *testing.T) { ctx := func(trace, parent, priority uint64) *TraceContext { return &TraceContext{ @@ -444,6 +573,24 @@ func 
TestExtractTraceContextfromAWSTraceHeader(t *testing.T) { expTc: nil, expNoErr: false, }, + { + name: "bad trace id", + value: "Root=1-00000000-000000000000000000000001purple;Parent=0000000000000002;Sampled=1", + expTc: nil, + expNoErr: false, + }, + { + name: "bad parent id", + value: "Root=1-00000000-000000000000000000000001;Parent=0000000000000002purple;Sampled=1", + expTc: nil, + expNoErr: false, + }, + { + name: "zero value trace and parent id", + value: "Root=1-00000000-000000000000000000000000;Parent=0000000000000000;Sampled=1", + expTc: nil, + expNoErr: false, + }, } for _, tc := range testcases { diff --git a/pkg/serverless/trace/propagation/extractor.go b/pkg/serverless/trace/propagation/extractor.go index 427ee1b4dfd3e5..eb745f4f491755 100644 --- a/pkg/serverless/trace/propagation/extractor.go +++ b/pkg/serverless/trace/propagation/extractor.go @@ -32,6 +32,7 @@ var ( errorUnsupportedExtractionType = errors.New("Unsupported event type for trace context extraction") errorNoContextFound = errors.New("No trace context found") errorNoSQSRecordFound = errors.New("No sqs message records found for trace context extraction") + errorNoSNSRecordFound = errors.New("No sns message records found for trace context extraction") errorNoTraceIDFound = errors.New("No trace ID found") errorNoParentIDFound = errors.New("No parent ID found") ) @@ -91,6 +92,14 @@ func (e Extractor) extract(event interface{}) (*TraceContext, error) { } } carrier, err = sqsMessageCarrier(ev) + case events.SNSEvent: + // look for context in just the first message + if len(ev.Records) > 0 { + return e.extract(ev.Records[0].SNS) + } + return nil, errorNoSNSRecordFound + case events.SNSEntity: + carrier, err = snsEntityCarrier(ev) case events.APIGatewayProxyRequest: carrier, err = headersCarrier(ev.Headers) case events.APIGatewayV2HTTPRequest: diff --git a/pkg/serverless/trace/propagation/extractor_test.go b/pkg/serverless/trace/propagation/extractor_test.go index c435802a39edf8..21a051dea5a95c 
100644 --- a/pkg/serverless/trace/propagation/extractor_test.go +++ b/pkg/serverless/trace/propagation/extractor_test.go @@ -10,6 +10,7 @@ import ( "encoding/json" "errors" "net/http" + "os" "testing" "github.com/DataDog/datadog-agent/pkg/serverless/trigger/events" @@ -150,6 +151,28 @@ var ( } return e } + + eventSnsEntity = func(binHdrs, strHdrs string) events.SNSEntity { + e := events.SNSEntity{} + if len(binHdrs) > 0 && len(strHdrs) == 0 { + e.MessageAttributes = map[string]interface{}{ + "_datadog": map[string]interface{}{ + "Type": "Binary", + "Value": base64.StdEncoding.EncodeToString([]byte(binHdrs)), + }, + } + } else if len(binHdrs) == 0 && len(strHdrs) > 0 { + e.MessageAttributes = map[string]interface{}{ + "_datadog": map[string]interface{}{ + "Type": "String", + "Value": strHdrs, + }, + } + } else if len(binHdrs) > 0 && len(strHdrs) > 0 { + panic("expecting one of binHdrs or strHdrs, not both") + } + return e + } ) func TestNilPropagator(t *testing.T) { @@ -193,6 +216,16 @@ func TestExtractorExtract(t *testing.T) { }, // events.SQSEvent + { + name: "sqs-event-no-records", + events: []interface{}{ + events.SQSEvent{ + Records: []events.SQSMessage{}, + }, + }, + expCtx: nil, + expNoErr: false, + }, { name: "sqs-event-uses-first-record", events: []interface{}{ @@ -288,6 +321,92 @@ func TestExtractorExtract(t *testing.T) { expNoErr: true, }, + // events.SNSEvent + { + name: "sns-event-no-records", + events: []interface{}{ + events.SNSEvent{ + Records: []events.SNSEventRecord{}, + }, + }, + expCtx: nil, + expNoErr: false, + }, + { + name: "sns-event-uses-first-record", + events: []interface{}{ + events.SNSEvent{ + Records: []events.SNSEventRecord{ + // Uses the first message only + {SNS: eventSnsEntity(headersDD, headersNone)}, + {SNS: eventSnsEntity(headersW3C, headersNone)}, + }, + }, + }, + expCtx: ddTraceContext, + expNoErr: true, + }, + { + name: "sqs-event-uses-first-record-empty", + events: []interface{}{ + events.SNSEvent{ + Records: 
[]events.SNSEventRecord{ + // Uses the first message only + {SNS: eventSnsEntity(headersNone, headersNone)}, + {SNS: eventSnsEntity(headersW3C, headersNone)}, + }, + }, + }, + expCtx: nil, + expNoErr: false, + }, + + // events.SNSEntity + { + name: "unable-to-get-carrier", + events: []interface{}{ + events.SNSEntity{}, + }, + expCtx: nil, + expNoErr: false, + }, + { + name: "extraction-error", + events: []interface{}{ + events.SNSEvent{ + Records: []events.SNSEventRecord{ + {SNS: eventSnsEntity(headersNone, headersNone)}, + }, + }, + }, + expCtx: nil, + expNoErr: false, + }, + { + name: "extract-binary", + events: []interface{}{ + events.SNSEvent{ + Records: []events.SNSEventRecord{ + {SNS: eventSnsEntity(headersAll, headersNone)}, + }, + }, + }, + expCtx: w3cTraceContext, + expNoErr: true, + }, + { + name: "extract-string", + events: []interface{}{ + events.SNSEvent{ + Records: []events.SNSEventRecord{ + {SNS: eventSnsEntity(headersNone, headersAll)}, + }, + }, + }, + expCtx: w3cTraceContext, + expNoErr: true, + }, + // events.APIGatewayProxyRequest: { name: "APIGatewayProxyRequest", @@ -404,6 +523,112 @@ func TestExtractorExtract(t *testing.T) { } } +func TestExtractorExtractPayloadJson(t *testing.T) { + testcases := []struct { + filename string + eventTyp string + expCtx *TraceContext + }{ + { + filename: "api-gateway.json", + eventTyp: "APIGatewayProxyRequest", + expCtx: &TraceContext{ + TraceID: 12345, + ParentID: 67890, + SamplingPriority: 2, + }, + }, + { + filename: "sns-batch.json", + eventTyp: "SNSEvent", + expCtx: &TraceContext{ + TraceID: 4948377316357291421, + ParentID: 6746998015037429512, + SamplingPriority: 1, + }, + }, + { + filename: "sns.json", + eventTyp: "SNSEvent", + expCtx: &TraceContext{ + TraceID: 4948377316357291421, + ParentID: 6746998015037429512, + SamplingPriority: 1, + }, + }, + { + filename: "snssqs.json", + eventTyp: "SQSEvent", + expCtx: &TraceContext{ + TraceID: 1728904347387697031, + ParentID: 353722510835624345, + 
SamplingPriority: 1, + }, + }, + { + filename: "sqs-aws-header.json", + eventTyp: "SQSEvent", + expCtx: &TraceContext{ + TraceID: 12297829382473034410, + ParentID: 13527612320720337851, + SamplingPriority: 1, + }, + }, + { + filename: "sqs-batch.json", + eventTyp: "SQSEvent", + expCtx: &TraceContext{ + TraceID: 2684756524522091840, + ParentID: 7431398482019833808, + SamplingPriority: 1, + }, + }, + { + filename: "sqs.json", + eventTyp: "SQSEvent", + expCtx: &TraceContext{ + TraceID: 2684756524522091840, + ParentID: 7431398482019833808, + SamplingPriority: 1, + }, + }, + } + + for _, tc := range testcases { + t.Run(tc.filename, func(t *testing.T) { + body, err := os.ReadFile("../testdata/event_samples/" + tc.filename) + assert.NoError(t, err) + + var ev interface{} + switch tc.eventTyp { + case "APIGatewayProxyRequest": + var event events.APIGatewayProxyRequest + err = json.Unmarshal(body, &event) + assert.NoError(t, err) + ev = event + case "SNSEvent": + var event events.SNSEvent + err = json.Unmarshal(body, &event) + assert.NoError(t, err) + ev = event + case "SQSEvent": + var event events.SQSEvent + err = json.Unmarshal(body, &event) + assert.NoError(t, err) + ev = event + default: + t.Fatalf("bad type: %s", tc.eventTyp) + } + + extractor := Extractor{} + ctx, err := extractor.Extract(ev) + t.Logf("Extract returned TraceContext=%#v error=%#v", ctx, err) + assert.NoError(t, err) + assert.Equal(t, tc.expCtx, ctx) + }) + } +} + func TestPropagationStyle(t *testing.T) { testcases := []struct { name string @@ -430,9 +655,6 @@ func TestPropagationStyle(t *testing.T) { expTraceID: w3c.trace.asUint, }, { - // XXX: This is surprising - // The go tracer is designed to always place the tracecontext propagator first - // see https://github.com/DataDog/dd-trace-go/blob/6a938b3b4054ce036cc60147ab42a86f743fcdd5/ddtrace/tracer/textmap.go#L231 name: "datadog,tracecontext-type-headers-all", propType: "datadog,tracecontext", hdrs: headersAll, From 
5e200ffe39ad8d7adade5eae1092e7a10c8992ad Mon Sep 17 00:00:00 2001 From: Yoann Ghigoff Date: Mon, 18 Dec 2023 19:41:31 +0100 Subject: [PATCH 39/66] [SEC-11866] cws-instrumentation: add selftests command (#21445) [SEC-11866] cws-instrumentation: add selftests command --- .../subcommands/selftestscmd/selftests.go | 81 +++++++++++++++++++ .../subcommands/tracecmd/trace.go | 3 + 2 files changed, 84 insertions(+) create mode 100644 cmd/cws-instrumentation/subcommands/selftestscmd/selftests.go diff --git a/cmd/cws-instrumentation/subcommands/selftestscmd/selftests.go b/cmd/cws-instrumentation/subcommands/selftestscmd/selftests.go new file mode 100644 index 00000000000000..4908862b7fbf6e --- /dev/null +++ b/cmd/cws-instrumentation/subcommands/selftestscmd/selftests.go @@ -0,0 +1,81 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build linux + +// Package selftestscmd holds the selftests command of CWS injector +package selftestscmd + +import ( + "errors" + "os" + "os/exec" + "strings" + + "github.com/spf13/cobra" +) + +type execParams struct { + enabled bool + path string + args string +} + +type openParams struct { + enabled bool + path string +} + +type selftestsCliParams struct { + exec execParams + open openParams +} + +// Command returns the commands for the selftests subcommand +func Command() []*cobra.Command { + var params selftestsCliParams + + selftestsCmd := &cobra.Command{ + Use: "selftests", + Short: "run selftests against the tracer", + RunE: func(cmd *cobra.Command, args []string) error { + var err error + if params.exec.enabled { + err = errors.Join(err, selftestExec(¶ms.exec)) + } + if params.open.enabled { + err = errors.Join(err, selftestOpen(¶ms.open)) + } + return err + }, + } + + selftestsCmd.Flags().BoolVar(¶ms.exec.enabled, "exec", false, "run the exec selftest") + selftestsCmd.Flags().StringVar(¶ms.exec.path, "exec.path", "/usr/bin/date", "path to the file to execute") + selftestsCmd.Flags().StringVar(¶ms.exec.args, "exec.args", "", "arguments to pass to the executable") + selftestsCmd.Flags().BoolVar(¶ms.open.enabled, "open", false, "run the open selftest") + selftestsCmd.Flags().StringVar(¶ms.open.path, "open.path", "/tmp/open.test", "path to the file to open") + + return []*cobra.Command{selftestsCmd} +} + +func selftestExec(params *execParams) error { + if params.args != "" { + return exec.Command(params.path, strings.Split(params.args, " ")...).Run() + } + return exec.Command(params.path).Run() +} + +func selftestOpen(params *openParams) error { + f, createErr := os.OpenFile(params.path, os.O_CREATE|os.O_EXCL, 0400) + if createErr != nil { + f, openErr := os.Open(params.path) + if openErr != nil { + return errors.Join(createErr, openErr) + } + return f.Close() + } + return errors.Join(f.Close(), os.Remove(params.path)) +} diff --git 
a/cmd/cws-instrumentation/subcommands/tracecmd/trace.go b/cmd/cws-instrumentation/subcommands/tracecmd/trace.go index 379573e97d4f44..29db0e7d7cb291 100644 --- a/cmd/cws-instrumentation/subcommands/tracecmd/trace.go +++ b/cmd/cws-instrumentation/subcommands/tracecmd/trace.go @@ -11,6 +11,7 @@ package tracecmd import ( "github.com/spf13/cobra" + "github.com/DataDog/datadog-agent/cmd/cws-instrumentation/subcommands/selftestscmd" "github.com/DataDog/datadog-agent/pkg/security/ptracer" ) @@ -58,5 +59,7 @@ func Command() []*cobra.Command { traceCmd.Flags().Int32Var(¶ms.UID, uid, -1, "uid used to start the tracee") traceCmd.Flags().Int32Var(¶ms.GID, gid, -1, "gid used to start the tracee") + traceCmd.AddCommand(selftestscmd.Command()...) + return []*cobra.Command{traceCmd} } From 2bbefc8ae6539050f9fd47b365a552993af90a96 Mon Sep 17 00:00:00 2001 From: "Brian L. Troutwine" Date: Mon, 18 Dec 2023 11:07:09 -0800 Subject: [PATCH 40/66] Introduce memory goal process-agent experiments (#21599) * Introduce memory goal process-agent experiments This commit re-introduces the process-agent experiments cut in #21017. We modify the optimization goal to be memory focused. Signed-off-by: Brian L. Troutwine * set DD_API_KEY per experiment Signed-off-by: Brian L. Troutwine --------- Signed-off-by: Brian L. 
Troutwine --- .../datadog-agent/datadog.yaml | 13 ++++++++++++ .../experiment.yaml | 17 +++++++++++++++ .../lading/lading.yaml | 21 +++++++++++++++++++ .../datadog-agent/datadog.yaml | 16 ++++++++++++++ .../experiment.yaml | 17 +++++++++++++++ .../lading/lading.yaml | 21 +++++++++++++++++++ .../datadog-agent/datadog.yaml | 16 ++++++++++++++ .../datadog-agent/system-probe.yaml | 0 .../experiment.yaml | 18 ++++++++++++++++ .../lading/lading.yaml | 21 +++++++++++++++++++ 10 files changed, 160 insertions(+) create mode 100644 test/regression/cases/process_agent_real_time_mode/datadog-agent/datadog.yaml create mode 100644 test/regression/cases/process_agent_real_time_mode/experiment.yaml create mode 100644 test/regression/cases/process_agent_real_time_mode/lading/lading.yaml create mode 100644 test/regression/cases/process_agent_standard_check/datadog-agent/datadog.yaml create mode 100644 test/regression/cases/process_agent_standard_check/experiment.yaml create mode 100644 test/regression/cases/process_agent_standard_check/lading/lading.yaml create mode 100644 test/regression/cases/process_agent_standard_check_with_stats/datadog-agent/datadog.yaml create mode 100644 test/regression/cases/process_agent_standard_check_with_stats/datadog-agent/system-probe.yaml create mode 100644 test/regression/cases/process_agent_standard_check_with_stats/experiment.yaml create mode 100644 test/regression/cases/process_agent_standard_check_with_stats/lading/lading.yaml diff --git a/test/regression/cases/process_agent_real_time_mode/datadog-agent/datadog.yaml b/test/regression/cases/process_agent_real_time_mode/datadog-agent/datadog.yaml new file mode 100644 index 00000000000000..aee29a8c347856 --- /dev/null +++ b/test/regression/cases/process_agent_real_time_mode/datadog-agent/datadog.yaml @@ -0,0 +1,13 @@ +auth_token_file_path: /tmp/agent-auth-token +hostname: smp-regression + +dd_url: http://127.0.0.1:9092 + +confd_path: /etc/datadog-agent/conf.d + +# Disable cloud detection. 
This stops the Agent from poking around the +# execution environment & network. This is particularly important if the target +# has network access. +cloud_provider_metadata: [] + +dogstatsd_socket: '/tmp/dsd.socket' diff --git a/test/regression/cases/process_agent_real_time_mode/experiment.yaml b/test/regression/cases/process_agent_real_time_mode/experiment.yaml new file mode 100644 index 00000000000000..005f9511e733a1 --- /dev/null +++ b/test/regression/cases/process_agent_real_time_mode/experiment.yaml @@ -0,0 +1,17 @@ +optimization_goal: memory +erratic: false + +environment: + DD_TELEMETRY_ENABLED: true + DD_PROCESS_CONFIG_PROCESS_DD_URL: http://127.0.0.1:9092 + # For regression detection we only care about the processes generated inside the container + # so this disables checking of the processes of the host the container is running on + HOST_PROC: /tmp/procfs + DD_API_KEY: 00000001 + +profiling_environment: + DD_INTERNAL_PROFILING_ENABLED: true + DD_INTERNAL_PROFILING_UNIX_SOCKET: /var/run/datadog/apm.socket + DD_INTERNAL_PROFILING_DELTA_PROFILES: true + DD_INTERNAL_PROFILING_ENABLE_GOROUTINE_STACKTRACES: true + HOST_PROC: /tmp/procfs diff --git a/test/regression/cases/process_agent_real_time_mode/lading/lading.yaml b/test/regression/cases/process_agent_real_time_mode/lading/lading.yaml new file mode 100644 index 00000000000000..ac61bdd3b0c9fe --- /dev/null +++ b/test/regression/cases/process_agent_real_time_mode/lading/lading.yaml @@ -0,0 +1,21 @@ +generator: + - proc_fs: + seed: [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, + 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131] + root: /tmp/procfs + copy_from_host: + - /proc/uptime + - /proc/stat + - /proc/cpuinfo + total_processes: 128 + +blackhole: + - http: + binding_addr: "127.0.0.1:9092" + body_variant: "raw_bytes" + # process agent RT mode enabled response + raw_bytes: [0x1, 0x0, 0x17, 0x0, 0xa, 0x2, 0x20, 0x17, 0x1a, 0x4, 0x8, 0x2, 0x10, 0x2] + +target_metrics: + - 
prometheus: + uri: "http://127.0.0.1:5000/telemetry" diff --git a/test/regression/cases/process_agent_standard_check/datadog-agent/datadog.yaml b/test/regression/cases/process_agent_standard_check/datadog-agent/datadog.yaml new file mode 100644 index 00000000000000..bf270d87fd2c47 --- /dev/null +++ b/test/regression/cases/process_agent_standard_check/datadog-agent/datadog.yaml @@ -0,0 +1,16 @@ +api_key: 00000000000000000000000000000000 +auth_token_file_path: /tmp/agent-auth-token +hostname: smp-regression + +dd_url: http://127.0.0.1:9092 + +confd_path: /etc/datadog-agent/conf.d + +# Disable cloud detection. This stops the Agent from poking around the +# execution environment & network. This is particularly important if the target +# has network access. +cloud_provider_metadata: [] + +process_config: + process_collection: + enabled: true diff --git a/test/regression/cases/process_agent_standard_check/experiment.yaml b/test/regression/cases/process_agent_standard_check/experiment.yaml new file mode 100644 index 00000000000000..005f9511e733a1 --- /dev/null +++ b/test/regression/cases/process_agent_standard_check/experiment.yaml @@ -0,0 +1,17 @@ +optimization_goal: memory +erratic: false + +environment: + DD_TELEMETRY_ENABLED: true + DD_PROCESS_CONFIG_PROCESS_DD_URL: http://127.0.0.1:9092 + # For regression detection we only care about the processes generated inside the container + # so this disables checking of the processes of the host the container is running on + HOST_PROC: /tmp/procfs + DD_API_KEY: 00000001 + +profiling_environment: + DD_INTERNAL_PROFILING_ENABLED: true + DD_INTERNAL_PROFILING_UNIX_SOCKET: /var/run/datadog/apm.socket + DD_INTERNAL_PROFILING_DELTA_PROFILES: true + DD_INTERNAL_PROFILING_ENABLE_GOROUTINE_STACKTRACES: true + HOST_PROC: /tmp/procfs diff --git a/test/regression/cases/process_agent_standard_check/lading/lading.yaml b/test/regression/cases/process_agent_standard_check/lading/lading.yaml new file mode 100644 index 
00000000000000..7c2239a5b4fd8c --- /dev/null +++ b/test/regression/cases/process_agent_standard_check/lading/lading.yaml @@ -0,0 +1,21 @@ +generator: + - proc_fs: + seed: [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, + 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131] + root: /tmp/procfs + copy_from_host: + - /proc/uptime + - /proc/stat + - /proc/cpuinfo + total_processes: 128 + +blackhole: + - http: + binding_addr: "127.0.0.1:9092" + body_variant: "raw_bytes" + # process agent RT mode disabled response + raw_bytes: [0x1, 0x0, 0x17, 0x0, 0xa, 0x2, 0x20, 0x17, 0x1a, 0x2, 0x10, 0xa] + +target_metrics: + - prometheus: + uri: "http://127.0.0.1:5000/telemetry" diff --git a/test/regression/cases/process_agent_standard_check_with_stats/datadog-agent/datadog.yaml b/test/regression/cases/process_agent_standard_check_with_stats/datadog-agent/datadog.yaml new file mode 100644 index 00000000000000..bf270d87fd2c47 --- /dev/null +++ b/test/regression/cases/process_agent_standard_check_with_stats/datadog-agent/datadog.yaml @@ -0,0 +1,16 @@ +api_key: 00000000000000000000000000000000 +auth_token_file_path: /tmp/agent-auth-token +hostname: smp-regression + +dd_url: http://127.0.0.1:9092 + +confd_path: /etc/datadog-agent/conf.d + +# Disable cloud detection. This stops the Agent from poking around the +# execution environment & network. This is particularly important if the target +# has network access. 
+cloud_provider_metadata: [] + +process_config: + process_collection: + enabled: true diff --git a/test/regression/cases/process_agent_standard_check_with_stats/datadog-agent/system-probe.yaml b/test/regression/cases/process_agent_standard_check_with_stats/datadog-agent/system-probe.yaml new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/test/regression/cases/process_agent_standard_check_with_stats/experiment.yaml b/test/regression/cases/process_agent_standard_check_with_stats/experiment.yaml new file mode 100644 index 00000000000000..d1a8ec9651ba5f --- /dev/null +++ b/test/regression/cases/process_agent_standard_check_with_stats/experiment.yaml @@ -0,0 +1,18 @@ +optimization_goal: memory +erratic: false + +environment: + DD_TELEMETRY_ENABLED: true + DD_PROCESS_CONFIG_PROCESS_DD_URL: http://127.0.0.1:9092 + # For regression detection we only care about the processes generated inside the container + # so this disables checking of the processes of the host the container is running on + HOST_PROC: /tmp/procfs + DD_SYSTEM_PROBE_PROCESS_ENABLED: true + DD_API_KEY: 00000001 + +profiling_environment: + DD_INTERNAL_PROFILING_ENABLED: true + DD_INTERNAL_PROFILING_UNIX_SOCKET: /var/run/datadog/apm.socket + DD_INTERNAL_PROFILING_DELTA_PROFILES: true + DD_INTERNAL_PROFILING_ENABLE_GOROUTINE_STACKTRACES: true + HOST_PROC: /tmp/procfs diff --git a/test/regression/cases/process_agent_standard_check_with_stats/lading/lading.yaml b/test/regression/cases/process_agent_standard_check_with_stats/lading/lading.yaml new file mode 100644 index 00000000000000..7c2239a5b4fd8c --- /dev/null +++ b/test/regression/cases/process_agent_standard_check_with_stats/lading/lading.yaml @@ -0,0 +1,21 @@ +generator: + - proc_fs: + seed: [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, + 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131] + root: /tmp/procfs + copy_from_host: + - /proc/uptime + - /proc/stat + - /proc/cpuinfo + total_processes: 128 + 
+blackhole: + - http: + binding_addr: "127.0.0.1:9092" + body_variant: "raw_bytes" + # process agent RT mode disabled response + raw_bytes: [0x1, 0x0, 0x17, 0x0, 0xa, 0x2, 0x20, 0x17, 0x1a, 0x2, 0x10, 0xa] + +target_metrics: + - prometheus: + uri: "http://127.0.0.1:5000/telemetry" From 04b658233d0cfc9ced0a38ed9a1ef7d00b4c7385 Mon Sep 17 00:00:00 2001 From: Rey Abolofia Date: Mon, 18 Dec 2023 13:30:01 -0800 Subject: [PATCH 41/66] Add missing required field SNSEntity.MessageAttributes. (#21631) --- pkg/serverless/trigger/events/events.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/pkg/serverless/trigger/events/events.go b/pkg/serverless/trigger/events/events.go index 0056cb7503c766..2bf48358779c45 100644 --- a/pkg/serverless/trigger/events/events.go +++ b/pkg/serverless/trigger/events/events.go @@ -277,11 +277,12 @@ type SNSEventRecord struct { // SNSEntity mirrors events.SNSEntity type, removing unused fields. type SNSEntity struct { - MessageID string - Type string - TopicArn string - Timestamp time.Time - Subject string + MessageID string + Type string + TopicArn string + MessageAttributes map[string]interface{} + Timestamp time.Time + Subject string } // SQSEvent mirrors events.SQSEvent type, removing unused fields. From 3ee4483bf5b8294aff4b6175843d1e9bf41cb35f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Dec 2023 22:31:57 +0100 Subject: [PATCH 42/66] Bump docker from 6.1.3 to 7.0.0 in /test/e2e/cws-tests (#21608) Bumps [docker](https://github.com/docker/docker-py) from 6.1.3 to 7.0.0. - [Release notes](https://github.com/docker/docker-py/releases) - [Commits](https://github.com/docker/docker-py/compare/6.1.3...7.0.0) --- updated-dependencies: - dependency-name: docker dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- test/e2e/cws-tests/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/cws-tests/requirements.txt b/test/e2e/cws-tests/requirements.txt index 88fb710f101673..afd5d527093e5c 100644 --- a/test/e2e/cws-tests/requirements.txt +++ b/test/e2e/cws-tests/requirements.txt @@ -1,7 +1,7 @@ kubernetes==28.1.0 datadog-api-client==2.20.0 pyaml==23.9.7 -docker==6.1.3 +docker==7.0.0 retry==0.9.2 emoji==2.9.0 requests==2.31.0 From b1bcfe5da5d8d3188f87f74bfb423f9cb7b2f737 Mon Sep 17 00:00:00 2001 From: Guy Arbitman Date: Tue, 19 Dec 2023 06:18:49 +0200 Subject: [PATCH 43/66] usm: http: Enable status code aggregation by default (#21602) --- pkg/config/system_probe.go | 2 +- ...ode-aggregation-by-default-a9a70cd3af443439.yaml | 13 +++++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) create mode 100644 releasenotes/notes/enable-status-code-aggregation-by-default-a9a70cd3af443439.yaml diff --git a/pkg/config/system_probe.go b/pkg/config/system_probe.go index bd5ee872bbc6b6..9fa304e4087186 100644 --- a/pkg/config/system_probe.go +++ b/pkg/config/system_probe.go @@ -226,7 +226,7 @@ func InitSystemProbeConfig(cfg Config) { cfg.BindEnvAndSetDefault(join(smjtNS, "args"), defaultServiceMonitoringJavaAgentArgs) cfg.BindEnvAndSetDefault(join(smjtNS, "allow_regex"), "") cfg.BindEnvAndSetDefault(join(smjtNS, "block_regex"), "") - cfg.BindEnvAndSetDefault(join(smNS, "enable_http_stats_by_status_code"), false) + cfg.BindEnvAndSetDefault(join(smNS, "enable_http_stats_by_status_code"), true) cfg.BindEnvAndSetDefault(join(netNS, "enable_gateway_lookup"), true, "DD_SYSTEM_PROBE_NETWORK_ENABLE_GATEWAY_LOOKUP") // Default value (100000) is set in `adjustUSM`, to avoid having "deprecation warning", due to the default value. 
diff --git a/releasenotes/notes/enable-status-code-aggregation-by-default-a9a70cd3af443439.yaml b/releasenotes/notes/enable-status-code-aggregation-by-default-a9a70cd3af443439.yaml new file mode 100644 index 00000000000000..0a3402ade73d08 --- /dev/null +++ b/releasenotes/notes/enable-status-code-aggregation-by-default-a9a70cd3af443439.yaml @@ -0,0 +1,13 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +enhancements: + - | + USM will report the actual status code of the HTTP traffic, instead of reporting + only the status code family (2xx, 3xx, etc.). + From 5af28a56611ae3fa31343fc8f51d992ccc406a7e Mon Sep 17 00:00:00 2001 From: Marethyu <45374460+Pythyu@users.noreply.github.com> Date: Tue, 19 Dec 2023 05:19:25 +0100 Subject: [PATCH 44/66] [Migration] Step By Step Kitchen tests to e2e (#21448) [Migration] Step By Step Kitchen tests to e2e --- .gitlab/e2e_test_junit_upload.yml | 17 ++ .gitlab/kitchen_testing/centos.yml | 32 -- .gitlab/kitchen_testing/debian.yml | 14 - .gitlab/kitchen_testing/suse.yml | 16 - .gitlab/kitchen_testing/ubuntu.yml | 32 -- .gitlab/new-e2e_common/testing.yml | 6 + .gitlab/new-e2e_testing/amazonlinux.yml | 61 +++- .gitlab/new-e2e_testing/centos.yml | 42 ++- .gitlab/new-e2e_testing/debian.yml | 69 ++++- .gitlab/new-e2e_testing/suse.yml | 48 ++- .gitlab/new-e2e_testing/ubuntu.yml | 74 ++++- .../install-script/install_script_test.go | 1 - .../agent-platform/platforms/platforms.json | 2 +- .../step-by-step/step_by_step_test.go | 274 ++++++++++++++++++ 14 files changed, 558 insertions(+), 130 deletions(-) create mode 100644 test/new-e2e/tests/agent-platform/step-by-step/step_by_step_test.go diff --git 
a/.gitlab/e2e_test_junit_upload.yml b/.gitlab/e2e_test_junit_upload.yml index 2f3ce4e41b4579..7bdcf421eac95c 100644 --- a/.gitlab/e2e_test_junit_upload.yml +++ b/.gitlab/e2e_test_junit_upload.yml @@ -39,6 +39,23 @@ e2e_test_junit_upload: - new-e2e-agent-platform-install-script-centos-fips-a7-x86_64 - new-e2e-agent-platform-install-script-centos-fips-iot-agent-a7-x86_64 - new-e2e-agent-platform-install-script-centos-fips-dogstatsd-a7-x86_64 + - new-e2e-agent-platform-step-by-step-debian-a7-x64 + - new-e2e-agent-platform-step-by-step-debian-a7-arm64 + - new-e2e-agent-platform-step-by-step-debian-a6-x86_64 + - new-e2e-agent-platform-step-by-step-debian-a6-arm64 + - new-e2e-agent-platform-step-by-step-ubuntu-a6-x86_64 + - new-e2e-agent-platform-step-by-step-ubuntu-a6-arm64 + - new-e2e-agent-platform-step-by-step-ubuntu-a7-x86_64 + - new-e2e-agent-platform-step-by-step-ubuntu-a7-arm64 + - new-e2e-agent-platform-step-by-step-suse-a6-x86_64 + - new-e2e-agent-platform-step-by-step-suse-a7-x86_64 + - new-e2e-agent-platform-step-by-step-suse-a7-arm64 + - new-e2e-agent-platform-step-by-step-centos-a6-x86_64 + - new-e2e-agent-platform-step-by-step-centos-a7-x86_64 + - new-e2e-agent-platform-step-by-step-amazonlinux-a6-x86_64 + - new-e2e-agent-platform-step-by-step-amazonlinux-a6-arm64 + - new-e2e-agent-platform-step-by-step-amazonlinux-a7-x64 + - new-e2e-agent-platform-step-by-step-amazonlinux-a7-arm64 - new-e2e-npm-main - new-e2e-aml-main - new-e2e-process-main diff --git a/.gitlab/kitchen_testing/centos.yml b/.gitlab/kitchen_testing/centos.yml index a89eefef8dfb09..c743e22c958e5a 100644 --- a/.gitlab/kitchen_testing/centos.yml +++ b/.gitlab/kitchen_testing/centos.yml @@ -99,38 +99,6 @@ # We only want to run step-by-step tests on deploy pipelines, # which is why they have a different rule (if_deploy_6/7) -kitchen_centos_step_by_step_agent-a6: - extends: - - .kitchen_os_with_cws - - .kitchen_scenario_centos_without_fips_a6 - - .kitchen_test_step_by_step_agent - rules: - 
!reference [.on_deploy_a6] - -kitchen_centos_fips_step_by_step_agent-a6: - extends: - - .kitchen_os_with_cws - - .kitchen_scenario_centos_with_fips_a6 - - .kitchen_test_step_by_step_agent - rules: - !reference [.on_deploy_a6] - -kitchen_centos_step_by_step_agent-a7: - extends: - - .kitchen_os_with_cws - - .kitchen_scenario_centos_without_fips_a7 - - .kitchen_test_step_by_step_agent - rules: - !reference [.on_deploy_a7] - -kitchen_centos_fips_step_by_step_agent-a7: - extends: - - .kitchen_os_with_cws - - .kitchen_scenario_centos_with_fips_a7 - - .kitchen_test_step_by_step_agent - rules: - !reference [.on_deploy_a7] - # Agent 5 RPMs won't install on CentOS/RHEL 8 in FIPS mode, so we always # run upgrade5 on all systems with FIPS off kitchen_centos_upgrade5_agent-a6: diff --git a/.gitlab/kitchen_testing/debian.yml b/.gitlab/kitchen_testing/debian.yml index 6f0fff4d2bd146..54c4bc196044bc 100644 --- a/.gitlab/kitchen_testing/debian.yml +++ b/.gitlab/kitchen_testing/debian.yml @@ -85,20 +85,6 @@ kitchen_debian_install_script_heroku_agent-a6: # We only want to run step-by-step tests on deploy pipelines, # which is why they have a different rule (if_deploy_6/7) -kitchen_debian_step_by_step_agent-a6: - extends: - - .kitchen_os_with_cws - - .kitchen_scenario_debian_a6_x64 - - .kitchen_test_step_by_step_agent - rules: !reference [.on_deploy_a6] - -kitchen_debian_step_by_step_agent-a7: - extends: - - .kitchen_os_with_cws - - .kitchen_scenario_debian_a7_x64 - - .kitchen_test_step_by_step_agent - rules: !reference [.on_deploy_a7] - kitchen_debian_upgrade5_agent-a6: extends: - .kitchen_scenario_debian_a6_x64 diff --git a/.gitlab/kitchen_testing/suse.yml b/.gitlab/kitchen_testing/suse.yml index d3e66b7e6d82ce..753e3eee7afbd9 100644 --- a/.gitlab/kitchen_testing/suse.yml +++ b/.gitlab/kitchen_testing/suse.yml @@ -78,22 +78,6 @@ kitchen_suse_install_script_dogstatsd_x64-a7: # We only want to run step-by-step tests on deploy pipelines, # which is why they have a different rule 
(if_deploy_6/7) -kitchen_suse_step_by_step_agent_x64-a6: - extends: - - .kitchen_os_with_cws - - .kitchen_scenario_suse_x64_a6 - - .kitchen_test_step_by_step_agent - rules: - !reference [.on_deploy_a6] - -kitchen_suse_step_by_step_agent_x64-a7: - extends: - - .kitchen_os_with_cws - - .kitchen_scenario_suse_x64_a7 - - .kitchen_test_step_by_step_agent - rules: - !reference [.on_deploy_a7] - kitchen_suse_install_script_agent_arm64-a7: # Run install script test on branches, on a reduced number of platforms rules: diff --git a/.gitlab/kitchen_testing/ubuntu.yml b/.gitlab/kitchen_testing/ubuntu.yml index a32c1af238eae8..42d2e3b4f3c418 100644 --- a/.gitlab/kitchen_testing/ubuntu.yml +++ b/.gitlab/kitchen_testing/ubuntu.yml @@ -68,38 +68,6 @@ # We only want to run step-by-step tests on deploy pipelines, # which is why they have a different rule (if_deploy_6/7) -kitchen_ubuntu_step_by_step_agent-a6_x64: - extends: - - .kitchen_os_with_cws - - .kitchen_scenario_ubuntu_a6_x64 - - .kitchen_test_step_by_step_agent - rules: - !reference [.on_deploy_a6] - -kitchen_ubuntu_step_by_step_agent-a6_arm64: - extends: - - .kitchen_os_with_cws - - .kitchen_scenario_ubuntu_a6_arm64 - - .kitchen_test_step_by_step_agent - rules: - !reference [.on_deploy_a6] - -kitchen_ubuntu_step_by_step_agent-a7_x64: - extends: - - .kitchen_os_with_cws - - .kitchen_scenario_ubuntu_a7_x64 - - .kitchen_test_step_by_step_agent - rules: - !reference [.on_deploy_a7] - -kitchen_ubuntu_step_by_step_agent-a7_arm64: - extends: - - .kitchen_os_with_cws - - .kitchen_scenario_ubuntu_a7_arm64 - - .kitchen_test_step_by_step_agent - rules: - !reference [.on_deploy_a7] - kitchen_ubuntu_upgrade5_agent-a6: extends: - .kitchen_scenario_ubuntu_a6_x64 diff --git a/.gitlab/new-e2e_common/testing.yml b/.gitlab/new-e2e_common/testing.yml index bc8867d5f13a9b..08f4951ef25067 100644 --- a/.gitlab/new-e2e_common/testing.yml +++ b/.gitlab/new-e2e_common/testing.yml @@ -15,3 +15,9 @@ TARGETS: ./tests/agent-platform/install-script TEAM: 
agent-platform EXTRA_PARAMS: --osversion $E2E_OSVERS --platform $E2E_PLATFORM --cws-supported-osversion $E2E_CWS_SUPPORTED_OSVERS --major-version $AGENT_MAJOR_VERSION --arch $E2E_ARCH --flavor $FLAVOR + +.new-e2e_step_by_step: + variables: + TARGETS: ./tests/agent-platform/step-by-step + TEAM: agent-platform + EXTRA_PARAMS: --osversion $E2E_OSVERS --platform $E2E_PLATFORM --cws-supported-osversion $E2E_CWS_SUPPORTED_OSVERS --major-version $AGENT_MAJOR_VERSION --arch $E2E_ARCH --flavor $FLAVOR diff --git a/.gitlab/new-e2e_testing/amazonlinux.yml b/.gitlab/new-e2e_testing/amazonlinux.yml index a88cf563d3d4f7..5a4d312c4ab647 100644 --- a/.gitlab/new-e2e_testing/amazonlinux.yml +++ b/.gitlab/new-e2e_testing/amazonlinux.yml @@ -37,7 +37,7 @@ new-e2e-agent-platform-install-script-amazonlinux-a6-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_amazonlinux @@ -48,7 +48,7 @@ new-e2e-agent-platform-install-script-amazonlinux-a6-x86_64: new-e2e-agent-platform-install-script-amazonlinux-a6-arm64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_amazonlinux @@ -59,7 +59,7 @@ new-e2e-agent-platform-install-script-amazonlinux-a6-arm64: new-e2e-agent-platform-install-script-amazonlinux-a7-x64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_amazonlinux @@ -72,7 +72,7 @@ new-e2e-agent-platform-install-script-amazonlinux-a7-x64: new-e2e-agent-platform-install-script-amazonlinux-a7-arm64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_amazonlinux @@ -82,3 +82,56 @@ new-e2e-agent-platform-install-script-amazonlinux-a7-arm64: !reference [.on_all_new-e2e_tests_a7] variables: FLAVOR: datadog-agent + + +new-e2e-agent-platform-step-by-step-amazonlinux-a6-x86_64: + stage: kitchen_testing + extends: + - .new_e2e_template + - .new-e2e_step_by_step + - 
.new-e2e_os_amazonlinux + - .new-e2e_amazonlinux_a6_x86_64 + - .new-e2e_agent_a6 + rules: + !reference [.on_deploy_a6] + variables: + FLAVOR: datadog-agent + +new-e2e-agent-platform-step-by-step-amazonlinux-a6-arm64: + stage: kitchen_testing + extends: + - .new_e2e_template + - .new-e2e_step_by_step + - .new-e2e_os_amazonlinux + - .new-e2e_amazonlinux_a6_arm64 + - .new-e2e_agent_a6 + rules: + !reference [.on_deploy_a6] + variables: + FLAVOR: datadog-agent + +new-e2e-agent-platform-step-by-step-amazonlinux-a7-x64: + stage: kitchen_testing + extends: + - .new_e2e_template + - .new-e2e_step_by_step + - .new-e2e_os_amazonlinux + - .new-e2e_amazonlinux_a7_x86_64 + - .new-e2e_agent_a7 + rules: + !reference [.on_deploy_a7] + variables: + FLAVOR: datadog-agent + +new-e2e-agent-platform-step-by-step-amazonlinux-a7-arm64: + stage: kitchen_testing + extends: + - .new_e2e_template + - .new-e2e_step_by_step + - .new-e2e_os_amazonlinux + - .new-e2e_amazonlinux_a7_arm64 + - .new-e2e_agent_a7 + rules: + !reference [.on_deploy_a7] + variables: + FLAVOR: datadog-agent diff --git a/.gitlab/new-e2e_testing/centos.yml b/.gitlab/new-e2e_testing/centos.yml index 70c1170d6fd461..7cbe9b62e960d3 100644 --- a/.gitlab/new-e2e_testing/centos.yml +++ b/.gitlab/new-e2e_testing/centos.yml @@ -39,7 +39,7 @@ new-e2e-agent-platform-install-script-centos-a6-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_centos @@ -50,7 +50,7 @@ new-e2e-agent-platform-install-script-centos-a6-x86_64: new-e2e-agent-platform-install-script-centos-a7-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_centos @@ -63,7 +63,7 @@ new-e2e-agent-platform-install-script-centos-a7-x86_64: new-e2e-agent-platform-install-script-centos-iot-agent-a7-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_centos @@ -76,7 +76,7 @@ 
new-e2e-agent-platform-install-script-centos-iot-agent-a7-x86_64: new-e2e-agent-platform-install-script-centos-dogstatsd-a7-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_centos @@ -87,7 +87,7 @@ new-e2e-agent-platform-install-script-centos-dogstatsd-a7-x86_64: new-e2e-agent-platform-install-script-centos-fips-a6-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_centos @@ -98,7 +98,7 @@ new-e2e-agent-platform-install-script-centos-fips-a6-x86_64: new-e2e-agent-platform-install-script-centos-fips-a7-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_centos @@ -111,7 +111,7 @@ new-e2e-agent-platform-install-script-centos-fips-a7-x86_64: new-e2e-agent-platform-install-script-centos-fips-iot-agent-a7-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_centos @@ -124,7 +124,7 @@ new-e2e-agent-platform-install-script-centos-fips-iot-agent-a7-x86_64: new-e2e-agent-platform-install-script-centos-fips-dogstatsd-a7-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_centos @@ -132,3 +132,29 @@ new-e2e-agent-platform-install-script-centos-fips-dogstatsd-a7-x86_64: - .new-e2e_agent_a7 variables: FLAVOR: datadog-dogstatsd + +new-e2e-agent-platform-step-by-step-centos-a6-x86_64: + stage: kitchen_testing + extends: + - .new_e2e_template + - .new-e2e_step_by_step + - .new-e2e_os_centos + - .new-e2e_centos_a6_x86_64 + - .new-e2e_agent_a6 + rules: + !reference [.on_deploy_a6] + variables: + FLAVOR: datadog-agent + +new-e2e-agent-platform-step-by-step-centos-a7-x86_64: + stage: kitchen_testing + extends: + - .new_e2e_template + - .new-e2e_step_by_step + - .new-e2e_os_centos + - .new-e2e_centos_a7_x86_64 + - .new-e2e_agent_a7 + rules: + !reference [.on_deploy_a7] + variables: + 
FLAVOR: datadog-agent diff --git a/.gitlab/new-e2e_testing/debian.yml b/.gitlab/new-e2e_testing/debian.yml index bbceab7b4c6611..a305ca5fb465b1 100644 --- a/.gitlab/new-e2e_testing/debian.yml +++ b/.gitlab/new-e2e_testing/debian.yml @@ -1,4 +1,3 @@ - .new-e2e_os_debian: variables: E2E_PLATFORM: debian @@ -37,7 +36,7 @@ new-e2e-agent-platform-install-script-debian-a6-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_debian @@ -48,7 +47,7 @@ new-e2e-agent-platform-install-script-debian-a6-x86_64: new-e2e-agent-platform-install-script-debian-a6-arm64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_debian @@ -59,7 +58,7 @@ new-e2e-agent-platform-install-script-debian-a6-arm64: new-e2e-agent-platform-install-script-debian-a7-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_debian @@ -72,7 +71,7 @@ new-e2e-agent-platform-install-script-debian-a7-x86_64: new-e2e-agent-platform-install-script-debian-a7-arm64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_debian @@ -85,7 +84,7 @@ new-e2e-agent-platform-install-script-debian-a7-arm64: new-e2e-agent-platform-install-script-debian-iot-agent-a7-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_debian @@ -98,7 +97,7 @@ new-e2e-agent-platform-install-script-debian-iot-agent-a7-x86_64: new-e2e-agent-platform-install-script-debian-dogstatsd-a7-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_debian @@ -109,7 +108,7 @@ new-e2e-agent-platform-install-script-debian-dogstatsd-a7-x86_64: new-e2e-agent-platform-install-script-debian-heroku-agent-a6-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_os_debian - .new-e2e_debian_a6_x86_64 @@ 
-119,10 +118,62 @@ new-e2e-agent-platform-install-script-debian-heroku-agent-a6-x86_64: new-e2e-agent-platform-install-script-debian-heroku-agent-a7-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_os_debian - .new-e2e_debian_a7_x86_64 - .new-e2e_agent_a7 variables: FLAVOR: datadog-heroku-agent + +new-e2e-agent-platform-step-by-step-debian-a7-x64: + stage: kitchen_testing + extends: + - .new_e2e_template + - .new-e2e_step_by_step + - .new-e2e_os_debian + - .new-e2e_debian_a7_x86_64 + - .new-e2e_agent_a7 + rules: + !reference [.on_deploy_a7] + variables: + FLAVOR: datadog-agent + +new-e2e-agent-platform-step-by-step-debian-a7-arm64: + stage: kitchen_testing + extends: + - .new_e2e_template + - .new-e2e_step_by_step + - .new-e2e_os_debian + - .new-e2e_debian_a7_arm64 + - .new-e2e_agent_a7 + rules: + !reference [.on_deploy_a7] + variables: + FLAVOR: datadog-agent + +new-e2e-agent-platform-step-by-step-debian-a6-x86_64: + stage: kitchen_testing + extends: + - .new_e2e_template + - .new-e2e_step_by_step + - .new-e2e_os_debian + - .new-e2e_debian_a6_x86_64 + - .new-e2e_agent_a6 + rules: + !reference [.on_deploy_a6] + variables: + FLAVOR: datadog-agent + +new-e2e-agent-platform-step-by-step-debian-a6-arm64: + stage: kitchen_testing + extends: + - .new_e2e_template + - .new-e2e_step_by_step + - .new-e2e_os_debian + - .new-e2e_debian_a6_arm64 + - .new-e2e_agent_a6 + rules: + !reference [.on_deploy_a6] + variables: + FLAVOR: datadog-agent diff --git a/.gitlab/new-e2e_testing/suse.yml b/.gitlab/new-e2e_testing/suse.yml index b7e671034e4582..8093f141f7bd4b 100644 --- a/.gitlab/new-e2e_testing/suse.yml +++ b/.gitlab/new-e2e_testing/suse.yml @@ -35,7 +35,7 @@ new-e2e-agent-platform-install-script-suse-a6-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_suse @@ -46,7 +46,7 @@ new-e2e-agent-platform-install-script-suse-a6-x86_64: new-e2e-agent-platform-install-script-suse-a7-x86_64: 
stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_suse @@ -59,7 +59,7 @@ new-e2e-agent-platform-install-script-suse-a7-x86_64: new-e2e-agent-platform-install-script-suse-a7-arm64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_suse @@ -72,7 +72,7 @@ new-e2e-agent-platform-install-script-suse-a7-arm64: new-e2e-agent-platform-install-script-suse-iot-agent-a7-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_suse @@ -85,7 +85,7 @@ new-e2e-agent-platform-install-script-suse-iot-agent-a7-x86_64: new-e2e-agent-platform-install-script-suse-dogstatsd-a7-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_suse @@ -94,3 +94,41 @@ new-e2e-agent-platform-install-script-suse-dogstatsd-a7-x86_64: variables: FLAVOR: datadog-dogstatsd +new-e2e-agent-platform-step-by-step-suse-a6-x86_64: + stage: kitchen_testing + extends: + - .new_e2e_template + - .new-e2e_step_by_step + - .new-e2e_os_suse + - .new-e2e_suse_a6_x86_64 + - .new-e2e_agent_a6 + rules: + !reference [.on_deploy_a6] + variables: + FLAVOR: datadog-agent + +new-e2e-agent-platform-step-by-step-suse-a7-x86_64: + stage: kitchen_testing + extends: + - .new_e2e_template + - .new-e2e_step_by_step + - .new-e2e_os_suse + - .new-e2e_suse_a7_x86_64 + - .new-e2e_agent_a7 + rules: + !reference [.on_deploy_a7] + variables: + FLAVOR: datadog-agent + +new-e2e-agent-platform-step-by-step-suse-a7-arm64: + stage: kitchen_testing + extends: + - .new_e2e_template + - .new-e2e_step_by_step + - .new-e2e_os_suse + - .new-e2e_suse_a7_arm64 + - .new-e2e_agent_a7 + rules: + !reference [.on_deploy_a7] + variables: + FLAVOR: datadog-agent diff --git a/.gitlab/new-e2e_testing/ubuntu.yml b/.gitlab/new-e2e_testing/ubuntu.yml index 39819b54528414..d465ec9aa76df1 100644 --- a/.gitlab/new-e2e_testing/ubuntu.yml +++ 
b/.gitlab/new-e2e_testing/ubuntu.yml @@ -9,6 +9,12 @@ TEAM: agent-platform EXTRA_PARAMS: --osversion $E2E_OSVERS --platform $E2E_PLATFORM --cws-supported-osversion $E2E_CWS_SUPPORTED_OSVERS --major-version $AGENT_MAJOR_VERSION --arch $E2E_ARCH --flavor $FLAVOR +.new-e2e_step_by_step: + variables: + TARGETS: ./tests/agent-platform/step-by-step + TEAM: agent-platform + EXTRA_PARAMS: --osversion $E2E_OSVERS --platform $E2E_PLATFORM --cws-supported-osversion $E2E_CWS_SUPPORTED_OSVERS --major-version $AGENT_MAJOR_VERSION --arch $E2E_ARCH --flavor $FLAVOR + .new-e2e_ubuntu_a6_x86_64: variables: E2E_ARCH: x86_64 @@ -43,7 +49,7 @@ new-e2e-agent-platform-install-script-ubuntu-a6-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_ubuntu @@ -54,7 +60,7 @@ new-e2e-agent-platform-install-script-ubuntu-a6-x86_64: new-e2e-agent-platform-install-script-ubuntu-a6-arm64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_ubuntu @@ -65,7 +71,7 @@ new-e2e-agent-platform-install-script-ubuntu-a6-arm64: new-e2e-agent-platform-install-script-ubuntu-a7-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_ubuntu @@ -78,7 +84,7 @@ new-e2e-agent-platform-install-script-ubuntu-a7-x86_64: new-e2e-agent-platform-install-script-ubuntu-a7-arm64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_ubuntu @@ -91,7 +97,7 @@ new-e2e-agent-platform-install-script-ubuntu-a7-arm64: new-e2e-agent-platform-install-script-ubuntu-iot-agent-a7-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_ubuntu @@ -102,7 +108,7 @@ new-e2e-agent-platform-install-script-ubuntu-iot-agent-a7-x86_64: new-e2e-agent-platform-install-script-ubuntu-dogstatsd-a7-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - 
.new-e2e_install_script - .new-e2e_os_ubuntu @@ -113,7 +119,7 @@ new-e2e-agent-platform-install-script-ubuntu-dogstatsd-a7-x86_64: new-e2e-agent-platform-install-script-ubuntu-heroku-agent-a6-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_os_ubuntu - .new-e2e_ubuntu_a6_x86_64 @@ -123,10 +129,62 @@ new-e2e-agent-platform-install-script-ubuntu-heroku-agent-a6-x86_64: new-e2e-agent-platform-install-script-ubuntu-heroku-agent-a7-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_os_ubuntu - .new-e2e_ubuntu_a7_x86_64 - .new-e2e_agent_a7 variables: FLAVOR: datadog-heroku-agent + +new-e2e-agent-platform-step-by-step-ubuntu-a6-x86_64: + stage: kitchen_testing + extends: + - .new_e2e_template + - .new-e2e_step_by_step + - .new-e2e_os_ubuntu + - .new-e2e_ubuntu_a6_x86_64 + - .new-e2e_agent_a6 + rules: + !reference [.on_deploy_a6] + variables: + FLAVOR: datadog-agent + +new-e2e-agent-platform-step-by-step-ubuntu-a6-arm64: + stage: kitchen_testing + extends: + - .new_e2e_template + - .new-e2e_step_by_step + - .new-e2e_os_ubuntu + - .new-e2e_ubuntu_a6_arm64 + - .new-e2e_agent_a6 + rules: + !reference [.on_deploy_a6] + variables: + FLAVOR: datadog-agent + +new-e2e-agent-platform-step-by-step-ubuntu-a7-x86_64: + stage: kitchen_testing + extends: + - .new_e2e_template + - .new-e2e_step_by_step + - .new-e2e_os_ubuntu + - .new-e2e_ubuntu_a7_x86_64 + - .new-e2e_agent_a7 + rules: + !reference [.on_deploy_a7] + variables: + FLAVOR: datadog-agent + +new-e2e-agent-platform-step-by-step-ubuntu-a7-arm64: + stage: kitchen_testing + extends: + - .new_e2e_template + - .new-e2e_step_by_step + - .new-e2e_os_ubuntu + - .new-e2e_ubuntu_a7_arm64 + - .new-e2e_agent_a7 + rules: + !reference [.on_deploy_a7] + variables: + FLAVOR: datadog-agent diff --git a/test/new-e2e/tests/agent-platform/install-script/install_script_test.go b/test/new-e2e/tests/agent-platform/install-script/install_script_test.go index 
e4dd94c61a6f0c..4e1777f85dd958 100644 --- a/test/new-e2e/tests/agent-platform/install-script/install_script_test.go +++ b/test/new-e2e/tests/agent-platform/install-script/install_script_test.go @@ -92,7 +92,6 @@ func TestInstallScript(t *testing.T) { cwsSupported = true } } - vmOpts = append(vmOpts, ec2params.WithImageName(platformJSON[*platform][*architecture][osVers], archMapping[*architecture], testOsType)) if instanceType, ok := os.LookupEnv("E2E_OVERRIDE_INSTANCE_TYPE"); ok { vmOpts = append(vmOpts, ec2params.WithInstanceType(instanceType)) diff --git a/test/new-e2e/tests/agent-platform/platforms/platforms.json b/test/new-e2e/tests/agent-platform/platforms/platforms.json index b3158631605b5f..d246127ecec81f 100644 --- a/test/new-e2e/tests/agent-platform/platforms/platforms.json +++ b/test/new-e2e/tests/agent-platform/platforms/platforms.json @@ -1,7 +1,7 @@ { "debian": { "x86_64": { - "debian-9": "ami-099d228beefd189f5", + "debian-9": "ami-0182559468c1975fe", "debian-10": "ami-041540a5c191757a0", "debian-11": "ami-09e24b0cfe072ecef", "debian-12": "ami-06db4d78cb1d3bbf9" diff --git a/test/new-e2e/tests/agent-platform/step-by-step/step_by_step_test.go b/test/new-e2e/tests/agent-platform/step-by-step/step_by_step_test.go new file mode 100644 index 00000000000000..eae681185664f0 --- /dev/null +++ b/test/new-e2e/tests/agent-platform/step-by-step/step_by_step_test.go @@ -0,0 +1,274 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+package stepbystep + +import ( + "encoding/json" + "flag" + "fmt" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/params" + "github.com/DataDog/datadog-agent/test/new-e2e/tests/agent-platform/common" + filemanager "github.com/DataDog/datadog-agent/test/new-e2e/tests/agent-platform/common/file-manager" + helpers "github.com/DataDog/datadog-agent/test/new-e2e/tests/agent-platform/common/helper" + "github.com/DataDog/datadog-agent/test/new-e2e/tests/agent-platform/platforms" + e2eOs "github.com/DataDog/test-infra-definitions/components/os" + "github.com/DataDog/test-infra-definitions/scenarios/aws/vm/ec2os" + "github.com/DataDog/test-infra-definitions/scenarios/aws/vm/ec2params" + "github.com/stretchr/testify/require" + "os" + "strconv" + "strings" + "testing" +) + +var osVersion = flag.String("osversion", "", "os version to test") +var platform = flag.String("platform", "", "platform to test") +var cwsSupportedOsVersion = flag.String("cws-supported-osversion", "", "list of os where CWS is supported") +var architecture = flag.String("arch", "", "architecture to test (x86_64, arm64))") +var flavorName = flag.String("flavor", "datadog-agent", "package flavor to install") +var majorVersion = flag.String("major-version", "7", "major version to test (6, 7)") + +type stepByStepSuite struct { + e2e.Suite[e2e.VMEnv] + osVersion float64 + cwsSupported bool +} + +func ExecuteWithoutError(t *testing.T, client *common.TestClient, cmd string, args ...any) { + var finalCmd string + if len(args) > 0 { + finalCmd = fmt.Sprintf(cmd, args...) 
+ } else { + finalCmd = cmd + } + _, err := client.VMClient.ExecuteWithError(finalCmd) + require.NoError(t, err) +} + +func TestStepByStepScript(t *testing.T) { + osMapping := map[string]ec2os.Type{ + "debian": ec2os.DebianOS, + "ubuntu": ec2os.UbuntuOS, + "centos": ec2os.CentOS, + "rhel": ec2os.RedHatOS, + "amazonlinux": ec2os.AmazonLinuxOS, + "redhat": ec2os.RedHatOS, + "windows": ec2os.WindowsOS, + "fedora": ec2os.FedoraOS, + "suse": ec2os.SuseOS, + } + + archMapping := map[string]e2eOs.Architecture{ + "x86_64": e2eOs.AMD64Arch, + "arm64": e2eOs.ARM64Arch, + } + + platformJSON := map[string]map[string]map[string]string{} + + err := json.Unmarshal(platforms.Content, &platformJSON) + require.NoErrorf(t, err, "failed to umarshall platform file: %v", err) + + osVersions := strings.Split(*osVersion, ",") + cwsSupportedOsVersionList := strings.Split(*cwsSupportedOsVersion, ",") + fmt.Println("Parsed platform json file: ", platformJSON) + for _, osVers := range osVersions { + vmOpts := []ec2params.Option{} + osVers := osVers + cwsSupported := false + for _, cwsSupportedOs := range cwsSupportedOsVersionList { + if cwsSupportedOs == osVers { + cwsSupported = true + } + } + + t.Run(fmt.Sprintf("test step by step on %s %s", osVers, *architecture), func(tt *testing.T) { + tt.Parallel() + fmt.Printf("Testing %s", osVers) + slice := strings.Split(osVers, "-") + var version float64 + if len(slice) == 2 { + version, err = strconv.ParseFloat(slice[1], 64) + require.NoError(tt, err) + } else if len(slice) == 3 { + version, err = strconv.ParseFloat(slice[1]+"."+slice[2], 64) + require.NoError(tt, err) + } else { + version = 0 + } + vmOpts = append(vmOpts, ec2params.WithImageName(platformJSON[*platform][*architecture][osVers], archMapping[*architecture], osMapping[*platform])) + if instanceType, ok := os.LookupEnv("E2E_OVERRIDE_INSTANCE_TYPE"); ok { + vmOpts = append(vmOpts, ec2params.WithInstanceType(instanceType)) + } + e2e.Run(tt, &stepByStepSuite{cwsSupported: cwsSupported, 
osVersion: version}, e2e.EC2VMStackDef(vmOpts...), params.WithStackName(fmt.Sprintf("step-by-step-test-%v-%v-%s-%s", os.Getenv("CI_PIPELINE_ID"), osVers, *architecture, *majorVersion))) + }) + } +} + +func (is *stepByStepSuite) TestStepByStep() { + fileManager := filemanager.NewUnixFileManager(is.Env().VM) + unixHelper := helpers.NewUnixHelper() + vm := is.Env().VM.(*client.PulumiStackVM) + agentClient, err := client.NewAgentClient(is.T(), vm, vm.GetOS(), false) + require.NoError(is.T(), err) + VMclient := common.NewTestClient(is.Env().VM, agentClient, fileManager, unixHelper) + + if *platform == "debian" || *platform == "ubuntu" { + is.StepByStepDebianTest(VMclient) + } else if *platform == "centos" || *platform == "amazonlinux" || *platform == "fedora" || *platform == "redhat" { + is.StepByStepRhelTest(VMclient) + } else { + require.Equal(is.T(), *platform, "suse", "NonSupportedPlatformError : %s isn't supported !", *platform) + is.StepByStepSuseTest(VMclient) + } + is.ConfigureAndRunAgentService(VMclient) + is.CheckStepByStepAgentInstallation(VMclient) + +} + +func (is *stepByStepSuite) ConfigureAndRunAgentService(VMclient *common.TestClient) { + is.T().Run("add config file", func(t *testing.T) { + ExecuteWithoutError(t, VMclient, "sudo sh -c \"sed 's/api_key:.*/api_key: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/' /etc/datadog-agent/datadog.yaml.example > /etc/datadog-agent/datadog.yaml\"") + ExecuteWithoutError(t, VMclient, "sudo sh -c \"chown dd-agent:dd-agent /etc/datadog-agent/datadog.yaml && chmod 640 /etc/datadog-agent/datadog.yaml\"") + if *platform == "ubuntu" && is.osVersion == 14.04 { + ExecuteWithoutError(t, VMclient, "sudo initctl start datadog-agent") + } else { + ExecuteWithoutError(t, VMclient, "sudo systemctl restart datadog-agent.service") + } + }) +} + +func (is *stepByStepSuite) CheckStepByStepAgentInstallation(VMclient *common.TestClient) { + common.CheckInstallation(is.T(), VMclient) + common.CheckAgentBehaviour(is.T(), VMclient) + 
common.CheckAgentStops(is.T(), VMclient) + common.CheckAgentRestarts(is.T(), VMclient) + common.CheckIntegrationInstall(is.T(), VMclient) + common.CheckAgentPython(is.T(), VMclient, "3") + if *majorVersion == "6" { + common.CheckAgentPython(is.T(), VMclient, "2") + } + common.CheckApmEnabled(is.T(), VMclient) + common.CheckApmDisabled(is.T(), VMclient) + if *flavorName == "datadog-agent" && is.cwsSupported { + common.CheckCWSBehaviour(is.T(), VMclient) + } + common.CheckUninstallation(is.T(), VMclient, *flavorName) +} + +func (is *stepByStepSuite) StepByStepDebianTest(VMclient *common.TestClient) { + var aptTrustedDKeyring = "/etc/apt/trusted.gpg.d/datadog-archive-keyring.gpg" + var aptUsrShareKeyring = "/usr/share/keyrings/datadog-archive-keyring.gpg" + var aptrepo = "[signed-by=/usr/share/keyrings/datadog-archive-keyring.gpg] http://apttesting.datad0g.com/" + var aptrepoDist = fmt.Sprintf("pipeline-%s-a%s-%s", os.Getenv("CI_PIPELINE_ID"), *majorVersion, *architecture) + fileManager := VMclient.FileManager + var err error + + is.T().Run("create /usr/share keyring and source list", func(t *testing.T) { + ExecuteWithoutError(t, VMclient, "sudo apt-get update && sudo DEBIAN_FRONTEND=noninteractive apt-get install -y apt-transport-https curl gnupg") + tmpFileContent := fmt.Sprintf("deb %s %s %s", aptrepo, aptrepoDist, *majorVersion) + _, err = fileManager.WriteFile("/etc/apt/sources.list.d/datadog.list", tmpFileContent) + require.NoError(t, err) + ExecuteWithoutError(t, VMclient, "sudo touch %s && sudo chmod a+r %s", aptUsrShareKeyring, aptUsrShareKeyring) + keys := []string{"DATADOG_APT_KEY_CURRENT.public", "DATADOG_APT_KEY_C0962C7D.public", "DATADOG_APT_KEY_F14F620E.public", "DATADOG_APT_KEY_382E94DE.public"} + for _, key := range keys { + ExecuteWithoutError(t, VMclient, "sudo curl --retry 5 -o \"/tmp/%s\" \"https://keys.datadoghq.com/%s\"", key, key) + ExecuteWithoutError(t, VMclient, "sudo cat \"/tmp/%s\" | sudo gpg --import --batch --no-default-keyring --keyring 
\"%s\"", key, aptUsrShareKeyring) + } + }) + if (*platform == "ubuntu" && is.osVersion < 15) || (*platform == "debian" && is.osVersion < 9) { + is.T().Run("create /etc/apt keyring", func(t *testing.T) { + ExecuteWithoutError(t, VMclient, "sudo cp %s %s", aptUsrShareKeyring, aptTrustedDKeyring) + }) + } + + is.T().Run("install debian", func(t *testing.T) { + ExecuteWithoutError(t, VMclient, "sudo apt-get update") + ExecuteWithoutError(is.T(), VMclient, "sudo apt-get install %s datadog-signing-keys -y -q", *flavorName) + }) +} + +func (is *stepByStepSuite) StepByStepRhelTest(VMclient *common.TestClient) { + var arch string + if *architecture == "arm64" { + arch = "aarch64" + } else { + arch = *architecture + } + var yumrepo = fmt.Sprintf("http://yumtesting.datad0g.com/testing/pipeline-%s-a%s/%s/%s/", + os.Getenv("CI_PIPELINE_ID"), *majorVersion, *majorVersion, arch) + fileManager := VMclient.FileManager + var err error + + var protocol = "https" + if is.osVersion < 6 { + protocol = "http" + } + var repogpgcheck = "1" + if is.osVersion < 8.2 { + repogpgcheck = "0" + } + + fileContent := fmt.Sprintf("[datadog]\n"+ + "name = Datadog, Inc.\n"+ + "baseurl = %s\n"+ + "enabled=1\n"+ + "gpgcheck=1\n"+ + "repo_gpgcheck=%s\n"+ + "gpgkey=%s://keys.datadoghq.com/DATADOG_RPM_KEY_CURRENT.public\n"+ + "\t%s://keys.datadoghq.com/DATADOG_RPM_KEY_B01082D3.public\n"+ + "\t%s://keys.datadoghq.com/DATADOG_RPM_KEY_FD4BF915.public\n"+ + "\t%s://keys.datadoghq.com/DATADOG_RPM_KEY_E09422B3.public", + yumrepo, repogpgcheck, protocol, protocol, protocol, protocol) + _, err = fileManager.WriteFile("/etc/yum.repos.d/datadog.repo", fileContent) + require.NoError(is.T(), err) + + is.T().Run("install rhel", func(t *testing.T) { + ExecuteWithoutError(t, VMclient, "sudo yum makecache -y") + ExecuteWithoutError(t, VMclient, "sudo yum install -y %s", *flavorName) + }) +} + +func (is *stepByStepSuite) StepByStepSuseTest(VMclient *common.TestClient) { + var arch string + if *architecture == "arm64" { + 
arch = "aarch64" + } else { + arch = *architecture + } + + var suseRepo = fmt.Sprintf("http://yumtesting.datad0g.com/suse/testing/pipeline-%s-a%s/%s/%s/", + os.Getenv("CI_PIPELINE_ID"), *majorVersion, *majorVersion, arch) + fileManager := VMclient.FileManager + var err error + + fileContent := fmt.Sprintf("[datadog]\n"+ + "name = Datadog, Inc.\n"+ + "baseurl = %s\n"+ + "enabled=1\n"+ + "gpgcheck=1\n"+ + "repo_gpgcheck=1\n"+ + "gpgkey=https://keys.datadoghq.com/DATADOG_RPM_KEY_CURRENT.public\n"+ + " https://keys.datadoghq.com/DATADOG_RPM_KEY_B01082D3.public\n"+ + " https://keys.datadoghq.com/DATADOG_RPM_KEY_FD4BF915.public\n"+ + " https://keys.datadoghq.com/DATADOG_RPM_KEY_E09422B3.public\n", + suseRepo) + _, err = fileManager.WriteFile("/etc/zypp/repos.d/datadog.repo", fileContent) + require.NoError(is.T(), err) + + is.T().Run("install suse", func(t *testing.T) { + ExecuteWithoutError(t, VMclient, "sudo curl -o /tmp/DATADOG_RPM_KEY_CURRENT.public https://keys.datadoghq.com/DATADOG_RPM_KEY_CURRENT.public") + ExecuteWithoutError(t, VMclient, "sudo rpm --import /tmp/DATADOG_RPM_KEY_CURRENT.public") + ExecuteWithoutError(t, VMclient, "sudo curl -o /tmp/DATADOG_RPM_KEY_B01082D3.public https://keys.datadoghq.com/DATADOG_RPM_KEY_B01082D3.public") + ExecuteWithoutError(t, VMclient, "sudo rpm --import /tmp/DATADOG_RPM_KEY_B01082D3.public") + ExecuteWithoutError(t, VMclient, "sudo curl -o /tmp/DATADOG_RPM_KEY_FD4BF915.public https://keys.datadoghq.com/DATADOG_RPM_KEY_FD4BF915.public") + ExecuteWithoutError(t, VMclient, "sudo rpm --import /tmp/DATADOG_RPM_KEY_FD4BF915.public") + ExecuteWithoutError(t, VMclient, "sudo curl -o /tmp/DATADOG_RPM_KEY_E09422B3.public https://keys.datadoghq.com/DATADOG_RPM_KEY_E09422B3.public") + ExecuteWithoutError(t, VMclient, "sudo rpm --import /tmp/DATADOG_RPM_KEY_E09422B3.public") + ExecuteWithoutError(t, VMclient, "sudo zypper --non-interactive --no-gpg-checks refresh datadog") + ExecuteWithoutError(t, VMclient, "sudo zypper --non-interactive 
install %s", *flavorName) + }) +} From 354fa40795e932f915b0b015a94b3162525d7a46 Mon Sep 17 00:00:00 2001 From: Guy Arbitman Date: Tue, 19 Dec 2023 10:36:40 +0200 Subject: [PATCH 45/66] usm: http2: Change frame idx and count to u16 (#21641) The change will allow us to filter and process more than 255 frames. --- pkg/network/ebpf/c/protocols/http2/decoding-defs.h | 4 ++-- pkg/network/ebpf/c/protocols/http2/decoding.h | 10 ++++------ 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/pkg/network/ebpf/c/protocols/http2/decoding-defs.h b/pkg/network/ebpf/c/protocols/http2/decoding-defs.h index ede2777ab5d7cf..e7be1e6f2da499 100644 --- a/pkg/network/ebpf/c/protocols/http2/decoding-defs.h +++ b/pkg/network/ebpf/c/protocols/http2/decoding-defs.h @@ -136,9 +136,9 @@ typedef struct { } http2_frame_with_offset; typedef struct { + __u16 iteration; + __u16 frames_count; http2_frame_with_offset frames_array[HTTP2_MAX_FRAMES_ITERATIONS] __attribute__((aligned(8))); - __u8 iteration; - __u8 frames_count; } http2_tail_call_state_t; typedef struct { diff --git a/pkg/network/ebpf/c/protocols/http2/decoding.h b/pkg/network/ebpf/c/protocols/http2/decoding.h index 4fcabfe89cf5d2..b3bb3da46c99a7 100644 --- a/pkg/network/ebpf/c/protocols/http2/decoding.h +++ b/pkg/network/ebpf/c/protocols/http2/decoding.h @@ -550,7 +550,7 @@ static __always_inline bool get_first_frame(struct __sk_buff *skb, skb_info_t *s // - HEADERS frames // - RST_STREAM frames // - DATA frames with the END_STREAM flag set -static __always_inline __u8 find_relevant_frames(struct __sk_buff *skb, skb_info_t *skb_info, http2_frame_with_offset *frames_array, __u8 original_index, http2_telemetry_t *http2_tel) { +static __always_inline __u16 find_relevant_frames(struct __sk_buff *skb, skb_info_t *skb_info, http2_frame_with_offset *frames_array, __u8 original_index, http2_telemetry_t *http2_tel) { bool is_headers_or_rst_frame, is_data_end_of_stream; struct http2_frame current_frame = {}; @@ -559,7 +559,7 @@ static 
__always_inline __u8 find_relevant_frames(struct __sk_buff *skb, skb_info // interesting_frame_index to original_index directly, as this will confuse // the verifier, leading it into thinking the index could have an arbitrary // value. - __u8 interesting_frame_index = original_index == 1; + __u16 interesting_frame_index = original_index == 1; __u32 iteration = 0; #pragma unroll(HTTP2_MAX_FRAMES_TO_FILTER) @@ -714,9 +714,7 @@ int socket__http2_filter(struct __sk_buff *skb) { // We have a remainder new_frame_state.remainder = local_skb_info.data_off - local_skb_info.data_end; bpf_map_update_elem(&http2_remainder, &dispatcher_args_copy.tup, &new_frame_state, BPF_ANY); - } - - if (local_skb_info.data_off < local_skb_info.data_end && local_skb_info.data_off + HTTP2_FRAME_HEADER_SIZE > local_skb_info.data_end) { + } else if (local_skb_info.data_off < local_skb_info.data_end && local_skb_info.data_off + HTTP2_FRAME_HEADER_SIZE > local_skb_info.data_end) { // We have a frame header remainder new_frame_state.remainder = HTTP2_FRAME_HEADER_SIZE - (local_skb_info.data_end - local_skb_info.data_off); bpf_memset(new_frame_state.buf, 0, HTTP2_FRAME_HEADER_SIZE); @@ -785,7 +783,7 @@ int socket__http2_frames_parser(struct __sk_buff *skb) { http2_ctx->dynamic_index.tup = dispatcher_args_copy.tup; #pragma unroll(HTTP2_FRAMES_PER_TAIL_CALL) - for (__u8 index = 0; index < HTTP2_FRAMES_PER_TAIL_CALL; index++) { + for (__u16 index = 0; index < HTTP2_FRAMES_PER_TAIL_CALL; index++) { if (tail_call_state->iteration >= HTTP2_MAX_FRAMES_ITERATIONS) { break; } From 570216e278cdb7fc668b1df49292acc507063ece Mon Sep 17 00:00:00 2001 From: pducolin <45568537+pducolin@users.noreply.github.com> Date: Tue, 19 Dec 2023 10:53:30 +0100 Subject: [PATCH 46/66] [e2e] migrate snmp testsuite (#21505) [e2e] migrate snmp testsuite --- .../tests/ndm/snmp/compose/snmpCompose.yaml | 2 - .../ndm/snmp/{snmpTestEnv.go => snmp_test.go} | 105 +++++++++--------- 2 files changed, 51 insertions(+), 56 deletions(-) 
rename test/new-e2e/tests/ndm/snmp/{snmpTestEnv.go => snmp_test.go} (61%) diff --git a/test/new-e2e/tests/ndm/snmp/compose/snmpCompose.yaml b/test/new-e2e/tests/ndm/snmp/compose/snmpCompose.yaml index 07aca8b4aaf1ab..0a65323914335b 100644 --- a/test/new-e2e/tests/ndm/snmp/compose/snmpCompose.yaml +++ b/test/new-e2e/tests/ndm/snmp/compose/snmpCompose.yaml @@ -11,5 +11,3 @@ services: agent: volumes: - ${CONFIG_DIR}/snmp.yaml:/etc/datadog-agent/conf.d/snmp.d/snmp.yaml - container_name: dd-agent - diff --git a/test/new-e2e/tests/ndm/snmp/snmpTestEnv.go b/test/new-e2e/tests/ndm/snmp/snmp_test.go similarity index 61% rename from test/new-e2e/tests/ndm/snmp/snmpTestEnv.go rename to test/new-e2e/tests/ndm/snmp/snmp_test.go index 449b3decb9dcc2..c699d6755fa75c 100644 --- a/test/new-e2e/tests/ndm/snmp/snmpTestEnv.go +++ b/test/new-e2e/tests/ndm/snmp/snmp_test.go @@ -7,30 +7,24 @@ package snmp import ( - "context" "embed" - "errors" "path" + "testing" + "time" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/infra" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client" "github.com/DataDog/test-infra-definitions/components/datadog/agent" "github.com/DataDog/test-infra-definitions/components/datadog/dockeragentparams" "github.com/DataDog/test-infra-definitions/scenarios/aws" + "github.com/DataDog/test-infra-definitions/scenarios/aws/vm/ec2os" + "github.com/DataDog/test-infra-definitions/scenarios/aws/vm/ec2params" "github.com/DataDog/test-infra-definitions/scenarios/aws/vm/ec2vm" + "github.com/stretchr/testify/assert" - "github.com/pulumi/pulumi/sdk/v3/go/auto" "github.com/pulumi/pulumi/sdk/v3/go/pulumi" ) -// TestEnv implements a test environment for NDM. 
Deprecated, should port to TestSuite -type TestEnv struct { - context context.Context - name string - - InstanceIP string - StackOutput auto.UpResult -} - //go:embed compose/snmpCompose.yaml var snmpCompose string @@ -41,96 +35,79 @@ const ( composeDataPath = "compose/data" ) -// NewTestEnv creates a new test environment for NDM. Deprecated, should port to NDM -func NewTestEnv() (*TestEnv, error) { - snmpTestEnv := &TestEnv{ - context: context.Background(), - name: "snmp-agent", - } - - stackManager := infra.GetStackManager() - - _, upResult, err := stackManager.GetStack(snmpTestEnv.context, snmpTestEnv.name, nil, func(ctx *pulumi.Context) error { +// snmpDockerStackDef defines a stack with a docker agent on an AmazonLinuxDockerOS VM +// with snmpsim installed and configured with snmp recordings +func snmpDockerStackDef() *e2e.StackDefinition[e2e.DockerEnv] { + return e2e.EnvFactoryStackDef(func(ctx *pulumi.Context) (*e2e.DockerEnv, error) { // setup VM - vm, err := ec2vm.NewUnixEc2VM(ctx) + vm, err := ec2vm.NewUnixEc2VM(ctx, ec2params.WithOS(ec2os.AmazonLinuxDockerOS)) if err != nil { - return err + return nil, err } fakeintakeExporter, err := aws.NewEcsFakeintake(vm.GetAwsEnvironment()) if err != nil { - return err + return nil, err } filemanager := vm.GetFileManager() // upload snmpsim data files createDataDirCommand, dataPath, err := filemanager.TempDirectory("data") if err != nil { - return err + return nil, err } dataFiles, err := loadDataFileNames() if err != nil { - return err + return nil, err } + fileCommands := []pulumi.Resource{} for _, fileName := range dataFiles { fileContent, err := dataFolder.ReadFile(path.Join(composeDataPath, fileName)) if err != nil { - return err + return nil, err } dontUseSudo := false fileCommand, err := filemanager.CopyInlineFile(pulumi.String(fileContent), path.Join(dataPath, fileName), dontUseSudo, pulumi.DependsOn([]pulumi.Resource{createDataDirCommand})) if err != nil { - return err + return nil, err } fileCommands = 
append(fileCommands, fileCommand) } createConfigDirCommand, configPath, err := filemanager.TempDirectory("config") if err != nil { - return err + return nil, err } // edit snmp config file dontUseSudo := false configCommand, err := filemanager.CopyInlineFile(pulumi.String(snmpConfig), path.Join(configPath, "snmp.yaml"), dontUseSudo, pulumi.DependsOn([]pulumi.Resource{createConfigDirCommand})) if err != nil { - return err + return nil, err } // install agent and snmpsim on docker envVars := pulumi.StringMap{"DATA_DIR": pulumi.String(dataPath), "CONFIG_DIR": pulumi.String(configPath)} composeDependencies := []pulumi.Resource{createDataDirCommand, configCommand} composeDependencies = append(composeDependencies, fileCommands...) - _, err = agent.NewDaemon( + docker, err := agent.NewDaemon( vm, dockeragentparams.WithFakeintake(fakeintakeExporter), dockeragentparams.WithExtraComposeManifest("snmpsim", snmpCompose), dockeragentparams.WithEnvironmentVariables(envVars), dockeragentparams.WithPulumiDependsOn(pulumi.DependsOn(composeDependencies)), ) - return err - }, false) - if err != nil { - return nil, err - } - - snmpTestEnv.StackOutput = upResult - - output, found := upResult.Outputs["instance-ip"] - - if !found { - return nil, errors.New("unable to find host ip") - } - snmpTestEnv.InstanceIP = output.Value.(string) - - return snmpTestEnv, nil -} - -// Destroy delete the NDM stack. 
Deprecated, should port to NDM -func (testEnv *TestEnv) Destroy() error { - return infra.GetStackManager().DeleteStack(testEnv.context, testEnv.name, nil) + if err != nil { + return nil, err + } + return &e2e.DockerEnv{ + Docker: client.NewDocker(docker), + VM: client.NewPulumiStackVM(vm), + Fakeintake: client.NewFakeintake(fakeintakeExporter), + }, nil + }) } //go:embed compose/data @@ -146,3 +123,23 @@ func loadDataFileNames() (out []string, err error) { } return out, nil } + +type snmpDockerSuite struct { + e2e.Suite[e2e.DockerEnv] +} + +// TestSnmpSuite runs the snmp e2e suite +func TestSnmpSuite(t *testing.T) { + e2e.Run(t, &snmpDockerSuite{}, snmpDockerStackDef()) +} + +// TestSnmp tests that the snmpsim container is running and that the agent container +// is sending snmp metrics to the fakeintake +func (s *snmpDockerSuite) TestSnmp() { + fakeintake := s.Env().Fakeintake + s.EventuallyWithT(func(c *assert.CollectT) { + metrics, err := fakeintake.GetMetricNames() + assert.NoError(c, err) + assert.Contains(c, metrics, "snmp.sysUpTimeInstance", "metrics %v doesn't contain snmp.sysUpTimeInstance", metrics) + }, 5*time.Minute, 10*time.Second) +} From c5e99efdb533dd7bf709701f86b8ea2a15bf17d5 Mon Sep 17 00:00:00 2001 From: Vincent Boulineau <58430298+vboulineau@users.noreply.github.com> Date: Tue, 19 Dec 2023 10:59:18 +0100 Subject: [PATCH 47/66] Allow excluding checks from advanced dispatching (#21552) * Allow excluding checks from advanced dispatching * Fix missing assignment --- .../clusterchecks/dispatcher_main.go | 22 ++++++++++++++----- .../clusterchecks/dispatcher_nodes.go | 16 +++++++++++--- .../clusterchecks/dispatcher_rebalance.go | 7 ++---- pkg/config/config.go | 1 + 4 files changed, 32 insertions(+), 14 deletions(-) diff --git a/pkg/clusteragent/clusterchecks/dispatcher_main.go b/pkg/clusteragent/clusterchecks/dispatcher_main.go index 939e4cb26cf396..49acf34dc4093e 100644 --- a/pkg/clusteragent/clusterchecks/dispatcher_main.go +++ 
b/pkg/clusteragent/clusterchecks/dispatcher_main.go @@ -30,12 +30,13 @@ const ( // dispatcher holds the management logic for cluster-checks type dispatcher struct { - store *clusterStore - nodeExpirationSeconds int64 - extraTags []string - clcRunnersClient clusteragent.CLCRunnerClientInterface - advancedDispatching bool - excludedChecks map[string]struct{} + store *clusterStore + nodeExpirationSeconds int64 + extraTags []string + clcRunnersClient clusteragent.CLCRunnerClientInterface + advancedDispatching bool + excludedChecks map[string]struct{} + excludedChecksFromDispatching map[string]struct{} } func newDispatcher() *dispatcher { @@ -54,6 +55,15 @@ func newDispatcher() *dispatcher { } } + excludedChecksFromDispatching := config.Datadog.GetStringSlice("cluster_checks.exclude_checks_from_dispatching") + // This option will almost always be empty + if len(excludedChecksFromDispatching) > 0 { + d.excludedChecksFromDispatching = make(map[string]struct{}, len(excludedChecksFromDispatching)) + for _, checkName := range excludedChecksFromDispatching { + d.excludedChecksFromDispatching[checkName] = struct{}{} + } + } + hname, _ := hostname.Get(context.TODO()) clusterTagValue := clustername.GetClusterName(context.TODO(), hname) clusterTagName := config.Datadog.GetString("cluster_checks.cluster_tag_name") diff --git a/pkg/clusteragent/clusterchecks/dispatcher_nodes.go b/pkg/clusteragent/clusterchecks/dispatcher_nodes.go index ae65e9c6aca451..f4ebda60a807d8 100644 --- a/pkg/clusteragent/clusterchecks/dispatcher_nodes.go +++ b/pkg/clusteragent/clusterchecks/dispatcher_nodes.go @@ -216,15 +216,25 @@ func (d *dispatcher) updateRunnersStats() { continue } node.Lock() - for id, checkStats := range stats { + for idStr, checkStats := range stats { + id := checkid.ID(idStr) + // Stats contain info about all the running checks on a node // Node checks must be filtered from Cluster Checks // so they can be included in calculating node Agent busyness and excluded from rebalancing 
decisions. - if _, found := d.store.idToDigest[checkid.ID(id)]; found { + if _, found := d.store.idToDigest[id]; found { // Cluster check detected (exists in the Cluster Agent checks store) log.Tracef("Check %s running on node %s is a cluster check", id, node.name) checkStats.IsClusterCheck = true - stats[id] = checkStats + stats[idStr] = checkStats + } + + checkName := checkid.IDToCheckName(id) + if _, found := d.excludedChecksFromDispatching[checkName]; found { + // TODO: We are abusing the IsClusterCheck field to mark checks that should be excluded from rebalancing decisions. + // It behaves the same way as we want to count them in rebalance decisions but we don't want to move them. + checkStats.IsClusterCheck = false + stats[idStr] = checkStats } } node.clcRunnerStats = stats diff --git a/pkg/clusteragent/clusterchecks/dispatcher_rebalance.go b/pkg/clusteragent/clusterchecks/dispatcher_rebalance.go index d1e28bce5fe832..be81bff42fc575 100644 --- a/pkg/clusteragent/clusterchecks/dispatcher_rebalance.go +++ b/pkg/clusteragent/clusterchecks/dispatcher_rebalance.go @@ -364,15 +364,12 @@ func (d *dispatcher) currentDistribution() checksDistribution { for nodeName, nodeStoreInfo := range d.store.nodes { for checkID, stats := range nodeStoreInfo.clcRunnerStats { - digest, found := d.store.idToDigest[checkid.ID(checkID)] - if !found { // Not a cluster check + if !stats.IsClusterCheck { continue } minCollectionInterval := defaults.DefaultCheckInterval - - conf := d.store.digestToConfig[digest] - + conf := d.store.digestToConfig[d.store.idToDigest[checkid.ID(checkID)]] if len(conf.Instances) > 0 { commonOptions := integration.CommonInstanceConfig{} err := yaml.Unmarshal(conf.Instances[0], &commonOptions) diff --git a/pkg/config/config.go b/pkg/config/config.go index eb4e8b8b8c5b8b..23f98d399f6be1 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -1010,6 +1010,7 @@ func InitConfig(config Config) { 
config.BindEnvAndSetDefault("cluster_checks.rebalance_min_percentage_improvement", 10) // Experimental. Subject to change. Rebalance only if the distribution found improves the current one by this. config.BindEnvAndSetDefault("cluster_checks.clc_runners_port", 5005) config.BindEnvAndSetDefault("cluster_checks.exclude_checks", []string{}) + config.BindEnvAndSetDefault("cluster_checks.exclude_checks_from_dispatching", []string{}) // Cluster check runner config.BindEnvAndSetDefault("clc_runner_enabled", false) From afb43cea8114403b99a2fadd355d40e19cd5f7af Mon Sep 17 00:00:00 2001 From: Sylvain Afchain Date: Tue, 19 Dec 2023 11:12:12 +0100 Subject: [PATCH 48/66] [CWS] add ability to filter rules based on envs (#21603) --- pkg/security/rules/engine.go | 2 ++ pkg/security/rules/rule_filters_model.go | 2 +- pkg/security/rules/rule_filters_model_linux.go | 8 ++++++++ pkg/security/rules/rule_filters_model_other.go | 10 ++++++++++ 4 files changed, 21 insertions(+), 1 deletion(-) diff --git a/pkg/security/rules/engine.go b/pkg/security/rules/engine.go index c9cbca6dff07cb..273cd3eefa8180 100644 --- a/pkg/security/rules/engine.go +++ b/pkg/security/rules/engine.go @@ -520,6 +520,8 @@ func logLoadingErrors(msg string, m *multierror.Error) { } else { seclog.Warnf(msg, rErr.Error()) } + } else { + seclog.Errorf(msg, err.Error()) } } } diff --git a/pkg/security/rules/rule_filters_model.go b/pkg/security/rules/rule_filters_model.go index 5a76c2db7385e5..a8b1018170b5e9 100644 --- a/pkg/security/rules/rule_filters_model.go +++ b/pkg/security/rules/rule_filters_model.go @@ -31,7 +31,7 @@ func (e *RuleFilterEvent) GetFieldType(field eval.Field) (reflect.Kind, error) { case "kernel.version.major", "kernel.version.minor", "kernel.version.patch", "kernel.version.abi": return reflect.Int, nil case "kernel.version.flavor", - "os", "os.id", "os.platform_id", "os.version_id": + "os", "os.id", "os.platform_id", "os.version_id", "envs": return reflect.String, nil case "os.is_amazon_linux", 
"os.is_cos", "os.is_debian", "os.is_oracle", "os.is_rhel", "os.is_rhel7", "os.is_rhel8", "os.is_sles", "os.is_sles12", "os.is_sles15": diff --git a/pkg/security/rules/rule_filters_model_linux.go b/pkg/security/rules/rule_filters_model_linux.go index f3fd957826babb..1a097d4f10c12b 100644 --- a/pkg/security/rules/rule_filters_model_linux.go +++ b/pkg/security/rules/rule_filters_model_linux.go @@ -9,6 +9,7 @@ package rules import ( + "os" "runtime" "github.com/DataDog/datadog-agent/pkg/security/ebpf/kernel" @@ -174,6 +175,11 @@ func (m *RuleFilterModel) GetEvaluator(field eval.Field, _ eval.RegisterID) (eva EvalFnc: func(ctx *eval.Context) bool { return ctx.Event.(*RuleFilterEvent).IsSuse15Kernel() }, Field: field, }, nil + case "envs": + return &eval.StringArrayEvaluator{ + Values: os.Environ(), + Field: field, + }, nil } return nil, &eval.ErrFieldNotFound{Field: field} @@ -237,6 +243,8 @@ func (e *RuleFilterEvent) GetFieldValue(field eval.Field) (interface{}, error) { return e.IsSuse12Kernel(), nil case "os.is_sles15": return e.IsSuse15Kernel(), nil + case "envs": + return os.Environ(), nil } return nil, &eval.ErrFieldNotFound{Field: field} diff --git a/pkg/security/rules/rule_filters_model_other.go b/pkg/security/rules/rule_filters_model_other.go index d58cef58b2055f..00d3f318858f4b 100644 --- a/pkg/security/rules/rule_filters_model_other.go +++ b/pkg/security/rules/rule_filters_model_other.go @@ -9,6 +9,7 @@ package rules import ( + "os" "runtime" "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" @@ -59,6 +60,12 @@ func (m *RuleFilterModel) GetEvaluator(field eval.Field, _ eval.RegisterID) (eva Value: false, Field: field, }, nil + + case "envs": + return &eval.StringArrayEvaluator{ + Values: os.Environ(), + Field: field, + }, nil } return nil, &eval.ErrFieldNotFound{Field: field} @@ -79,6 +86,9 @@ func (e *RuleFilterEvent) GetFieldValue(field eval.Field) (interface{}, error) { case "os.is_amazon_linux", "os.is_cos", "os.is_debian", "os.is_oracle", 
"os.is_rhel", "os.is_rhel7", "os.is_rhel8", "os.is_sles", "os.is_sles12", "os.is_sles15": return false, nil + + case "envs": + return os.Environ(), nil } return nil, &eval.ErrFieldNotFound{Field: field} From 2c005bb6a5a7371ee3835572484f63c2b521ca38 Mon Sep 17 00:00:00 2001 From: Vincent Boulineau <58430298+vboulineau@users.noreply.github.com> Date: Tue, 19 Dec 2023 11:19:50 +0100 Subject: [PATCH 49/66] Bump s6-overlay to latest v2 version (#21065) --- Dockerfiles/agent/Dockerfile | 2 +- Dockerfiles/agent/s6.amd64.sha256 | 2 +- Dockerfiles/agent/s6.arm64.sha256 | 2 +- .../notes/bump-s6-overlay-a4fa7ecd845fe093.yaml | 3 +++ tasks/agent.py | 14 +++++++++----- 5 files changed, 15 insertions(+), 8 deletions(-) create mode 100644 releasenotes/notes/bump-s6-overlay-a4fa7ecd845fe093.yaml diff --git a/Dockerfiles/agent/Dockerfile b/Dockerfiles/agent/Dockerfile index 4a97d00786b20d..20e748987300a5 100644 --- a/Dockerfiles/agent/Dockerfile +++ b/Dockerfiles/agent/Dockerfile @@ -46,7 +46,7 @@ COPY datadog-agent*_$TARGETARCH.deb / WORKDIR /output # Get s6-overlay -ENV S6_VERSION v1.22.1.0 +ENV S6_VERSION v2.2.0.3 ENV JUST_CONTAINERS_DOWNLOAD_LOCATION=${GENERAL_ARTIFACTS_CACHE_BUCKET_URL:+${GENERAL_ARTIFACTS_CACHE_BUCKET_URL}/s6-overlay} ENV JUST_CONTAINERS_DOWNLOAD_LOCATION=${JUST_CONTAINERS_DOWNLOAD_LOCATION:-https://github.com/just-containers/s6-overlay/releases/download} RUN apt install --no-install-recommends -y curl ca-certificates diff --git a/Dockerfiles/agent/s6.amd64.sha256 b/Dockerfiles/agent/s6.amd64.sha256 index c1ea1fd0816dd6..117fcd5d3485e9 100644 --- a/Dockerfiles/agent/s6.amd64.sha256 +++ b/Dockerfiles/agent/s6.amd64.sha256 @@ -1 +1 @@ -73f9779203310ddf9c5132546a1978e1a2b05990263b92ed2c34c1e258e2df6c \ No newline at end of file +a7076cf205b331e9f8479bbb09d9df77dbb5cd8f7d12e9b74920902e0c16dd98 \ No newline at end of file diff --git a/Dockerfiles/agent/s6.arm64.sha256 b/Dockerfiles/agent/s6.arm64.sha256 index 9c82a38fb8be18..5dace0033135cd 100644 --- 
a/Dockerfiles/agent/s6.arm64.sha256 +++ b/Dockerfiles/agent/s6.arm64.sha256 @@ -1 +1 @@ -4eac8bfebdb004eaa0b5ff6a09eb0b24e308cc0d7f37912ab19d3d063be3279c \ No newline at end of file +84f585a100b610124bb80e441ef2dc2d68ac2c345fd393d75a6293e0951ccfc5 \ No newline at end of file diff --git a/releasenotes/notes/bump-s6-overlay-a4fa7ecd845fe093.yaml b/releasenotes/notes/bump-s6-overlay-a4fa7ecd845fe093.yaml new file mode 100644 index 00000000000000..5f89404e5666b9 --- /dev/null +++ b/releasenotes/notes/bump-s6-overlay-a4fa7ecd845fe093.yaml @@ -0,0 +1,3 @@ +other: + - | + Update s6-overlay version used in Datadog Agent container images to v2.2.0.3 diff --git a/tasks/agent.py b/tasks/agent.py index e2d358e1b2e7d4..2a463240c43d62 100644 --- a/tasks/agent.py +++ b/tasks/agent.py @@ -17,7 +17,6 @@ from invoke.exceptions import Exit, ParseError from .build_tags import filter_incompatible_tags, get_build_tags, get_default_build_tags -from .docker_tasks import pull_base_images from .flavor import AgentFlavor from .go import deps from .process_agent import build as process_agent_build @@ -314,7 +313,7 @@ def system_tests(_): @task -def image_build(ctx, arch='amd64', base_dir="omnibus", python_version="2", skip_tests=False, signed_pull=True): +def image_build(ctx, arch='amd64', base_dir="omnibus", python_version="2", skip_tests=False, tag=None, push=False): """ Build the docker image """ @@ -336,9 +335,11 @@ def image_build(ctx, arch='amd64', base_dir="omnibus", python_version="2", skip_ raise Exit(code=1) latest_file = max(list_of_files, key=os.path.getctime) shutil.copy2(latest_file, build_context) - # Pull base image with content trust enabled - pull_base_images(ctx, dockerfile_path, signed_pull) - common_build_opts = f"-t {AGENT_TAG} -f {dockerfile_path}" + + if tag is None: + tag = AGENT_TAG + + common_build_opts = f"-t {tag} -f {dockerfile_path}" if python_version not in BOTH_VERSIONS: common_build_opts = f"{common_build_opts} --build-arg PYTHON_VERSION={python_version}" @@ 
-348,6 +349,9 @@ def image_build(ctx, arch='amd64', base_dir="omnibus", python_version="2", skip_ # Build with the release target ctx.run(f"docker build {common_build_opts} --platform linux/{arch} --target release {build_context}") + if push: + ctx.run(f"docker push {tag}") + ctx.run(f"rm {build_context}/{deb_glob}") From bc73e977f43ac53dbdfab9ad87ca5a915b706eae Mon Sep 17 00:00:00 2001 From: Kevin Fairise <132568982+KevinFairise2@users.noreply.github.com> Date: Tue, 19 Dec 2023 11:37:12 +0100 Subject: [PATCH 50/66] Remove verbose from gotestsum for install script test for readability (#21620) Remove verbose from gotestsum for install script test for readability --- .gitlab/new-e2e_common/testing.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab/new-e2e_common/testing.yml b/.gitlab/new-e2e_common/testing.yml index 08f4951ef25067..4370f4e22c7217 100644 --- a/.gitlab/new-e2e_common/testing.yml +++ b/.gitlab/new-e2e_common/testing.yml @@ -14,7 +14,7 @@ variables: TARGETS: ./tests/agent-platform/install-script TEAM: agent-platform - EXTRA_PARAMS: --osversion $E2E_OSVERS --platform $E2E_PLATFORM --cws-supported-osversion $E2E_CWS_SUPPORTED_OSVERS --major-version $AGENT_MAJOR_VERSION --arch $E2E_ARCH --flavor $FLAVOR + EXTRA_PARAMS: --osversion $E2E_OSVERS --platform $E2E_PLATFORM --cws-supported-osversion $E2E_CWS_SUPPORTED_OSVERS --major-version $AGENT_MAJOR_VERSION --arch $E2E_ARCH --flavor $FLAVOR --no-verbose .new-e2e_step_by_step: variables: From 389b90c90e161c3d0a0c9d2c12a1b457a628e566 Mon Sep 17 00:00:00 2001 From: Julien Lebot Date: Tue, 19 Dec 2023 11:58:58 +0100 Subject: [PATCH 51/66] Remove metrics pattern from Windows Registry config (#21570) --- .../dist/conf.d/windows_registry.d/conf.yaml.example | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/cmd/agent/dist/conf.d/windows_registry.d/conf.yaml.example b/cmd/agent/dist/conf.d/windows_registry.d/conf.yaml.example index 1237b66f75e17b..a66f53940b1536 ---
a/cmd/agent/dist/conf.d/windows_registry.d/conf.yaml.example +++ b/cmd/agent/dist/conf.d/windows_registry.d/conf.yaml.example @@ -145,14 +145,3 @@ instances: ## This is useful for cluster-level checks. # # empty_default_hostname: false - - ## @param metric_patterns - mapping - optional - ## A mapping of metrics to include or exclude, with each entry being a regular expression. - ## - ## Metrics defined in `exclude` will take precedence in case of overlap. - # - # metric_patterns: - # include: - # - - # exclude: - # - From 61fd33fafc0aae72f92a53d168b9cf52d6446809 Mon Sep 17 00:00:00 2001 From: Maxime mouial Date: Fri, 8 Dec 2023 11:28:45 +0100 Subject: [PATCH 52/66] Adding notification system to the configuration --- pkg/config/model/types.go | 9 +++++++++ pkg/config/model/viper.go | 15 +++++++++++++++ pkg/config/model/viper_test.go | 19 +++++++++++++++++++ 3 files changed, 43 insertions(+) diff --git a/pkg/config/model/types.go b/pkg/config/model/types.go index a5349de7b66556..15dd503af971e8 100644 --- a/pkg/config/model/types.go +++ b/pkg/config/model/types.go @@ -22,6 +22,11 @@ type Proxy struct { NoProxy []string `mapstructure:"no_proxy"` } +// NotificationReceiver represents the callback type to receive notifications each time the `Set` method is called. The +// configuration will call each NotificationReceiver registered through the 'OnUpdate' method, therefore +// 'NotificationReceiver' should not be blocking. +type NotificationReceiver func(key string) + // Reader is a subset of Config that only allows reading of configuration type Reader interface { Get(key string) interface{} @@ -77,6 +82,10 @@ type Reader interface { // Object returns Reader to config (completes config.Component interface) Object() Reader + + // OnUpdate adds a callback to the list receivers to be called each time a value is change in the configuration + // by a call to the 'Set' method. The configuration will sequentially call each receiver. 
+ OnUpdate(callback NotificationReceiver) } // Writer is a subset of Config that only allows writing the configuration diff --git a/pkg/config/model/viper.go b/pkg/config/model/viper.go index 4dac9baba20e60..e0eae02686420e 100644 --- a/pkg/config/model/viper.go +++ b/pkg/config/model/viper.go @@ -63,6 +63,8 @@ type safeConfig struct { envPrefix string envKeyReplacer *strings.Replacer + notificationReceivers []NotificationReceiver + // Proxy settings proxiesOnce sync.Once proxies *Proxy @@ -72,6 +74,14 @@ type safeConfig struct { configEnvVars map[string]struct{} } +// OnUpdate adds a callback to the list receivers to be called each time a value is change in the configuration +// by a call to the 'Set' method. +func (c *safeConfig) OnUpdate(callback NotificationReceiver) { + c.Lock() + defer c.Unlock() + c.notificationReceivers = append(c.notificationReceivers, callback) +} + // Set wraps Viper for concurrent access func (c *safeConfig) Set(key string, value interface{}, source Source) { if source == SourceDefault { @@ -83,6 +93,11 @@ func (c *safeConfig) Set(key string, value interface{}, source Source) { defer c.Unlock() c.configSources[source].Set(key, value) c.mergeViperInstances(key) + + // notifying all receiver about the updated setting + for _, receiver := range c.notificationReceivers { + receiver(key) + } } // SetWithoutSource sets the given value using source Unknown diff --git a/pkg/config/model/viper_test.go b/pkg/config/model/viper_test.go index 1c3b4c93fe2e6b..073d2e5a6c41cb 100644 --- a/pkg/config/model/viper_test.go +++ b/pkg/config/model/viper_test.go @@ -262,3 +262,22 @@ foo: bar assert.Equal(t, SourceFile, config.GetSource("foo")) assert.Equal(t, map[string]interface{}{"foo": "bar"}, config.AllSourceSettingsWithoutDefault(SourceFile)) } + +func TestNotification(t *testing.T) { + config := NewConfig("test", "DD", strings.NewReplacer(".", "_")) + + updatedKeyCB1 := []string{} + updatedKeyCB2 := []string{} + + config.OnUpdate(func(key string) { 
updatedKeyCB1 = append(updatedKeyCB1, key) }) + + config.Set("foo", "bar", SourceFile) + assert.Equal(t, []string{"foo"}, updatedKeyCB1) + + config.OnUpdate(func(key string) { updatedKeyCB2 = append(updatedKeyCB2, key) }) + + config.Set("foo", "bar2", SourceFile) + config.Set("foo2", "bar2", SourceFile) + assert.Equal(t, []string{"foo", "foo", "foo2"}, updatedKeyCB1) + assert.Equal(t, []string{"foo", "foo2"}, updatedKeyCB2) +} From ccfbc9ce1e5e6322b5fa9f187e2cdda32aa695da Mon Sep 17 00:00:00 2001 From: Maxime mouial Date: Fri, 8 Dec 2023 13:40:05 +0100 Subject: [PATCH 53/66] Leveraging the configuration notifications to refresh inventoryagent We now use the new notification system to refresh the inventoryagent metadata payload each time the configuration is updated. --- cmd/agent/subcommands/run/command.go | 2 +- cmd/agent/subcommands/run/settings.go | 5 ++--- cmd/cluster-agent/subcommands/start/command.go | 2 +- cmd/security-agent/subcommands/start/command.go | 2 +- comp/metadata/inventoryagent/component.go | 2 -- comp/metadata/inventoryagent/inventoryagent.go | 2 ++ comp/metadata/inventoryagent/inventoryagent_test.go | 9 +++++++++ comp/process/apiserver/apiserver.go | 2 +- pkg/config/settings/runtime_setting_log_level.go | 11 +---------- 9 files changed, 18 insertions(+), 19 deletions(-) diff --git a/cmd/agent/subcommands/run/command.go b/cmd/agent/subcommands/run/command.go index 78a0082cf21e70..f34601003ba024 100644 --- a/cmd/agent/subcommands/run/command.go +++ b/cmd/agent/subcommands/run/command.go @@ -441,7 +441,7 @@ func startAgent( } // init settings that can be changed at runtime - if err := initRuntimeSettings(serverDebug, invAgent); err != nil { + if err := initRuntimeSettings(serverDebug); err != nil { log.Warnf("Can't initiliaze the runtime settings: %v", err) } diff --git a/cmd/agent/subcommands/run/settings.go b/cmd/agent/subcommands/run/settings.go index 2f5857efc7f969..e63c92cb20e40d 100644 --- a/cmd/agent/subcommands/run/settings.go +++ 
b/cmd/agent/subcommands/run/settings.go @@ -8,14 +8,13 @@ package run import ( "github.com/DataDog/datadog-agent/cmd/agent/subcommands/run/internal/settings" dogstatsddebug "github.com/DataDog/datadog-agent/comp/dogstatsd/serverDebug" - "github.com/DataDog/datadog-agent/comp/metadata/inventoryagent" commonsettings "github.com/DataDog/datadog-agent/pkg/config/settings" ) // initRuntimeSettings builds the map of runtime settings configurable at runtime. -func initRuntimeSettings(serverDebug dogstatsddebug.Component, invAgent inventoryagent.Component) error { +func initRuntimeSettings(serverDebug dogstatsddebug.Component) error { // Runtime-editable settings must be registered here to dynamically populate command-line information - if err := commonsettings.RegisterRuntimeSetting(commonsettings.NewLogLevelRuntimeSetting(invAgent)); err != nil { + if err := commonsettings.RegisterRuntimeSetting(commonsettings.NewLogLevelRuntimeSetting()); err != nil { return err } if err := commonsettings.RegisterRuntimeSetting(commonsettings.NewRuntimeMutexProfileFraction()); err != nil { diff --git a/cmd/cluster-agent/subcommands/start/command.go b/cmd/cluster-agent/subcommands/start/command.go index fe38311c2e1222..2ef986eaf06d84 100644 --- a/cmd/cluster-agent/subcommands/start/command.go +++ b/cmd/cluster-agent/subcommands/start/command.go @@ -419,7 +419,7 @@ func start(log log.Component, config config.Component, telemetry telemetry.Compo // initRuntimeSettings builds the map of runtime Cluster Agent settings configurable at runtime. 
func initRuntimeSettings() error { - if err := commonsettings.RegisterRuntimeSetting(commonsettings.NewLogLevelRuntimeSetting(nil)); err != nil { + if err := commonsettings.RegisterRuntimeSetting(commonsettings.NewLogLevelRuntimeSetting()); err != nil { return err } diff --git a/cmd/security-agent/subcommands/start/command.go b/cmd/security-agent/subcommands/start/command.go index 23eca06e719fcf..6474059d1444a2 100644 --- a/cmd/security-agent/subcommands/start/command.go +++ b/cmd/security-agent/subcommands/start/command.go @@ -308,7 +308,7 @@ func RunAgent(ctx context.Context, log log.Component, config config.Component, s } func initRuntimeSettings() error { - return settings.RegisterRuntimeSetting(settings.NewLogLevelRuntimeSetting(nil)) + return settings.RegisterRuntimeSetting(settings.NewLogLevelRuntimeSetting()) } // StopAgent stops the API server and clean up resources diff --git a/comp/metadata/inventoryagent/component.go b/comp/metadata/inventoryagent/component.go index 96510ef56f787b..c93ebafbd737fb 100644 --- a/comp/metadata/inventoryagent/component.go +++ b/comp/metadata/inventoryagent/component.go @@ -22,8 +22,6 @@ type Component interface { GetAsJSON() ([]byte, error) // Get returns a copy of the agent metadata. Useful to be incorporated in the status page. Get() map[string]interface{} - // Refresh trigger a new payload to be send while still respecting the minimal interval between two updates. - Refresh() } // Module defines the fx options for this component. 
diff --git a/comp/metadata/inventoryagent/inventoryagent.go b/comp/metadata/inventoryagent/inventoryagent.go index 17e16d6de41ef9..795cf43366d9a6 100644 --- a/comp/metadata/inventoryagent/inventoryagent.go +++ b/comp/metadata/inventoryagent/inventoryagent.go @@ -95,6 +95,8 @@ func newInventoryAgentProvider(deps dependencies) provides { if ia.Enabled { ia.initData() + // We want to be notified when the configuration is updated + deps.Config.OnUpdate(func(_ string) { ia.Refresh() }) } return provides{ diff --git a/comp/metadata/inventoryagent/inventoryagent_test.go b/comp/metadata/inventoryagent/inventoryagent_test.go index c0e92af3ff06ba..18e3b6cce3cde0 100644 --- a/comp/metadata/inventoryagent/inventoryagent_test.go +++ b/comp/metadata/inventoryagent/inventoryagent_test.go @@ -15,6 +15,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/comp/core/log/logimpl" pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/serializer" "github.com/DataDog/datadog-agent/pkg/util/flavor" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -232,3 +233,11 @@ func TestFlareProviderFilename(t *testing.T) { ia := getTestInventoryPayload(t, nil) assert.Equal(t, "agent.json", ia.FlareFileName) } + +func TestConfigRefresh(t *testing.T) { + ia := getTestInventoryPayload(t, nil) + + assert.False(t, ia.ForceRefresh) + pkgconfig.Datadog.Set("inventories_max_interval", 10*time.Minute, pkgconfigmodel.SourceAgentRuntime) + assert.True(t, ia.ForceRefresh) +} diff --git a/comp/process/apiserver/apiserver.go b/comp/process/apiserver/apiserver.go index ea28657e60cbfb..09247498fafe48 100644 --- a/comp/process/apiserver/apiserver.go +++ b/comp/process/apiserver/apiserver.go @@ -88,7 +88,7 @@ func newApiServer(deps dependencies) Component { func initRuntimeSettings(logger log.Component) { // NOTE: Any settings you want to register should simply 
be added here processRuntimeSettings := []settings.RuntimeSetting{ - settings.NewLogLevelRuntimeSetting(nil), + settings.NewLogLevelRuntimeSetting(), settings.NewRuntimeMutexProfileFraction(), settings.NewRuntimeBlockProfileRate(), settings.NewProfilingGoroutines(), diff --git a/pkg/config/settings/runtime_setting_log_level.go b/pkg/config/settings/runtime_setting_log_level.go index 99532cdd8e96f5..cf5103ad1ac3d1 100644 --- a/pkg/config/settings/runtime_setting_log_level.go +++ b/pkg/config/settings/runtime_setting_log_level.go @@ -6,7 +6,6 @@ package settings import ( - "github.com/DataDog/datadog-agent/comp/metadata/inventoryagent" "github.com/DataDog/datadog-agent/pkg/config" pkgconfiglogs "github.com/DataDog/datadog-agent/pkg/config/logs" "github.com/DataDog/datadog-agent/pkg/config/model" @@ -17,16 +16,12 @@ import ( type LogLevelRuntimeSetting struct { Config config.ReaderWriter ConfigKey string - // invAgent is a temporary dependency until the configuration is capable of sending it's own notification upon - // a value being set. - invAgent inventoryagent.Component } // NewLogLevelRuntimeSetting returns a new LogLevelRuntimeSetting -func NewLogLevelRuntimeSetting(invAgent inventoryagent.Component) *LogLevelRuntimeSetting { +func NewLogLevelRuntimeSetting() *LogLevelRuntimeSetting { return &LogLevelRuntimeSetting{ ConfigKey: "log_level", - invAgent: invAgent, } } @@ -72,9 +67,5 @@ func (l *LogLevelRuntimeSetting) Set(v interface{}, source model.Source) error { cfg = l.Config } cfg.Set(key, level, source) - // we trigger a new inventory metadata payload since the configuration was updated by the user. 
- if l.invAgent != nil { - l.invAgent.Refresh() - } return nil } From 49c2ab5e7fe3fffe32b17facffb96f96eee50119 Mon Sep 17 00:00:00 2001 From: Vincent Boulineau <58430298+vboulineau@users.noreply.github.com> Date: Tue, 19 Dec 2023 12:34:47 +0100 Subject: [PATCH 54/66] Fix parsing of /proc/mountinfo to retrieve self cgroup ID + fix HostCPUCount value in unit tests (#21529) * Fix parsing of /proc/mountinfo to retrieve self cgroup ID + fix HostCPUCount value in unit tests * Remove ability to mock cpu count --- .../metrics/system/containerid_linux.go | 2 +- .../metrics/system/containerid_linux_test.go | 5 +++++ .../metrics/system/testdata/mountinfo_kind | 1 + pkg/util/system/cpu_mock.go | 17 +++++++++++++++++ pkg/util/system/cpu_test.go | 4 ++++ 5 files changed, 28 insertions(+), 1 deletion(-) create mode 100644 pkg/util/containers/metrics/system/testdata/mountinfo_kind create mode 100644 pkg/util/system/cpu_mock.go diff --git a/pkg/util/containers/metrics/system/containerid_linux.go b/pkg/util/containers/metrics/system/containerid_linux.go index fde087fdbae888..b7c6f3b90a0756 100644 --- a/pkg/util/containers/metrics/system/containerid_linux.go +++ b/pkg/util/containers/metrics/system/containerid_linux.go @@ -17,7 +17,7 @@ import ( const ( selfMountInfoPath = "/proc/self/mountinfo" containerdSandboxPrefix = "sandboxes" - cIDRegexp = `([^\s/]+)/(` + cgroups.ContainerRegexpStr + `)/[\S]*hostname` + cIDRegexp = `.*/([^\s/]+)/(` + cgroups.ContainerRegexpStr + `)/[\S]*hostname` ) var cIDMountInfoRegexp = regexp.MustCompile(cIDRegexp) diff --git a/pkg/util/containers/metrics/system/containerid_linux_test.go b/pkg/util/containers/metrics/system/containerid_linux_test.go index 45dc639ce98f8e..e4647318c9b14a 100644 --- a/pkg/util/containers/metrics/system/containerid_linux_test.go +++ b/pkg/util/containers/metrics/system/containerid_linux_test.go @@ -32,6 +32,11 @@ func TestParseMountinfo(t *testing.T) { filePath: "./testdata/mountinfo_k8s_agent", wantContainerID: 
"fc7038bc73a8d3850c66ddbfb0b2901afa378bfcbb942cc384b051767e4ac6b0", }, + { + name: "Kind (containerd in docker)", + filePath: "./testdata/mountinfo_kind", + wantContainerID: "", + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/pkg/util/containers/metrics/system/testdata/mountinfo_kind b/pkg/util/containers/metrics/system/testdata/mountinfo_kind new file mode 100644 index 00000000000000..cfa0ebd86547c0 --- /dev/null +++ b/pkg/util/containers/metrics/system/testdata/mountinfo_kind @@ -0,0 +1 @@ +1258 1249 254:1 /docker/volumes/0919c2d87ec8ba99f3c85fdada5fe26eca73b2fce73a5974d6030f30bf91cbaf/_data/lib/containerd/io.containerd.grpc.v1.cri/sandboxes/ca30bb64884083e29b1dc08a1081dd2df123f13f045dadb64dc346e56c0b6871/hostname /etc/hostname rw,relatime - ext4 /dev/vda1 rw,discard \ No newline at end of file diff --git a/pkg/util/system/cpu_mock.go b/pkg/util/system/cpu_mock.go new file mode 100644 index 00000000000000..92c0794c9fe9b5 --- /dev/null +++ b/pkg/util/system/cpu_mock.go @@ -0,0 +1,17 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build test + +package system + +const ( + // Arbitrary CPU count used for unit tests + defaultCPUCountUnitTest = 3 +) + +func init() { + hostCPUCount.Store(defaultCPUCountUnitTest) +} diff --git a/pkg/util/system/cpu_test.go b/pkg/util/system/cpu_test.go index 1e3b9396f64b7a..443ee4135c227f 100644 --- a/pkg/util/system/cpu_test.go +++ b/pkg/util/system/cpu_test.go @@ -3,6 +3,8 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
+//go:build test + package system import ( @@ -32,6 +34,8 @@ func (f *fakeCPUCount) info(context.Context, bool) (int, error) { } func TestHostCPUCount(t *testing.T) { + defer hostCPUCount.Store(defaultCPUCountUnitTest) + f := newFakeCPUCount(10000, nil) assert.Equal(t, f.count, HostCPUCount()) From 8160fd54f9fc1bd8b1754bd92ebdc7c3f5ae0c50 Mon Sep 17 00:00:00 2001 From: Pierre Gimalac Date: Tue, 19 Dec 2023 13:02:33 +0100 Subject: [PATCH 55/66] [ASCII-991] Fix test-infra version in go.mod (#21624) [ASCII-991] Fix test-infra version in go.mod --- test/new-e2e/go.mod | 2 +- test/new-e2e/go.sum | 4 ++-- test/new-e2e/pkg/utils/e2e/stack_definition.go | 10 +++++----- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/test/new-e2e/go.mod b/test/new-e2e/go.mod index 3161179848dbb9..bad446edf98798 100644 --- a/test/new-e2e/go.mod +++ b/test/new-e2e/go.mod @@ -22,7 +22,7 @@ require ( // `TEST_INFRA_DEFINITIONS_BUILDIMAGES` matches the commit sha in the module version // Example: github.com/DataDog/test-infra-definitions v0.0.0-YYYYMMDDHHmmSS-0123456789AB // => TEST_INFRA_DEFINITIONS_BUILDIMAGES: 0123456789AB - github.com/DataDog/test-infra-definitions v0.0.0-20231213130016-281b2a324002 + github.com/DataDog/test-infra-definitions v0.0.0-20231215154848-67d2009bcd81 github.com/aws/aws-sdk-go-v2 v1.23.4 github.com/aws/aws-sdk-go-v2/config v1.25.10 github.com/aws/aws-sdk-go-v2/service/ec2 v1.138.1 diff --git a/test/new-e2e/go.sum b/test/new-e2e/go.sum index 868584692c2074..2c9142957433b8 100644 --- a/test/new-e2e/go.sum +++ b/test/new-e2e/go.sum @@ -12,8 +12,8 @@ github.com/DataDog/datadog-api-client-go/v2 v2.15.0 h1:5UVON1xs6Lul4d6R5TmLDqqSJ github.com/DataDog/datadog-api-client-go/v2 v2.15.0/go.mod h1:ZG8wS+y2rUmkRDJZQq7Og7EAPFPage+7vXcmuah2I9o= github.com/DataDog/mmh3 v0.0.0-20200805151601-30884ca2197a h1:m9REhmyaWD5YJ0P53ygRHxKKo+KM+nw+zz0hEdKztMo= github.com/DataDog/mmh3 v0.0.0-20200805151601-30884ca2197a/go.mod h1:SvsjzyJlSg0rKsqYgdcFxeEVflx3ZNAyFfkUHP0TxXg= 
-github.com/DataDog/test-infra-definitions v0.0.0-20231213130016-281b2a324002 h1:INEUQUD0zU5HlAMNb5BVOrAUun5gu3MjNviSIsZ+mmE= -github.com/DataDog/test-infra-definitions v0.0.0-20231213130016-281b2a324002/go.mod h1:pS50ENq41vbF+59otYFA/k2xh4Xar4+ZQSiMgF1vMLQ= +github.com/DataDog/test-infra-definitions v0.0.0-20231215154848-67d2009bcd81 h1:9YV5ebtavZcx+BZBzKs5oIa7JjDGceD6EderNH3c5fE= +github.com/DataDog/test-infra-definitions v0.0.0-20231215154848-67d2009bcd81/go.mod h1:pS50ENq41vbF+59otYFA/k2xh4Xar4+ZQSiMgF1vMLQ= github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8= github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/DataDog/zstd_0 v0.0.0-20210310093942-586c1286621f h1:5Vuo4niPKFkfwW55jV4vY0ih3VQ9RaQqeqY67fvRn8A= diff --git a/test/new-e2e/pkg/utils/e2e/stack_definition.go b/test/new-e2e/pkg/utils/e2e/stack_definition.go index 6a2a7333e14d0b..c5a6655a83d067 100644 --- a/test/new-e2e/pkg/utils/e2e/stack_definition.go +++ b/test/new-e2e/pkg/utils/e2e/stack_definition.go @@ -6,9 +6,6 @@ package e2e import ( - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client/agentclientparams" "github.com/DataDog/test-infra-definitions/components/datadog/agent" "github.com/DataDog/test-infra-definitions/components/datadog/agentparams" "github.com/DataDog/test-infra-definitions/components/datadog/dockeragentparams" @@ -20,6 +17,10 @@ import ( "github.com/DataDog/test-infra-definitions/scenarios/aws/vm/ec2params" "github.com/DataDog/test-infra-definitions/scenarios/aws/vm/ec2vm" "github.com/pulumi/pulumi/sdk/v3/go/pulumi" + + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client/agentclientparams" ) // StackDefinition contains a 
Pulumi stack definition @@ -218,8 +219,7 @@ func FakeIntakeStackDef(options ...func(*AgentStackDefParam) error) *StackDefini } // fakeintakeExporter, err := aws.NewEcsFakeintake(vm.GetAwsEnvironment(), params.fakeintakeParams...) - fakeintakeOptions := append([]fakeintakeparams.Option{fakeintakeparams.WithoutLoadBalancer()}, params.fakeintakeParams...) - fakeintakeExporter, err := aws.NewEcsFakeintake(vm.GetAwsEnvironment(), fakeintakeOptions...) + fakeintakeExporter, err := aws.NewEcsFakeintake(vm.GetAwsEnvironment(), params.fakeintakeParams...) if err != nil { return nil, err From 310ac568c1f1acb47aef9c4d790572dfa2521948 Mon Sep 17 00:00:00 2001 From: Kacper <89013263+kacper-murzyn@users.noreply.github.com> Date: Tue, 19 Dec 2023 13:17:59 +0100 Subject: [PATCH 56/66] 7.50.0 changelog (#21591) (#21643) * CHANGELOG update for 7.49.1 (#20892) * CHANGELOG update for 7.49.1 * Update CHANGELOG.rst * release note format fix * 7.50.0 changelog updates * Update CHANGELOG-DCA.rst --------- Co-authored-by: Heston Hoffman --- CHANGELOG-DCA.rst | 49 +++ CHANGELOG.rst | 292 ++++++++++++++++++ ...-content-allocations-8c6fd48fb63649f1.yaml | 3 +- 3 files changed, 343 insertions(+), 1 deletion(-) diff --git a/CHANGELOG-DCA.rst b/CHANGELOG-DCA.rst index 8cd3a66569e9ec..d1cd7367e7bb12 100644 --- a/CHANGELOG-DCA.rst +++ b/CHANGELOG-DCA.rst @@ -2,6 +2,55 @@ Release Notes ============= +.. _Release Notes_7.50.0: + +7.50.0 / 6.50.0 +====== + +.. _Release Notes_7.50.0_New Features: + +New Features +------------ + +- Add language detection API handler to the cluster-agent. + +- Report `rate_limit_queries_remaining_min` telemetry from `external-metrics` server. + +- Added a new `--force` option to the `datadog-cluster-agent clusterchecks rebalance` command that allows you to force clustercheck rebalancing with utilization. + +- [Beta] Enable `APM` library injection in `cluster-agent` admission controller based on automatic language detection annotations. + + +.. 
_Release Notes_7.50.0_Enhancement Notes: + +Enhancement Notes +----------------- + +- Show Autodiscovery information in the output of ``datadog-cluster-agent status``. + +- Added CreateContainerConfigError wait reason to the `kubernetes_state.container.status_report.count.waiting` metric + reported by the kubernetes_state_core check. + +- Release the Leader Election Lock on shutdown to make the initialization of future cluster-agents faster. + +- The Datadog cluster-agent container image is now using Ubuntu 23.10 mantic + as the base image. + + +.. _Release Notes_7.50.0_Bug Fixes: + +Bug Fixes +--------- + +- Fixed a bug in the ``kubernetes_state_core`` check that caused tag corruption when ``telemetry`` was set to ``true``. + +- Fix stale metrics being reported by kubernetes_state_core check in some rare cases. + +- Fixed a bug in the rebalancing of cluster checks. Checks that contained + secrets were never rebalanced when the Cluster Agent was configured to not + resolve check secrets (option ``secret_backend_skip_checks`` set to true). + + .. _Release Notes_7.49.0: 7.49.0 / 6.49.0 diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 13d42f23900712..b98dd305a3bf7e 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,298 @@ Release Notes ============= +.. _Release Notes_7.50.0: + +7.50.0 / 6.50.0 +====== + +.. _Release Notes_7.50.0_Prelude: + +Prelude +------- + +Release on: 2023-12-18 + +- Please refer to the `7.50.0 tag on integrations-core `_ for the list of changes on the Core Checks + + +.. _Release Notes_7.50.0_Upgrade Notes: + +Upgrade Notes +------------- + +- The `win32_event_log check `_ + has moved from Python `(integrations-core#16108) `_ + to Go `(#20701 )`_. + All ``legacy_mode: false`` configuration options are backwards compatible except for some regular expressions + used in the ``included_messages`` and ``excluded_messages`` options. + For example, Go regular expressions do not support lookahead or lookbehind assertions. 
If you do not + use these options, then no configuration changes are necessary. + See the `Python regular expression docs `_ and the + `Go regular expression docs `_ for more information on + the supported regular expression syntax. + Set ``legacy_mode_v2: true`` to revert to the Python implementation of the check. The Python implementation + may be removed in a future version of the Agent. + + +.. _Release Notes_7.50.0_New Features: + +New Features +------------ + +- The orchestrator check is moving from the Process Agent to the Node Agent. In the next release, this new check will replace the current pod check in the Process Agent. You can start using this new check now by manually setting the environment variable ``DD_ORCHESTRATOR_EXPLORER_RUN_ON_NODE_AGENT`` to ``true``. + +- Adds the following CPU manager metrics to the kubelet core check: `kubernetes_core.kubelet.cpu_manager.pinning_errors_total`, `kubernetes_core.kubelet.cpu_manager.pinning_requests_total`. + +- Add a diagnosis for connecting to the agent logs endpoints. This is accessible through the ``agent diagnose`` command. + +- Add FIPS mode support for Network Device Monitoring products + +- Added support for collecting Cloud Foundry container names without the Cluster Agent. + +- The Kubernetes State Metrics Core check now collects `kubernetes_state.ingress.tls`. + +- APM: Added a new endpoint tracer_flare/v1/. This endpoint acts as a + proxy to forward HTTP POST request from tracers to the serverless_flare + endpoint, allowing tracer flares to be triggered via remote config, improving + the support experience by automating the collection of logs. + +- CWS: Ability to send a signal to a process when a rule was triggered. + CWS: Add Kubernetes user session context to events, in particular the username, UID and groups of the user that ran the commands remotely. + +- A new rule post action - 'kill' - can now be used to send a specific + signal to a process that caused a rule to be triggered. 
By default, this + signal is SIGTERM. + + ``` + - id: my_rule + expression: ... + actions: + - kill: + signal: SIGUSR1 + ``` + +- Enable container image collection by default. + +- Enable container lifecycle events collection by default. + This feature helps stopped containers to be cleaned from Datadog faster. + +- [netflow] Allow collecting configurable fields for Netflow V9/IPFIX + +- Add support for Oracle 12.1 and Oracle 11. + +- Add monitoring of Oracle ASM disk groups. + +- Add metrics for monitoring Oracle resource manager. + +- [corechecks/snmp] Load downloaded profiles + +- DBM: Add configuration option to SQL obfuscator to use go-sqllexer package to run SQL obfuscation and normalization + +- Support filtering metrics from endpoint and service checks based + on namespace when the `DD_CONTAINER_EXCLUDE_METRICS` environment + variable is set. + +- The Windows Event Log tailer saves its current position in an event log and + resumes reading from that location when the Agent restarts. This allows + the Agent to collect events created before the Agent starts. + + +.. _Release Notes_7.50.0_Enhancement Notes: + +Enhancement Notes +----------------- + +- [corechecks/snmp] Support symbol modifiers for global metric tags and metadata tags. + +- Update the go-systemd package to the latest version (22.5.0). + +- Added default peer tags for APM stats aggregation which can be enabled through a new flag (`peer_tags_aggregation`). + +- Add a stop timeout to the Windows Agent services. If an Agent service + does not cleanly stop within 15 seconds after receiving a stop command + from the Service Control Manager, the service will hard stop. + The timeout can be configured by setting the DD_WINDOWS_SERVICE_STOP_TIMEOUT_SECONDS + environment variable. + Agent stop timeouts are logged to the Windows Event Log and can be monitored and alerted on. + +- APM: OTLP: Add support for custom container tags via resource attributes prefixed by `datadog.container.tag.*`. 
+ +- Agents are now built with Go ``1.20.11``. + +- CWS: Support for Ubuntu 23.10. + CWS: Reduce memory usage of ring buffer on machines with more than 64 CPU cores. + CSPM: Move away from libapt to run Debian packages compliance checks. + +- DBM: Bump the minimum version of the `go-sqllexer` library to 0.0.7 to support collecting stored procedure names. + +- Add subcommand `diagnose show-metadata gohai` for gohai data + +- Upgraded JMXFetch to ``0.49.0`` which adds some more telemetry + and contains some small fixes. + +- Netflow now supports the `datadog-agent status` command, providing + configuration information. Any configuration errors encountered will be + listed. + +- Emit `database_instance` tag with the value `host/cdb`. The goal is to show each database separately in the DBM entry page. Currently, the backend initializes `database_instance` to `host`. + Also, the Agent will emit the new `db_server` tag because we have to initialize the `host` tag to `host/cdb`. + +- Improve obfuscator formatting. Prevent spaces after parentheses. + Prevent spaces before `#` when `#` is a part of an identifier. + +- Emit query metrics with zero executions to capture long runners spanning over several sampling periods. + +- Impose a time limit on query metrics processing. After exceeding the default limit of 20s, the Agent stops emitting execution plans and fqt events. + +- Add `oracle.inactive_seconds` metric. Add tags with session attributes to `oracle.process_pga*` metrics. + +- Stop override peer.service with other attributes in OTel spans. + +- Process-Agent: Improved parsing performance of the '/proc/pid/stat' file (Linux only) + +- [snmp_listener] Enable ``collect_topology`` by default. + +- dbm: add SQL obfuscation options to give customer more control over how SQL is obfuscated and normalized. + - ``RemoveSpaceBetweenParentheses`` - remove spaces between parentheses. This option is only valid when ``ObfuscationMode`` is ``obfuscate_and_normalize``. 
+ - ``KeepNull`` - disable obfuscating null values with ?. This option is only valid when ``ObfuscationMode`` is ``obfuscate_only`` or ``obfuscate_and_normalize``. + - ``KeepBoolean`` - disable obfuscating boolean values with ?. This option is only valid when ``ObfuscationMode`` is ``obfuscate_only`` or ``obfuscate_and_normalize``. + - ``KeepPositionalParameter`` - disable obfuscating positional parameters with ?. This option is only valid when ``ObfuscationMode`` is ``obfuscate_only`` or ``obfuscate_and_normalize``. + +- Add logic to support multiple tags created by a single label/annotation. + For example, add the following config to extract tags for chart_name and app_chart_name. + podLabelsAsTags: + chart_name: chart_name, app_chart_name + Note: the format must be a comma-separated list of tags. + +- The logs collection pipeline has been through a refactor to support + processing only the message content (instead of the whole raw message) + in the journald and Windows events tailers. + This feature is experimental and off by default since it changes how + existing `log_processing_rules` behaves with journald and Windows events + tailer. + Note that it will be switched on by default in a future release of the Agent. + A warning notifying about this is shown when the journald and Windows events + tailers are used with some `log_processing_rules`. + +- The Datadog agent container image is now using Ubuntu 23.10 mantic + as the base image. + +- The win32_event_log check now continuously collects and reports events instead of waiting for + ``min_collection_interval`` to collect. + ``min_collection_interval`` now controls how frequently the check attempts to reconnect + when the event subscription is in an error state. + + +.. _Release Notes_7.50.0_Deprecation Notes: + +Deprecation Notes +----------------- + +- Installing the Agent on Windows Server versions lower than 2016 and client versions lower than 10 is now deprecated.
+ +- The ``timeout`` option for the win32_event_log check is no longer applicable and can be removed. If the option + is set, the check logs a deprecation warning and ignores the option. + + +.. _Release Notes_7.50.0_Security Notes: + +Security Notes +-------------- + +- Fix ``CVE-2023-45283`` and ``CVE-2023-45284`` + +- Update OpenSSL from 3.0.11 to 3.0.12. + This addresses CVE-2023-5363. + + +.. _Release Notes_7.50.0_Bug Fixes: + +Bug Fixes +--------- + +- On Windows, uninstalling the Agent should not fail if the Datadog Agent registry key is missing. + +- APM: OTLP: Only extract DD container tags from resource attributes. Previously, container tags were also extracted from span attributes. + +- APM: OTLP: Only add container tags in tag `_dd.tags.container`. Previously, container tags were also added as span tags. + +- Resolved an issue in the containerd collector where the SBOM collection did not correctly attach RepoTags and RepoDigests to the SBOM payload. + +- Add a workaround for a bug in a Windows API that can cause the Agent to + crash when collecting forwarded events from the Windows Event Log. + +- Resolve the issue with hostname resolution in the kube_apiserver provider when the useHostNetwork setting is enabled. + +- Fix an issue that prevented process ID (PID) from being associated with containers in Live Container View when the Agent is deployed in AWS Fargate. + +- APM: Fixed trace-agent not forwarding errors from remote configuration and reporting them all as 500s + +- On Windows, the `SE_DACL_AUTO_INHERITED` flag is reset on `%PROJECTLOCATION%` during upgrades and uninstalls. + +- Fixes a bug in the Windows NPM driver where NPM displays byte overcounts. + +- For USM on Windows, fixes the problem where paths were being erroneously + reported as truncated + +- Fixes journald log's Seek function to be set at the beginning or end upon initialization. 
+ +- Fixed the cause of some crashes related to CPU instruction + incompatibility happening under certain CPUs when making calls to + the included libgmp library. + +- [kubelet] The Kubelet client no longer fails to initialize when the parameter ``kubelet_tls_verify`` is set to ``false`` with a misconfigured root certificate authority. + +- Fixes a bug where the process-agent process check command would fail to run + when language detection was enabled. + +- Document query metrics `metric_prefix` parameter. + +- Set the tag `dd.internal.resource:database_instance` to `host` instead of `host/cdb`. + +- Switch to the new obfuscator where bugs such as getting an error when obfuscating `@!` and where comments on DMLs weren't being removed are fixed. + +- Fixes wrong values in Oracle query metrics data. Extreme cases had inflated statistics and missing statements. The affected were pure DML and PL/SQL statements. + +- Fix the bug that prevented Oracle DBM working properly on AWS RDS non-multitenant instances. + +- Fix an issue that caused the win32_event_log check to not stop running when the rate of incoming event + records was higher than the ``timeout`` option. The ``timeout`` option is now deprecated. + +- The Windows Event Log tailer automatically recovers and is able to resume collecting + events when a log provider is reinstalled, which sometimes happens during Windows updates. + + +.. _Release Notes_7.49.1: + +7.49.1 / 6.49.1 +====== + +.. _Release Notes_7.49.1_Prelude: + +Prelude +------- + +Release on: 2023-11-15 + +- Please refer to the `7.49.1 tag on integrations-core `_ for the list of changes on the Core Checks + + +.. _Release Notes_7.49.1_Bug Fixes: + +Bug Fixes +--------- + +- CWS: add ``arch`` field into agent context included in CWS events. + +- APM: Fix a deadlock issue which can prevent the trace-agent from shutting down. + +- CWS: Fix the broken lineage check for process activity in CWS. 
+ +- APM: fix a regression in the Trace Agent that caused container tagging + with UDS and cgroup v2 to fail. + + .. _Release Notes_7.49.0: 7.49.0 / 6.49.0 diff --git a/releasenotes/notes/process-agent-improve-parse-stat-content-allocations-8c6fd48fb63649f1.yaml b/releasenotes/notes/process-agent-improve-parse-stat-content-allocations-8c6fd48fb63649f1.yaml index 2c17b344130f60..1b6f312ea10177 100644 --- a/releasenotes/notes/process-agent-improve-parse-stat-content-allocations-8c6fd48fb63649f1.yaml +++ b/releasenotes/notes/process-agent-improve-parse-stat-content-allocations-8c6fd48fb63649f1.yaml @@ -1,3 +1,4 @@ --- enhancements: - - Process-Agent: Improved parsing performance of the '/proc/pid/stat' file (Linux only) + - | + Process-Agent: Improved parsing performance of the '/proc/pid/stat' file (Linux only) From 2587005a84897e93f7d1c76ac265e372d9ab8481 Mon Sep 17 00:00:00 2001 From: Nenad Noveljic <18366081+nenadnoveljic@users.noreply.github.com> Date: Tue, 19 Dec 2023 13:30:51 +0100 Subject: [PATCH 57/66] Test case adjustments (#21647) --- pkg/collector/corechecks/oracle-dbm/oracle_test.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/pkg/collector/corechecks/oracle-dbm/oracle_test.go b/pkg/collector/corechecks/oracle-dbm/oracle_test.go index b83f978e315ec8..ec5a8e9176e7b7 100644 --- a/pkg/collector/corechecks/oracle-dbm/oracle_test.go +++ b/pkg/collector/corechecks/oracle-dbm/oracle_test.go @@ -146,10 +146,14 @@ func TestChkRun(t *testing.T) { tempLobsBefore, _ := getTemporaryLobs(chk.db) + /* Requires: + * create table sys.t(n number); + * grant insert on sys.t to c##datadog + */ _, err = chk.db.Exec(`begin for i in 1..1000 loop - execute immediate 'insert into t values (' || i || ')'; + execute immediate 'insert into sys.t values (' || i || ')'; end loop; end ;`) assert.NoError(t, err, "error generating statements with %s driver", driver) @@ -301,9 +305,9 @@ func TestObfuscator(t *testing.T) { _, err := 
o.ObfuscateSQLString(`SELECT TRUNC(SYSDATE@!) from dual`) assert.NoError(t, err, "can't obfuscate @!") - sql := "begin null ; end" + sql := "begin null ; end;" obfuscatedStatement, err := o.ObfuscateSQLString(sql) - assert.Equal(t, sql, obfuscatedStatement.Query) + assert.Equal(t, obfuscatedStatement.Query, "begin null; end;") sql = "select count (*) from dual" obfuscatedStatement, err = o.ObfuscateSQLString(sql) From a3be90c31577da8fde27f1c72e25dee715ad3cd4 Mon Sep 17 00:00:00 2001 From: Guy Arbitman Date: Tue, 19 Dec 2023 14:41:13 +0200 Subject: [PATCH 58/66] allow lower case approval (#21648) allow lower case approval --- tasks/kmt.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tasks/kmt.py b/tasks/kmt.py index 626f5c87619bdc..3f7b3af74a200e 100644 --- a/tasks/kmt.py +++ b/tasks/kmt.py @@ -104,7 +104,7 @@ def init(ctx, lite=False): @task def update_resources(ctx, no_backup=False): warn("Updating resource dependencies will delete all running stacks.") - if ask("are you sure you want to continue? (Y/n)") != "Y": + if ask("are you sure you want to continue? (y/n)").lower() != "y": raise Exit("[-] Update aborted") for stack in glob(f"{KMT_STACKS_DIR}/*"): @@ -117,7 +117,7 @@ def update_resources(ctx, no_backup=False): @task def revert_resources(ctx): warn("Reverting resource dependencies will delete all running stacks.") - if ask("are you sure you want to revert to backups? (Y/n)") != "Y": + if ask("are you sure you want to revert to backups? (y/n)").lower() != "y": raise Exit("[-] Revert aborted") for stack in glob(f"{KMT_STACKS_DIR}/*"): @@ -208,7 +208,7 @@ def sync(ctx, vms, stack=None, ssh_key=""): for _, vm, ip in target_vms: info(f" Syncing VM {vm} with ip {ip}") - if ask("Do you want to sync? (y/n)") != "y": + if ask("Do you want to sync? 
(y/n)").lower() != "y": warn("[-] Sync aborted !") return From a9ab674e3b8f53658e169b09a467509eeef54fdf Mon Sep 17 00:00:00 2001 From: Kevin Fairise <132568982+KevinFairise2@users.noreply.github.com> Date: Tue, 19 Dec 2023 14:36:13 +0100 Subject: [PATCH 59/66] Fix Heroku agent install script job for debian and ubuntu (#21642) Fix Heroku agent install script job for debian and ubuntu --- .gitlab/new-e2e_testing/debian.yml | 2 ++ .gitlab/new-e2e_testing/ubuntu.yml | 2 ++ 2 files changed, 4 insertions(+) diff --git a/.gitlab/new-e2e_testing/debian.yml b/.gitlab/new-e2e_testing/debian.yml index a305ca5fb465b1..9d3e6ccc752e8b 100644 --- a/.gitlab/new-e2e_testing/debian.yml +++ b/.gitlab/new-e2e_testing/debian.yml @@ -110,6 +110,7 @@ new-e2e-agent-platform-install-script-debian-heroku-agent-a6-x86_64: stage: kitchen_testing extends: - .new_e2e_template + - .new-e2e_install_script - .new-e2e_os_debian - .new-e2e_debian_a6_x86_64 - .new-e2e_agent_a6 @@ -120,6 +121,7 @@ new-e2e-agent-platform-install-script-debian-heroku-agent-a7-x86_64: stage: kitchen_testing extends: - .new_e2e_template + - .new-e2e_install_script - .new-e2e_os_debian - .new-e2e_debian_a7_x86_64 - .new-e2e_agent_a7 diff --git a/.gitlab/new-e2e_testing/ubuntu.yml b/.gitlab/new-e2e_testing/ubuntu.yml index d465ec9aa76df1..d9b148674dfe04 100644 --- a/.gitlab/new-e2e_testing/ubuntu.yml +++ b/.gitlab/new-e2e_testing/ubuntu.yml @@ -121,6 +121,7 @@ new-e2e-agent-platform-install-script-ubuntu-heroku-agent-a6-x86_64: stage: kitchen_testing extends: - .new_e2e_template + - .new-e2e_install_script - .new-e2e_os_ubuntu - .new-e2e_ubuntu_a6_x86_64 - .new-e2e_agent_a6 @@ -131,6 +132,7 @@ new-e2e-agent-platform-install-script-ubuntu-heroku-agent-a7-x86_64: stage: kitchen_testing extends: - .new_e2e_template + - .new-e2e_install_script - .new-e2e_os_ubuntu - .new-e2e_ubuntu_a7_x86_64 - .new-e2e_agent_a7 From 83901b1e0543770e83beb1688c18086d3b412e7e Mon Sep 17 00:00:00 2001 From: Pierre Guilleminot Date: Tue, 19 Dec 2023 
14:41:20 +0100 Subject: [PATCH 60/66] [CSPM] Detect K8s managed environment via node-labels (#21646) Provides better detection of managed environment for K8s clusters and configuration relying on node-labels. We were lacking detection for GKE and AKS and looking into the node-labels that are set, it looks like a good convention that we can rely on to safely detect whether a worker node is running on a specific managed environment. --- pkg/compliance/agent.go | 8 +++++ pkg/compliance/data.go | 1 + pkg/compliance/k8sconfig/loader.go | 53 +++++++++++++++++++++++++----- 3 files changed, 53 insertions(+), 9 deletions(-) diff --git a/pkg/compliance/agent.go b/pkg/compliance/agent.go index bbbe461072a5e6..ce139e66a8a5ca 100644 --- a/pkg/compliance/agent.go +++ b/pkg/compliance/agent.go @@ -121,6 +121,8 @@ type Agent struct { finish chan struct{} cancel context.CancelFunc + + k8sManaged *string } func xccdfEnabled() bool { @@ -213,6 +215,11 @@ func (a *Agent) Start() error { }), ) + _, k8sResourceData := k8sconfig.LoadConfiguration(ctx, a.opts.HostRoot) + if k8sResourceData != nil && k8sResourceData.ManagedEnvironment != nil { + a.k8sManaged = &k8sResourceData.ManagedEnvironment.Name + } + var wg sync.WaitGroup wg.Add(1) @@ -536,6 +543,7 @@ func (a *Agent) reportCheckEvents(eventsTTL time.Duration, events ...*CheckEvent event.Container.ImageTag = ctnr.Image.Tag } } + event.K8SManaged = a.k8sManaged a.opts.Reporter.ReportEvent(event) } } diff --git a/pkg/compliance/data.go b/pkg/compliance/data.go index 08286c097d23dc..c52d5529be48ab 100644 --- a/pkg/compliance/data.go +++ b/pkg/compliance/data.go @@ -81,6 +81,7 @@ type CheckEvent struct { ResourceType string `json:"resource_type,omitempty"` ResourceID string `json:"resource_id,omitempty"` Container *CheckContainerMeta `json:"container,omitempty"` + K8SManaged *string `json:"k8s_managed,omitempty"` Tags []string `json:"tags"` Data map[string]interface{} `json:"data"` diff --git a/pkg/compliance/k8sconfig/loader.go 
b/pkg/compliance/k8sconfig/loader.go index aca7d859638407..d5056ec20eed0d 100644 --- a/pkg/compliance/k8sconfig/loader.go +++ b/pkg/compliance/k8sconfig/loader.go @@ -28,7 +28,7 @@ import ( "gopkg.in/yaml.v3" ) -const version = "202305" +const version = "202312" const ( k8sManifestsDir = "/etc/kubernetes/manifests" @@ -78,13 +78,6 @@ func (l *loader) load(ctx context.Context, loadProcesses procsLoader) (string, * node.Manifests.KubeScheduler = l.loadConfigFileMeta(filepath.Join(k8sManifestsDir, "kube-scheduler.yaml")) node.Manifests.Etcd = l.loadConfigFileMeta(filepath.Join(k8sManifestsDir, "etcd.yaml")) - if eksMeta := l.loadConfigFileMeta("/etc/eks/release"); eksMeta != nil { - node.ManagedEnvironment = &K8sManagedEnvConfig{ - Name: "eks", - Metadata: eksMeta.Content, - } - } - for _, proc := range loadProcesses(ctx) { switch proc.name { case "etcd": @@ -97,6 +90,7 @@ func (l *loader) load(ctx context.Context, loadProcesses procsLoader) (string, * node.Components.KubeScheduler = l.newK8sKubeSchedulerConfig(proc.flags) case "kubelet": node.Components.Kubelet = l.newK8sKubeletConfig(proc.flags) + node.ManagedEnvironment = l.detectManagedEnvironment(proc.flags) case "kube-proxy": node.Components.KubeProxy = l.newK8sKubeProxyConfig(proc.flags) } @@ -107,13 +101,54 @@ func (l *loader) load(ctx context.Context, loadProcesses procsLoader) (string, * } resourceType := "kubernetes_worker_node" - if node.Components.KubeApiserver != nil { + if managedEnv := node.ManagedEnvironment; managedEnv != nil { + switch managedEnv.Name { + case "eks": + resourceType = "aws_eks_worker_node" + case "gke": + resourceType = "gcp_gke_worker_node" + case "aks": + resourceType = "azure_aks_worker_node" + } + } else if node.Components.KubeApiserver != nil || + node.Components.Etcd != nil || + node.Components.KubeControllerManager != nil || + node.Components.KubeScheduler != nil { resourceType = "kubernetes_master_node" } return resourceType, &node } +func (l *loader) 
detectManagedEnvironment(flags map[string]string) *K8sManagedEnvConfig { + nodeLabels, ok := flags["--node-labels"] + if ok { + for _, label := range strings.Split(nodeLabels, ",") { + label = strings.TrimSpace(label) + switch { + case strings.HasPrefix(label, "cloud.google.com/gke"): + return &K8sManagedEnvConfig{ + Name: "gke", + } + case strings.HasPrefix(label, "eks.amazonaws.com/"): + env := &K8sManagedEnvConfig{ + Name: "eks", + } + eksMeta := l.loadConfigFileMeta("/etc/eks/release") + if eksMeta != nil { + env.Metadata = eksMeta.Content + } + return env + case strings.HasPrefix(label, "kubernetes.azure.com/"): + return &K8sManagedEnvConfig{ + Name: "aks", + } + } + } + } + return nil +} + func (l *loader) loadMeta(name string, loadContent bool) (string, os.FileInfo, []byte, bool) { name = filepath.Join(l.hostroot, name) info, err := os.Stat(name) From 5d2a814b5ad132796931f55484427dd35df4022e Mon Sep 17 00:00:00 2001 From: Paul Date: Tue, 19 Dec 2023 15:13:26 +0100 Subject: [PATCH 61/66] [RC-1440] Send log section in checks payload (#21563) * Send log section in checks payload * Fix CI * Fix test * Fix license * Address comment * Fix CI * Fix CI * Address feedback * Add extra fields * Fix CI * Cleanup * Add tests for new methods --- comp/logs/agent/agent.go | 4 ++ comp/logs/agent/component.go | 6 ++ comp/logs/agent/config/integration_config.go | 34 ++++++++++ .../agent/config/integration_config_test.go | 16 +++++ comp/logs/agent/mock.go | 10 +++ comp/metadata/bundle_test.go | 4 ++ comp/metadata/inventorychecks/README.md | 33 ++++++++++ .../inventorychecksimpl/inventorychecks.go | 55 ++++++++++++++-- .../inventorychecks_test.go | 63 +++++++++++++++++-- pkg/cli/subcommands/check/command.go | 5 ++ pkg/logs/internal/status/status.go | 17 +++++ pkg/logs/internal/status/status_test.go | 13 ++++ pkg/logs/schedulers/schedulers.go | 5 ++ 13 files changed, 254 insertions(+), 11 deletions(-) diff --git a/comp/logs/agent/agent.go b/comp/logs/agent/agent.go index 
f17e7464c4b99f..0eac6b408408dc 100644 --- a/comp/logs/agent/agent.go +++ b/comp/logs/agent/agent.go @@ -243,6 +243,10 @@ func (a *agent) AddScheduler(scheduler schedulers.Scheduler) { a.schedulers.AddScheduler(scheduler) } +func (a *agent) GetSources() *sources.LogSources { + return a.sources +} + func (a *agent) GetMessageReceiver() *diagnostic.BufferedMessageReceiver { return a.diagnosticMessageReceiver } diff --git a/comp/logs/agent/component.go b/comp/logs/agent/component.go index 37d2016ccf1832..f1f3d15638fb1a 100644 --- a/comp/logs/agent/component.go +++ b/comp/logs/agent/component.go @@ -14,6 +14,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/logs/diagnostic" "github.com/DataDog/datadog-agent/pkg/logs/pipeline" "github.com/DataDog/datadog-agent/pkg/logs/schedulers" + "github.com/DataDog/datadog-agent/pkg/logs/sources" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -24,6 +25,9 @@ type Component interface { // AddScheduler adds an AD scheduler to the logs agent AddScheduler(scheduler schedulers.Scheduler) + // Get the logs sources + GetSources() *sources.LogSources + // GetMessageReceiver gets the diagnostic message receiver GetMessageReceiver() *diagnostic.BufferedMessageReceiver @@ -44,6 +48,8 @@ type ServerlessLogsAgent interface { // Mock implements mock-specific methods. type Mock interface { Component + + SetSources(sources *sources.LogSources) } // Module defines the fx options for this component. diff --git a/comp/logs/agent/config/integration_config.go b/comp/logs/agent/config/integration_config.go index d4c8db84b5d4a0..52192fee3f01fd 100644 --- a/comp/logs/agent/config/integration_config.go +++ b/comp/logs/agent/config/integration_config.go @@ -6,6 +6,7 @@ package config import ( + "encoding/json" "fmt" "strings" "sync" @@ -161,6 +162,39 @@ func (c *LogsConfig) Dump(multiline bool) string { return b.String() } +// PublicJSON serialize the structure to make sure we only export fields that can be relevant to customers. 
+// This is used to send the logs config to the backend as part of the metadata payload. +func (c *LogsConfig) PublicJSON() ([]byte, error) { + // Export only fields that are explicitly documented in the public documentation + return json.Marshal(&struct { + Type string `json:"type,omitempty"` + Port int `json:"port,omitempty"` // Network + Path string `json:"path,omitempty"` // File, Journald + Encoding string `json:"encoding,omitempty"` // File + ExcludePaths []string `json:"exclude_paths,omitempty"` // File + TailingMode string `json:"start_position,omitempty"` // File + ChannelPath string `json:"channel_path,omitempty"` // Windows Event + Service string `json:"service,omitempty"` + Source string `json:"source,omitempty"` + Tags []string `json:"tags,omitempty"` + ProcessingRules []*ProcessingRule `json:"log_processing_rules,omitempty"` + AutoMultiLine *bool `json:"auto_multi_line_detection,omitempty"` + }{ + Type: c.Type, + Port: c.Port, + Path: c.Path, + Encoding: c.Encoding, + ExcludePaths: c.ExcludePaths, + TailingMode: c.TailingMode, + ChannelPath: c.ChannelPath, + Service: c.Service, + Source: c.Source, + Tags: c.Tags, + ProcessingRules: c.ProcessingRules, + AutoMultiLine: c.AutoMultiLine, + }) +} + // TailingMode type type TailingMode uint8 diff --git a/comp/logs/agent/config/integration_config_test.go b/comp/logs/agent/config/integration_config_test.go index c24f5977633182..7318c5a1113ab2 100644 --- a/comp/logs/agent/config/integration_config_test.go +++ b/comp/logs/agent/config/integration_config_test.go @@ -91,3 +91,19 @@ func TestConfigDump(t *testing.T) { dump := config.Dump(true) assert.Contains(t, dump, `Path: "/var/log/foo.log",`) } + +func TestPublicJSON(t *testing.T) { + config := LogsConfig{ + Type: FileType, + Path: "/var/log/foo.log", + Encoding: "utf-8", + Service: "foo", + Tags: []string{"foo:bar"}, + Source: "bar", + } + ret, err := config.PublicJSON() + assert.NoError(t, err) + + expectedJSON := 
`{"type":"file","path":"/var/log/foo.log","encoding":"utf-8","service":"foo","source":"bar","tags":["foo:bar"]}` + assert.Equal(t, expectedJSON, string(ret)) +} diff --git a/comp/logs/agent/mock.go b/comp/logs/agent/mock.go index da21ba9a5bd129..7fed52e93c5d12 100644 --- a/comp/logs/agent/mock.go +++ b/comp/logs/agent/mock.go @@ -12,6 +12,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/logs/diagnostic" "github.com/DataDog/datadog-agent/pkg/logs/pipeline" "github.com/DataDog/datadog-agent/pkg/logs/schedulers" + "github.com/DataDog/datadog-agent/pkg/logs/sources" "github.com/DataDog/datadog-agent/pkg/util/optional" "go.uber.org/fx" ) @@ -21,6 +22,7 @@ type mockLogsAgent struct { addedSchedulers []schedulers.Scheduler hasFlushed bool flushDelay time.Duration + logSources *sources.LogSources } func newMock(deps dependencies) optional.Option[Mock] { @@ -51,6 +53,10 @@ func (a *mockLogsAgent) AddScheduler(scheduler schedulers.Scheduler) { a.addedSchedulers = append(a.addedSchedulers, scheduler) } +func (a *mockLogsAgent) SetSources(sources *sources.LogSources) { + a.logSources = sources +} + func (a *mockLogsAgent) IsRunning() bool { return a.isRunning } @@ -59,6 +65,10 @@ func (a *mockLogsAgent) GetMessageReceiver() *diagnostic.BufferedMessageReceiver return nil } +func (a *mockLogsAgent) GetSources() *sources.LogSources { + return a.logSources +} + // Serverless methods func (a *mockLogsAgent) Start() error { return a.start(context.TODO()) diff --git a/comp/metadata/bundle_test.go b/comp/metadata/bundle_test.go index 6db29988fbc9f0..03bf8d13f1c9b8 100644 --- a/comp/metadata/bundle_test.go +++ b/comp/metadata/bundle_test.go @@ -9,6 +9,7 @@ import ( "testing" "github.com/DataDog/datadog-agent/comp/core" + "github.com/DataDog/datadog-agent/comp/logs/agent" "github.com/DataDog/datadog-agent/comp/metadata/runner/runnerimpl" "github.com/DataDog/datadog-agent/pkg/collector" "github.com/DataDog/datadog-agent/pkg/serializer" @@ -24,6 +25,9 @@ func TestBundleDependencies(t 
*testing.T) { fx.Provide(func() optional.Option[collector.Collector] { return optional.NewOption[collector.Collector](collector.NewMock(nil)) }), + fx.Provide(func() optional.Option[agent.Component] { + return optional.NewNoneOption[agent.Component]() + }), ) } diff --git a/comp/metadata/inventorychecks/README.md b/comp/metadata/inventorychecks/README.md index 27a60e316953f9..88a8a5a947789d 100644 --- a/comp/metadata/inventorychecks/README.md +++ b/comp/metadata/inventorychecks/README.md @@ -29,6 +29,13 @@ The payload is a JSON dict with the following fields - `init_config` - **string**: the `init_config` part of the configuration for this check instance. - `instance_config` - **string**: the YAML configuration for this check instance - Any other metadata registered by the instance (instance version, version of the software monitored, ...). +- `logs_metadata` - **dict of string to list**: dictionary with the log source names as keys; values are a list of the metadata + for each instance of that log source. + Each instance is composed of: + - `config` - **string**: the canonical JSON of the log source configuration. + - `state` - **dict of string**: the current state of the log source. + - `status` - **string**: one of `pending`, `error` or `success`. + - `error` - **string**: the error description if any. 
("scrubbed" indicates that secrets are removed from the field value just as they are in logs) @@ -104,6 +111,32 @@ Here an example of an inventory payload: } ] }, + "logs_metadata": { + "redisdb": [ + { + "config": "{\"path\":\"/var/log/redis_6379.log\",\"service\":\"myredis2\",\"source\":\"redis\",\"type\":\"file\",\"tags\":[\"env:prod\"]}", + "service": "awesome_cache", + "source": "source1", + "state": { + "error": "Error: cannot read file /var/log/redis_6379.log: stat /var/log/redis_6379.log: no such file or directory", + "status": "error" + }, + "tags": ["env:prod"] + } + ], + "nginx": [ + { + "config": "{\"path\":\"/var/log/nginx/access.log\",\"service\":\"nginx\",\"source\":\"nginx\",\"type\":\"file\"}", + "service": "nginx", + "source": "source2", + "state": { + "error": "", + "status": "success" + }, + "tags": [] + } + ] + } "hostname": "my-host", "timestamp": 1631281754507358895 } diff --git a/comp/metadata/inventorychecks/inventorychecksimpl/inventorychecks.go b/comp/metadata/inventorychecks/inventorychecksimpl/inventorychecks.go index fd1faf1339d03d..56d3181739ed41 100644 --- a/comp/metadata/inventorychecks/inventorychecksimpl/inventorychecks.go +++ b/comp/metadata/inventorychecks/inventorychecksimpl/inventorychecks.go @@ -17,12 +17,14 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" flaretypes "github.com/DataDog/datadog-agent/comp/core/flare/types" "github.com/DataDog/datadog-agent/comp/core/log" + logagent "github.com/DataDog/datadog-agent/comp/logs/agent" "github.com/DataDog/datadog-agent/comp/metadata/internal/util" "github.com/DataDog/datadog-agent/comp/metadata/inventorychecks" "github.com/DataDog/datadog-agent/comp/metadata/runner/runnerimpl" "github.com/DataDog/datadog-agent/pkg/collector" "github.com/DataDog/datadog-agent/pkg/collector/check" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" + "github.com/DataDog/datadog-agent/pkg/logs/sources" "github.com/DataDog/datadog-agent/pkg/serializer" 
"github.com/DataDog/datadog-agent/pkg/serializer/marshaler" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -43,9 +45,10 @@ type checksMetadata map[string][]metadata // Payload handles the JSON unmarshalling of the metadata payload type Payload struct { - Hostname string `json:"hostname"` - Timestamp int64 `json:"timestamp"` - Metadata map[string][]metadata `json:"check_metadata"` + Hostname string `json:"hostname"` + Timestamp int64 `json:"timestamp"` + Metadata map[string][]metadata `json:"check_metadata"` + LogsMetadata map[string][]metadata `json:"logs_metadata"` } // MarshalJSON serialization a Payload to JSON @@ -77,6 +80,7 @@ type inventorychecksImpl struct { log log.Component conf config.Component coll optional.Option[collector.Collector] + sources optional.Option[*sources.LogSources] hostname string } @@ -87,6 +91,7 @@ type dependencies struct { Config config.Component Serializer serializer.MetricSerializer Coll optional.Option[collector.Collector] + LogAgent optional.Option[logagent.Component] } type provides struct { @@ -103,6 +108,7 @@ func newInventoryChecksProvider(deps dependencies) provides { conf: deps.Config, log: deps.Log, coll: deps.Coll, + sources: optional.NewNoneOption[*sources.LogSources](), hostname: hname, data: map[string]instanceMetadata{}, } @@ -116,6 +122,10 @@ func newInventoryChecksProvider(deps dependencies) provides { coll.AddEventReceiver(func(_ checkid.ID, _ collector.EventType) { ic.Refresh() }) } + if logAgent, isSet := deps.LogAgent.Get(); isSet { + ic.sources.Set(logAgent.GetSources()) + } + return provides{ Comp: ic, Provider: ic.MetadataProvider(), @@ -197,9 +207,42 @@ func (ic *inventorychecksImpl) getPayload() marshaler.JSONMarshaler { } } + logsMetadata := make(map[string][]metadata) + if sources, isSet := ic.sources.Get(); isSet { + if sources != nil { + for _, logSource := range sources.GetSources() { + if _, found := logsMetadata[logSource.Name]; !found { + logsMetadata[logSource.Name] = []metadata{} + } + + 
parsedJSON, err := logSource.Config.PublicJSON() + if err != nil { + ic.log.Debugf("could not parse log configuration for source metadata %s: %v", logSource.Name, err) + continue + } + + tags := logSource.Config.Tags + if tags == nil { + tags = []string{} + } + logsMetadata[logSource.Name] = append(logsMetadata[logSource.Name], metadata{ + "config": string(parsedJSON), + "state": map[string]string{ + "error": logSource.Status.GetError(), + "status": logSource.Status.String(), + }, + "service": logSource.Config.Service, + "source": logSource.Config.Source, + "tags": tags, + }) + } + } + } + return &Payload{ - Hostname: ic.hostname, - Timestamp: time.Now().UnixNano(), - Metadata: payloadData, + Hostname: ic.hostname, + Timestamp: time.Now().UnixNano(), + Metadata: payloadData, + LogsMetadata: logsMetadata, } } diff --git a/comp/metadata/inventorychecks/inventorychecksimpl/inventorychecks_test.go b/comp/metadata/inventorychecks/inventorychecksimpl/inventorychecks_test.go index e00c66deec8de7..7237c88e2797b2 100644 --- a/comp/metadata/inventorychecks/inventorychecksimpl/inventorychecks_test.go +++ b/comp/metadata/inventorychecks/inventorychecksimpl/inventorychecks_test.go @@ -6,23 +6,29 @@ package inventorychecksimpl import ( + "fmt" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "go.uber.org/fx" + "github.com/DataDog/datadog-agent/comp/core" "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/comp/core/log/logimpl" + logagent "github.com/DataDog/datadog-agent/comp/logs/agent" + logConfig "github.com/DataDog/datadog-agent/comp/logs/agent/config" + "github.com/DataDog/datadog-agent/comp/metadata/inventoryagent" "github.com/DataDog/datadog-agent/pkg/collector" "github.com/DataDog/datadog-agent/pkg/collector/check" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" + "github.com/DataDog/datadog-agent/pkg/logs/sources" "github.com/DataDog/datadog-agent/pkg/serializer" 
"github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/optional" ) -func getTestInventoryChecks(t *testing.T, coll optional.Option[collector.Collector], overrides map[string]any) *inventorychecksImpl { +func getTestInventoryChecks(t *testing.T, coll optional.Option[collector.Collector], logAgent optional.Option[logagent.Component], overrides map[string]any) *inventorychecksImpl { p := newInventoryChecksProvider( fxutil.Test[dependencies]( t, @@ -33,13 +39,18 @@ func getTestInventoryChecks(t *testing.T, coll optional.Option[collector.Collect fx.Provide(func() optional.Option[collector.Collector] { return coll }), + fx.Provide(func() optional.Option[logagent.Component] { + return logAgent + }), ), ) return p.Comp.(*inventorychecksImpl) } func TestSet(t *testing.T) { - ic := getTestInventoryChecks(t, optional.NewNoneOption[collector.Collector](), nil) + ic := getTestInventoryChecks( + t, optional.NewNoneOption[collector.Collector](), optional.Option[logagent.Component]{}, nil, + ) ic.Set("instance_1", "key", "value") @@ -56,7 +67,9 @@ func TestSet(t *testing.T) { } func TestSetEmptyInstance(t *testing.T) { - ic := getTestInventoryChecks(t, optional.NewNoneOption[collector.Collector](), nil) + ic := getTestInventoryChecks( + t, optional.NewNoneOption[collector.Collector](), optional.Option[logagent.Component]{}, nil, + ) ic.Set("", "key", "value") @@ -64,7 +77,9 @@ func TestSetEmptyInstance(t *testing.T) { } func TestGetInstanceMetadata(t *testing.T) { - ic := getTestInventoryChecks(t, optional.NewNoneOption[collector.Collector](), nil) + ic := getTestInventoryChecks( + t, optional.NewNoneOption[collector.Collector](), optional.Option[logagent.Component]{}, nil, + ) ic.Set("instance_1", "key1", "value1") ic.Set("instance_1", "key2", "value2") @@ -115,8 +130,28 @@ func TestGetPayload(t *testing.T) { mockColl.On("AddEventReceiver", mock.AnythingOfType("EventReceiver")).Return() mockColl.On("MapOverChecks", 
mock.AnythingOfType("func([]check.Info)")).Return() + // Setup log sources + logSources := sources.NewLogSources() + src := sources.NewLogSource("redisdb", &logConfig.LogsConfig{ + Type: logConfig.FileType, + Path: "/var/log/redis/redis.log", + Identifier: "redisdb", + Service: "awesome_cache", + Source: "redis", + Tags: []string{"env:prod"}, + }) + // Register an error + src.Status.Error(fmt.Errorf("No such file or directory")) + logSources.AddSource(src) + mockLogAgent := fxutil.Test[optional.Option[logagent.Mock]]( + t, logagent.MockModule(), core.MockBundle(), inventoryagent.MockModule(), + ) + logsAgent, _ := mockLogAgent.Get() + logsAgent.SetSources(logSources) + ic := getTestInventoryChecks(t, optional.NewOption[collector.Collector](mockColl), + optional.NewOption[logagent.Component](logsAgent), overrides, ) @@ -156,9 +191,27 @@ func TestGetPayload(t *testing.T) { // Check that metadata linked to non-existing check were deleted assert.NotContains(t, "non_running_checkid", ic.data) + + // Check the log sources part of the metadata + assert.Len(t, p.LogsMetadata, 1) + actualSource, found := p.LogsMetadata["redisdb"] + assert.True(t, found) + assert.Len(t, actualSource, 1) + expectedSourceConfig := `{"type":"file","path":"/var/log/redis/redis.log","service":"awesome_cache","source":"redis","tags":["env:prod"]}` + assert.Equal(t, expectedSourceConfig, actualSource[0]["config"]) + expectedSourceStatus := map[string]string{ + "status": "error", + "error": "Error: No such file or directory", + } + assert.Equal(t, expectedSourceStatus, actualSource[0]["state"]) + assert.Equal(t, "awesome_cache", actualSource[0]["service"]) + assert.Equal(t, "redis", actualSource[0]["source"]) + assert.Equal(t, []string{"env:prod"}, actualSource[0]["tags"]) } func TestFlareProviderFilename(t *testing.T) { - ic := getTestInventoryChecks(t, optional.NewNoneOption[collector.Collector](), nil) + ic := getTestInventoryChecks( + t, optional.NewNoneOption[collector.Collector](), 
optional.Option[logagent.Component]{}, nil, + ) assert.Equal(t, "checks.json", ic.FlareFileName) } diff --git a/pkg/cli/subcommands/check/command.go b/pkg/cli/subcommands/check/command.go index 2b1857a98aea26..06dfed236706df 100644 --- a/pkg/cli/subcommands/check/command.go +++ b/pkg/cli/subcommands/check/command.go @@ -43,6 +43,7 @@ import ( "github.com/DataDog/datadog-agent/comp/forwarder" "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder" orchestratorForwarderImpl "github.com/DataDog/datadog-agent/comp/forwarder/orchestrator/orchestratorimpl" + logagent "github.com/DataDog/datadog-agent/comp/logs/agent" "github.com/DataDog/datadog-agent/comp/metadata/host" "github.com/DataDog/datadog-agent/comp/metadata/inventoryagent" "github.com/DataDog/datadog-agent/comp/metadata/inventorychecks" @@ -154,6 +155,10 @@ func MakeCommand(globalParamsGetter func() GlobalParams) *cobra.Command { fx.Provide(func() optional.Option[collector.Collector] { return optional.NewNoneOption[collector.Collector]() }), + fx.Provide(func() optional.Option[logagent.Component] { + return optional.NewNoneOption[logagent.Component]() + + }), fx.Provide(func() serializer.MetricSerializer { return nil }), fx.Supply(defaultforwarder.Params{UseNoopForwarder: true}), demultiplexerimpl.Module(), diff --git a/pkg/logs/internal/status/status.go b/pkg/logs/internal/status/status.go index d6588fec376747..817dc42a5cec2a 100644 --- a/pkg/logs/internal/status/status.go +++ b/pkg/logs/internal/status/status.go @@ -95,3 +95,20 @@ func (s *LogStatus) Dump() string { } return fmt.Sprintf("&LogStatus{status: %s, err: %#v}", status, s.err) } + +// String returns a human readable representation of the status. 
+func (s *LogStatus) String() string { + s.mu.Lock() + defer s.mu.Unlock() + + switch s.status { + case isPending: + return "pending" + case isSuccess: + return "success" + case isError: + return "error" + default: + return fmt.Sprintf("unknown status: %d", s.status) + } +} diff --git a/pkg/logs/internal/status/status_test.go b/pkg/logs/internal/status/status_test.go index c8412711583b24..8fe6e3d377c6bd 100644 --- a/pkg/logs/internal/status/status_test.go +++ b/pkg/logs/internal/status/status_test.go @@ -43,6 +43,19 @@ func (s *LogStatusSuite) TestError() { s.Equal("Error: bar", s.status.GetError()) } +func (s *LogStatusSuite) TesString() { + s.status = NewLogStatus() + s.Equal("pending", s.status.String()) + + s.status.Error(errors.New("bar")) + + s.Equal("error", s.status.String()) + s.Equal("Error: bar", s.status.GetError()) + + s.status.Success() + s.Equal("success", s.status.String()) +} + func TestLogStatusSuite(t *testing.T) { suite.Run(t, new(LogStatusSuite)) } diff --git a/pkg/logs/schedulers/schedulers.go b/pkg/logs/schedulers/schedulers.go index 8a5eeb0db2d677..72f184eb6ecd91 100644 --- a/pkg/logs/schedulers/schedulers.go +++ b/pkg/logs/schedulers/schedulers.go @@ -40,6 +40,11 @@ func (ss *Schedulers) AddScheduler(scheduler Scheduler) { } } +// GetSources returns all the log source from the source manager. +func (ss *Schedulers) GetSources() []*sources.LogSource { + return ss.mgr.GetSources() +} + // Start starts all schedulers in the collection. 
func (ss *Schedulers) Start() { for _, s := range ss.schedulers { From a9a19609086ca9d05c83b52a048f546a335dfe93 Mon Sep 17 00:00:00 2001 From: Guy Arbitman Date: Tue, 19 Dec 2023 16:14:59 +0200 Subject: [PATCH 62/66] usm: http2: Split http2_frame_parser to http2_headers_parser and http2_eos_praser (#21627) usm: http2: Split http2_frame_parser to http2_headers_parser and http2_eos_praser --- .../ebpf/c/protocols/classification/defs.h | 3 +- .../ebpf/c/protocols/http2/decoding-defs.h | 14 +- pkg/network/ebpf/c/protocols/http2/decoding.h | 174 +++++++++++++----- pkg/network/protocols/ebpf.go | 6 +- pkg/network/protocols/http2/protocol.go | 14 +- 5 files changed, 159 insertions(+), 52 deletions(-) diff --git a/pkg/network/ebpf/c/protocols/classification/defs.h b/pkg/network/ebpf/c/protocols/classification/defs.h index 8f56a739783d1d..e6f1739c800699 100644 --- a/pkg/network/ebpf/c/protocols/classification/defs.h +++ b/pkg/network/ebpf/c/protocols/classification/defs.h @@ -137,7 +137,8 @@ typedef enum { PROG_HTTP, PROG_HTTP2_HANDLE_FIRST_FRAME, PROG_HTTP2_FRAME_FILTER, - PROG_HTTP2_FRAME_PARSER, + PROG_HTTP2_HEADERS_PARSER, + PROG_HTTP2_EOS_PARSER, PROG_KAFKA, PROG_GRPC, // Add before this value. diff --git a/pkg/network/ebpf/c/protocols/http2/decoding-defs.h b/pkg/network/ebpf/c/protocols/http2/decoding-defs.h index e7be1e6f2da499..4c8950e5a74aa6 100644 --- a/pkg/network/ebpf/c/protocols/http2/decoding-defs.h +++ b/pkg/network/ebpf/c/protocols/http2/decoding-defs.h @@ -5,7 +5,19 @@ #include "protocols/http2/defs.h" -#define HTTP2_FRAMES_PER_TAIL_CALL 7 +// Represents the maximum number of frames we'll process in a single tail call in `handle_eos_frames` program. +#define HTTP2_MAX_FRAMES_FOR_EOS_PARSER_PER_TAIL_CALL 200 +// Represents the maximum number of tail calls to process EOS frames. +// Currently we have up to 120 frames in a packet, thus 1 tail call is enough. 
+#define HTTP2_MAX_TAIL_CALLS_FOR_EOS_PARSER 1 +#define HTTP2_MAX_FRAMES_FOR_EOS_PARSER (HTTP2_MAX_FRAMES_FOR_EOS_PARSER_PER_TAIL_CALL * HTTP2_MAX_TAIL_CALLS_FOR_EOS_PARSER) + +// Represents the maximum number of frames we'll process in a single tail call in `handle_headers_frames` program. +#define HTTP2_MAX_FRAMES_FOR_HEADERS_PARSER_PER_TAIL_CALL 18 +// Represents the maximum number of tail calls to process headers frames. +// Currently we have up to 120 frames in a packet, thus 7 (7*18 = 126) tail calls is enough. +#define HTTP2_MAX_TAIL_CALLS_FOR_HEADERS_PARSER 7 +#define HTTP2_MAX_FRAMES_FOR_HEADERS_PARSER (HTTP2_MAX_FRAMES_FOR_HEADERS_PARSER_PER_TAIL_CALL * HTTP2_MAX_TAIL_CALLS_FOR_HEADERS_PARSER) // Maximum number of frames to be processed in a single TCP packet. That's also the number of tail calls we'll have. // NOTE: we may need to revisit this const if we need to capture more connections. #define HTTP2_MAX_FRAMES_ITERATIONS 120 diff --git a/pkg/network/ebpf/c/protocols/http2/decoding.h b/pkg/network/ebpf/c/protocols/http2/decoding.h index b3bb3da46c99a7..ac7d2f1b3642c7 100644 --- a/pkg/network/ebpf/c/protocols/http2/decoding.h +++ b/pkg/network/ebpf/c/protocols/http2/decoding.h @@ -367,42 +367,6 @@ static __always_inline void process_headers_frame(struct __sk_buff *skb, http2_s process_headers(skb, dynamic_index, current_stream, headers_to_process, interesting_headers, http2_tel); } -static __always_inline void parse_frame(struct __sk_buff *skb, skb_info_t *skb_info, conn_tuple_t *tup, http2_ctx_t *http2_ctx, struct http2_frame *current_frame, http2_telemetry_t *http2_tel) { - http2_ctx->http2_stream_key.stream_id = current_frame->stream_id; - http2_stream_t *current_stream = http2_fetch_stream(&http2_ctx->http2_stream_key); - if (current_stream == NULL) { - return; - } - - if (current_frame->type == kHeadersFrame) { - process_headers_frame(skb, current_stream, skb_info, tup, &http2_ctx->dynamic_index, current_frame, http2_tel); - } - - // When we accept 
an RST, it means that the current stream is terminated. - // See: https://datatracker.ietf.org/doc/html/rfc7540#section-6.4 - bool is_rst = current_frame->type == kRSTStreamFrame; - // If rst, and stream is empty (no status code, or no response) then delete from inflight - if (is_rst && (current_stream->response_status_code == 0 || current_stream->request_started == 0)) { - bpf_map_delete_elem(&http2_in_flight, &http2_ctx->http2_stream_key); - return; - } - - bool should_handle_end_of_stream = false; - if (is_rst) { - __sync_fetch_and_add(&http2_tel->end_of_stream_rst, 1); - should_handle_end_of_stream = true; - } else if ((current_frame->flags & HTTP2_END_OF_STREAM) == HTTP2_END_OF_STREAM) { - __sync_fetch_and_add(&http2_tel->end_of_stream, 1); - should_handle_end_of_stream = true; - } - - if (should_handle_end_of_stream) { - handle_end_of_stream(current_stream, &http2_ctx->http2_stream_key, http2_tel); - } - - return; -} - // A similar implementation of read_http2_frame_header, but instead of getting both a char array and an out parameter, // we get only the out parameter (equals to struct http2_frame * representation of the char array) and we perform the // field adjustments we have in read_http2_frame_header. @@ -733,14 +697,20 @@ int socket__http2_filter(struct __sk_buff *skb) { // We have couple of interesting headers, launching tail calls to handle them. if (bpf_map_update_elem(&http2_iterations, &dispatcher_args_copy, iteration_value, BPF_NOEXIST) >= 0) { // We managed to cache the iteration_value in the http2_iterations map. - bpf_tail_call_compat(skb, &protocols_progs, PROG_HTTP2_FRAME_PARSER); + bpf_tail_call_compat(skb, &protocols_progs, PROG_HTTP2_HEADERS_PARSER); } return 0; } -SEC("socket/http2_frames_parser") -int socket__http2_frames_parser(struct __sk_buff *skb) { +// The program is responsible for parsing all headers frames. 
For each headers frame we parse the headers, +// fill the dynamic table with the new interesting literal headers, and modifying the streams accordingly. +// The program can be called multiple times (via "self call" of tail calls) in case we have more frames to parse +// than the maximum number of frames we can process in a single tail call. +// The program is being called after socket__http2_filter, and it is being called only if we have interesting frames. +// The program calls socket__http2_eos_parser to finalize the streams and enqueue them to be sent to the user mode. +SEC("socket/http2_headers_parser") +int socket__http2_headers_parser(struct __sk_buff *skb) { dispatcher_arguments_t dispatcher_args_copy; bpf_memset(&dispatcher_args_copy, 0, sizeof(dispatcher_arguments_t)); if (!fetch_dispatching_arguments(&dispatcher_args_copy.tup, &dispatcher_args_copy.skb_info)) { @@ -782,8 +752,10 @@ int socket__http2_frames_parser(struct __sk_buff *skb) { normalize_tuple(&http2_ctx->http2_stream_key.tup); http2_ctx->dynamic_index.tup = dispatcher_args_copy.tup; - #pragma unroll(HTTP2_FRAMES_PER_TAIL_CALL) - for (__u16 index = 0; index < HTTP2_FRAMES_PER_TAIL_CALL; index++) { + http2_stream_t *current_stream = NULL; + + #pragma unroll(HTTP2_MAX_FRAMES_FOR_HEADERS_PARSER_PER_TAIL_CALL) + for (__u16 index = 0; index < HTTP2_MAX_FRAMES_FOR_HEADERS_PARSER_PER_TAIL_CALL; index++) { if (tail_call_state->iteration >= HTTP2_MAX_FRAMES_ITERATIONS) { break; } @@ -795,14 +767,27 @@ int socket__http2_frames_parser(struct __sk_buff *skb) { } tail_call_state->iteration += 1; - dispatcher_args_copy.skb_info.data_off = current_frame.offset; + if (current_frame.frame.type != kHeadersFrame) { + continue; + } - parse_frame(skb, &dispatcher_args_copy.skb_info, &dispatcher_args_copy.tup, http2_ctx, ¤t_frame.frame, http2_tel); + http2_ctx->http2_stream_key.stream_id = current_frame.frame.stream_id; + current_stream = http2_fetch_stream(&http2_ctx->http2_stream_key); + if (current_stream == NULL) 
{ + continue; + } + dispatcher_args_copy.skb_info.data_off = current_frame.offset; + process_headers_frame(skb, current_stream, &dispatcher_args_copy.skb_info, &dispatcher_args_copy.tup, &http2_ctx->dynamic_index, ¤t_frame.frame, http2_tel); } - if (tail_call_state->iteration < HTTP2_MAX_FRAMES_ITERATIONS && tail_call_state->iteration < tail_call_state->frames_count) { - bpf_tail_call_compat(skb, &protocols_progs, PROG_HTTP2_FRAME_PARSER); + if (tail_call_state->iteration < HTTP2_MAX_FRAMES_ITERATIONS && + tail_call_state->iteration < tail_call_state->frames_count && + tail_call_state->iteration < HTTP2_MAX_FRAMES_FOR_HEADERS_PARSER) { + bpf_tail_call_compat(skb, &protocols_progs, PROG_HTTP2_HEADERS_PARSER); } + // Zeroing the iteration index to call EOS parser + tail_call_state->iteration = 0; + bpf_tail_call_compat(skb, &protocols_progs, PROG_HTTP2_EOS_PARSER); delete_iteration: // restoring the original value. @@ -812,4 +797,103 @@ int socket__http2_frames_parser(struct __sk_buff *skb) { return 0; } +// The program is responsible for parsing all frames that mark the end of a stream. +// We consider a frame as marking the end of a stream if it is either: +// - An headers or data frame with END_STREAM flag set. +// - An RST_STREAM frame. +// The program is being called after socket__http2_headers_parser, and it finalizes the streams and enqueue them +// to be sent to the user mode. +// The program is ready to be called multiple times (via "self call" of tail calls) in case we have more frames to +// process than the maximum number of frames we can process in a single tail call. 
+SEC("socket/http2_eos_parser") +int socket__http2_eos_parser(struct __sk_buff *skb) { + dispatcher_arguments_t dispatcher_args_copy; + bpf_memset(&dispatcher_args_copy, 0, sizeof(dispatcher_arguments_t)); + if (!fetch_dispatching_arguments(&dispatcher_args_copy.tup, &dispatcher_args_copy.skb_info)) { + return 0; + } + + // A single packet can contain multiple HTTP/2 frames, due to instruction limitations we have divided the + // processing into multiple tail calls, where each tail call process a single frame. We must have context when + // we are processing the frames, for example, to know how many bytes have we read in the packet, or it we reached + // to the maximum number of frames we can process. For that we are checking if the iteration context already exists. + // If not, creating a new one to be used for further processing + http2_tail_call_state_t *tail_call_state = bpf_map_lookup_elem(&http2_iterations, &dispatcher_args_copy); + if (tail_call_state == NULL) { + // We didn't find the cached context, aborting. 
+ return 0; + } + + const __u32 zero = 0; + http2_telemetry_t *http2_tel = bpf_map_lookup_elem(&http2_telemetry, &zero); + if (http2_tel == NULL) { + goto delete_iteration; + } + + http2_frame_with_offset *frames_array = tail_call_state->frames_array; + http2_frame_with_offset current_frame; + + http2_ctx_t *http2_ctx = bpf_map_lookup_elem(&http2_ctx_heap, &zero); + if (http2_ctx == NULL) { + goto delete_iteration; + } + bpf_memset(http2_ctx, 0, sizeof(http2_ctx_t)); + http2_ctx->http2_stream_key.tup = dispatcher_args_copy.tup; + normalize_tuple(&http2_ctx->http2_stream_key.tup); + + bool is_rst = false, is_end_of_stream = false; + http2_stream_t *current_stream = NULL; + + #pragma unroll(HTTP2_MAX_FRAMES_FOR_EOS_PARSER_PER_TAIL_CALL) + for (__u16 index = 0; index < HTTP2_MAX_FRAMES_FOR_EOS_PARSER_PER_TAIL_CALL; index++) { + if (tail_call_state->iteration >= HTTP2_MAX_FRAMES_ITERATIONS) { + break; + } + + current_frame = frames_array[tail_call_state->iteration]; + // Having this condition after assignment and not before is due to a verifier issue. + if (tail_call_state->iteration >= tail_call_state->frames_count) { + break; + } + tail_call_state->iteration += 1; + + is_rst = current_frame.frame.type == kRSTStreamFrame; + is_end_of_stream = (current_frame.frame.flags & HTTP2_END_OF_STREAM) == HTTP2_END_OF_STREAM; + if (!is_rst && !is_end_of_stream) { + continue; + } + + http2_ctx->http2_stream_key.stream_id = current_frame.frame.stream_id; + current_stream = http2_fetch_stream(&http2_ctx->http2_stream_key); + if (current_stream == NULL) { + continue; + } + + // When we accept an RST, it means that the current stream is terminated. 
+ // See: https://datatracker.ietf.org/doc/html/rfc7540#section-6.4 + // If rst, and stream is empty (no status code, or no response) then delete from inflight + if (is_rst && (current_stream->response_status_code == 0 || current_stream->request_started == 0)) { + bpf_map_delete_elem(&http2_in_flight, &http2_ctx->http2_stream_key); + continue; + } + + if (is_rst) { + __sync_fetch_and_add(&http2_tel->end_of_stream_rst, 1); + } else if ((current_frame.frame.flags & HTTP2_END_OF_STREAM) == HTTP2_END_OF_STREAM) { + __sync_fetch_and_add(&http2_tel->end_of_stream, 1); + } + handle_end_of_stream(current_stream, &http2_ctx->http2_stream_key, http2_tel); + } + + if (tail_call_state->iteration < HTTP2_MAX_FRAMES_ITERATIONS && + tail_call_state->iteration < tail_call_state->frames_count && + tail_call_state->iteration < HTTP2_MAX_FRAMES_FOR_EOS_PARSER) { + bpf_tail_call_compat(skb, &protocols_progs, PROG_HTTP2_EOS_PARSER); + } + +delete_iteration: + bpf_map_delete_elem(&http2_iterations, &dispatcher_args_copy); + + return 0; +} #endif diff --git a/pkg/network/protocols/ebpf.go b/pkg/network/protocols/ebpf.go index dc8b94b1dfa0fc..b7b082f34df72e 100644 --- a/pkg/network/protocols/ebpf.go +++ b/pkg/network/protocols/ebpf.go @@ -37,8 +37,10 @@ const ( ProgramHTTP2HandleFirstFrame ProgramType = C.PROG_HTTP2_HANDLE_FIRST_FRAME // ProgramHTTP2FrameFilter is the Golang representation of the C.PROG_HTTP2_HANDLE_FRAME enum ProgramHTTP2FrameFilter ProgramType = C.PROG_HTTP2_FRAME_FILTER - // ProgramHTTP2FrameParser is the Golang representation of the C.PROG_HTTP2_FRAME_PARSER enum - ProgramHTTP2FrameParser ProgramType = C.PROG_HTTP2_FRAME_PARSER + // ProgramHTTP2HeadersParser is the Golang representation of the C.PROG_HTTP2_HEADERS_PARSER enum + ProgramHTTP2HeadersParser ProgramType = C.PROG_HTTP2_HEADERS_PARSER + // ProgramHTTP2EOSParser is the Golang representation of the C.PROG_HTTP2_EOS_PARSER enum + ProgramHTTP2EOSParser ProgramType = C.PROG_HTTP2_EOS_PARSER // ProgramKafka is the 
Golang representation of the C.PROG_KAFKA enum ProgramKafka ProgramType = C.PROG_KAFKA ) diff --git a/pkg/network/protocols/http2/protocol.go b/pkg/network/protocols/http2/protocol.go index d8522c6fd67432..180e0a8722087a 100644 --- a/pkg/network/protocols/http2/protocol.go +++ b/pkg/network/protocols/http2/protocol.go @@ -56,7 +56,8 @@ const ( staticTable = "http2_static_table" firstFrameHandlerTailCall = "socket__http2_handle_first_frame" filterTailCall = "socket__http2_filter" - parserTailCall = "socket__http2_frames_parser" + headersParserTailCall = "socket__http2_headers_parser" + eosParserTailCall = "socket__http2_eos_parser" eventStream = "http2" telemetryMap = "http2_telemetry" ) @@ -110,9 +111,16 @@ var Spec = &protocols.ProtocolSpec{ }, { ProgArrayName: protocols.ProtocolDispatcherProgramsMap, - Key: uint32(protocols.ProgramHTTP2FrameParser), + Key: uint32(protocols.ProgramHTTP2HeadersParser), ProbeIdentificationPair: manager.ProbeIdentificationPair{ - EBPFFuncName: parserTailCall, + EBPFFuncName: headersParserTailCall, + }, + }, + { + ProgArrayName: protocols.ProtocolDispatcherProgramsMap, + Key: uint32(protocols.ProgramHTTP2EOSParser), + ProbeIdentificationPair: manager.ProbeIdentificationPair{ + EBPFFuncName: eosParserTailCall, }, }, }, From cd791dfffc14f1c05780e5339748352417afdcdd Mon Sep 17 00:00:00 2001 From: Pierre Guilleminot Date: Tue, 19 Dec 2023 15:18:58 +0100 Subject: [PATCH 63/66] [CSPM] Fix fd leak in k8sconfig and aptconfig loaders (#21658) --- pkg/compliance/aptconfig/aptconfig.go | 1 + pkg/compliance/k8sconfig/loader.go | 1 + 2 files changed, 2 insertions(+) diff --git a/pkg/compliance/aptconfig/aptconfig.go b/pkg/compliance/aptconfig/aptconfig.go index a487b504accbeb..79b6cdeb210411 100644 --- a/pkg/compliance/aptconfig/aptconfig.go +++ b/pkg/compliance/aptconfig/aptconfig.go @@ -267,6 +267,7 @@ func readFileLimit(path string) (string, error) { if err != nil { return "", err } + defer f.Close() data, err := io.ReadAll(io.LimitReader(f, 
maxSize)) if err != nil { return "", err diff --git a/pkg/compliance/k8sconfig/loader.go b/pkg/compliance/k8sconfig/loader.go index d5056ec20eed0d..d1701ac71f0e67 100644 --- a/pkg/compliance/k8sconfig/loader.go +++ b/pkg/compliance/k8sconfig/loader.go @@ -166,6 +166,7 @@ func (l *loader) loadMeta(name string, loadContent bool) (string, os.FileInfo, [ if err != nil { l.pushError(err) } else { + defer f.Close() b, err = io.ReadAll(io.LimitReader(f, maxSize)) if err != nil { l.pushError(err) From 475913bc7f957314e39a09520ae5c7e010291ed5 Mon Sep 17 00:00:00 2001 From: Dustin Long Date: Mon, 18 Dec 2023 19:13:06 -0500 Subject: [PATCH 64/66] Test that the forwarder is unhealthy when the API key is invalid --- .../agent-subcommands/subcommands_test.go | 25 +++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/test/new-e2e/tests/agent-subcommands/subcommands_test.go b/test/new-e2e/tests/agent-subcommands/subcommands_test.go index 1891fa3ec59839..de3202937389d5 100644 --- a/test/new-e2e/tests/agent-subcommands/subcommands_test.go +++ b/test/new-e2e/tests/agent-subcommands/subcommands_test.go @@ -11,6 +11,7 @@ import ( "testing" "time" + "github.com/DataDog/datadog-agent/test/fakeintake/api" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client" "github.com/DataDog/test-infra-definitions/components/datadog/agentparams" @@ -232,3 +233,27 @@ func (v *subcommandWithFakeIntakeSuite) TestDefaultInstallHealthy() { assert.NoError(v.T(), err) assert.Contains(v.T(), output, "Agent health: PASS") } + +func (v *subcommandWithFakeIntakeSuite) TestDefaultInstallUnhealthy() { + // the fakeintake says that any API key is invalid by sending a 403 code + override := api.ResponseOverride{ + Endpoint: "/api/v1/validate", + StatusCode: 403, + ContentType: "text/plain", + Body: []byte("invalid API key"), + } + v.Env().Fakeintake.Client.ConfigureOverride(override) + + // restart the agent, which validates the 
key using the fakeintake at startup + v.UpdateEnv(e2e.FakeIntakeStackDef( + e2e.WithAgentParams(agentparams.WithAgentConfig("log_level: info\n")), + )) + + // agent should be unhealthy because the key is invalid + _, err := v.Env().Agent.Health() + if err == nil { + assert.Fail(v.T(), "agent expected to be unhealthy, but no error found!") + return + } + assert.Contains(v.T(), err.Error(), "Agent health: FAIL") +} From 30261df670e843ee07fe2678512d53dfbcd53762 Mon Sep 17 00:00:00 2001 From: Paul Cacheux Date: Tue, 19 Dec 2023 16:00:42 +0100 Subject: [PATCH 65/66] [CWS] fix security go check (#21656) [CWS] fix security go check --- docs/cloud-workload-security/backend.md | 9 ++++++ .../rules/monitor/policy_monitor_easyjson.go | 32 +++++++++---------- tasks/security_agent.py | 2 +- 3 files changed, 26 insertions(+), 17 deletions(-) diff --git a/docs/cloud-workload-security/backend.md b/docs/cloud-workload-security/backend.md index 2408294824d253..2da537d3a897ed 100644 --- a/docs/cloud-workload-security/backend.md +++ b/docs/cloud-workload-security/backend.md @@ -221,6 +221,10 @@ CSM Threats logs have the following JSON schema: }, "type": "array", "description": "The list of rules that the event matched (only valid in the context of an anomaly)" + }, + "origin": { + "type": "string", + "description": "Origin of the event" } }, "additionalProperties": false, @@ -1758,6 +1762,10 @@ CSM Threats logs have the following JSON schema: }, "type": "array", "description": "The list of rules that the event matched (only valid in the context of an anomaly)" + }, + "origin": { + "type": "string", + "description": "Origin of the event" } }, "additionalProperties": false, @@ -1774,6 +1782,7 @@ CSM Threats logs have the following JSON schema: | `outcome` | Event outcome | | `async` | True if the event was asynchronous | | `matched_rules` | The list of rules that the event matched (only valid in the context of an anomaly) | +| `origin` | Origin of the event | ## `ExitEvent` diff --git 
a/pkg/security/rules/monitor/policy_monitor_easyjson.go b/pkg/security/rules/monitor/policy_monitor_easyjson.go index 7494866f9165ee..d03c2f281c6e6b 100644 --- a/pkg/security/rules/monitor/policy_monitor_easyjson.go +++ b/pkg/security/rules/monitor/policy_monitor_easyjson.go @@ -17,7 +17,7 @@ var ( _ easyjson.Marshaler ) -func easyjson6151911dDecodeGithubComDataDogDatadogAgentPkgSecurityRules(in *jlexer.Lexer, out *RulesetLoadedEvent) { +func easyjson6151911dDecodeGithubComDataDogDatadogAgentPkgSecurityRulesMonitor(in *jlexer.Lexer, out *RulesetLoadedEvent) { isTopLevel := in.IsStart() if in.IsNull() { if isTopLevel { @@ -83,7 +83,7 @@ func easyjson6151911dDecodeGithubComDataDogDatadogAgentPkgSecurityRules(in *jlex in.Consumed() } } -func easyjson6151911dEncodeGithubComDataDogDatadogAgentPkgSecurityRules(out *jwriter.Writer, in RulesetLoadedEvent) { +func easyjson6151911dEncodeGithubComDataDogDatadogAgentPkgSecurityRulesMonitor(out *jwriter.Writer, in RulesetLoadedEvent) { out.RawByte('{') first := true _ = first @@ -122,14 +122,14 @@ func easyjson6151911dEncodeGithubComDataDogDatadogAgentPkgSecurityRules(out *jwr // MarshalEasyJSON supports easyjson.Marshaler interface func (v RulesetLoadedEvent) MarshalEasyJSON(w *jwriter.Writer) { - easyjson6151911dEncodeGithubComDataDogDatadogAgentPkgSecurityRules(w, v) + easyjson6151911dEncodeGithubComDataDogDatadogAgentPkgSecurityRulesMonitor(w, v) } // UnmarshalEasyJSON supports easyjson.Unmarshaler interface func (v *RulesetLoadedEvent) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjson6151911dDecodeGithubComDataDogDatadogAgentPkgSecurityRules(l, v) + easyjson6151911dDecodeGithubComDataDogDatadogAgentPkgSecurityRulesMonitor(l, v) } -func easyjson6151911dDecodeGithubComDataDogDatadogAgentPkgSecurityRules1(in *jlexer.Lexer, out *RuleState) { +func easyjson6151911dDecodeGithubComDataDogDatadogAgentPkgSecurityRulesMonitor1(in *jlexer.Lexer, out *RuleState) { isTopLevel := in.IsStart() if in.IsNull() { if isTopLevel { @@ -188,7 
+188,7 @@ func easyjson6151911dDecodeGithubComDataDogDatadogAgentPkgSecurityRules1(in *jle in.Consumed() } } -func easyjson6151911dEncodeGithubComDataDogDatadogAgentPkgSecurityRules1(out *jwriter.Writer, in RuleState) { +func easyjson6151911dEncodeGithubComDataDogDatadogAgentPkgSecurityRulesMonitor1(out *jwriter.Writer, in RuleState) { out.RawByte('{') first := true _ = first @@ -241,14 +241,14 @@ func easyjson6151911dEncodeGithubComDataDogDatadogAgentPkgSecurityRules1(out *jw // MarshalEasyJSON supports easyjson.Marshaler interface func (v RuleState) MarshalEasyJSON(w *jwriter.Writer) { - easyjson6151911dEncodeGithubComDataDogDatadogAgentPkgSecurityRules1(w, v) + easyjson6151911dEncodeGithubComDataDogDatadogAgentPkgSecurityRulesMonitor1(w, v) } // UnmarshalEasyJSON supports easyjson.Unmarshaler interface func (v *RuleState) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjson6151911dDecodeGithubComDataDogDatadogAgentPkgSecurityRules1(l, v) + easyjson6151911dDecodeGithubComDataDogDatadogAgentPkgSecurityRulesMonitor1(l, v) } -func easyjson6151911dDecodeGithubComDataDogDatadogAgentPkgSecurityRules2(in *jlexer.Lexer, out *PolicyState) { +func easyjson6151911dDecodeGithubComDataDogDatadogAgentPkgSecurityRulesMonitor2(in *jlexer.Lexer, out *PolicyState) { isTopLevel := in.IsStart() if in.IsNull() { if isTopLevel { @@ -314,7 +314,7 @@ func easyjson6151911dDecodeGithubComDataDogDatadogAgentPkgSecurityRules2(in *jle in.Consumed() } } -func easyjson6151911dEncodeGithubComDataDogDatadogAgentPkgSecurityRules2(out *jwriter.Writer, in PolicyState) { +func easyjson6151911dEncodeGithubComDataDogDatadogAgentPkgSecurityRulesMonitor2(out *jwriter.Writer, in PolicyState) { out.RawByte('{') first := true _ = first @@ -358,14 +358,14 @@ func easyjson6151911dEncodeGithubComDataDogDatadogAgentPkgSecurityRules2(out *jw // MarshalEasyJSON supports easyjson.Marshaler interface func (v PolicyState) MarshalEasyJSON(w *jwriter.Writer) { - 
easyjson6151911dEncodeGithubComDataDogDatadogAgentPkgSecurityRules2(w, v) + easyjson6151911dEncodeGithubComDataDogDatadogAgentPkgSecurityRulesMonitor2(w, v) } // UnmarshalEasyJSON supports easyjson.Unmarshaler interface func (v *PolicyState) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjson6151911dDecodeGithubComDataDogDatadogAgentPkgSecurityRules2(l, v) + easyjson6151911dDecodeGithubComDataDogDatadogAgentPkgSecurityRulesMonitor2(l, v) } -func easyjson6151911dDecodeGithubComDataDogDatadogAgentPkgSecurityRules3(in *jlexer.Lexer, out *HeartbeatEvent) { +func easyjson6151911dDecodeGithubComDataDogDatadogAgentPkgSecurityRulesMonitor3(in *jlexer.Lexer, out *HeartbeatEvent) { isTopLevel := in.IsStart() if in.IsNull() { if isTopLevel { @@ -410,7 +410,7 @@ func easyjson6151911dDecodeGithubComDataDogDatadogAgentPkgSecurityRules3(in *jle in.Consumed() } } -func easyjson6151911dEncodeGithubComDataDogDatadogAgentPkgSecurityRules3(out *jwriter.Writer, in HeartbeatEvent) { +func easyjson6151911dEncodeGithubComDataDogDatadogAgentPkgSecurityRulesMonitor3(out *jwriter.Writer, in HeartbeatEvent) { out.RawByte('{') first := true _ = first @@ -438,10 +438,10 @@ func easyjson6151911dEncodeGithubComDataDogDatadogAgentPkgSecurityRules3(out *jw // MarshalEasyJSON supports easyjson.Marshaler interface func (v HeartbeatEvent) MarshalEasyJSON(w *jwriter.Writer) { - easyjson6151911dEncodeGithubComDataDogDatadogAgentPkgSecurityRules3(w, v) + easyjson6151911dEncodeGithubComDataDogDatadogAgentPkgSecurityRulesMonitor3(w, v) } // UnmarshalEasyJSON supports easyjson.Unmarshaler interface func (v *HeartbeatEvent) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjson6151911dDecodeGithubComDataDogDatadogAgentPkgSecurityRules3(l, v) + easyjson6151911dDecodeGithubComDataDogDatadogAgentPkgSecurityRulesMonitor3(l, v) } diff --git a/tasks/security_agent.py b/tasks/security_agent.py index c18673a684d42a..8b7d00502a29b9 100644 --- a/tasks/security_agent.py +++ b/tasks/security_agent.py @@ -708,7 +708,7 @@ def 
generate_cws_proto(ctx): def get_git_dirty_files(): - dirty_stats = check_output(["git", "status", "--porcelain=v1", "untracked-files=no"]).decode('utf-8') + dirty_stats = check_output(["git", "status", "--porcelain=v1", "--untracked-files=no"]).decode('utf-8') paths = [] # see https://git-scm.com/docs/git-status#_short_format for format documentation From 2618fb9dbe77462431f8bcd5e790d08706216721 Mon Sep 17 00:00:00 2001 From: maxime mouial Date: Tue, 19 Dec 2023 16:43:26 +0100 Subject: [PATCH 66/66] Updating codeowner to better reflect actual ownership (#21651) Updating codeowner to better reflect actual ownership --- .github/CODEOWNERS | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index bcf2998e991e34..a2da6aad4999d5 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -28,7 +28,7 @@ # Todo: is this file still needed? /Makefile.trace @DataDog/agent-platform -/release.json @DataDog/agent-platform @DataDog/agent-shared-components @DataDog/agent-metrics-logs @DataDog/windows-kernel-integrations +/release.json @DataDog/agent-platform @DataDog/agent-metrics-logs @DataDog/windows-kernel-integrations /requirements.txt @DataDog/agent-platform /pyproject.toml @DataDog/agent-platform /setup.cfg @DataDog/agent-platform @@ -340,7 +340,6 @@ /pkg/proto/datadog/trace @DataDog/agent-apm /pkg/remoteconfig/ @DataDog/remote-config /pkg/runtime/ @DataDog/agent-shared-components -/pkg/secrets/ @DataDog/agent-shared-components /pkg/serializer/ @DataDog/agent-metrics-logs /pkg/tagger/ @DataDog/container-integrations /pkg/tagset/ @DataDog/agent-shared-components