From 5e87efc96827414ef17254842baad83250990851 Mon Sep 17 00:00:00 2001 From: Hasan Mahmood <6599778+hmahmood@users.noreply.github.com> Date: Thu, 11 Apr 2024 14:22:27 -0500 Subject: [PATCH 01/99] [NPM-3239] Make local resolver address and pid to container caches persistent across updates (#24122) * Persist address and pid for containers in resolver cache * Add tests for max * Fix tests * Fix data races in tests * Add telemetry --- pkg/process/checks/net.go | 7 +- pkg/process/net/resolver/resolver.go | 119 +++++++--- pkg/process/net/resolver/resolver_test.go | 275 +++++++++++++++++++++- 3 files changed, 369 insertions(+), 32 deletions(-) diff --git a/pkg/process/checks/net.go b/pkg/process/checks/net.go index 8fb787d9dfb04..63177520f1ffc 100644 --- a/pkg/process/checks/net.go +++ b/pkg/process/checks/net.go @@ -32,6 +32,11 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/subscriptions" ) +const ( + maxResolverPidCacheSize = 32768 + maxResolverAddrCacheSize = 4096 +) + var ( // ErrTracerStillNotInitialized signals that the tracer is _still_ not ready, so we shouldn't log additional errors ErrTracerStillNotInitialized = errors.New("remote tracer is still not initialized") @@ -113,7 +118,7 @@ func (c *ConnectionsCheck) Init(syscfg *SysProbeConfig, hostInfo *HostInfo, _ bo c.processData.Register(c.serviceExtractor) // LocalResolver is a singleton LocalResolver - c.localresolver = resolver.NewLocalResolver(proccontainers.GetSharedContainerProvider(c.wmeta), clock.New()) + c.localresolver = resolver.NewLocalResolver(proccontainers.GetSharedContainerProvider(c.wmeta), clock.New(), maxResolverAddrCacheSize, maxResolverPidCacheSize) c.localresolver.Run() return nil diff --git a/pkg/process/net/resolver/resolver.go b/pkg/process/net/resolver/resolver.go index 1fd5395bdcc09..fc196bfe37f18 100644 --- a/pkg/process/net/resolver/resolver.go +++ b/pkg/process/net/resolver/resolver.go @@ -12,35 +12,55 @@ import ( "sync" "time" - "github.com/benbjohnson/clock" - model "github.com/DataDog/agent-payload/v5/process" + "github.com/benbjohnson/clock" + "go4.org/intern" procutil "github.com/DataDog/datadog-agent/pkg/process/util" proccontainers "github.com/DataDog/datadog-agent/pkg/process/util/containers" + "github.com/DataDog/datadog-agent/pkg/telemetry" "github.com/DataDog/datadog-agent/pkg/util/log" ) -const defaultTTL = 10 * time.Second -const cacheValidityNoRT = 2 * time.Second +const ( + cacheValidityNoRT = 2 * time.Second +) + +var resolverTelemetry = struct { + cacheSize telemetry.Gauge + cacheDrops telemetry.Counter +}{ + telemetry.NewGauge("net_local_resolver", "cache_size", []string{"cache"}, "Gauge for cache sizes"), + telemetry.NewCounter("net_local_resolver", "cache_drops", []string{"cache"}, "Gauge for cache drops"), +} + +type containerIDEntry struct { + cid *intern.Value + inUse bool +} // LocalResolver is responsible resolving the raddr of connections when they are local containers type LocalResolver struct { - mux sync.RWMutex - addrToCtrID map[model.ContainerAddr]string - ctrForPid map[int]string - updated time.Time + mux sync.Mutex + addrToCtrID map[model.ContainerAddr]*containerIDEntry + maxAddrToCtrIDSize int + ctrForPid map[int]*containerIDEntry + maxCtrForPidSize int lastContainerRates map[string]*proccontainers.ContainerRateMetrics Clock clock.Clock ContainerProvider proccontainers.ContainerProvider done chan bool } -func NewLocalResolver(containerProvider proccontainers.ContainerProvider, clock clock.Clock) *LocalResolver { +func NewLocalResolver(containerProvider 
proccontainers.ContainerProvider, clock clock.Clock, maxAddrCacheSize, maxPidCacheSize int) *LocalResolver { return &LocalResolver{ - ContainerProvider: containerProvider, - Clock: clock, - done: make(chan bool), + ContainerProvider: containerProvider, + Clock: clock, + done: make(chan bool), + addrToCtrID: make(map[model.ContainerAddr]*containerIDEntry), + maxAddrToCtrIDSize: maxAddrCacheSize, + ctrForPid: make(map[int]*containerIDEntry), + maxCtrForPidSize: maxPidCacheSize, } } @@ -59,10 +79,6 @@ func (l *LocalResolver) pullContainers(ticker *clock.Ticker) { for { select { case <-ticker.C: - var containers []*model.Container - var pidToCid map[int]string - var lastContainerRates map[string]*proccontainers.ContainerRateMetrics - containers, lastContainerRates, pidToCid, err := l.ContainerProvider.GetContainers(cacheValidityNoRT, l.lastContainerRates) if err == nil { l.lastContainerRates = lastContainerRates @@ -84,22 +100,50 @@ func (l *LocalResolver) LoadAddrs(containers []*model.Container, pidToCid map[in l.mux.Lock() defer l.mux.Unlock() - if time.Since(l.updated) < defaultTTL { - return + // mark everything not in use + for _, c := range l.addrToCtrID { + c.inUse = false + } + for _, c := range l.ctrForPid { + c.inUse = false } - l.updated = time.Now() - l.addrToCtrID = make(map[model.ContainerAddr]string) - l.ctrForPid = pidToCid +containersLoop: for _, ctr := range containers { for _, networkAddr := range ctr.Addresses { + if len(l.addrToCtrID) >= l.maxAddrToCtrIDSize { + log.Warnf("address to container ID cache has reached max size of %d entries", l.maxAddrToCtrIDSize) + resolverTelemetry.cacheDrops.Inc("addr_cache") + break containersLoop + } + parsedAddr := procutil.AddressFromString(networkAddr.Ip) if parsedAddr.IsLoopback() { continue } - l.addrToCtrID[*networkAddr] = ctr.Id + l.addrToCtrID[*networkAddr] = &containerIDEntry{ + cid: intern.GetByString(ctr.Id), + inUse: true, + } } } + + resolverTelemetry.cacheSize.Set(float64(len(l.addrToCtrID)), "addr_cache") + + for pid, cid := range pidToCid { + if len(l.ctrForPid) >= l.maxCtrForPidSize { + log.Warnf("pid to container ID cache has reached max size of %d entries", l.maxCtrForPidSize) + resolverTelemetry.cacheDrops.Inc("pid_cache") + break + } + + l.ctrForPid[pid] = &containerIDEntry{ + cid: intern.GetByString(cid), + inUse: true, + } + } + + resolverTelemetry.cacheSize.Set(float64(len(l.ctrForPid)), "pid_cache") } // Resolve binds container IDs to the Raddr of connections @@ -122,8 +166,25 @@ func (l *LocalResolver) LoadAddrs(containers []*model.Container, pidToCid map[in // If lookup by table fails above, we fall back to using // the l.addrToCtrID map func (l *LocalResolver) Resolve(c *model.Connections) { - l.mux.RLock() - defer l.mux.RUnlock() + l.mux.Lock() + defer l.mux.Unlock() + + defer func() { + // remove all not in use entries + for pid, ctr := range l.ctrForPid { + if !ctr.inUse { + delete(l.ctrForPid, pid) + } + } + for addr, ctr := range l.addrToCtrID { + if !ctr.inUse { + delete(l.addrToCtrID, addr) + } + } + + resolverTelemetry.cacheSize.Set(float64(len(l.ctrForPid)), "pid_cache") + resolverTelemetry.cacheSize.Set(float64(len(l.addrToCtrID)), "addr_cache") + }() type connKey struct { laddr, raddr netip.AddrPort @@ -144,7 +205,9 @@ func (l *LocalResolver) Resolve(c *model.Connections) { // first cid := conn.Laddr.ContainerId if cid == "" { - cid = l.ctrForPid[int(conn.Pid)] + if v, ok := l.ctrForPid[int(conn.Pid)]; ok { + cid = v.cid.Get().(string) + } } if cid == "" { @@ -220,11 +283,13 @@ func (l 
*LocalResolver) Resolve(c *model.Connections) { } } - if conn.Raddr.ContainerId = l.addrToCtrID[model.ContainerAddr{ + if v, ok := l.addrToCtrID[model.ContainerAddr{ Ip: raddr.Addr().String(), Port: int32(raddr.Port()), Protocol: conn.Type, - }]; conn.Raddr.ContainerId == "" { + }]; ok { + conn.Raddr.ContainerId = v.cid.Get().(string) + } else { log.Tracef("could not resolve raddr %v", conn.Raddr) } } diff --git a/pkg/process/net/resolver/resolver_test.go b/pkg/process/net/resolver/resolver_test.go index 2f2066eaef574..5e3b3ed33d172 100644 --- a/pkg/process/net/resolver/resolver_test.go +++ b/pkg/process/net/resolver/resolver_test.go @@ -9,18 +9,18 @@ import ( "testing" "time" + model "github.com/DataDog/agent-payload/v5/process" "github.com/benbjohnson/clock" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" - model "github.com/DataDog/agent-payload/v5/process" proccontainersmocks "github.com/DataDog/datadog-agent/pkg/process/util/containers/mocks" ) func TestLocalResolver(t *testing.T) { assert := assert.New(t) - resolver := &LocalResolver{} + resolver := NewLocalResolver(nil, nil, 10, 0) containers := []*model.Container{ { Id: "container-1", @@ -372,7 +372,7 @@ func TestResolveLoopbackConnections(t *testing.T) { }, } - resolver := &LocalResolver{} + resolver := NewLocalResolver(nil, nil, 20, 20) resolver.LoadAddrs(nil, map[int]string{ 1: "foo1", 2: "foo2", @@ -406,7 +406,7 @@ func TestLocalResolverPeriodicUpdates(t *testing.T) { mockCtrl := gomock.NewController(t) mockedClock := clock.NewMock() mockContainerProvider := proccontainersmocks.NewMockContainerProvider(mockCtrl) - resolver := NewLocalResolver(mockContainerProvider, mockedClock) + resolver := NewLocalResolver(mockContainerProvider, mockedClock, 10, 10) containers := []*model.Container{ { Id: "container-1", @@ -490,3 +490,270 @@ func TestLocalResolverPeriodicUpdates(t *testing.T) { assert.Equal("container-2", connections.Conns[2].Raddr.ContainerId) assert.Equal("container-3", connections.Conns[3].Raddr.ContainerId) } + +func TestLocalResolverCachePersistence(t *testing.T) { + assert := assert.New(t) + mockCtrl := gomock.NewController(t) + mockedClock := clock.NewMock() + mockContainerProvider := proccontainersmocks.NewMockContainerProvider(mockCtrl) + resolver := NewLocalResolver(mockContainerProvider, mockedClock, 10, 10) + containers := []*model.Container{ + { + Id: "container-1", + Addresses: []*model.ContainerAddr{ + { + Ip: "10.0.2.15", + Port: 32769, + Protocol: model.ConnectionType_tcp, + }, + { + Ip: "172.17.0.4", + Port: 6379, + Protocol: model.ConnectionType_tcp, + }, + }, + }, + { + Id: "container-2", + Addresses: []*model.ContainerAddr{ + { + Ip: "172.17.0.2", + Port: 80, + Protocol: model.ConnectionType_tcp, + }, + }, + }, + { + Id: "container-3", + Addresses: []*model.ContainerAddr{ + { + Ip: "10.0.2.15", + Port: 32769, + Protocol: model.ConnectionType_udp, + }, + }, + }, + } + mockContainerProvider.EXPECT().GetContainers(2*time.Second, nil).Return(containers, nil, nil, nil).MaxTimes(1) + resolver.Run() + mockedClock.Add(11 * time.Second) + + func() { + resolver.mux.Lock() + defer resolver.mux.Unlock() + + assert.Len(resolver.addrToCtrID, 4) + for _, cid := range resolver.addrToCtrID { + assert.True(cid.inUse) + } + }() + + connections := &model.Connections{ + Conns: []*model.Connection{ + // connection 0 + { + Type: model.ConnectionType_tcp, + Raddr: &model.Addr{ + Ip: "10.0.2.15", + Port: 32769, + }, + }, + // connection 1 + { + Type: model.ConnectionType_tcp, + Raddr: &model.Addr{ + Ip: 
"172.17.0.4", + Port: 6379, + }, + }, + // connection 2 + { + Type: model.ConnectionType_tcp, + Raddr: &model.Addr{ + Ip: "172.17.0.2", + Port: 80, + }, + }, + // connection 3 + { + Type: model.ConnectionType_udp, + Raddr: &model.Addr{ + Ip: "10.0.2.15", + Port: 32769, + }, + }, + }, + } + resolver.Resolve(connections) + assert.Equal("container-1", connections.Conns[0].Raddr.ContainerId) + assert.Equal("container-1", connections.Conns[1].Raddr.ContainerId) + assert.Equal("container-2", connections.Conns[2].Raddr.ContainerId) + assert.Equal("container-3", connections.Conns[3].Raddr.ContainerId) + + func() { + // have to lock here otherwise + // we get data race errors + resolver.mux.Lock() + defer resolver.mux.Unlock() + + assert.Len(resolver.addrToCtrID, 4) + for _, cid := range resolver.addrToCtrID { + assert.True(cid.inUse) + } + }() + + // now do another container update but with the entries + // for container-1 missing + containers = []*model.Container{ + { + Id: "container-2", + Addresses: []*model.ContainerAddr{ + { + Ip: "172.17.0.2", + Port: 80, + Protocol: model.ConnectionType_tcp, + }, + }, + }, + { + Id: "container-3", + Addresses: []*model.ContainerAddr{ + { + Ip: "10.0.2.15", + Port: 32769, + Protocol: model.ConnectionType_udp, + }, + }, + }, + } + + mockContainerProvider.EXPECT().GetContainers(2*time.Second, nil).Return(containers, nil, nil, nil) + mockedClock.Add(10 * time.Second) + + // still should have 4 entries in the addr cache, + // missing entries should be just marked as not + // in use + func() { + resolver.mux.Lock() + defer resolver.mux.Unlock() + + assert.Len(resolver.addrToCtrID, 4) + }() + + missingAddrs := []model.ContainerAddr{ + { + Ip: "10.0.2.15", + Port: 32769, + Protocol: model.ConnectionType_tcp, + }, + { + Ip: "172.17.0.4", + Port: 6379, + Protocol: model.ConnectionType_tcp, + }, + } + + // verify the missing address entries were marked + // as not in use + func() { + resolver.mux.Lock() + defer resolver.mux.Unlock() + + addrLoop: + for addr, cid := range resolver.addrToCtrID { + for _, missing := range missingAddrs { + if missing == addr { + assert.False(cid.inUse) + break addrLoop + } + } + + assert.True(cid.inUse) + } + }() + + // all connections should still resolve since we haven't removed + // the not in use entries yet + resolver.Resolve(connections) + assert.Equal("container-1", connections.Conns[0].Raddr.ContainerId) + assert.Equal("container-1", connections.Conns[1].Raddr.ContainerId) + assert.Equal("container-2", connections.Conns[2].Raddr.ContainerId) + assert.Equal("container-3", connections.Conns[3].Raddr.ContainerId) + + func() { + resolver.mux.Lock() + defer resolver.mux.Unlock() + + // the not in use entries should have been removed + for _, missing := range missingAddrs { + assert.NotContains(resolver.addrToCtrID, missing) + } + }() +} + +func TestLocalResolverCacheLimits(t *testing.T) { + assert := assert.New(t) + mockCtrl := gomock.NewController(t) + mockedClock := clock.NewMock() + mockContainerProvider := proccontainersmocks.NewMockContainerProvider(mockCtrl) + resolver := NewLocalResolver(mockContainerProvider, mockedClock, 1, 1) + containers := []*model.Container{ + { + Id: "container-1", + Addresses: []*model.ContainerAddr{ + { + Ip: "10.0.2.15", + Port: 32769, + Protocol: model.ConnectionType_tcp, + }, + { + Ip: "172.17.0.4", + Port: 6379, + Protocol: model.ConnectionType_tcp, + }, + }, + }, + { + Id: "container-2", + Addresses: []*model.ContainerAddr{ + { + Ip: "172.17.0.2", + Port: 80, + Protocol: model.ConnectionType_tcp, + 
}, + }, + }, + { + Id: "container-3", + Addresses: []*model.ContainerAddr{ + { + Ip: "10.0.2.15", + Port: 32769, + Protocol: model.ConnectionType_udp, + }, + }, + }, + } + pidToCid := map[int]string{ + 1: "container-1", + 2: "container-1", + } + + mockContainerProvider.EXPECT().GetContainers(2*time.Second, nil).Return(containers, nil, pidToCid, nil).MaxTimes(1) + resolver.Run() + mockedClock.Add(11 * time.Second) + + func() { + resolver.mux.Lock() + defer resolver.mux.Unlock() + + assert.Len(resolver.addrToCtrID, 1) + assert.Contains(resolver.addrToCtrID, model.ContainerAddr{ + Ip: "10.0.2.15", + Port: 32769, + Protocol: model.ConnectionType_tcp, + }) + + assert.Len(resolver.ctrForPid, 1) + }() +} From 74fae3f4afeb77720e6584cccbbe853b5c9d677d Mon Sep 17 00:00:00 2001 From: Arthur Bellal Date: Thu, 11 Apr 2024 23:27:09 +0200 Subject: [PATCH 02/99] (fleet) fix macos tests (#24621) https://github.com/DataDog/datadog-agent/pull/24494 broke macos tests. This PR FiXeS it by shortening the tests name as it was leading to paths over golang's limit of 104 chars on macos: ``` could not create telemetry: listen unix /var/folders/24/8k48jl6d249_n_qfxwsl6xvm0000gn/T/TestUpdaterBootstrapDefault399540277/004/telemetry.sock: bind: invalid argument ``` --- pkg/updater/install_test.go | 2 +- pkg/updater/updater_test.go | 16 ++++++++-------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/pkg/updater/install_test.go b/pkg/updater/install_test.go index fc51044119a36..49c8080608562 100644 --- a/pkg/updater/install_test.go +++ b/pkg/updater/install_test.go @@ -125,7 +125,7 @@ func TestInstallExperiment(t *testing.T) { assertEqualFS(t, s.ConfigFS(fixtureSimpleV2), installer.ConfigFS(fixtureSimpleV2)) } -func TestPromoteExperiment(t *testing.T) { +func TestInstallPromoteExperiment(t *testing.T) { s := newTestFixturesServer(t) defer s.Close() installer := newTestInstaller(t) diff --git a/pkg/updater/updater_test.go b/pkg/updater/updater_test.go index 517f3e7ad09a5..f39273828eb86 100644 --- a/pkg/updater/updater_test.go +++ b/pkg/updater/updater_test.go @@ -99,7 +99,7 @@ func newTestUpdaterWithPaths(t *testing.T, s *testFixturesServer, rcc *testRemot return u, rootPath, locksPath } -func TestUpdaterBootstrapDefault(t *testing.T) { +func TestBootstrapDefault(t *testing.T) { s := newTestFixturesServer(t) defer s.Close() rc := newTestRemoteConfigClient() @@ -116,7 +116,7 @@ func TestUpdaterBootstrapDefault(t *testing.T) { assertEqualFS(t, s.PackageFS(fixtureSimpleV1), r.StableFS()) } -func TestUpdaterBootstrapURL(t *testing.T) { +func TestBootstrapURL(t *testing.T) { s := newTestFixturesServer(t) defer s.Close() rc := newTestRemoteConfigClient() @@ -133,7 +133,7 @@ func TestUpdaterBootstrapURL(t *testing.T) { assertEqualFS(t, s.PackageFS(fixtureSimpleV1), r.StableFS()) } -func TestUpdaterPurge(t *testing.T) { +func TestPurge(t *testing.T) { s := newTestFixturesServer(t) defer s.Close() rc := newTestRemoteConfigClient() @@ -177,7 +177,7 @@ func assertDirExistAndEmpty(t *testing.T, path string) { assert.Len(t, entry, 0) } -func TestUpdaterBootstrapWithRC(t *testing.T) { +func TestBootstrapWithRC(t *testing.T) { s := newTestFixturesServer(t) defer s.Close() rc := newTestRemoteConfigClient() @@ -199,7 +199,7 @@ func TestUpdaterBootstrapWithRC(t *testing.T) { assertEqualFS(t, s.PackageFS(fixtureSimpleV2), r.StableFS()) } -func TestUpdaterBootstrapCatalogUpdate(t *testing.T) { +func TestBootstrapCatalogUpdate(t *testing.T) { s := newTestFixturesServer(t) defer s.Close() rc := newTestRemoteConfigClient() @@ 
-213,7 +213,7 @@ func TestUpdaterBootstrapCatalogUpdate(t *testing.T) { assert.NoError(t, err) } -func TestUpdaterStartExperiment(t *testing.T) { +func TestStartExperiment(t *testing.T) { s := newTestFixturesServer(t) defer s.Close() rc := newTestRemoteConfigClient() @@ -241,7 +241,7 @@ func TestUpdaterStartExperiment(t *testing.T) { assertEqualFS(t, s.PackageFS(fixtureSimpleV2), r.ExperimentFS()) } -func TestUpdaterPromoteExperiment(t *testing.T) { +func TestPromoteExperiment(t *testing.T) { s := newTestFixturesServer(t) defer s.Close() rc := newTestRemoteConfigClient() @@ -278,7 +278,7 @@ func TestUpdaterPromoteExperiment(t *testing.T) { assertEqualFS(t, s.PackageFS(fixtureSimpleV2), r.StableFS()) } -func TestUpdaterStopExperiment(t *testing.T) { +func TestStopExperiment(t *testing.T) { s := newTestFixturesServer(t) defer s.Close() rc := newTestRemoteConfigClient() From 964d2b8b5c1fbe5ca8dc1a6bc5bdad2ed0135eb2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 12 Apr 2024 02:07:58 +0000 Subject: [PATCH 03/99] Bump ts-graphviz/setup-graphviz from 1 to 2 (#22589) Co-authored-by: duncanpharvey <35278470+duncanpharvey@users.noreply.github.com> --- .github/workflows/serverless-binary-size.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/serverless-binary-size.yml b/.github/workflows/serverless-binary-size.yml index b7b6aca91cf80..8432b780d3106 100644 --- a/.github/workflows/serverless-binary-size.yml +++ b/.github/workflows/serverless-binary-size.yml @@ -77,7 +77,7 @@ jobs: ### Steps below only run if size diff > SIZE_ALLOWANCE ### - name: Install graphviz - uses: ts-graphviz/setup-graphviz@v1 + uses: ts-graphviz/setup-graphviz@v2 if: steps.compare.outputs.diff > env.SIZE_ALLOWANCE - name: Install digraph From 9d5da021a6a998e00f3241eb6230b23c80a4646a Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 12 Apr 2024 07:42:46 +0000 Subject: [PATCH 04/99] CWS: sync BTFhub constants (#24625) Co-authored-by: --- .../probe/constantfetch/btfhub/constants.json | 23 ++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/pkg/security/probe/constantfetch/btfhub/constants.json b/pkg/security/probe/constantfetch/btfhub/constants.json index 913ed3b7b9de6..4da99a3c0375a 100644 --- a/pkg/security/probe/constantfetch/btfhub/constants.json +++ b/pkg/security/probe/constantfetch/btfhub/constants.json @@ -1,5 +1,5 @@ { - "commit": "c16f25544261a7a848c39c47693e5cb27d9283c1", + "commit": "12621ce1b438e29b9172cf8d9d4440efb487109c", "constants": [ { "binprm_file_offset": 168, @@ -14687,6 +14687,13 @@ "uname_release": "4.14.35-2047.534.3.el7uek.aarch64", "cindex": 89 }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.535.2.1.el7uek.aarch64", + "cindex": 89 + }, { "distrib": "ol", "version": "7", @@ -19867,6 +19874,13 @@ "uname_release": "4.14.35-2047.535.1.el7uek.x86_64", "cindex": 97 }, + { + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.535.2.1.el7uek.x86_64", + "cindex": 97 + }, { "distrib": "ol", "version": "7", @@ -19881,6 +19895,13 @@ "uname_release": "4.14.35-2047.536.1.el7uek.x86_64", "cindex": 97 }, + { + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.536.3.el7uek.x86_64", + "cindex": 97 + }, { "distrib": "ol", "version": "7", From 51ce82155fa23581d3e7d006f5ad83338b3604e5 Mon Sep 17 00:00:00 
2001 From: =?UTF-8?q?R=C3=A9my=20Mathieu?= Date: Fri, 12 Apr 2024 09:53:37 +0200 Subject: [PATCH 05/99] logs: proper stop sequence for the processor to delete the SDS instance. (#24615) * logs: proper stop sequence for the processor to delete the SDS instance. * Add an unit test reproducing the race which has been fixed. --- pkg/logs/processor/processor.go | 6 ++- pkg/logs/sds/scanner.go | 10 +++- pkg/logs/sds/scanner_test.go | 94 +++++++++++++++++++++++++++++++++ 3 files changed, 106 insertions(+), 4 deletions(-) diff --git a/pkg/logs/processor/processor.go b/pkg/logs/processor/processor.go index d51b2a4336292..f58f4deac77cf 100644 --- a/pkg/logs/processor/processor.go +++ b/pkg/logs/processor/processor.go @@ -65,12 +65,14 @@ func (p *Processor) Start() { // Stop stops the Processor, // this call blocks until inputChan is flushed func (p *Processor) Stop() { + close(p.inputChan) + <-p.done + // once the processor mainloop is not running, it's safe + // to delete the sds scanner instance. if p.sds != nil { p.sds.Delete() p.sds = nil } - close(p.inputChan) - <-p.done } // Flush processes synchronously the messages that this processor has to process. diff --git a/pkg/logs/sds/scanner.go b/pkg/logs/sds/scanner.go index ea665698a6f83..64d874ea9a894 100644 --- a/pkg/logs/sds/scanner.go +++ b/pkg/logs/sds/scanner.go @@ -29,8 +29,11 @@ const SDSEnabled = true // has to ensure of the thread safety. type Scanner struct { *sds.Scanner + // lock used to separate between the lifecycle of the scanner (Reconfigure, Delete) + // and the use of the scanner (Scan). sync.Mutex - + // standard rules as received through the remote configuration, indexed + // by the standard rule ID for O(1) access when receiving user configurations. standardRules map[string]StandardRuleConfig // rawConfig is the raw config previously received through RC. rawConfig []byte @@ -287,8 +290,11 @@ func (s *Scanner) GetRuleByIdx(idx uint32) (RuleConfig, error) { } // Delete deallocates the internal SDS scanner. -// This method is NOT thread safe, caller has to ensure the thread safety. +// This method is thread safe, a reconfiguration or a scan can't happen at the same time. func (s *Scanner) Delete() { + s.Lock() + defer s.Unlock() + if s.Scanner != nil { s.Scanner.Delete() s.rawConfig = nil diff --git a/pkg/logs/sds/scanner_test.go b/pkg/logs/sds/scanner_test.go index 87bc2c16c31b7..0b36e57e0286c 100644 --- a/pkg/logs/sds/scanner_test.go +++ b/pkg/logs/sds/scanner_test.go @@ -11,6 +11,7 @@ package sds import ( "bytes" "testing" + "time" "github.com/DataDog/datadog-agent/pkg/logs/message" "github.com/stretchr/testify/require" @@ -359,3 +360,96 @@ func TestScan(t *testing.T) { require.Equal(string(processed), v.event, "incorrect result") } } + +// TestCloseCycleScan validates that the close cycle works well (not blocking, not racing). +// by trying hard to reproduce a possible race on close. 
+func TestCloseCycleScan(t *testing.T) { + require := require.New(t) + + standardRules := []byte(` + {"priority":1,"rules":[ + { + "id":"zero-0", + "description":"zero desc", + "name":"zero", + "pattern":"zero" + } + ]} + `) + agentConfig := []byte(` + {"is_enabled":true,"rules":[ + { + "id":"random-00000", + "definition":{"standard_rule_id":"zero-0"}, + "name":"zero", + "match_action":{"type":"Redact","placeholder":"[redacted]"}, + "is_enabled":true + },{ + "id":"random-11111", + "definition":{"standard_rule_id":"zero-0"}, + "name":"one", + "match_action":{"type":"Redact","placeholder":"[REDACTED]"}, + "is_enabled":true + } + ]} + `) + + // scanner creation + // ----- + + for i := 0; i < 10; i++ { + s := CreateScanner() + require.NotNil(s, "the returned scanner should not be nil") + + _ = s.Reconfigure(ReconfigureOrder{ + Type: StandardRules, + Config: standardRules, + }) + _ = s.Reconfigure(ReconfigureOrder{ + Type: AgentConfig, + Config: agentConfig, + }) + + require.True(s.IsReady(), "at this stage, the scanner should be considered ready") + type result struct { + matched bool + event string + matchCount int + } + + tests := map[string]result{ + "one two three go!": { + matched: true, + event: "[REDACTED] two three go!", + matchCount: 1, + }, + "after zero comes one, after one comes two, and the rest is history": { + matched: true, + event: "after [redacted] comes [REDACTED], after [REDACTED] comes two, and the rest is history", + matchCount: 3, + }, + "and so we go": { + matched: false, + event: "", + matchCount: 0, + }, + } + + go func() { + for { + for k, _ := range tests { + msg := message.Message{} + if s.IsReady() { + _, _, err := s.Scan([]byte(k), &msg) + require.NoError(err) + } else { + return + } + } + } + }() + + time.Sleep(100 * time.Millisecond) + s.Delete() + } +} From 9dfef01f50fe15d21f8dff881e8eb546d9cc9c9e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Guillermo=20Juli=C3=A1n?= Date: Fri, 12 Apr 2024 10:25:07 +0200 Subject: [PATCH 06/99] [EBPF] Minor fixes for KMT system-probe build (#24598) * Ensure binaries on /root are accesible * Use correct SSH options on instances * Fix SSH names when multiple domains have the same tag * Fix paths in system-probe.build * Add kmt.tmux task * Use all vms by default in kmt.build * Document kmt.tmux --- tasks/kernel_matrix_testing/README.md | 6 ++ tasks/kernel_matrix_testing/compiler.py | 1 + tasks/kmt.py | 77 ++++++++++++++++++++++--- 3 files changed, 76 insertions(+), 8 deletions(-) diff --git a/tasks/kernel_matrix_testing/README.md b/tasks/kernel_matrix_testing/README.md index 27c65a125271e..9cb41aec1a763 100644 --- a/tasks/kernel_matrix_testing/README.md +++ b/tasks/kernel_matrix_testing/README.md @@ -143,6 +143,12 @@ Then connect to the VM as follows ssh -i /home/kernel-version-testing/ddvm_rsa -o StrictHostKeyChecking=no root@ ``` +#### Connecting to all VMs with tmux + +You can connect to all VMs at once using the `kmt.tmux` task. It will automatically create a new session for your stack (deleting it if it already exists), will open a new window for each instance, and a new panel for each VM in the window. + +A useful command for tmux in these cases is `:set synchronize-panes on`, which will send the same command to all panes at once. This is useful for running the same command in all VMs at once, specially running system-probe all at once. 
+ ### Destroy stack Tear down the stack diff --git a/tasks/kernel_matrix_testing/compiler.py b/tasks/kernel_matrix_testing/compiler.py index d9a348803e3ca..91ff9d1bfdb68 100644 --- a/tasks/kernel_matrix_testing/compiler.py +++ b/tasks/kernel_matrix_testing/compiler.py @@ -119,6 +119,7 @@ def start(self) -> None: f"chown {uid}:{gid} {CONTAINER_AGENT_PATH} && chown -R {uid}:{gid} {CONTAINER_AGENT_PATH}", user="root" ) + self.exec("chmod a+rx /root", user="root") # Some binaries will be in /root and need to be readable self.exec("apt install sudo", user="root") self.exec("usermod -aG sudo compiler && echo 'compiler ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers", user="root") self.exec("echo conda activate ddpy3 >> /home/compiler/.bashrc", user="compiler") diff --git a/tasks/kmt.py b/tasks/kmt.py index 642988faff1a7..d7614ccc73a36 100644 --- a/tasks/kmt.py +++ b/tasks/kmt.py @@ -430,6 +430,10 @@ def tests_archive(self): def tools(self): return self.root / self.arch / "tools" + @property + def shared_archive(self): + return self.arch_dir / "shared.tar" + def build_tests_package(ctx: Context, source_dir: str, stack: str, arch: Arch, ci: bool, verbose=True): paths = KMTPaths(stack, arch) @@ -690,7 +694,7 @@ def test( @task( help={ - "vms": "Comma seperated list of vms to target when running tests", + "vms": "Comma seperated list of vms to target when running tests. If None, use all VMs", "stack": "Stack in which the VMs exist. If not provided stack is autogenerated based on branch name", "ssh-key": "SSH key to use for connecting to a remote EC2 instance hosting the target VM. Can be either a name of a file in ~/.ssh, a key name (the comment in the public key) or a full path", "full-rebuild": "Do a full rebuild of all test dependencies to share with VMs, before running tests. 
Useful when changes are not being picked up correctly", @@ -700,7 +704,7 @@ def test( ) def build( ctx: Context, - vms: str, + vms: Optional[str] = None, stack: Optional[str] = None, ssh_key: Optional[str] = None, full_rebuild=False, @@ -715,6 +719,10 @@ def build( if arch is None: arch = "local" + if vms is None: + vms = ",".join(stacks.get_all_vms_in_stack(stack)) + info(f"[+] Running tests on all VMs in stack {stack}: vms={vms}") + arch = full_arch(arch) paths = KMTPaths(stack, arch) paths.arch_dir.mkdir(parents=True, exist_ok=True) @@ -742,21 +750,21 @@ def build( d.run_cmd(ctx, f"/root/fetch_dependencies.sh {arch_mapping[platform.machine()]}") info(f"[+] Dependencies shared with target VM {d}") - shared_archive = os.path.join(CONTAINER_AGENT_PATH, os.path.relpath(paths.arch_dir / "shared.tar", paths.repo_root)) + shared_archive_rel = os.path.join(CONTAINER_AGENT_PATH, os.path.relpath(paths.shared_archive, paths.repo_root)) cc.exec( f"cd {CONTAINER_AGENT_PATH} && git config --global --add safe.directory {CONTAINER_AGENT_PATH} && inv -e system-probe.build --no-bundle", ) - cc.exec(f"tar cf {shared_archive} {EMBEDDED_SHARE_DIR}") + cc.exec(f"tar cf {shared_archive_rel} {EMBEDDED_SHARE_DIR}") if not os.path.exists(system_probe_yaml): raise Exit(f"file {system_probe_yaml} not found") for d in domains: d.copy(ctx, "./bin/system-probe", "/root") - d.copy(ctx, shared_archive, "/") + d.copy(ctx, paths.shared_archive, "/") d.run_cmd(ctx, "tar xf /shared.tar -C /", verbose=verbose) - d.run_cmd(ctx, "mkdir /opt/datadog-agent/run") - d.run_cmd(ctx, "mkdir /etc/datadog-agent") + d.run_cmd(ctx, "mkdir -p /opt/datadog-agent/run") + d.run_cmd(ctx, "mkdir -p /etc/datadog-agent") d.copy(ctx, DEFAULT_CONFIG_PATH, "/etc/datadog-agent/system-probe.yaml") info(f"[+] system-probe built for {d.name} @ /root") @@ -831,10 +839,21 @@ def ssh_config( if instance.ssh_key_path is not None: print(f" IdentityFile {instance.ssh_key_path}") print(" IdentitiesOnly yes") + for key, value in SSH_OPTIONS.items(): + print(f" {key} {value}") print("") + multiple_instances_with_same_tag = len({i.tag for i in instance.microvms}) != len(instance.microvms) + for domain in instance.microvms: - print(f"Host kmt-{stack_name}-{instance.arch}-{domain.tag}") + domain_name = domain.tag + if multiple_instances_with_same_tag: + id_parts = domain.name.split('-') + mem = id_parts[-1] + cpu = id_parts[-2] + domain_name += f"-mem{mem}-cpu{cpu}" + + print(f"Host kmt-{stack_name}-{instance.arch}-{domain_name}") print(f" HostName {domain.ip}") if instance.arch != "local": print(f" ProxyJump kmt-{stack_name}-{instance.arch}") @@ -1123,3 +1142,45 @@ def groupby_arch_comp(job: KMTTestRunJob) -> Tuple[str, str]: headers=["Distro", "Login prompt found", "setup-ddvm ok", "Assigned IP", "Downloaded boot log"], ) ) + + +@task() +def tmux(ctx: Context, stack: Optional[str] = None): + """Create a tmux session with panes for each VM in the stack. + + Note that this task requires the tmux command to be available on the system, and the SSH + config to have been generated with the kmt.ssh-config task. 
+ """ + stack = check_and_get_stack(stack) + stack_name = stack.replace('-ddvm', '') + + ctx.run(f"tmux kill-session -t kmt-{stack_name} || true") + ctx.run(f"tmux new-session -d -s kmt-{stack_name}") + + for i, (_, instance) in enumerate(build_infrastructure(stack, try_get_ssh_key(ctx, None)).items()): + window_name = instance.arch + if i == 0: + ctx.run(f"tmux rename-window -t kmt-{stack_name} {window_name}") + else: + ctx.run(f"tmux new-window -t kmt-{stack_name} -n {window_name}") + + multiple_instances_with_same_tag = len({i.tag for i in instance.microvms}) != len(instance.microvms) + + needs_split = False + for domain in instance.microvms: + domain_name = domain.tag + if multiple_instances_with_same_tag: + id_parts = domain.name.split('-') + mem = id_parts[-1] + cpu = id_parts[-2] + domain_name += f"-mem{mem}-cpu{cpu}" + ssh_name = f"kmt-{stack_name}-{instance.arch}-{domain_name}" + + if needs_split: + ctx.run(f"tmux split-window -h -t kmt-{stack_name}:{i}") + needs_split = True + + ctx.run(f"tmux send-keys -t kmt-{stack_name}:{i} 'ssh {ssh_name}' Enter") + ctx.run(f"tmux select-layout -t kmt-{stack_name}:{i} tiled") + + info(f"[+] Tmux session kmt-{stack_name} created. Attach with 'tmux attach -t kmt-{stack_name}'") From 295c091bb91a8e74ec1cdd3f71fce53db58385ca Mon Sep 17 00:00:00 2001 From: tbavelier <97530782+tbavelier@users.noreply.github.com> Date: Fri, 12 Apr 2024 11:32:49 +0200 Subject: [PATCH 07/99] [Podman] Supports SQLite containers database back-end to support newer versions (4.8+) (#24373) * go import + licenses + release note * sqlite client + modifies wlm podman init * new podman detection and config * Doc suggestion on release note formatting Co-authored-by: Alicia Scott * Use pure-go SQLite driver instead of CGo implementatioon * Increase max_dsd binary size to 42 MB --------- Co-authored-by: Alicia Scott --- .../collectors/internal/podman/podman.go | 48 +++++++- pkg/config/config_template.yaml | 5 +- pkg/config/env/environment_containers.go | 11 +- pkg/config/setup/config.go | 2 +- pkg/util/podman/sqlite_db_client.go | 114 ++++++++++++++++++ ...lite-backend-support-8437c6d5254b39ef.yaml | 6 + tasks/dogstatsd.py | 2 +- 7 files changed, 178 insertions(+), 10 deletions(-) create mode 100644 pkg/util/podman/sqlite_db_client.go create mode 100644 releasenotes/notes/podman-sqlite-backend-support-8437c6d5254b39ef.yaml diff --git a/comp/core/workloadmeta/collectors/internal/podman/podman.go b/comp/core/workloadmeta/collectors/internal/podman/podman.go index ba7f13f7dcd8a..a5b3b23a68e6e 100644 --- a/comp/core/workloadmeta/collectors/internal/podman/podman.go +++ b/comp/core/workloadmeta/collectors/internal/podman/podman.go @@ -11,6 +11,7 @@ package podman import ( "context" "errors" + "os" "sort" "strings" @@ -25,8 +26,10 @@ import ( ) const ( - collectorID = "podman" - componentName = "workloadmeta-podman" + collectorID = "podman" + componentName = "workloadmeta-podman" + defaultBoltDBPath = "/var/lib/containers/storage/libpod/bolt_state.db" + defaultSqlitePath = "/var/lib/containers/storage/db.sql" ) type podmanClient interface { @@ -63,7 +66,38 @@ func (c *collector) Start(_ context.Context, store workloadmeta.Component) error return dderrors.NewDisabled(componentName, "Podman not detected") } - c.client = podman.NewDBClient(config.Datadog.GetString("podman_db_path")) + var dbPath string + dbPath = config.Datadog.GetString("podman_db_path") + + // We verify the user-provided path exists to prevent the collector entering a failing loop. 
+ if dbPath != "" && !dbIsAccessible(dbPath) { + return dderrors.NewDisabled(componentName, "podman_db_path is misconfigured/not accessible") + } + + // If dbPath is empty (default value of `podman_db_path`), attempts to use the default rootfull database (BoltDB first, then SQLite) as podman feature was detected (existence of /var/lib/containers/storage) + if dbPath == "" { + if dbIsAccessible(defaultBoltDBPath) { + log.Infof("Podman feature detected and podman_db_path not configured, defaulting to: %s", defaultBoltDBPath) + dbPath = defaultBoltDBPath + } else if dbIsAccessible(defaultSqlitePath) { + log.Infof("Podman feature detected and podman_db_path not configured, defaulting to: %s", defaultSqlitePath) + dbPath = defaultSqlitePath + } else { + // `/var/lib/containers/storage` exists but the Agent cannot list out its content. + return dderrors.NewDisabled(componentName, "Podman feature detected but the default location for the containers DB is not accessible") + } + } + + // As the containers database file is hard-coded in Podman (non-user customizable), the client to use is determined thanks to the file extension. + if strings.HasSuffix(dbPath, ".sql") { + log.Debugf("Using SQLite client for Podman DB as provided path ends with .sql") + c.client = podman.NewSQLDBClient(dbPath) + } else if strings.HasSuffix(dbPath, ".db") { + log.Debugf("Using BoltDB client for Podman DB as provided path ends with .db") + c.client = podman.NewDBClient(dbPath) + } else { + return dderrors.NewDisabled(componentName, "Podman detected but podman_db_path does not end in a known-format (.db or .sql)") + } c.store = store return nil @@ -270,3 +304,11 @@ func status(state podman.ContainerStatus) workloadmeta.ContainerStatus { return workloadmeta.ContainerStatusUnknown } + +// dbIsAccessible verifies whether or not the provided file is accessible by the Agent +func dbIsAccessible(dbPath string) bool { + if _, err := os.Stat(dbPath); err == nil { + return true + } + return false +} diff --git a/pkg/config/config_template.yaml b/pkg/config/config_template.yaml index 7a6494a28bb8b..fb2276819bbab 100644 --- a/pkg/config/config_template.yaml +++ b/pkg/config/config_template.yaml @@ -2857,10 +2857,11 @@ api_key: # # listen_address: /var/vcap/data/garden/garden.sock -## @param podman_db_path - string - optional - default: /var/lib/containers/storage/libpod/bolt_state.db +## @param podman_db_path - string - optional - default: "" +## @env DD_PODMAN_DB_PATH - string - optional - default: "" ## Settings for Podman DB that Datadog Agent collects container metrics. 
# -# podman_db_path: /var/lib/containers/storage/libpod/bolt_state.db +# podman_db_path: "" {{ end -}} {{- if .ClusterAgent }} diff --git a/pkg/config/env/environment_containers.go b/pkg/config/env/environment_containers.go index f5f186ff152dd..7e1c267123e12 100644 --- a/pkg/config/env/environment_containers.go +++ b/pkg/config/env/environment_containers.go @@ -26,7 +26,7 @@ const ( defaultWindowsContainerdSocketPath = "//./pipe/containerd-containerd" defaultLinuxCrioSocket = "/var/run/crio/crio.sock" defaultHostMountPrefix = "/host" - defaultPodmanContainersStoragePath = "/var/lib/containers" + defaultPodmanContainersStoragePath = "/var/lib/containers/storage" unixSocketPrefix = "unix://" winNamedPipePrefix = "npipe://" @@ -66,7 +66,7 @@ func detectContainerFeatures(features FeatureMap, cfg model.Reader) { detectContainerd(features, cfg) detectAWSEnvironments(features, cfg) detectCloudFoundry(features, cfg) - detectPodman(features) + detectPodman(features, cfg) } func detectKubernetes(features FeatureMap, cfg model.Reader) { @@ -195,7 +195,12 @@ func detectCloudFoundry(features FeatureMap, cfg model.Reader) { } } -func detectPodman(features FeatureMap) { +func detectPodman(features FeatureMap, cfg model.Reader) { + podmanDbPath := cfg.GetString("podman_db_path") + if podmanDbPath != "" { + features[Podman] = struct{}{} + return + } for _, defaultPath := range getDefaultPodmanPaths() { if _, err := os.Stat(defaultPath); err == nil { features[Podman] = struct{}{} diff --git a/pkg/config/setup/config.go b/pkg/config/setup/config.go index fe40c02e6becb..85419ff93ce6f 100644 --- a/pkg/config/setup/config.go +++ b/pkg/config/setup/config.go @@ -633,7 +633,7 @@ func InitConfig(config pkgconfigmodel.Config) { config.BindEnvAndSetDefault("container_labels_as_tags", map[string]string{}) // Podman - config.BindEnvAndSetDefault("podman_db_path", "/var/lib/containers/storage/libpod/bolt_state.db") + config.BindEnvAndSetDefault("podman_db_path", "") // Kubernetes config.BindEnvAndSetDefault("kubernetes_kubelet_host", "") diff --git a/pkg/util/podman/sqlite_db_client.go b/pkg/util/podman/sqlite_db_client.go new file mode 100644 index 0000000000000..6b95f29ab96e1 --- /dev/null +++ b/pkg/util/podman/sqlite_db_client.go @@ -0,0 +1,114 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build podman + +package podman + +import ( + "database/sql" + "encoding/json" + "fmt" + "path/filepath" + + // SQLite backend for database/sql + _ "modernc.org/sqlite" + + "github.com/DataDog/datadog-agent/pkg/util/log" +) + +// Same strategy as for BoltDB : we do not need the full podman go package. +// This reduces the number of dependencies and the size of the ultimately shipped binary. +// +// The functions in this file have been copied from +// https://github.com/containers/podman/blob/v5.0.0/libpod/sqlite_state.go +// The code has been adapted a bit to our needs. The only functions of that file +// that we need are AllContainers() and NewSqliteState(). +// +// This code could break in future versions of Podman. This has been tried with +// v4.9.2 and v5.0.0. + +// SQLDBClient is a client for the podman's state database in the SQLite format. +type SQLDBClient struct { + DBPath string +} + +const ( + // Deal with timezone automatically. 
+ sqliteOptionLocation = "_loc=auto" + // Read-only mode (https://www.sqlite.org/pragma.html#pragma_query_only) + sqliteOptionQueryOnly = "&_query_only=true" + // Make sure busy timeout is set to high value to keep retrying when the db is locked. + // Timeout is in ms, so set it to 100s to have enough time to retry the operations. + sqliteOptionBusyTimeout = "&_busy_timeout=100000" + + // Assembled sqlite options used when opening the database. + sqliteOptions = "?" + sqliteOptionLocation + sqliteOptionQueryOnly + sqliteOptionBusyTimeout +) + +// NewSQLDBClient returns a DB client that uses the DB stored in dbPath. +func NewSQLDBClient(dbPath string) *SQLDBClient { + return &SQLDBClient{ + DBPath: dbPath, + } +} + +// getDBCon opens a connection to the SQLite-backed state database. +// Note: original function comes from https://github.com/containers/podman/blob/e71ec6f1d94d2d97fb3afe08aae0d8adaf8bddf0/libpod/sqlite_state.go#L57-L96 +// It was adapted as we don't need to write any information to the DB. +func (client *SQLDBClient) getDBCon() (*sql.DB, error) { + conn, err := sql.Open("sqlite", filepath.Join(client.DBPath, sqliteOptions)) + if err != nil { + return nil, fmt.Errorf("opening sqlite database: %w", err) + } + return conn, nil +} + +// GetAllContainers retrieves all the containers in the database. +// We retrieve the state always. +func (client *SQLDBClient) GetAllContainers() ([]Container, error) { + var res []Container + + conn, err := client.getDBCon() + if err != nil { + return nil, err + } + defer func() { + if errClose := conn.Close(); errClose != nil { + log.Warnf("failed to close sqlite db: %q", err) + } + }() + + rows, err := conn.Query("SELECT ContainerConfig.JSON, ContainerState.JSON AS StateJSON FROM ContainerConfig INNER JOIN ContainerState ON ContainerConfig.ID = ContainerState.ID;") + if err != nil { + return nil, fmt.Errorf("retrieving all containers from database: %w", err) + } + defer rows.Close() + + for rows.Next() { + var configJSON, stateJSON string + if err := rows.Scan(&configJSON, &stateJSON); err != nil { + return nil, fmt.Errorf("scanning container from database: %w", err) + } + + ctr := new(Container) + ctr.Config = new(ContainerConfig) + ctr.State = new(ContainerState) + + if err := json.Unmarshal([]byte(configJSON), ctr.Config); err != nil { + return nil, fmt.Errorf("unmarshalling container config: %w", err) + } + if err := json.Unmarshal([]byte(stateJSON), ctr.State); err != nil { + return nil, fmt.Errorf("unmarshalling container %s state: %w", ctr.Config.ID, err) + } + + res = append(res, *ctr) + } + if err := rows.Err(); err != nil { + return nil, err + } + + return res, nil +} diff --git a/releasenotes/notes/podman-sqlite-backend-support-8437c6d5254b39ef.yaml b/releasenotes/notes/podman-sqlite-backend-support-8437c6d5254b39ef.yaml new file mode 100644 index 0000000000000..c2fe41be5d244 --- /dev/null +++ b/releasenotes/notes/podman-sqlite-backend-support-8437c6d5254b39ef.yaml @@ -0,0 +1,6 @@ +--- +enhancements: + - | + Supports Podman newer versions (4.8+) using SQLite instead of BoltDB for the containers database backend. + Setting ``podman_db_path`` to the path with the ``db.sql`` file (e.g. ``/var/lib/containers/storage/db.sql``) will make the Datadog Agent use the SQLite format. + **Note**: If ``podman_db_path`` is not set (default), the Datadog Agent attempts to use the default file ``libpod/bolt_state.db`` and ``db.sql`` from ``/var/lib/containers/storage``. 
diff --git a/tasks/dogstatsd.py b/tasks/dogstatsd.py index f0eeee90b926f..f050f70d03310 100644 --- a/tasks/dogstatsd.py +++ b/tasks/dogstatsd.py @@ -18,7 +18,7 @@ # constants DOGSTATSD_BIN_PATH = os.path.join(".", "bin", "dogstatsd") STATIC_BIN_PATH = os.path.join(".", "bin", "static") -MAX_BINARY_SIZE = 39 * 1024 +MAX_BINARY_SIZE = 42 * 1024 DOGSTATSD_TAG = "datadog/dogstatsd:master" From 69903016c7cc510c8f9cc056ef171720e0a6cd67 Mon Sep 17 00:00:00 2001 From: Kevin Fairise <132568982+KevinFairise2@users.noreply.github.com> Date: Fri, 12 Apr 2024 12:09:31 +0200 Subject: [PATCH 08/99] =?UTF-8?q?Revert=20"Revert=20"Run=20docker=20fake?= =?UTF-8?q?=20intake=20test=20with=20new=20version=20of=20the=20fakeintake?= =?UTF-8?q?=20if=20mod=E2=80=A6""=20(#24587)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Revert "Revert "Run docker fake intake test with new version of the fakeintak…" This reverts commit ffb080be77f4a88a207e80271251ce95289c054a. * Remove hyphen --- .gitlab-ci.yml | 47 ++++++++++++------- .gitlab/dev_container_deploy/fakeintake.yml | 4 +- .gitlab/e2e_pre_test/e2e_pre_test.yml | 6 ++- .../test-infra-definition/docker_test.go | 11 ++++- 4 files changed, 47 insertions(+), 21 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 5834cc894f9fd..c5bd424543daa 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -77,7 +77,6 @@ stages: - pkg_metrics - packaging - kitchen_deploy - - e2e_pre_test - kitchen_testing - container_build - container_scan @@ -92,6 +91,7 @@ stages: - choco_deploy - internal_image_deploy - install_script_testing + - e2e_pre_test - e2e - kitchen_cleanup - functional_test @@ -359,6 +359,15 @@ variables: .if_mergequeue: &if_mergequeue if: $CI_COMMIT_BRANCH =~ /^mq-working-branch-/ +.fakeintake_changes: &fakeintake_changes + changes: + paths: + - "test/fakeintake/**/*" + - .gitlab/binary_build/fakeintake.yml + - .gitlab/container_build/fakeintake.yml + - .gitlab/dev_container_deploy/fakeintake.yml + compare_to: main # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916 + # # Workflow rules # Rules used to define whether a pipeline should run, and with which variables @@ -764,6 +773,23 @@ workflow: # New E2E related rules +.on_e2e_or_fakeintake_changes_or_manual: + - <<: *if_main_branch + - !reference [.except_mergequeue] + - <<: *fakeintake_changes + variables: + FAKEINTAKE_IMAGE_OVERRIDE: "public.ecr.aws/datadog/fakeintake:v$CI_COMMIT_SHORT_SHA" + when: on_success + - changes: + paths: + - test/new-e2e/pkg/**/* + - test/new-e2e/test-infra-definition/* + - test/new-e2e/go.mod + compare_to: main # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916 + when: on_success + - when: manual + allow_failure: true + .on_e2e_main_release_or_rc: # This rule is used as a base for all new-e2e rules - <<: *if_disable_e2e_tests @@ -994,23 +1020,12 @@ workflow: - .go-version compare_to: main # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916 -.on_fakeintake_changes: &on_fakeintake_changes - changes: - - "test/fakeintake/**/*" - - .gitlab/binary_build/fakeintake.yml - - .gitlab/container_build/fakeintake.yml - - .gitlab/dev_container_deploy/fakeintake.yml - -.on_fakeintake_changes_on_main_or_manual: - - <<: *on_fakeintake_changes - if: $CI_COMMIT_BRANCH == "main" - - <<: *on_fakeintake_changes - when: manual - allow_failure: true +.on_fakeintake_changes: + - <<: *fakeintake_changes 
.on_fakeintake_changes_on_main: - - <<: *on_fakeintake_changes - if: $CI_COMMIT_BRANCH == "main" + - <<: *fakeintake_changes + <<: *if_main_branch .fast_on_dev_branch_only: - <<: *if_main_branch diff --git a/.gitlab/dev_container_deploy/fakeintake.yml b/.gitlab/dev_container_deploy/fakeintake.yml index f95086a71c12d..326def46873f9 100644 --- a/.gitlab/dev_container_deploy/fakeintake.yml +++ b/.gitlab/dev_container_deploy/fakeintake.yml @@ -4,7 +4,7 @@ publish_fakeintake: stage: dev_container_deploy rules: - !reference [.except_mergequeue] - - !reference [.on_fakeintake_changes_on_main_or_manual] + - !reference [.on_fakeintake_changes] needs: - job: docker_build_fakeintake optional: false @@ -27,4 +27,4 @@ publish_fakeintake_latest: IMG_SOURCES: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent/fakeintake:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA} IMG_DESTINATIONS: fakeintake:latest IMG_REGISTRIES: public - IMG_SIGNING: "false" \ No newline at end of file + IMG_SIGNING: "false" diff --git a/.gitlab/e2e_pre_test/e2e_pre_test.yml b/.gitlab/e2e_pre_test/e2e_pre_test.yml index c9b20a6f37708..0489a7ad7d2a0 100644 --- a/.gitlab/e2e_pre_test/e2e_pre_test.yml +++ b/.gitlab/e2e_pre_test/e2e_pre_test.yml @@ -2,10 +2,12 @@ # Contains jobs which runs e2e tests to validate the new-e2e framework. e2e_pre_test: rules: - - !reference [.on_e2e_changes_or_manual] + - !reference [.on_e2e_or_fakeintake_changes_or_manual] stage: e2e_pre_test extends: .new_e2e_template - needs: [] + needs: + - job: publish_fakeintake + optional: true script: - inv -e new-e2e-tests.run --targets ./test-infra-definition --junit-tar junit-${CI_JOB_ID}.tgz ${EXTRA_PARAMS} after_script: diff --git a/test/new-e2e/test-infra-definition/docker_test.go b/test/new-e2e/test-infra-definition/docker_test.go index 76a8e85279a9e..1ede9e48f9d66 100644 --- a/test/new-e2e/test-infra-definition/docker_test.go +++ b/test/new-e2e/test-infra-definition/docker_test.go @@ -7,6 +7,7 @@ package testinfradefinition import ( "fmt" + "os" "regexp" "testing" "time" @@ -15,6 +16,7 @@ import ( "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" awsdocker "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/docker" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client/agentclient" + "github.com/DataDog/test-infra-definitions/scenarios/aws/fakeintake" "github.com/stretchr/testify/assert" ) @@ -23,7 +25,14 @@ type dockerSuite struct { } func TestDocker(t *testing.T) { - e2e.Run(t, &dockerSuite{}, e2e.WithProvisioner(awsdocker.Provisioner())) + var fakeintakeOpts []fakeintake.Option + + // When we modify the fakeintake, this test will run with the new version of the fakeintake + if fakeintakeImage, ok := os.LookupEnv("FAKEINTAKE_IMAGE_OVERRIDE"); ok { + t.Logf("Running with fakeintake image %s", fakeintakeImage) + fakeintakeOpts = append(fakeintakeOpts, fakeintake.WithImageURL(fakeintakeImage)) + } + e2e.Run(t, &dockerSuite{}, e2e.WithProvisioner(awsdocker.Provisioner(awsdocker.WithFakeIntakeOptions(fakeintakeOpts...)))) } func (v *dockerSuite) TestExecuteCommand() { From 565e695a96d108b5cc87fe4995b6c3bcc7d69375 Mon Sep 17 00:00:00 2001 From: Vincent Whitchurch Date: Fri, 12 Apr 2024 12:09:36 +0200 Subject: [PATCH 09/99] ebpf: Check `log_debug()` format strings (#24611) * ebpf: Fix all log_debug() format string errors * ebpf: Check log_debug() format strings --- .../corechecks/ebpf/c/runtime/ebpf-kern.c | 8 +-- pkg/ebpf/c/bpf_helpers_custom.h | 3 ++ pkg/network/ebpf/c/co-re/tracer-fentry.c | 20 +++---- 
pkg/network/ebpf/c/conntrack/helpers.h | 2 +- pkg/network/ebpf/c/prebuilt/usm.c | 2 +- .../ebpf/c/protocols/classification/routing.h | 4 +- pkg/network/ebpf/c/protocols/events.h | 8 +-- pkg/network/ebpf/c/protocols/http/http.h | 4 +- pkg/network/ebpf/c/protocols/tls/https.h | 2 +- .../c/protocols/tls/java/erpc_dispatcher.h | 4 +- .../ebpf/c/protocols/tls/java/erpc_handlers.h | 2 +- pkg/network/ebpf/c/protocols/tls/native-tls.h | 42 +++++++-------- pkg/network/ebpf/c/runtime/usm.c | 52 +++++++++---------- pkg/network/ebpf/c/sock.h | 6 +-- pkg/network/ebpf/c/tracer.c | 52 +++++++++---------- pkg/network/ebpf/c/tracer/bind.h | 8 +-- pkg/network/ebpf/c/tracer/tcp_recv.h | 2 +- 17 files changed, 112 insertions(+), 109 deletions(-) diff --git a/pkg/collector/corechecks/ebpf/c/runtime/ebpf-kern.c b/pkg/collector/corechecks/ebpf/c/runtime/ebpf-kern.c index d965c9967758c..fc1d53cd96edc 100644 --- a/pkg/collector/corechecks/ebpf/c/runtime/ebpf-kern.c +++ b/pkg/collector/corechecks/ebpf/c/runtime/ebpf-kern.c @@ -285,7 +285,7 @@ int tp_mmap_enter(struct tracepoint_syscalls_sys_enter_mmap_t *args) { } margs.map_id = *map_idp; margs.offset = args->offset; - log_debug("tracepoint_sys_enter_mmap: fd=%d len=%d", key.fd, args->len); + log_debug("tracepoint_sys_enter_mmap: fd=%d len=%lu", key.fd, args->len); bpf_map_update_elem(&mmap_args, &pid_tgid, &margs, BPF_ANY); return 0; } @@ -327,7 +327,7 @@ int tp_mmap_exit(struct tracepoint_raw_syscalls_sys_exit_t *args) { } // store address of mmap region val->addr = args->ret; - log_debug("tracepoint_sys_exit_mmap: len=%d addr=%x", val->len, val->addr); + log_debug("tracepoint_sys_exit_mmap: len=%lu addr=%lx", val->len, val->addr); cleanup: bpf_map_delete_elem(&mmap_args, &pid_tgid); @@ -369,14 +369,14 @@ int BPF_KPROBE(k_map_update, int cmd, union bpf_attr *attr) { // pivot from perf_event_fd+pid -> mmap region mmap_region_t *infop = bpf_map_lookup_elem(&perf_event_mmap, &key); if (infop == NULL) { - log_debug("kprobe/map_update_elem: no mmap data cpu=%d fd=%d fdptr=%llx", pb_key.cpu, key.fd, fdp); + log_debug("kprobe/map_update_elem: no mmap data cpu=%d fd=%d fdptr=%p", pb_key.cpu, key.fd, fdp); return 0; } // make a stack copy of mmap data and store by map_id+cpu, which userspace can know mmap_region_t stackinfo = {}; bpf_probe_read_kernel(&stackinfo, sizeof(mmap_region_t), infop); - log_debug("map_update_elem: map_id=%d cpu=%d len=%d", pb_key.map_id, pb_key.cpu, stackinfo.len); + log_debug("map_update_elem: map_id=%d cpu=%d len=%lu", pb_key.map_id, pb_key.cpu, stackinfo.len); bpf_map_update_elem(&perf_buffers, &pb_key, &stackinfo, BPF_ANY); bpf_map_delete_elem(&perf_event_mmap, &key); return 0; diff --git a/pkg/ebpf/c/bpf_helpers_custom.h b/pkg/ebpf/c/bpf_helpers_custom.h index a899d2805787a..42c83c272ed11 100644 --- a/pkg/ebpf/c/bpf_helpers_custom.h +++ b/pkg/ebpf/c/bpf_helpers_custom.h @@ -3,6 +3,8 @@ #include "bpf_cross_compile.h" +extern void __format_check(const char *fmt, ...) __attribute__ ((format(printf, 1, 2))); + /* * Macro to output debug logs to /sys/kernel/debug/tracing/trace_pipe * @@ -18,6 +20,7 @@ #define log_debug(fmt, ...) 
\ ({ \ char ____fmt[] = fmt "\n"; \ + if (0) __format_check(fmt, ##__VA_ARGS__); \ bpf_trace_printk(____fmt, sizeof(____fmt), ##__VA_ARGS__); \ }) #else diff --git a/pkg/network/ebpf/c/co-re/tracer-fentry.c b/pkg/network/ebpf/c/co-re/tracer-fentry.c index 69c286683bec7..7299813e3a1dd 100644 --- a/pkg/network/ebpf/c/co-re/tracer-fentry.c +++ b/pkg/network/ebpf/c/co-re/tracer-fentry.c @@ -62,7 +62,7 @@ static __always_inline int read_conn_tuple_partial_from_flowi4(conn_tuple_t *t, } if (t->saddr_l == 0 || t->daddr_l == 0) { - log_debug("ERR(fl4): src/dst addr not set src:%d,dst:%d", t->saddr_l, t->daddr_l); + log_debug("ERR(fl4): src/dst addr not set src:%llu,dst:%llu", t->saddr_l, t->daddr_l); return 0; } @@ -97,11 +97,11 @@ static __always_inline int read_conn_tuple_partial_from_flowi6(conn_tuple_t *t, } if (!(t->saddr_h || t->saddr_l)) { - log_debug("ERR(fl6): src addr not set src_l:%d,src_h:%d", t->saddr_l, t->saddr_h); + log_debug("ERR(fl6): src addr not set src_l:%llu,src_h:%llu", t->saddr_l, t->saddr_h); return 0; } if (!(t->daddr_h || t->daddr_l)) { - log_debug("ERR(fl6): dst addr not set dst_l:%d,dst_h:%d", t->daddr_l, t->daddr_h); + log_debug("ERR(fl6): dst addr not set dst_l:%llu,dst_h:%llu", t->daddr_l, t->daddr_h); return 0; } @@ -143,7 +143,7 @@ int BPF_PROG(tcp_sendmsg_exit, struct sock *sk, struct msghdr *msg, size_t size, } u64 pid_tgid = bpf_get_current_pid_tgid(); - log_debug("fexit/tcp_sendmsg: pid_tgid: %d, sent: %d, sock: %llx", pid_tgid, sent, sk); + log_debug("fexit/tcp_sendmsg: pid_tgid: %llu, sent: %d, sock: %p", pid_tgid, sent, sk); conn_tuple_t t = {}; if (!read_conn_tuple(&t, sk, pid_tgid, CONN_TYPE_TCP)) { @@ -168,7 +168,7 @@ RETURN_IF_NOT_IN_SYSPROBE_TASK("fexit/tcp_sendpage"); } u64 pid_tgid = bpf_get_current_pid_tgid(); - log_debug("fexit/tcp_sendpage: pid_tgid: %d, sent: %d, sock: %llx", pid_tgid, sent, sk); + log_debug("fexit/tcp_sendpage: pid_tgid: %llu, sent: %d, sock: %p", pid_tgid, sent, sk); conn_tuple_t t = {}; if (!read_conn_tuple(&t, sk, pid_tgid, CONN_TYPE_TCP)) { @@ -193,7 +193,7 @@ int BPF_PROG(udp_sendpage_exit, struct sock *sk, struct page *page, int offset, } u64 pid_tgid = bpf_get_current_pid_tgid(); - log_debug("fexit/udp_sendpage: pid_tgid: %d, sent: %d, sock: %llx", pid_tgid, sent, sk); + log_debug("fexit/udp_sendpage: pid_tgid: %llu, sent: %d, sock: %p", pid_tgid, sent, sk); conn_tuple_t t = {}; if (!read_conn_tuple(&t, sk, pid_tgid, CONN_TYPE_UDP)) { @@ -235,7 +235,7 @@ int BPF_PROG(tcp_close, struct sock *sk, long timeout) { bpf_map_delete_elem(&tcp_ongoing_connect_pid, &sk); // Get network namespace id - log_debug("fentry/tcp_close: tgid: %u, pid: %u", pid_tgid >> 32, pid_tgid & 0xFFFFFFFF); + log_debug("fentry/tcp_close: tgid: %llu, pid: %llu", pid_tgid >> 32, pid_tgid & 0xFFFFFFFF); if (!read_conn_tuple(&t, sk, pid_tgid, CONN_TYPE_TCP)) { return 0; } @@ -447,7 +447,7 @@ SEC("fentry/tcp_connect") int BPF_PROG(tcp_connect, struct sock *sk) { RETURN_IF_NOT_IN_SYSPROBE_TASK("fentry/tcp_connect"); u64 pid_tgid = bpf_get_current_pid_tgid(); - log_debug("fentry/tcp_connect: tgid: %u, pid: %u", pid_tgid >> 32, pid_tgid & 0xFFFFFFFF); + log_debug("fentry/tcp_connect: tgid: %llu, pid: %llu", pid_tgid >> 32, pid_tgid & 0xFFFFFFFF); bpf_map_update_with_telemetry(tcp_ongoing_connect_pid, &sk, &pid_tgid, BPF_ANY); @@ -464,7 +464,7 @@ int BPF_PROG(tcp_finish_connect, struct sock *sk, struct sk_buff *skb, int rc) { u64 pid_tgid = *pid_tgid_p; bpf_map_delete_elem(&tcp_ongoing_connect_pid, &sk); - log_debug("fentry/tcp_finish_connect: tgid: %u, pid: 
%u", pid_tgid >> 32, pid_tgid & 0xFFFFFFFF); + log_debug("fentry/tcp_finish_connect: tgid: %llu, pid: %llu", pid_tgid >> 32, pid_tgid & 0xFFFFFFFF); conn_tuple_t t = {}; if (!read_conn_tuple(&t, sk, pid_tgid, CONN_TYPE_TCP)) { @@ -487,7 +487,7 @@ int BPF_PROG(inet_csk_accept_exit, struct sock *_sk, int flags, int *err, bool k } u64 pid_tgid = bpf_get_current_pid_tgid(); - log_debug("fexit/inet_csk_accept: tgid: %u, pid: %u", pid_tgid >> 32, pid_tgid & 0xFFFFFFFF); + log_debug("fexit/inet_csk_accept: tgid: %llu, pid: %llu", pid_tgid >> 32, pid_tgid & 0xFFFFFFFF); conn_tuple_t t = {}; if (!read_conn_tuple(&t, sk, pid_tgid, CONN_TYPE_TCP)) { diff --git a/pkg/network/ebpf/c/conntrack/helpers.h b/pkg/network/ebpf/c/conntrack/helpers.h index e0d0ed968c647..6399e706d2988 100644 --- a/pkg/network/ebpf/c/conntrack/helpers.h +++ b/pkg/network/ebpf/c/conntrack/helpers.h @@ -67,7 +67,7 @@ static __always_inline int nf_conntrack_tuple_to_conntrack_tuple(conntrack_tuple t->daddr_l = ct->dst.u3.ip; if (!t->saddr_l || !t->daddr_l) { - log_debug("ERR(to_conn_tuple.v4): src/dst addr not set src:%u, dst:%u", t->saddr_l, t->daddr_l); + log_debug("ERR(to_conn_tuple.v4): src/dst addr not set src:%llu, dst:%llu", t->saddr_l, t->daddr_l); return 0; } } else if (ct->src.l3num == AF_INET6 && (is_tcpv6_enabled() || is_udpv6_enabled())) { diff --git a/pkg/network/ebpf/c/prebuilt/usm.c b/pkg/network/ebpf/c/prebuilt/usm.c index 3da9ecf7ae791..faf75beec160c 100644 --- a/pkg/network/ebpf/c/prebuilt/usm.c +++ b/pkg/network/ebpf/c/prebuilt/usm.c @@ -34,7 +34,7 @@ int socket__protocol_dispatcher_kafka(struct __sk_buff *skb) { SEC("kprobe/tcp_sendmsg") int BPF_KPROBE(kprobe__tcp_sendmsg, struct sock *sk) { - log_debug("kprobe/tcp_sendmsg: sk=%llx", sk); + log_debug("kprobe/tcp_sendmsg: sk=%p", sk); // map connection tuple during SSL_do_handshake(ctx) map_ssl_ctx_to_sock(sk); diff --git a/pkg/network/ebpf/c/protocols/classification/routing.h b/pkg/network/ebpf/c/protocols/classification/routing.h index 906a2fbdbe54b..8e2b092e0afba 100644 --- a/pkg/network/ebpf/c/protocols/classification/routing.h +++ b/pkg/network/ebpf/c/protocols/classification/routing.h @@ -39,11 +39,11 @@ static __always_inline classification_prog_t __get_next_program(usm_context_t *u static __always_inline void classification_next_program(struct __sk_buff *skb, usm_context_t *usm_ctx) { classification_prog_t next_program = __get_next_program(usm_ctx); if (next_program == CLASSIFICATION_PROG_UNKNOWN || next_program == CLASSIFICATION_PROG_MAX) { - log_debug("classification tail-call: skb=%llu tail-end", skb); + log_debug("classification tail-call: skb=%p tail-end", skb); return; } - log_debug("classification tail-call: skb=%llu from=%d to=%d", skb, usm_ctx->routing_current_program, next_program); + log_debug("classification tail-call: skb=%p from=%d to=%d", skb, usm_ctx->routing_current_program, next_program); usm_ctx->routing_current_program = next_program; bpf_tail_call_compat(skb, &classification_progs, next_program); } diff --git a/pkg/network/ebpf/c/protocols/events.h b/pkg/network/ebpf/c/protocols/events.h index a7336e4b011c4..8aba74be87708 100644 --- a/pkg/network/ebpf/c/protocols/events.h +++ b/pkg/network/ebpf/c/protocols/events.h @@ -63,13 +63,13 @@ sizeof(batch_data_t)); \ } \ if (perf_ret < 0) { \ - _LOG(name, "batch flush error: cpu: %d idx: %d err: %d", \ + _LOG(name, "batch flush error: cpu: %d idx: %llu err: %ld", \ key.cpu, batch->idx, perf_ret); \ batch->failed_flushes++; \ return; \ } \ \ - _LOG(name, "batch flushed: cpu: %d idx: %d", 
key.cpu, batch->idx); \ + _LOG(name, "batch flushed: cpu: %d idx: %llu", key.cpu, batch->idx); \ batch->dropped_events = 0; \ batch->failed_flushes = 0; \ batch->len = 0; \ @@ -95,7 +95,7 @@ enough */ \ if (name##_batch_full(batch)) { \ batch->dropped_events++; \ - _LOG(name, "enqueue error: cpu: %d batch_idx: %d dropping event because batch is full.", \ + _LOG(name, "enqueue error: cpu: %d batch_idx: %llu dropping event because batch is full.", \ key.cpu, batch->idx); \ return; \ } \ @@ -110,7 +110,7 @@ batch->event_size = sizeof(value); \ batch->idx = batch_state->idx; \ \ - _LOG(name, "event enqueued: cpu: %d batch_idx: %d len: %d", \ + _LOG(name, "event enqueued: cpu: %d batch_idx: %llu len: %d", \ key.cpu, batch_state->idx, batch->len); \ /* if we have filled up the batch we move to the next one. notice the batch will be sent "asynchronously" to userspace during the diff --git a/pkg/network/ebpf/c/protocols/http/http.h b/pkg/network/ebpf/c/protocols/http/http.h index 4986ef7f8ece6..9e8926f97df41 100644 --- a/pkg/network/ebpf/c/protocols/http/http.h +++ b/pkg/network/ebpf/c/protocols/http/http.h @@ -23,7 +23,7 @@ static __always_inline void http_begin_request(http_transaction_t *http, http_me http->response_last_seen = 0; http->response_status_code = 0; bpf_memcpy(&http->request_fragment, buffer, HTTP_BUFFER_SIZE); - log_debug("http_begin_request: htx=%llx method=%d start=%llx", http, http->request_method, http->request_started); + log_debug("http_begin_request: htx=%p method=%d start=%llx", http, http->request_method, http->request_started); } static __always_inline void http_begin_response(http_transaction_t *http, const char *buffer) { @@ -32,7 +32,7 @@ static __always_inline void http_begin_response(http_transaction_t *http, const status_code += (buffer[HTTP_STATUS_OFFSET+1]-'0') * 10; status_code += (buffer[HTTP_STATUS_OFFSET+2]-'0') * 1; http->response_status_code = status_code; - log_debug("http_begin_response: htx=%llx status=%d", http, status_code); + log_debug("http_begin_response: htx=%p status=%d", http, status_code); } static __always_inline void http_batch_enqueue_wrapper(conn_tuple_t *tuple, http_transaction_t *http) { diff --git a/pkg/network/ebpf/c/protocols/tls/https.h b/pkg/network/ebpf/c/protocols/tls/https.h index 7edda5dff99c1..eb6d111a84126 100644 --- a/pkg/network/ebpf/c/protocols/tls/https.h +++ b/pkg/network/ebpf/c/protocols/tls/https.h @@ -248,7 +248,7 @@ static __always_inline tls_offsets_data_t* get_offsets_data() { key.device_id_major = MAJOR(dev_id); key.device_id_minor = MINOR(dev_id); - log_debug("get_offsets_data: task binary inode number: %ld; device ID %x:%x", key.ino, key.device_id_major, key.device_id_minor); + log_debug("get_offsets_data: task binary inode number: %llu; device ID %x:%x", key.ino, key.device_id_major, key.device_id_minor); return bpf_map_lookup_elem(&offsets_data, &key); } diff --git a/pkg/network/ebpf/c/protocols/tls/java/erpc_dispatcher.h b/pkg/network/ebpf/c/protocols/tls/java/erpc_dispatcher.h index 99b8a9f3f5592..135c093dfe7eb 100644 --- a/pkg/network/ebpf/c/protocols/tls/java/erpc_dispatcher.h +++ b/pkg/network/ebpf/c/protocols/tls/java/erpc_dispatcher.h @@ -31,7 +31,7 @@ static void __always_inline handle_erpc_request(struct pt_regs *ctx) { u8 op = 0; if (0 != bpf_probe_read_user(&op, sizeof(op), req)){ - log_debug("[java_tls_handle_erpc_request] failed to parse opcode of java tls erpc request for: pid %d", pid); + log_debug("[java_tls_handle_erpc_request] failed to parse opcode of java tls erpc request for: pid %llu", pid); 
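The if (0) __format_check(fmt, ##__VA_ARGS__); call added to the log_debug macro above is the mechanism behind the specifier fixes in the rest of this patch: the branch is statically dead and discarded by the compiler, but because __format_check is declared with __attribute__((format(printf, 1, 2))), every log_debug format string is now type-checked against its arguments at build time. A minimal userspace sketch of the same pattern, where my_log and the sample values are illustrative rather than taken from the tree:

    #include <stdio.h>

    /* Never defined or called at runtime; it exists only so the compiler applies
     * printf-style format checking (-Wformat) to the macro arguments. The
     * statically-dead if (0) branch is dropped, so no definition is ever needed. */
    extern void __format_check(const char *fmt, ...) __attribute__((format(printf, 1, 2)));

    #define my_log(fmt, ...)                             \
        ({                                               \
            if (0) __format_check(fmt, ##__VA_ARGS__);   \
            printf(fmt "\n", ##__VA_ARGS__);             \
        })

    int main(void) {
        unsigned long long pid_tgid = 0x0000123400005678ULL; /* u64, as in the probes */
        void *sk = &pid_tgid;                                /* stand-in for a struct sock * */
        my_log("pid_tgid=%llu sock=%p", pid_tgid, sk);       /* compiles cleanly */
        /* my_log("pid_tgid=%d sock=%llx", pid_tgid, sk); */ /* rejected by -Wformat */
        return 0;
    }

Built with cc -Wall -Wformat, uncommenting the second call produces the same class of warnings addressed by the %d to %llu and %llx to %p conversions in these hunks.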
return; } @@ -39,7 +39,7 @@ static void __always_inline handle_erpc_request(struct pt_regs *ctx) { #ifdef DEBUG log_debug("[java_tls_handle_erpc_request] received %d op", op); if (op >= MAX_MESSAGE_TYPE){ - log_debug("[java_tls_handle_erpc_request] got unsupported erpc request %x for: pid %d",op, pid); + log_debug("[java_tls_handle_erpc_request] got unsupported erpc request %x for: pid %llu",op, pid); } #endif diff --git a/pkg/network/ebpf/c/protocols/tls/java/erpc_handlers.h b/pkg/network/ebpf/c/protocols/tls/java/erpc_handlers.h index 18fb9c468a89f..07570f6cbc0d5 100644 --- a/pkg/network/ebpf/c/protocols/tls/java/erpc_handlers.h +++ b/pkg/network/ebpf/c/protocols/tls/java/erpc_handlers.h @@ -42,7 +42,7 @@ int kprobe_handle_sync_payload(struct pt_regs *ctx) { #ifdef DEBUG u64 pid_tgid = bpf_get_current_pid_tgid(); u64 pid = pid_tgid >> 32; - log_debug("[handle_sync_payload] failed reading message length location for pid %d", pid); + log_debug("[handle_sync_payload] failed reading message length location for pid %lld", pid); #endif return 1; } diff --git a/pkg/network/ebpf/c/protocols/tls/native-tls.h b/pkg/network/ebpf/c/protocols/tls/native-tls.h index 42776de78e009..298b41cf1caea 100644 --- a/pkg/network/ebpf/c/protocols/tls/native-tls.h +++ b/pkg/network/ebpf/c/protocols/tls/native-tls.h @@ -10,7 +10,7 @@ SEC("uprobe/SSL_do_handshake") int uprobe__SSL_do_handshake(struct pt_regs *ctx) { u64 pid_tgid = bpf_get_current_pid_tgid(); void *ssl_ctx = (void *)PT_REGS_PARM1(ctx); - log_debug("uprobe/SSL_do_handshake: pid_tgid=%llx ssl_ctx=%llx", pid_tgid, ssl_ctx); + log_debug("uprobe/SSL_do_handshake: pid_tgid=%llx ssl_ctx=%p", pid_tgid, ssl_ctx); bpf_map_update_with_telemetry(ssl_ctx_by_pid_tgid, &pid_tgid, &ssl_ctx, BPF_ANY); return 0; } @@ -27,7 +27,7 @@ SEC("uprobe/SSL_connect") int uprobe__SSL_connect(struct pt_regs *ctx) { u64 pid_tgid = bpf_get_current_pid_tgid(); void *ssl_ctx = (void *)PT_REGS_PARM1(ctx); - log_debug("uprobe/SSL_connect: pid_tgid=%llx ssl_ctx=%llx", pid_tgid, ssl_ctx); + log_debug("uprobe/SSL_connect: pid_tgid=%llx ssl_ctx=%p", pid_tgid, ssl_ctx); bpf_map_update_with_telemetry(ssl_ctx_by_pid_tgid, &pid_tgid, &ssl_ctx, BPF_ANY); return 0; } @@ -45,7 +45,7 @@ SEC("uprobe/SSL_set_fd") int uprobe__SSL_set_fd(struct pt_regs *ctx) { void *ssl_ctx = (void *)PT_REGS_PARM1(ctx); u32 socket_fd = (u32)PT_REGS_PARM2(ctx); - log_debug("uprobe/SSL_set_fd: ctx=%llx fd=%d", ssl_ctx, socket_fd); + log_debug("uprobe/SSL_set_fd: ctx=%p fd=%d", ssl_ctx, socket_fd); init_ssl_sock(ssl_ctx, socket_fd); return 0; } @@ -83,7 +83,7 @@ SEC("uprobe/SSL_set_bio") int uprobe__SSL_set_bio(struct pt_regs *ctx) { void *ssl_ctx = (void *)PT_REGS_PARM1(ctx); void *bio = (void *)PT_REGS_PARM2(ctx); - log_debug("uprobe/SSL_set_bio: ctx=%llx bio=%llx", ssl_ctx, bio); + log_debug("uprobe/SSL_set_bio: ctx=%p bio=%p", ssl_ctx, bio); u32 *socket_fd = bpf_map_lookup_elem(&fd_by_ssl_bio, &bio); if (socket_fd == NULL) { return 0; @@ -99,7 +99,7 @@ int uprobe__SSL_read(struct pt_regs *ctx) { args.ctx = (void *)PT_REGS_PARM1(ctx); args.buf = (void *)PT_REGS_PARM2(ctx); u64 pid_tgid = bpf_get_current_pid_tgid(); - log_debug("uprobe/SSL_read: pid_tgid=%llx ctx=%llx", pid_tgid, args.ctx); + log_debug("uprobe/SSL_read: pid_tgid=%llx ctx=%p", pid_tgid, args.ctx); bpf_map_update_with_telemetry(ssl_read_args, &pid_tgid, &args, BPF_ANY); // Trigger mapping of SSL context to connection tuple in case it is missing. 
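With that check in place, the remaining hunks are mechanical: each specifier is aligned with the C type of the argument actually passed to log_debug. The recurring cases in these files are u64 values such as pid_tgid (expressions like pid_tgid >> 32 are still u64-typed even though the value fits in 32 bits), kernel pointers (now printed with %p rather than %llx or %x), and size_t/ssize_t lengths. A short compile-checkable summary, using stand-in values rather than the tree's own variables:

    #include <stdio.h>
    #include <stddef.h>
    #include <sys/types.h>

    int main(void) {
        /* bpf_get_current_pid_tgid() packs the tgid (the userspace "PID") in the
         * upper 32 bits and the thread id in the lower 32 bits of a single u64. */
        unsigned long long pid_tgid = (4242ULL << 32) | 4243ULL;
        void *sk = &pid_tgid;   /* stand-in for a struct sock * or an SSL context pointer */
        size_t size = 1500;     /* e.g. an ip_make_skb payload size */
        ssize_t written = -11;  /* e.g. a gnutls_record_send return value */
        long long ret = -22;    /* e.g. a __s64 bind() return code */

        /* u64 and u64-derived expressions: %llu (previously %d or %u) */
        printf("tgid: %llu, pid: %llu\n", pid_tgid >> 32, pid_tgid & 0xFFFFFFFF);
        /* pointers: %p (previously %llx or %x) */
        printf("sock: %p\n", sk);
        /* size_t: %zu, ssize_t: %zd, signed 64-bit: %lld (previously %d) */
        printf("size: %zu, written: %zd, ret: %lld\n", size, written, ret);
        return 0;
    }

For values such as pid_tgid >> 32 the printed output does not change; what changes is that the specifier now matches the argument's type, which is exactly what the new compile-time check enforces.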
@@ -124,7 +124,7 @@ static __always_inline int SSL_read_ret(struct pt_regs *ctx, __u64 tags) { void *ssl_ctx = args->ctx; conn_tuple_t *t = tup_from_ssl_ctx(ssl_ctx, pid_tgid); if (t == NULL) { - log_debug("uretprobe/SSL_read: pid_tgid=%llx ctx=%llx: no conn tuple", pid_tgid, ssl_ctx); + log_debug("uretprobe/SSL_read: pid_tgid=%llx ctx=%p: no conn tuple", pid_tgid, ssl_ctx); goto cleanup; } @@ -165,7 +165,7 @@ int uprobe__SSL_write(struct pt_regs* ctx) { args.ctx = (void *)PT_REGS_PARM1(ctx); args.buf = (void *)PT_REGS_PARM2(ctx); u64 pid_tgid = bpf_get_current_pid_tgid(); - log_debug("uprobe/SSL_write: pid_tgid=%llx ctx=%llx", pid_tgid, args.ctx); + log_debug("uprobe/SSL_write: pid_tgid=%llx ctx=%p", pid_tgid, args.ctx); bpf_map_update_with_telemetry(ssl_write_args, &pid_tgid, &args, BPF_ANY); return 0; } @@ -226,7 +226,7 @@ int uprobe__SSL_read_ex(struct pt_regs* ctx) { args.buf = (void *)PT_REGS_PARM2(ctx); args.size_out_param = (size_t *)PT_REGS_PARM4(ctx); u64 pid_tgid = bpf_get_current_pid_tgid(); - log_debug("uprobe/SSL_read_ex: pid_tgid=%llx ctx=%llx", pid_tgid, args.ctx); + log_debug("uprobe/SSL_read_ex: pid_tgid=%llx ctx=%p", pid_tgid, args.ctx); bpf_map_update_elem(&ssl_read_ex_args, &pid_tgid, &args, BPF_ANY); // Trigger mapping of SSL context to connection tuple in case it is missing. @@ -256,14 +256,14 @@ static __always_inline int SSL_read_ex_ret(struct pt_regs* ctx, __u64 tags) { size_t bytes_count = 0; bpf_probe_read_user(&bytes_count, sizeof(bytes_count), args->size_out_param); if ( bytes_count <= 0) { - log_debug("uretprobe/SSL_read_ex: read non positive number of bytes (pid_tgid=%llx len=%d)", pid_tgid, bytes_count); + log_debug("uretprobe/SSL_read_ex: read non positive number of bytes (pid_tgid=%llx len=%zu)", pid_tgid, bytes_count); goto cleanup; } void *ssl_ctx = args->ctx; conn_tuple_t *conn_tuple = tup_from_ssl_ctx(ssl_ctx, pid_tgid); if (conn_tuple == NULL) { - log_debug("uretprobe/SSL_read_ex: pid_tgid=%llx ctx=%llx: no conn tuple", pid_tgid, ssl_ctx); + log_debug("uretprobe/SSL_read_ex: pid_tgid=%llx ctx=%p: no conn tuple", pid_tgid, ssl_ctx); goto cleanup; } @@ -300,7 +300,7 @@ int uprobe__SSL_write_ex(struct pt_regs* ctx) { args.buf = (void *)PT_REGS_PARM2(ctx); args.size_out_param = (size_t *)PT_REGS_PARM4(ctx); u64 pid_tgid = bpf_get_current_pid_tgid(); - log_debug("uprobe/SSL_write_ex: pid_tgid=%llx ctx=%llx", pid_tgid, args.ctx); + log_debug("uprobe/SSL_write_ex: pid_tgid=%llx ctx=%p", pid_tgid, args.ctx); bpf_map_update_elem(&ssl_write_ex_args, &pid_tgid, &args, BPF_ANY); return 0; } @@ -327,7 +327,7 @@ static __always_inline int SSL_write_ex_ret(struct pt_regs* ctx, __u64 tags) { size_t bytes_count = 0; bpf_probe_read_user(&bytes_count, sizeof(bytes_count), args->size_out_param); if ( bytes_count <= 0) { - log_debug("uretprobe/SSL_write_ex: wrote non positive number of bytes (pid_tgid=%llx len=%d)", pid_tgid, bytes_count); + log_debug("uretprobe/SSL_write_ex: wrote non positive number of bytes (pid_tgid=%llx len=%zu)", pid_tgid, bytes_count); goto cleanup; } @@ -367,7 +367,7 @@ SEC("uprobe/SSL_shutdown") int uprobe__SSL_shutdown(struct pt_regs *ctx) { void *ssl_ctx = (void *)PT_REGS_PARM1(ctx); u64 pid_tgid = bpf_get_current_pid_tgid(); - log_debug("uprobe/SSL_shutdown: pid_tgid=%llx ctx=%llx", pid_tgid, ssl_ctx); + log_debug("uprobe/SSL_shutdown: pid_tgid=%llx ctx=%p", pid_tgid, ssl_ctx); conn_tuple_t *t = tup_from_ssl_ctx(ssl_ctx, pid_tgid); if (t == NULL) { return 0; @@ -406,7 +406,7 @@ int uprobe__gnutls_transport_set_int2(struct pt_regs *ctx) { // 
Use the recv_fd and ignore the send_fd; // in most real-world scenarios, they are the same. int recv_fd = (int)PT_REGS_PARM2(ctx); - log_debug("gnutls_transport_set_int2: ctx=%llx fd=%d", ssl_session, recv_fd); + log_debug("gnutls_transport_set_int2: ctx=%p fd=%d", ssl_session, recv_fd); init_ssl_sock(ssl_session, (u32)recv_fd); return 0; @@ -419,7 +419,7 @@ int uprobe__gnutls_transport_set_ptr(struct pt_regs *ctx) { void *ssl_session = (void *)PT_REGS_PARM1(ctx); // This is a void*, but it might contain the socket fd cast as a pointer. int fd = (int)PT_REGS_PARM2(ctx); - log_debug("gnutls_transport_set_ptr: ctx=%llx fd=%d", ssl_session, fd); + log_debug("gnutls_transport_set_ptr: ctx=%p fd=%d", ssl_session, fd); init_ssl_sock(ssl_session, (u32)fd); return 0; @@ -434,7 +434,7 @@ int uprobe__gnutls_transport_set_ptr2(struct pt_regs *ctx) { // in most real-world scenarios, they are the same. // This is a void*, but it might contain the socket fd cast as a pointer. int recv_fd = (int)PT_REGS_PARM2(ctx); - log_debug("gnutls_transport_set_ptr2: ctx=%llx fd=%d", ssl_session, recv_fd); + log_debug("gnutls_transport_set_ptr2: ctx=%p fd=%d", ssl_session, recv_fd); init_ssl_sock(ssl_session, (u32)recv_fd); return 0; @@ -452,7 +452,7 @@ int uprobe__gnutls_record_recv(struct pt_regs *ctx) { .buf = data, }; u64 pid_tgid = bpf_get_current_pid_tgid(); - log_debug("gnutls_record_recv: pid=%llu ctx=%llx", pid_tgid, ssl_session); + log_debug("gnutls_record_recv: pid=%llu ctx=%p", pid_tgid, ssl_session); bpf_map_update_with_telemetry(ssl_read_args, &pid_tgid, &args, BPF_ANY); return 0; } @@ -473,7 +473,7 @@ int uretprobe__gnutls_record_recv(struct pt_regs *ctx) { } void *ssl_session = args->ctx; - log_debug("uret/gnutls_record_recv: pid=%llu ctx=%llx", pid_tgid, ssl_session); + log_debug("uret/gnutls_record_recv: pid=%llu ctx=%p", pid_tgid, ssl_session); conn_tuple_t *t = tup_from_ssl_ctx(ssl_session, pid_tgid); if (t == NULL) { goto cleanup; @@ -502,7 +502,7 @@ int uprobe__gnutls_record_send(struct pt_regs *ctx) { args.ctx = (void *)PT_REGS_PARM1(ctx); args.buf = (void *)PT_REGS_PARM2(ctx); u64 pid_tgid = bpf_get_current_pid_tgid(); - log_debug("uprobe/gnutls_record_send: pid=%llu ctx=%llx", pid_tgid, args.ctx); + log_debug("uprobe/gnutls_record_send: pid=%llu ctx=%p", pid_tgid, args.ctx); bpf_map_update_with_telemetry(ssl_write_args, &pid_tgid, &args, BPF_ANY); return 0; } @@ -511,7 +511,7 @@ SEC("uretprobe/gnutls_record_send") int uretprobe__gnutls_record_send(struct pt_regs *ctx) { u64 pid_tgid = bpf_get_current_pid_tgid(); ssize_t write_len = (ssize_t)PT_REGS_RC(ctx); - log_debug("uretprobe/gnutls_record_send: pid=%llu len=%d", pid_tgid, write_len); + log_debug("uretprobe/gnutls_record_send: pid=%llu len=%zd", pid_tgid, write_len); if (write_len <= 0) { goto cleanup; } @@ -544,7 +544,7 @@ int uretprobe__gnutls_record_send(struct pt_regs *ctx) { static __always_inline void gnutls_goodbye(struct pt_regs *ctx, void *ssl_session) { u64 pid_tgid = bpf_get_current_pid_tgid(); - log_debug("gnutls_goodbye: pid=%llu ctx=%llx", pid_tgid, ssl_session); + log_debug("gnutls_goodbye: pid=%llu ctx=%p", pid_tgid, ssl_session); conn_tuple_t *t = tup_from_ssl_ctx(ssl_session, pid_tgid); if (t == NULL) { return; diff --git a/pkg/network/ebpf/c/runtime/usm.c b/pkg/network/ebpf/c/runtime/usm.c index d900f1addc3ed..f6717ed5731d7 100644 --- a/pkg/network/ebpf/c/runtime/usm.c +++ b/pkg/network/ebpf/c/runtime/usm.c @@ -46,7 +46,7 @@ int socket__protocol_dispatcher_kafka(struct __sk_buff *skb) { SEC("kprobe/tcp_sendmsg") int 
BPF_KPROBE(kprobe__tcp_sendmsg, struct sock *sk) { - log_debug("kprobe/tcp_sendmsg: sk=%llx", sk); + log_debug("kprobe/tcp_sendmsg: sk=%p", sk); // map connection tuple during SSL_do_handshake(ctx) map_ssl_ctx_to_sock(sk); @@ -74,7 +74,7 @@ int uprobe__crypto_tls_Conn_Write(struct pt_regs *ctx) { u64 pid = pid_tgid >> 32; tls_offsets_data_t* od = get_offsets_data(); if (od == NULL) { - log_debug("[go-tls-write] no offsets data in map for pid %d", pid); + log_debug("[go-tls-write] no offsets data in map for pid %llu", pid); return 0; } @@ -82,7 +82,7 @@ int uprobe__crypto_tls_Conn_Write(struct pt_regs *ctx) { go_tls_function_args_key_t call_key = {0}; call_key.pid = pid; if (read_goroutine_id(ctx, &od->goroutine_id, &call_key.goroutine_id)) { - log_debug("[go-tls-write] failed reading go routine id for pid %d", pid); + log_debug("[go-tls-write] failed reading go routine id for pid %llu", pid); return 0; } @@ -90,17 +90,17 @@ int uprobe__crypto_tls_Conn_Write(struct pt_regs *ctx) { // (since the parameters might not be live by the time the return probe is hit). go_tls_write_args_data_t call_data = {0}; if (read_location(ctx, &od->write_conn_pointer, sizeof(call_data.conn_pointer), &call_data.conn_pointer)) { - log_debug("[go-tls-write] failed reading conn pointer for pid %d", pid); + log_debug("[go-tls-write] failed reading conn pointer for pid %llu", pid); return 0; } if (read_location(ctx, &od->write_buffer.ptr, sizeof(call_data.b_data), &call_data.b_data)) { - log_debug("[go-tls-write] failed reading buffer pointer for pid %d", pid); + log_debug("[go-tls-write] failed reading buffer pointer for pid %llu", pid); return 0; } if (read_location(ctx, &od->write_buffer.len, sizeof(call_data.b_len), &call_data.b_len)) { - log_debug("[go-tls-write] failed reading buffer length for pid %d", pid); + log_debug("[go-tls-write] failed reading buffer length for pid %llu", pid); return 0; } @@ -115,7 +115,7 @@ int uprobe__crypto_tls_Conn_Write__return(struct pt_regs *ctx) { u64 pid = pid_tgid >> 32; tls_offsets_data_t* od = get_offsets_data(); if (od == NULL) { - log_debug("[go-tls-write-return] no offsets data in map for pid %d", pid); + log_debug("[go-tls-write-return] no offsets data in map for pid %llu", pid); return 0; } @@ -124,41 +124,41 @@ int uprobe__crypto_tls_Conn_Write__return(struct pt_regs *ctx) { call_key.pid = pid; if (read_goroutine_id(ctx, &od->goroutine_id, &call_key.goroutine_id)) { - log_debug("[go-tls-write-return] failed reading go routine id for pid %d", pid); + log_debug("[go-tls-write-return] failed reading go routine id for pid %llu", pid); return 0; } uint64_t bytes_written = 0; if (read_location(ctx, &od->write_return_bytes, sizeof(bytes_written), &bytes_written)) { bpf_map_delete_elem(&go_tls_write_args, &call_key); - log_debug("[go-tls-write-return] failed reading write return bytes location for pid %d", pid); + log_debug("[go-tls-write-return] failed reading write return bytes location for pid %llu", pid); return 0; } if (bytes_written <= 0) { bpf_map_delete_elem(&go_tls_write_args, &call_key); - log_debug("[go-tls-write-return] write returned non-positive for amount of bytes written for pid: %d", pid); + log_debug("[go-tls-write-return] write returned non-positive for amount of bytes written for pid: %llu", pid); return 0; } uint64_t err_ptr = 0; if (read_location(ctx, &od->write_return_error, sizeof(err_ptr), &err_ptr)) { bpf_map_delete_elem(&go_tls_write_args, &call_key); - log_debug("[go-tls-write-return] failed reading write return error location for pid %d", pid); + 
log_debug("[go-tls-write-return] failed reading write return error location for pid %llu", pid); return 0; } // check if err != nil if (err_ptr != 0) { bpf_map_delete_elem(&go_tls_write_args, &call_key); - log_debug("[go-tls-write-return] error in write for pid %d: data will be ignored", pid); + log_debug("[go-tls-write-return] error in write for pid %llu: data will be ignored", pid); return 0; } go_tls_write_args_data_t *call_data_ptr = bpf_map_lookup_elem(&go_tls_write_args, &call_key); if (call_data_ptr == NULL) { bpf_map_delete_elem(&go_tls_write_args, &call_key); - log_debug("[go-tls-write-return] no write information in write-return for pid %d", pid); + log_debug("[go-tls-write-return] no write information in write-return for pid %llu", pid); return 0; } @@ -168,8 +168,8 @@ int uprobe__crypto_tls_Conn_Write__return(struct pt_regs *ctx) { return 0; } - log_debug("[go-tls-write] processing %s", call_data_ptr->b_data); char *buffer_ptr = (char*)call_data_ptr->b_data; + log_debug("[go-tls-write] processing %s", buffer_ptr); bpf_map_delete_elem(&go_tls_write_args, &call_key); conn_tuple_t copy = {0}; bpf_memcpy(©, t, sizeof(conn_tuple_t)); @@ -189,7 +189,7 @@ int uprobe__crypto_tls_Conn_Read(struct pt_regs *ctx) { u64 pid = pid_tgid >> 32; tls_offsets_data_t* od = get_offsets_data(); if (od == NULL) { - log_debug("[go-tls-read] no offsets data in map for pid %d", pid_tgid >> 32); + log_debug("[go-tls-read] no offsets data in map for pid %llu", pid_tgid >> 32); return 0; } @@ -197,7 +197,7 @@ int uprobe__crypto_tls_Conn_Read(struct pt_regs *ctx) { go_tls_function_args_key_t call_key = {0}; call_key.pid = pid; if (read_goroutine_id(ctx, &od->goroutine_id, &call_key.goroutine_id)) { - log_debug("[go-tls-read] failed reading go routine id for pid %d", pid_tgid >> 32); + log_debug("[go-tls-read] failed reading go routine id for pid %llu", pid_tgid >> 32); return 0; } @@ -205,11 +205,11 @@ int uprobe__crypto_tls_Conn_Read(struct pt_regs *ctx) { // (since the parameters might not be live by the time the return probe is hit). 
go_tls_read_args_data_t call_data = {0}; if (read_location(ctx, &od->read_conn_pointer, sizeof(call_data.conn_pointer), &call_data.conn_pointer)) { - log_debug("[go-tls-read] failed reading conn pointer for pid %d", pid_tgid >> 32); + log_debug("[go-tls-read] failed reading conn pointer for pid %llu", pid_tgid >> 32); return 0; } if (read_location(ctx, &od->read_buffer.ptr, sizeof(call_data.b_data), &call_data.b_data)) { - log_debug("[go-tls-read] failed reading buffer pointer for pid %d", pid_tgid >> 32); + log_debug("[go-tls-read] failed reading buffer pointer for pid %llu", pid_tgid >> 32); return 0; } @@ -224,7 +224,7 @@ int uprobe__crypto_tls_Conn_Read__return(struct pt_regs *ctx) { u64 pid = pid_tgid >> 32; tls_offsets_data_t* od = get_offsets_data(); if (od == NULL) { - log_debug("[go-tls-read-return] no offsets data in map for pid %d", pid); + log_debug("[go-tls-read-return] no offsets data in map for pid %llu", pid); return 0; } @@ -233,7 +233,7 @@ int uprobe__crypto_tls_Conn_Read__return(struct pt_regs *ctx) { __s64 goroutine_id = 0; // Read the PID and goroutine ID to make the partial call key if (read_goroutine_id(ctx, &od->goroutine_id, &goroutine_id)) { - log_debug("[go-tls-read-return] failed reading go routine id for pid %d", pid); + log_debug("[go-tls-read-return] failed reading go routine id for pid %llu", pid); return 0; } @@ -243,13 +243,13 @@ int uprobe__crypto_tls_Conn_Read__return(struct pt_regs *ctx) { go_tls_read_args_data_t* call_data_ptr = bpf_map_lookup_elem(&go_tls_read_args, &call_key); if (call_data_ptr == NULL) { - log_debug("[go-tls-read-return] no read information in read-return for pid %d", pid); + log_debug("[go-tls-read-return] no read information in read-return for pid %llu", pid); return 0; } uint64_t bytes_read = 0; if (read_location(ctx, &od->read_return_bytes, sizeof(bytes_read), &bytes_read)) { - log_debug("[go-tls-read-return] failed reading return bytes location for pid %d", pid); + log_debug("[go-tls-read-return] failed reading return bytes location for pid %llu", pid); bpf_map_delete_elem(&go_tls_read_args, &call_key); return 0; } @@ -259,7 +259,7 @@ int uprobe__crypto_tls_Conn_Read__return(struct pt_regs *ctx) { // For now for success validation we chose to check only the amount of bytes read // and make sure it's greater than zero. 
if (bytes_read <= 0) { - log_debug("[go-tls-read-return] read returned non-positive for amount of bytes read for pid: %d", pid); + log_debug("[go-tls-read-return] read returned non-positive for amount of bytes read for pid: %llu", pid); bpf_map_delete_elem(&go_tls_read_args, &call_key); return 0; } @@ -290,7 +290,7 @@ int uprobe__crypto_tls_Conn_Close(struct pt_regs *ctx) { u64 pid_tgid = bpf_get_current_pid_tgid(); tls_offsets_data_t* od = get_offsets_data(); if (od == NULL) { - log_debug("[go-tls-close] no offsets data in map for pid %d", pid_tgid >> 32); + log_debug("[go-tls-close] no offsets data in map for pid %llu", pid_tgid >> 32); return 0; } @@ -304,13 +304,13 @@ int uprobe__crypto_tls_Conn_Close(struct pt_regs *ctx) { void* conn_pointer = NULL; if (read_location(ctx, &od->close_conn_pointer, sizeof(conn_pointer), &conn_pointer)) { - log_debug("[go-tls-close] failed reading close conn pointer for pid %d", pid_tgid >> 32); + log_debug("[go-tls-close] failed reading close conn pointer for pid %llu", pid_tgid >> 32); return 0; } conn_tuple_t* t = conn_tup_from_tls_conn(od, conn_pointer, pid_tgid); if (t == NULL) { - log_debug("[go-tls-close] failed getting conn tup from tls conn for pid %d", pid_tgid >> 32); + log_debug("[go-tls-close] failed getting conn tup from tls conn for pid %llu", pid_tgid >> 32); return 0; } diff --git a/pkg/network/ebpf/c/sock.h b/pkg/network/ebpf/c/sock.h index 97eebb3137477..c71e48a544f1d 100644 --- a/pkg/network/ebpf/c/sock.h +++ b/pkg/network/ebpf/c/sock.h @@ -196,7 +196,7 @@ static __always_inline int read_conn_tuple_partial(conn_tuple_t* t, struct sock* } if (t->saddr_l == 0 || t->daddr_l == 0) { - log_debug("ERR(read_conn_tuple.v4): src or dst addr not set src=%d, dst=%d", t->saddr_l, t->daddr_l); + log_debug("ERR(read_conn_tuple.v4): src or dst addr not set src=%llu, dst=%llu", t->saddr_l, t->daddr_l); err = 1; } } else if (family == AF_INET6) { @@ -214,13 +214,13 @@ static __always_inline int read_conn_tuple_partial(conn_tuple_t* t, struct sock* /* We can only pass 4 args to bpf_trace_printk */ /* so split those 2 statements to be able to log everything */ if (!(t->saddr_h || t->saddr_l)) { - log_debug("ERR(read_conn_tuple.v6): src addr not set: src_l:%d,src_h:%d", + log_debug("ERR(read_conn_tuple.v6): src addr not set: src_l:%llu,src_h:%llu", t->saddr_l, t->saddr_h); err = 1; } if (!(t->daddr_h || t->daddr_l)) { - log_debug("ERR(read_conn_tuple.v6): dst addr not set: dst_l:%d,dst_h:%d", + log_debug("ERR(read_conn_tuple.v6): dst addr not set: dst_l:%llu,dst_h:%llu", t->daddr_l, t->daddr_h); err = 1; } diff --git a/pkg/network/ebpf/c/tracer.c b/pkg/network/ebpf/c/tracer.c index 26e4acc4d9cd1..43a7bcf001f5a 100644 --- a/pkg/network/ebpf/c/tracer.c +++ b/pkg/network/ebpf/c/tracer.c @@ -45,13 +45,13 @@ int socket__classifier_grpc(struct __sk_buff *skb) { SEC("kprobe/tcp_sendmsg") int kprobe__tcp_sendmsg(struct pt_regs *ctx) { u64 pid_tgid = bpf_get_current_pid_tgid(); - log_debug("kprobe/tcp_sendmsg: pid_tgid: %d", pid_tgid); + log_debug("kprobe/tcp_sendmsg: pid_tgid: %llu", pid_tgid); #if defined(COMPILE_RUNTIME) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) struct sock *skp = (struct sock *)PT_REGS_PARM2(ctx); #else struct sock *skp = (struct sock *)PT_REGS_PARM1(ctx); #endif - log_debug("kprobe/tcp_sendmsg: pid_tgid: %d, sock: %llx", pid_tgid, skp); + log_debug("kprobe/tcp_sendmsg: pid_tgid: %llu, sock: %p", pid_tgid, skp); bpf_map_update_with_telemetry(tcp_sendmsg_args, &pid_tgid, &skp, BPF_ANY); return 0; } @@ -60,7 +60,7 @@ int 
kprobe__tcp_sendmsg(struct pt_regs *ctx) { SEC("kprobe/tcp_sendmsg") int kprobe__tcp_sendmsg__pre_4_1_0(struct pt_regs *ctx) { u64 pid_tgid = bpf_get_current_pid_tgid(); - log_debug("kprobe/tcp_sendmsg: pid_tgid: %d", pid_tgid); + log_debug("kprobe/tcp_sendmsg: pid_tgid: %llu", pid_tgid); struct sock *skp = (struct sock *)PT_REGS_PARM2(ctx); bpf_map_update_with_telemetry(tcp_sendmsg_args, &pid_tgid, &skp, BPF_ANY); return 0; @@ -88,7 +88,7 @@ int kretprobe__tcp_sendmsg(struct pt_regs *ctx) { return 0; } - log_debug("kretprobe/tcp_sendmsg: pid_tgid: %d, sent: %d, sock: %llx", pid_tgid, sent, skp); + log_debug("kretprobe/tcp_sendmsg: pid_tgid: %llu, sent: %d, sock: %p", pid_tgid, sent, skp); conn_tuple_t t = {}; if (!read_conn_tuple(&t, skp, pid_tgid, CONN_TYPE_TCP)) { return 0; @@ -106,7 +106,7 @@ int kretprobe__tcp_sendmsg(struct pt_regs *ctx) { SEC("kprobe/tcp_sendpage") int kprobe__tcp_sendpage(struct pt_regs *ctx) { u64 pid_tgid = bpf_get_current_pid_tgid(); - log_debug("kprobe/tcp_sendpage: pid_tgid: %d", pid_tgid); + log_debug("kprobe/tcp_sendpage: pid_tgid: %llu", pid_tgid); struct sock *skp = (struct sock *)PT_REGS_PARM1(ctx); bpf_map_update_with_telemetry(tcp_sendpage_args, &pid_tgid, &skp, BPF_ANY); return 0; @@ -133,7 +133,7 @@ int kretprobe__tcp_sendpage(struct pt_regs *ctx) { return 0; } - log_debug("kretprobe/tcp_sendpage: pid_tgid: %d, sent: %d, sock: %x", pid_tgid, sent, skp); + log_debug("kretprobe/tcp_sendpage: pid_tgid: %llu, sent: %d, sock: %p", pid_tgid, sent, skp); conn_tuple_t t = {}; if (!read_conn_tuple(&t, skp, pid_tgid, CONN_TYPE_TCP)) { return 0; @@ -151,7 +151,7 @@ int kretprobe__tcp_sendpage(struct pt_regs *ctx) { SEC("kprobe/udp_sendpage") int kprobe__udp_sendpage(struct pt_regs *ctx) { u64 pid_tgid = bpf_get_current_pid_tgid(); - log_debug("kprobe/udp_sendpage: pid_tgid: %d", pid_tgid); + log_debug("kprobe/udp_sendpage: pid_tgid: %llu", pid_tgid); struct sock *skp = (struct sock *)PT_REGS_PARM1(ctx); bpf_map_update_with_telemetry(udp_sendpage_args, &pid_tgid, &skp, BPF_ANY); return 0; @@ -177,7 +177,7 @@ int kretprobe__udp_sendpage(struct pt_regs *ctx) { return 0; } - log_debug("kretprobe/udp_sendpage: pid_tgid: %d, sent: %d, sock: %x", pid_tgid, sent, skp); + log_debug("kretprobe/udp_sendpage: pid_tgid: %llu, sent: %d, sock: %p", pid_tgid, sent, skp); conn_tuple_t t = {}; if (!read_conn_tuple(&t, skp, pid_tgid, CONN_TYPE_UDP)) { return 0; @@ -199,7 +199,7 @@ int kprobe__tcp_close(struct pt_regs *ctx) { } // Get network namespace id - log_debug("kprobe/tcp_close: tgid: %u, pid: %u", pid_tgid >> 32, pid_tgid & 0xFFFFFFFF); + log_debug("kprobe/tcp_close: tgid: %llu, pid: %llu", pid_tgid >> 32, pid_tgid & 0xFFFFFFFF); if (!read_conn_tuple(&t, sk, pid_tgid, CONN_TYPE_TCP)) { return 0; } @@ -307,14 +307,14 @@ static __always_inline int handle_ip6_skb(struct sock *sk, size_t size, struct f #endif fl6_saddr(fl6, &t.saddr_h, &t.saddr_l); if (!(t.saddr_h || t.saddr_l)) { - log_debug("ERR(fl6): src addr not set src_l:%d,src_h:%d", t.saddr_l, t.saddr_h); + log_debug("ERR(fl6): src addr not set src_l:%llu,src_h:%llu", t.saddr_l, t.saddr_h); increment_telemetry_count(udp_send_missed); return 0; } fl6_daddr(fl6, &t.daddr_h, &t.daddr_l); if (!(t.daddr_h || t.daddr_l)) { - log_debug("ERR(fl6): dst addr not set dst_l:%d,dst_h:%d", t.daddr_l, t.daddr_h); + log_debug("ERR(fl6): dst addr not set dst_l:%llu,dst_h:%llu", t.daddr_l, t.daddr_h); increment_telemetry_count(udp_send_missed); return 0; } @@ -343,7 +343,7 @@ static __always_inline int handle_ip6_skb(struct sock *sk, 
size_t size, struct f t.dport = bpf_ntohs(t.dport); } - log_debug("kprobe/ip6_make_skb: pid_tgid: %d, size: %d", pid_tgid, size); + log_debug("kprobe/ip6_make_skb: pid_tgid: %llu, size: %zu", pid_tgid, size); handle_message(&t, size, 0, CONN_DIRECTION_UNKNOWN, 0, 0, PACKET_COUNT_NONE, sk); increment_telemetry_count(udp_send_processed); @@ -496,7 +496,7 @@ static __always_inline int handle_ip_skb(struct sock *sk, size_t size, struct fl if (!read_conn_tuple(&t, sk, pid_tgid, CONN_TYPE_UDP)) { #ifdef COMPILE_PREBUILT if (!are_fl4_offsets_known()) { - log_debug("ERR: src/dst addr not set src:%d,dst:%d. fl4 offsets are not known", t.saddr_l, t.daddr_l); + log_debug("ERR: src/dst addr not set src:%llu,dst:%llu. fl4 offsets are not known", t.saddr_l, t.daddr_l); increment_telemetry_count(udp_send_missed); return 0; } @@ -506,7 +506,7 @@ static __always_inline int handle_ip_skb(struct sock *sk, size_t size, struct fl t.daddr_l = fl4_daddr(fl4); if (!t.saddr_l || !t.daddr_l) { - log_debug("ERR(fl4): src/dst addr not set src:%d,dst:%d", t.saddr_l, t.daddr_l); + log_debug("ERR(fl4): src/dst addr not set src:%llu,dst:%llu", t.saddr_l, t.daddr_l); increment_telemetry_count(udp_send_missed); return 0; } @@ -524,7 +524,7 @@ static __always_inline int handle_ip_skb(struct sock *sk, size_t size, struct fl t.dport = bpf_ntohs(t.dport); } - log_debug("kprobe/ip_make_skb: pid_tgid: %d, size: %d", pid_tgid, size); + log_debug("kprobe/ip_make_skb: pid_tgid: %llu, size: %zu", pid_tgid, size); // segment count is not currently enabled on prebuilt. // to enable, change PACKET_COUNT_NONE => PACKET_COUNT_INCREMENT @@ -666,7 +666,7 @@ int kretprobe__udpv6_recvmsg(struct pt_regs *ctx) { static __always_inline int handle_ret_udp_recvmsg_pre_4_7_0(int copied, void *udp_sock_map) { u64 pid_tgid = bpf_get_current_pid_tgid(); - log_debug("kretprobe/udp_recvmsg: tgid: %u, pid: %u", pid_tgid >> 32, pid_tgid & 0xFFFFFFFF); + log_debug("kretprobe/udp_recvmsg: tgid: %llu, pid: %llu", pid_tgid >> 32, pid_tgid & 0xFFFFFFFF); // Retrieve socket pointer from kprobe via pid/tgid udp_recv_sock_t *st = bpf_map_lookup_elem(udp_sock_map, &pid_tgid); @@ -675,7 +675,7 @@ static __always_inline int handle_ret_udp_recvmsg_pre_4_7_0(int copied, void *ud } if (copied < 0) { // Non-zero values are errors (or a peek) (e.g -EINVAL) - log_debug("kretprobe/udp_recvmsg: ret=%d < 0, pid_tgid=%d", copied, pid_tgid); + log_debug("kretprobe/udp_recvmsg: ret=%d < 0, pid_tgid=%llu", copied, pid_tgid); // Make sure we clean up the key bpf_map_delete_elem(udp_sock_map, &pid_tgid); return 0; @@ -692,13 +692,13 @@ static __always_inline int handle_ret_udp_recvmsg_pre_4_7_0(int copied, void *ud } if (!read_conn_tuple_partial(&t, st->sk, pid_tgid, CONN_TYPE_UDP)) { - log_debug("ERR(kretprobe/udp_recvmsg): error reading conn tuple, pid_tgid=%d", pid_tgid); + log_debug("ERR(kretprobe/udp_recvmsg): error reading conn tuple, pid_tgid=%llu", pid_tgid); bpf_map_delete_elem(udp_sock_map, &pid_tgid); return 0; } bpf_map_delete_elem(udp_sock_map, &pid_tgid); - log_debug("kretprobe/udp_recvmsg: pid_tgid: %d, return: %d", pid_tgid, copied); + log_debug("kretprobe/udp_recvmsg: pid_tgid: %llu, return: %d", pid_tgid, copied); // segment count is not currently enabled on prebuilt. 
// to enable, change PACKET_COUNT_NONE => PACKET_COUNT_INCREMENT handle_message(&t, 0, copied, CONN_DIRECTION_UNKNOWN, 0, 1, PACKET_COUNT_NONE, st->sk); @@ -895,7 +895,7 @@ int kretprobe__tcp_retransmit_skb(struct pt_regs *ctx) { SEC("kprobe/tcp_connect") int kprobe__tcp_connect(struct pt_regs *ctx) { u64 pid_tgid = bpf_get_current_pid_tgid(); - log_debug("kprobe/tcp_connect: tgid: %u, pid: %u", pid_tgid >> 32, pid_tgid & 0xFFFFFFFF); + log_debug("kprobe/tcp_connect: tgid: %llu, pid: %llu", pid_tgid >> 32, pid_tgid & 0xFFFFFFFF); struct sock *skp = (struct sock *)PT_REGS_PARM1(ctx); bpf_map_update_with_telemetry(tcp_ongoing_connect_pid, &skp, &pid_tgid, BPF_ANY); @@ -913,7 +913,7 @@ int kprobe__tcp_finish_connect(struct pt_regs *ctx) { u64 pid_tgid = *pid_tgid_p; bpf_map_delete_elem(&tcp_ongoing_connect_pid, &skp); - log_debug("kprobe/tcp_finish_connect: tgid: %u, pid: %u", pid_tgid >> 32, pid_tgid & 0xFFFFFFFF); + log_debug("kprobe/tcp_finish_connect: tgid: %llu, pid: %llu", pid_tgid >> 32, pid_tgid & 0xFFFFFFFF); conn_tuple_t t = {}; if (!read_conn_tuple(&t, skp, pid_tgid, CONN_TYPE_TCP)) { @@ -936,7 +936,7 @@ int kretprobe__inet_csk_accept(struct pt_regs *ctx) { } u64 pid_tgid = bpf_get_current_pid_tgid(); - log_debug("kretprobe/inet_csk_accept: tgid: %u, pid: %u", pid_tgid >> 32, pid_tgid & 0xFFFFFFFF); + log_debug("kretprobe/inet_csk_accept: tgid: %llu, pid: %llu", pid_tgid >> 32, pid_tgid & 0xFFFFFFFF); conn_tuple_t t = {}; if (!read_conn_tuple(&t, sk, pid_tgid, CONN_TYPE_TCP)) { @@ -1025,7 +1025,7 @@ SEC("kprobe/inet_bind") int kprobe__inet_bind(struct pt_regs *ctx) { struct socket *sock = (struct socket *)PT_REGS_PARM1(ctx); struct sockaddr *addr = (struct sockaddr *)PT_REGS_PARM2(ctx); - log_debug("kprobe/inet_bind: sock=%llx, umyaddr=%x", sock, addr); + log_debug("kprobe/inet_bind: sock=%p, umyaddr=%p", sock, addr); return sys_enter_bind(sock, addr); } @@ -1033,21 +1033,21 @@ SEC("kprobe/inet6_bind") int kprobe__inet6_bind(struct pt_regs *ctx) { struct socket *sock = (struct socket *)PT_REGS_PARM1(ctx); struct sockaddr *addr = (struct sockaddr *)PT_REGS_PARM2(ctx); - log_debug("kprobe/inet6_bind: sock=%llx, umyaddr=%x", sock, addr); + log_debug("kprobe/inet6_bind: sock=%p, umyaddr=%p", sock, addr); return sys_enter_bind(sock, addr); } SEC("kretprobe/inet_bind") int kretprobe__inet_bind(struct pt_regs *ctx) { __s64 ret = PT_REGS_RC(ctx); - log_debug("kretprobe/inet_bind: ret=%d", ret); + log_debug("kretprobe/inet_bind: ret=%lld", ret); return sys_exit_bind(ret); } SEC("kretprobe/inet6_bind") int kretprobe__inet6_bind(struct pt_regs *ctx) { __s64 ret = PT_REGS_RC(ctx); - log_debug("kretprobe/inet6_bind: ret=%d", ret); + log_debug("kretprobe/inet6_bind: ret=%lld", ret); return sys_exit_bind(ret); } diff --git a/pkg/network/ebpf/c/tracer/bind.h b/pkg/network/ebpf/c/tracer/bind.h index 4d6d27b7c15a3..67a01a2da4f4c 100644 --- a/pkg/network/ebpf/c/tracer/bind.h +++ b/pkg/network/ebpf/c/tracer/bind.h @@ -28,14 +28,14 @@ static __always_inline int sys_enter_bind(struct socket *sock, struct sockaddr * } if (addr == NULL) { - log_debug("sys_enter_bind: could not read sockaddr, sock=%llx, tid=%u", sock, tid); + log_debug("sys_enter_bind: could not read sockaddr, sock=%p, tid=%llu", sock, tid); return 0; } // ignore binds to port 0, as these are most // likely from clients, not servers if (sockaddr_sin_port(addr) == 0) { - log_debug("sys_enter_bind: ignoring bind to 0 port, sock=%llx", sock); + log_debug("sys_enter_bind: ignoring bind to 0 port, sock=%p", sock); return 0; } @@ -50,7 +50,7 @@ 
static __always_inline int sys_enter_bind(struct socket *sock, struct sockaddr * args.addr = addr; bpf_map_update_with_telemetry(pending_bind, &tid, &args, BPF_ANY); - log_debug("sys_enter_bind: started a bind on UDP sock=%llx tid=%u", sock, tid); + log_debug("sys_enter_bind: started a bind on UDP sock=%p tid=%llu", sock, tid); return 0; } @@ -61,7 +61,7 @@ static __always_inline int sys_exit_bind(__s64 ret) { // bail if this bind() is not the one we're instrumenting bind_syscall_args_t *args = bpf_map_lookup_elem(&pending_bind, &tid); - log_debug("sys_exit_bind: tid=%u, ret=%d", tid, ret); + log_debug("sys_exit_bind: tid=%llu, ret=%lld", tid, ret); if (args == NULL) { log_debug("sys_exit_bind: was not a UDP bind, will not process"); diff --git a/pkg/network/ebpf/c/tracer/tcp_recv.h b/pkg/network/ebpf/c/tracer/tcp_recv.h index 3a2270c02ebe6..9023a706afa7d 100644 --- a/pkg/network/ebpf/c/tracer/tcp_recv.h +++ b/pkg/network/ebpf/c/tracer/tcp_recv.h @@ -46,7 +46,7 @@ int kprobe__tcp_recvmsg__pre_5_19_0(struct pt_regs *ctx) { SEC("kprobe/tcp_recvmsg") int kprobe__tcp_recvmsg__pre_4_1_0(struct pt_regs* ctx) { u64 pid_tgid = bpf_get_current_pid_tgid(); - log_debug("kprobe/tcp_recvmsg: pid_tgid: %d", pid_tgid); + log_debug("kprobe/tcp_recvmsg: pid_tgid: %llu", pid_tgid); int flags = (int)PT_REGS_PARM6(ctx); if (flags & MSG_PEEK) { return 0; From f5c61d2690eb49af8471c294b6120910ebee443d Mon Sep 17 00:00:00 2001 From: AliDatadog <125997632+AliDatadog@users.noreply.github.com> Date: Fri, 12 Apr 2024 12:31:59 +0200 Subject: [PATCH 10/99] [CONTINT-4069] Fix flaky sbom queue tests (#24617) * Pass the map of collectors as argument for the scanner * increase timeout --- pkg/sbom/scanner/scanner.go | 18 ++++++++++-------- pkg/sbom/scanner/scanner_test.go | 21 ++++++++++++--------- 2 files changed, 22 insertions(+), 17 deletions(-) diff --git a/pkg/sbom/scanner/scanner.go b/pkg/sbom/scanner/scanner.go index ec7bb5a719fc4..c35bab704b835 100644 --- a/pkg/sbom/scanner/scanner.go +++ b/pkg/sbom/scanner/scanner.go @@ -45,12 +45,13 @@ type Scanner struct { // It cannot be cleaned when a scan is running cacheMutex sync.Mutex - wmeta optional.Option[workloadmeta.Component] + wmeta optional.Option[workloadmeta.Component] + collectors map[string]collectors.Collector } // NewScanner creates a new SBOM scanner. Call Start to start the store and its // collectors. 
-func NewScanner(cfg config.Config, wmeta optional.Option[workloadmeta.Component]) *Scanner { +func NewScanner(cfg config.Config, collectors map[string]collectors.Collector, wmeta optional.Option[workloadmeta.Component]) *Scanner { return &Scanner{ scanQueue: workqueue.NewRateLimitingQueueWithConfig( workqueue.NewItemExponentialFailureRateLimiter( @@ -62,8 +63,9 @@ func NewScanner(cfg config.Config, wmeta optional.Option[workloadmeta.Component] MetricsProvider: telemetry.QueueMetricProvider{}, }, ), - disk: filesystem.NewDisk(), - wmeta: wmeta, + disk: filesystem.NewDisk(), + wmeta: wmeta, + collectors: collectors, } } @@ -85,7 +87,7 @@ func CreateGlobalScanner(cfg config.Config, wmeta optional.Option[workloadmeta.C } } - globalScanner = NewScanner(cfg, wmeta) + globalScanner = NewScanner(cfg, collectors.Collectors, wmeta) return globalScanner, nil } @@ -158,7 +160,7 @@ func (s *Scanner) startCacheCleaner(ctx context.Context) { case <-cleanTicker.C: s.cacheMutex.Lock() log.Debug("cleaning SBOM cache") - for _, collector := range collectors.Collectors { + for _, collector := range s.collectors { if err := collector.CleanCache(); err != nil { _ = log.Warnf("could not clean SBOM cache: %v", err) } @@ -192,7 +194,7 @@ func (s *Scanner) startScanRequestHandler(ctx context.Context) { s.handleScanRequest(ctx, r) s.scanQueue.Done(r) } - for _, collector := range collectors.Collectors { + for _, collector := range s.collectors { collector.Shutdown() } }() @@ -207,7 +209,7 @@ func (s *Scanner) handleScanRequest(ctx context.Context, r interface{}) { } telemetry.SBOMAttempts.Inc(request.Collector(), request.Type()) - collector, ok := collectors.Collectors[request.Collector()] + collector, ok := s.collectors[request.Collector()] if !ok { _ = log.Errorf("invalid collector '%s'", request.Collector()) s.scanQueue.Forget(request) diff --git a/pkg/sbom/scanner/scanner_test.go b/pkg/sbom/scanner/scanner_test.go index 699b795334b88..269c87241aff4 100644 --- a/pkg/sbom/scanner/scanner_test.go +++ b/pkg/sbom/scanner/scanner_test.go @@ -100,9 +100,8 @@ func TestRetryLogic_Error(t *testing.T) { mockCollector.On("Scan", mock.Anything, mock.Anything).Return(errorResult).Twice() mockCollector.On("Scan", mock.Anything, mock.Anything).Return(expectedResult).Once() mockCollector.On("Channel").Return(resultCh) - mockCollector.On("Shutdown") + shutdown := mockCollector.On("Shutdown") mockCollector.On("Type").Return(collectors.ContainerImageScanType) - collectors.RegisterCollector(collName, mockCollector) // Set up the configuration as the default one is too slow cfg := config.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) @@ -110,7 +109,7 @@ func TestRetryLogic_Error(t *testing.T) { cfg.Set("sbom.scan_queue.max_backoff", "3s", model.SourceAgentRuntime) // Create a scanner and start it - scanner := NewScanner(cfg, optional.NewOption[workloadmeta.Component](workloadmetaStore)) + scanner := NewScanner(cfg, map[string]collectors.Collector{collName: mockCollector}, optional.NewOption[workloadmeta.Component](workloadmetaStore)) ctx, cancel := context.WithCancel(context.Background()) scanner.Start(ctx) @@ -135,6 +134,8 @@ func TestRetryLogic_Error(t *testing.T) { } cancel() + // Ensure the collector is stopped + shutdown.WaitUntil(time.After(5 * time.Second)) } func TestRetryLogic_ImageDeleted(t *testing.T) { @@ -165,9 +166,8 @@ func TestRetryLogic_ImageDeleted(t *testing.T) { mockCollector.On("Options").Return(sbom.ScanOptions{}) mockCollector.On("Scan", mock.Anything, mock.Anything).Return(errorResult).Twice() 
mockCollector.On("Channel").Return(resultCh) - mockCollector.On("Shutdown") + shutdown := mockCollector.On("Shutdown") mockCollector.On("Type").Return(collectors.ContainerImageScanType) - collectors.RegisterCollector(collName, mockCollector) // Set up the configuration as the default one is too slow cfg := config.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) @@ -175,7 +175,7 @@ func TestRetryLogic_ImageDeleted(t *testing.T) { cfg.Set("sbom.scan_queue.max_backoff", "3s", model.SourceAgentRuntime) // Create a scanner and start it - scanner := NewScanner(cfg, optional.NewOption[workloadmeta.Component](workloadmetaStore)) + scanner := NewScanner(cfg, map[string]collectors.Collector{collName: mockCollector}, optional.NewOption[workloadmeta.Component](workloadmetaStore)) ctx, cancel := context.WithCancel(context.Background()) scanner.Start(ctx) @@ -199,6 +199,8 @@ func TestRetryLogic_ImageDeleted(t *testing.T) { } }, 15*time.Second, 1*time.Second) cancel() + // Ensure the collector is stopped + shutdown.WaitUntil(time.After(5 * time.Second)) } func TestRetryLogic_Host(t *testing.T) { @@ -210,9 +212,8 @@ func TestRetryLogic_Host(t *testing.T) { mockCollector.On("Options").Return(sbom.ScanOptions{}) mockCollector.On("Scan", mock.Anything, mock.Anything).Return(errorResult).Twice() mockCollector.On("Channel").Return(resultCh) - mockCollector.On("Shutdown") + shutdown := mockCollector.On("Shutdown") mockCollector.On("Type").Return(collectors.HostScanType) - collectors.RegisterCollector(collName, mockCollector) // Set up the configuration as the default one is too slow cfg := config.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) @@ -220,7 +221,7 @@ func TestRetryLogic_Host(t *testing.T) { cfg.Set("sbom.scan_queue.max_backoff", "3s", model.SourceAgentRuntime) // Create a scanner and start it - scanner := NewScanner(cfg, optional.NewNoneOption[workloadmeta.Component]()) + scanner := NewScanner(cfg, map[string]collectors.Collector{collName: mockCollector}, optional.NewNoneOption[workloadmeta.Component]()) ctx, cancel := context.WithCancel(context.Background()) scanner.Start(ctx) @@ -239,4 +240,6 @@ func TestRetryLogic_Host(t *testing.T) { case <-time.After(4 * time.Second): } cancel() + // Ensure the collector is stopped + shutdown.WaitUntil(time.After(5 * time.Second)) } From ab5ed913c1bafff303122bded045be62077b6e46 Mon Sep 17 00:00:00 2001 From: Jeremy Hanna Date: Fri, 12 Apr 2024 06:56:35 -0400 Subject: [PATCH 11/99] [ASCII-1158] Add e2e test for /agent/version and /agent/flare api routes (#24616) * Add e2e tests for some internal agent api routes * Add copyrights for linter * Embed the endpoint struct for cleaner test cases in api e2e test * Fix the conditioinal logic when api e2e testcase does not have data --- .../agent-shared-components/api/api_test.go | 115 ++++++++++++++++++ 1 file changed, 115 insertions(+) create mode 100644 test/new-e2e/tests/agent-shared-components/api/api_test.go diff --git a/test/new-e2e/tests/agent-shared-components/api/api_test.go b/test/new-e2e/tests/agent-shared-components/api/api_test.go new file mode 100644 index 0000000000000..b8e6c00ef4174 --- /dev/null +++ b/test/new-e2e/tests/agent-shared-components/api/api_test.go @@ -0,0 +1,115 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package api + +import ( + "fmt" + "net" + "net/url" + "strconv" + "strings" + "testing" + "time" + + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" + awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/host" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + agentCmdPort = 5001 +) + +type apiSuite struct { + e2e.BaseSuite[environments.Host] +} + +func TestApiSuite(t *testing.T) { + e2e.Run(t, &apiSuite{}, e2e.WithProvisioner(awshost.ProvisionerNoFakeIntake())) +} + +type agentEndpointInfo struct { + name string + scheme string + port int + endpoint string + method string + data string +} + +func (endpointInfo *agentEndpointInfo) url() *url.URL { + return &url.URL{ + Scheme: endpointInfo.scheme, + Host: net.JoinHostPort("localhost", strconv.Itoa(endpointInfo.port)), + Path: endpointInfo.endpoint, + } +} + +func (endpointInfo *agentEndpointInfo) fetchCommand(authtoken string) string { + data := endpointInfo.data + if len(endpointInfo.data) == 0 { + data = "{}" + } + + // -s: silent so we don't show auth token in output + // -k: allow insecure server connections since we self-sign the TLS cert + // -H: add a header with the auth token + // -X: http request method + // -d: request data (json) + return fmt.Sprintf( + `curl -s -k -H "authorization: Bearer %s" -X %s "%s" -d "%s"`, + authtoken, + endpointInfo.method, + endpointInfo.url().String(), + data, + ) +} + +func (v *apiSuite) TestInternalAgentAPIEndpoints() { + testcases := []struct { + agentEndpointInfo + want string + }{ + { + agentEndpointInfo: agentEndpointInfo{ + name: "version", + scheme: "https", + port: agentCmdPort, + endpoint: "/agent/version", + method: "GET", + data: "", + }, + want: `"Major":7,"Minor":5`, + }, + { + agentEndpointInfo: agentEndpointInfo{ + name: "flare", + scheme: "https", + port: agentCmdPort, + endpoint: "/agent/flare", + method: "POST", + data: "{}", + }, + want: `Z-info.zip`, + }, + } + + authTokenFilePath := "/etc/datadog-agent/auth_token" + authtokenContent := v.Env().RemoteHost.MustExecute("sudo cat " + authTokenFilePath) + authtoken := strings.TrimSpace(authtokenContent) + + for _, tc := range testcases { + cmd := tc.fetchCommand(authtoken) + host := v.Env().RemoteHost + require.EventuallyWithT(v.T(), func(t *assert.CollectT) { + resp, err := host.Execute(cmd) + require.NoError(t, err) + assert.Contains(t, resp, tc.want, "%s %s returned: %s, wanted: %s", tc.method, tc.endpoint, resp, tc.want) + }, 2*time.Minute, 10*time.Second) + } +} From d00770f293f42df6bdd4ca9235d6eaba6907dd8e Mon Sep 17 00:00:00 2001 From: Arthur Bellal Date: Fri, 12 Apr 2024 13:52:01 +0200 Subject: [PATCH 12/99] (fleet) add OCI registry auth to the installer (#24603) * (fleet) add OCI registry auth to the installer Add credential helpers to the OCI downloader in the installer. 
We will use in order: - The docker credential file - GCP metadata endpoints - AWS metadata endpoints * license & tidy * make the auth configurable * fix * default --- .copyright-overrides.yml | 1 + LICENSE-3rdparty.csv | 12 ++++++++ go.mod | 29 ++++++++++-------- go.sum | 58 ++++++++++++++++++++---------------- pkg/config/setup/config.go | 1 + pkg/updater/download.go | 19 ++++++++++-- pkg/updater/download_test.go | 8 +++-- pkg/updater/updater.go | 2 +- 8 files changed, 86 insertions(+), 44 deletions(-) diff --git a/.copyright-overrides.yml b/.copyright-overrides.yml index e740f8e6ffdc2..b223ed57feabe 100644 --- a/.copyright-overrides.yml +++ b/.copyright-overrides.yml @@ -20,6 +20,7 @@ github.com/GoogleCloudPlatform/docker-credential-gcr: Copyright 2016 Google, Inc github.com/aws/aws-lambda-go: Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. github.com/aws/aws-sdk-go: ["Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.", "Copyright 2014-2015 Stripe, Inc."] github.com/aws/aws-sdk-go-v2: ["Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.", "Copyright 2014-2015 Stripe, Inc."] +github.com/awslabs/amazon-ecr-credential-helper/*: "Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved." github.com/containerd/*: Copyright 2012-2015 Docker, Inc. github.com/coreos/*: Copyright 2017 CoreOS, Inc github.com/docker/*: Copyright 2012-2017 Docker, Inc. diff --git a/LICENSE-3rdparty.csv b/LICENSE-3rdparty.csv index 6e75c045ad361..917ae8882a640 100644 --- a/LICENSE-3rdparty.csv +++ b/LICENSE-3rdparty.csv @@ -473,6 +473,12 @@ core,github.com/aws/aws-sdk-go-v2/service/ebs/types,Apache-2.0,"Copyright 2014-2 core,github.com/aws/aws-sdk-go-v2/service/ec2,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." core,github.com/aws/aws-sdk-go-v2/service/ec2/internal/endpoints,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." core,github.com/aws/aws-sdk-go-v2/service/ec2/types,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." +core,github.com/aws/aws-sdk-go-v2/service/ecr,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." +core,github.com/aws/aws-sdk-go-v2/service/ecr/internal/endpoints,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." +core,github.com/aws/aws-sdk-go-v2/service/ecr/types,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." +core,github.com/aws/aws-sdk-go-v2/service/ecrpublic,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." +core,github.com/aws/aws-sdk-go-v2/service/ecrpublic/internal/endpoints,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." +core,github.com/aws/aws-sdk-go-v2/service/ecrpublic/types,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." core,github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." 
core,github.com/aws/aws-sdk-go-v2/service/internal/presigned-url,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." core,github.com/aws/aws-sdk-go-v2/service/kms,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." @@ -565,6 +571,11 @@ core,github.com/aws/smithy-go/time,Apache-2.0,"Copyright Amazon.com, Inc. or its core,github.com/aws/smithy-go/transport/http,Apache-2.0,"Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved" core,github.com/aws/smithy-go/transport/http/internal/io,Apache-2.0,"Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved" core,github.com/aws/smithy-go/waiter,Apache-2.0,"Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved" +core,github.com/awslabs/amazon-ecr-credential-helper/ecr-login,Apache-2.0,"Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved." +core,github.com/awslabs/amazon-ecr-credential-helper/ecr-login/api,Apache-2.0,"Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved." +core,github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cache,Apache-2.0,"Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved." +core,github.com/awslabs/amazon-ecr-credential-helper/ecr-login/config,Apache-2.0,"Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved." +core,github.com/awslabs/amazon-ecr-credential-helper/ecr-login/version,Apache-2.0,"Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved." core,github.com/bahlo/generic-list-go,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved core,github.com/beevik/ntp,BSD-2-Clause,Anton Tolchanov (knyar) | Ask Bjørn Hansen (abh) | Brett Vickers (beevik) | Christopher Batey (chbatey) | Copyright 2015-2017 Brett Vickers. All rights reserved | Leonid Evdokimov (darkk) | Meng Zhuo (mengzhuo) | Mikhail Salosin (AlphaB) core,github.com/benbjohnson/clock,MIT,Copyright (c) 2014 Ben Johnson @@ -1017,6 +1028,7 @@ core,github.com/google/go-containerregistry/pkg/logs,Apache-2.0,Copyright 2018 G core,github.com/google/go-containerregistry/pkg/name,Apache-2.0,Copyright 2018 Google LLC All Rights Reserved. | Copyright 2019 Google LLC All Rights Reserved. | Copyright 2020 Google LLC All Rights Reserved. | Copyright 2021 Google LLC All Rights Reserved. | Copyright 2022 Google LLC All Rights Reserved. core,github.com/google/go-containerregistry/pkg/v1,Apache-2.0,Copyright 2018 Google LLC All Rights Reserved. | Copyright 2019 Google LLC All Rights Reserved. | Copyright 2020 Google LLC All Rights Reserved. | Copyright 2021 Google LLC All Rights Reserved. | Copyright 2022 Google LLC All Rights Reserved. core,github.com/google/go-containerregistry/pkg/v1/empty,Apache-2.0,Copyright 2018 Google LLC All Rights Reserved. | Copyright 2019 Google LLC All Rights Reserved. | Copyright 2020 Google LLC All Rights Reserved. | Copyright 2021 Google LLC All Rights Reserved. | Copyright 2022 Google LLC All Rights Reserved. +core,github.com/google/go-containerregistry/pkg/v1/google,Apache-2.0,Copyright 2018 Google LLC All Rights Reserved. | Copyright 2019 Google LLC All Rights Reserved. | Copyright 2020 Google LLC All Rights Reserved. | Copyright 2021 Google LLC All Rights Reserved. | Copyright 2022 Google LLC All Rights Reserved. core,github.com/google/go-containerregistry/pkg/v1/layout,Apache-2.0,Copyright 2018 Google LLC All Rights Reserved. 
| Copyright 2019 Google LLC All Rights Reserved. | Copyright 2020 Google LLC All Rights Reserved. | Copyright 2021 Google LLC All Rights Reserved. | Copyright 2022 Google LLC All Rights Reserved. core,github.com/google/go-containerregistry/pkg/v1/match,Apache-2.0,Copyright 2018 Google LLC All Rights Reserved. | Copyright 2019 Google LLC All Rights Reserved. | Copyright 2020 Google LLC All Rights Reserved. | Copyright 2021 Google LLC All Rights Reserved. | Copyright 2022 Google LLC All Rights Reserved. core,github.com/google/go-containerregistry/pkg/v1/mutate,Apache-2.0,Copyright 2018 Google LLC All Rights Reserved. | Copyright 2019 Google LLC All Rights Reserved. | Copyright 2020 Google LLC All Rights Reserved. | Copyright 2021 Google LLC All Rights Reserved. | Copyright 2022 Google LLC All Rights Reserved. diff --git a/go.mod b/go.mod index 69dc4a1a776d9..c24c970730b67 100644 --- a/go.mod +++ b/go.mod @@ -353,20 +353,20 @@ require ( github.com/armon/go-metrics v0.4.1 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/awalterschulze/gographviz v2.0.3+incompatible // indirect - github.com/aws/aws-sdk-go-v2 v1.25.2 - github.com/aws/aws-sdk-go-v2/config v1.27.4 - github.com/aws/aws-sdk-go-v2/credentials v1.17.4 - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.2 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.2 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.2 // indirect + github.com/aws/aws-sdk-go-v2 v1.26.1 + github.com/aws/aws-sdk-go-v2/config v1.27.11 + github.com/aws/aws-sdk-go-v2/credentials v1.17.11 + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect github.com/aws/aws-sdk-go-v2/service/ebs v1.21.7 // indirect github.com/aws/aws-sdk-go-v2/service/ec2 v1.149.1 - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.2 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.20.1 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.1 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.28.1 // indirect - github.com/aws/smithy-go v1.20.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.20.5 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.28.6 // indirect + github.com/aws/smithy-go v1.20.2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40 // indirect github.com/briandowns/spinner v1.23.0 // indirect @@ -383,7 +383,7 @@ require ( github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/docker/cli v25.0.5+incompatible // indirect github.com/docker/distribution v2.8.3+incompatible // indirect - github.com/docker/docker-credential-helpers v0.8.0 // indirect + github.com/docker/docker-credential-helpers v0.8.1 // indirect github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect @@ -673,6 +673,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/kms v1.27.7 github.com/aws/aws-sdk-go-v2/service/rds v1.73.0 
github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.26.0 + github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20240409155312-26d1ea377073 github.com/cloudfoundry-community/go-cfclient/v2 v2.0.1-0.20230503155151-3d15366c5820 github.com/containerd/cgroups/v3 v3.0.2 github.com/containerd/typeurl/v2 v2.1.1 @@ -717,7 +718,9 @@ require ( github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092 // indirect github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect github.com/aquasecurity/trivy-java-db v0.0.0-20240109071736-184bd7481d48 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1 // indirect + github.com/aws/aws-sdk-go-v2/service/ecr v1.27.4 // indirect + github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.23.4 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 // indirect github.com/bahlo/generic-list-go v0.2.0 // indirect github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect github.com/bitnami/go-version v0.0.0-20231130084017-bb00604d650c // indirect diff --git a/go.sum b/go.sum index d23f269b158ea..fe7b9b56b6140 100644 --- a/go.sum +++ b/go.sum @@ -844,27 +844,27 @@ github.com/aws/aws-sdk-go v1.49.21 h1:Rl8KW6HqkwzhATwvXhyr7vD4JFUMi7oXGAw9SrxxIF github.com/aws/aws-sdk-go v1.49.21/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= github.com/aws/aws-sdk-go-v2 v1.21.2/go.mod h1:ErQhvNuEMhJjweavOYhxVkn2RUx7kQXVATHrjKtxIpM= -github.com/aws/aws-sdk-go-v2 v1.25.2 h1:/uiG1avJRgLGiQM9X3qJM8+Qa6KRGK5rRPuXE0HUM+w= -github.com/aws/aws-sdk-go-v2 v1.25.2/go.mod h1:Evoc5AsmtveRt1komDwIsjHFyrP5tDuF1D1U+6z6pNo= +github.com/aws/aws-sdk-go-v2 v1.26.1 h1:5554eUqIYVWpU0YmeeYZ0wU64H2VLBs8TlhRB2L+EkA= +github.com/aws/aws-sdk-go-v2 v1.26.1/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.14/go.mod h1:9NCTOURS8OpxvoAVHq79LK81/zC78hfRWFn+aL0SPcY= github.com/aws/aws-sdk-go-v2/config v1.8.3/go.mod h1:4AEiLtAb8kLs7vgw2ZV3p2VZ1+hBavOc84hqxVNpCyw= github.com/aws/aws-sdk-go-v2/config v1.19.0/go.mod h1:ZwDUgFnQgsazQTnWfeLWk5GjeqTQTL8lMkoE1UXzxdE= -github.com/aws/aws-sdk-go-v2/config v1.27.4 h1:AhfWb5ZwimdsYTgP7Od8E9L1u4sKmDW2ZVeLcf2O42M= -github.com/aws/aws-sdk-go-v2/config v1.27.4/go.mod h1:zq2FFXK3A416kiukwpsd+rD4ny6JC7QSkp4QdN1Mp2g= +github.com/aws/aws-sdk-go-v2/config v1.27.11 h1:f47rANd2LQEYHda2ddSCKYId18/8BhSRM4BULGmfgNA= +github.com/aws/aws-sdk-go-v2/config v1.27.11/go.mod h1:SMsV78RIOYdve1vf36z8LmnszlRWkwMQtomCAI0/mIE= github.com/aws/aws-sdk-go-v2/credentials v1.4.3/go.mod h1:FNNC6nQZQUuyhq5aE5c7ata8o9e4ECGmS4lAXC7o1mQ= github.com/aws/aws-sdk-go-v2/credentials v1.13.43/go.mod h1:zWJBz1Yf1ZtX5NGax9ZdNjhhI4rgjfgsyk6vTY1yfVg= -github.com/aws/aws-sdk-go-v2/credentials v1.17.4 h1:h5Vztbd8qLppiPwX+y0Q6WiwMZgpd9keKe2EAENgAuI= -github.com/aws/aws-sdk-go-v2/credentials v1.17.4/go.mod h1:+30tpwrkOgvkJL1rUZuRLoxcJwtI/OkeBLYnHxJtVe0= +github.com/aws/aws-sdk-go-v2/credentials v1.17.11 h1:YuIB1dJNf1Re822rriUOTxopaHHvIq0l/pX3fwO+Tzs= +github.com/aws/aws-sdk-go-v2/credentials v1.17.11/go.mod h1:AQtFPsDH9bI2O+71anW6EKL+NcD7LG3dpKGMV4SShgo= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0/go.mod h1:gqlclDEZp4aqJOancXK6TN24aKhT0W0Ae9MHk3wzTMM= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13/go.mod h1:f/Ib/qYjhV2/qdsf79H3QP/eRE4AkVyEf6sk7XfZ1tg= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.2 
h1:AK0J8iYBFeUk2Ax7O8YpLtFsfhdOByh2QIkHmigpRYk= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.2/go.mod h1:iRlGzMix0SExQEviAyptRWRGdYNo3+ufW/lCzvKVTUc= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 h1:FVJ0r5XTHSmIHJV6KuDmdYhEpvlHpiSd38RQWhut5J4= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1/go.mod h1:zusuAeqezXzAB24LGuzuekqMAEgWkVYukBec3kr3jUg= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43/go.mod h1:auo+PiyLl0n1l8A0e8RIeR8tOzYPfZZH/JNlrJ8igTQ= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.2 h1:bNo4LagzUKbjdxE0tIcR9pMzLR2U/Tgie1Hq1HQ3iH8= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.2/go.mod h1:wRQv0nN6v9wDXuWThpovGQjqF1HFdcgWjporw14lS8k= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 h1:aw39xVGeRWlWx9EzGVnhOR4yOjQDHPQ6o6NmBlscyQg= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5/go.mod h1:FSaRudD0dXiMPK2UjknVwwTYyZMRsHv3TtkabsZih5I= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37/go.mod h1:Qe+2KtKml+FEsQF/DHmDV+xjtche/hwoF75EG4UlHW8= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.2 h1:EtOU5jsPdIQNP+6Q2C5e3d65NKT1PeCiQk+9OdzO12Q= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.2/go.mod h1:tyF5sKccmDz0Bv4NrstEr+/9YkSPJHrcO7UsUKf7pWM= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 h1:PG1F3OD1szkuQPzDw3CIQsRIrtTlUC3lP84taWzHlq0= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5/go.mod h1:jU1li6RFryMz+so64PpKtudI+QzbKoIEivqdf6LNpOc= github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4/go.mod h1:ZcBrrI3zBKlhGFNYWvju0I3TR93I7YIgAfy82Fh4lcQ= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45/go.mod h1:lD5M20o09/LCuQ2mE62Mb/iSdSlCNuj6H5ci7tW7OsE= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU= @@ -875,14 +875,18 @@ github.com/aws/aws-sdk-go-v2/service/ebs v1.21.7 h1:CRzzXjmgx9p362yO39D6hbZULdMI github.com/aws/aws-sdk-go-v2/service/ebs v1.21.7/go.mod h1:wnsHqpi3RgDwklS5SPHUgjcUUpontGPKJ+GJYOdV7pY= github.com/aws/aws-sdk-go-v2/service/ec2 v1.149.1 h1:OGZUMBYZnz+R5nkW6FS1J8UlfLeM/pKojck+74+ZQGY= github.com/aws/aws-sdk-go-v2/service/ec2 v1.149.1/go.mod h1:XxJNg7fIkR8cbm89i0zVZSxKpcPYsC8BWRwMIJOWbnk= +github.com/aws/aws-sdk-go-v2/service/ecr v1.27.4 h1:Qr9W21mzWT3RhfYn9iAux7CeRIdbnTAqmiOlASqQgZI= +github.com/aws/aws-sdk-go-v2/service/ecr v1.27.4/go.mod h1:if7ybzzjOmDB8pat9FE35AHTY6ZxlYSy3YviSmFZv8c= +github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.23.4 h1:aNuiieMaS2IHxqAsTdM/pjHyY1aoaDLBGLqpNnFMMqk= +github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.23.4/go.mod h1:8pvvNAklmq+hKmqyvFoMRg0bwg9sdGOvdwximmKiKP0= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.15/go.mod h1:26SQUPcTNgV1Tapwdt4a1rOsYRsnBsJHLMPoxK2b0d8= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1 h1:EyBZibRTVAs6ECHZOw5/wlylS9OcTzwyjeQMudmREjE= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1/go.mod h1:JKpmtYhhPs7D97NL/ltqz7yCkERFW5dOlHyVl66ZYF8= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 h1:Ji0DY1xUsUr3I8cHps0G+XM3WWU16lP6yG8qu1GAZAs= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2/go.mod h1:5CsjAbs3NlGQyZNFACh+zztPDI7fU6eW9QsxjfnuBKg= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.38/go.mod h1:epIZoRSSbRIwLPJU5F+OldHhwZPBdpDeQkRdCeY3+00= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2/go.mod h1:72HRZDLMtmVQiLG2tLfQcaWLCssELvGl+Zf2WVxMmR8= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.37/go.mod 
h1:vBmDnwWXWxNPFRMmG2m/3MKOe+xEcMDo1tanpaWCcck= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.2 h1:5ffmXjPtwRExp1zc7gENLgCPyHFbhEPwVTkTiH9niSk= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.2/go.mod h1:Ru7vg1iQ7cR4i7SZ/JTLYN9kaXtbL69UdgG0OQWQxW0= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 h1:ogRAwT1/gxJBcSWDMZlgyFUM962F51A5CRhDLbxLdmo= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7/go.mod h1:YCsIZhXfRPLFFCl5xxY+1T9RKzOKjCut+28JSX2DnAk= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.6/go.mod h1:lnc2taBsR9nTlz9meD+lhFZZ9EWY712QHrRflWpTcOA= github.com/aws/aws-sdk-go-v2/service/kms v1.27.7 h1:wN7AN7iOiAgT9HmdifZNSvbr6S7gSpLjSSOQHIaGmFc= github.com/aws/aws-sdk-go-v2/service/kms v1.27.7/go.mod h1:D9FVDkZjkZnnFHymJ3fPVz0zOUlNSd0xcIIVmmrAac8= @@ -893,19 +897,21 @@ github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.26.0 h1:dPCRgAL4WD9tSMaDg github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.26.0/go.mod h1:4Ae1NCLK6ghmjzd45Tc33GgCKhUWD2ORAlULtMO1Cbs= github.com/aws/aws-sdk-go-v2/service/sso v1.4.2/go.mod h1:NBvT9R1MEF+Ud6ApJKM0G+IkPchKS7p7c2YPKwHmBOk= github.com/aws/aws-sdk-go-v2/service/sso v1.15.2/go.mod h1:gsL4keucRCgW+xA85ALBpRFfdSLH4kHOVSnLMSuBECo= -github.com/aws/aws-sdk-go-v2/service/sso v1.20.1 h1:utEGkfdQ4L6YW/ietH7111ZYglLJvS+sLriHJ1NBJEQ= -github.com/aws/aws-sdk-go-v2/service/sso v1.20.1/go.mod h1:RsYqzYr2F2oPDdpy+PdhephuZxTfjHQe7SOBcZGoAU8= +github.com/aws/aws-sdk-go-v2/service/sso v1.20.5 h1:vN8hEbpRnL7+Hopy9dzmRle1xmDc7o8tmY0klsr175w= +github.com/aws/aws-sdk-go-v2/service/sso v1.20.5/go.mod h1:qGzynb/msuZIE8I75DVRCUXw3o3ZyBmUvMwQ2t/BrGM= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.3/go.mod h1:a7bHA82fyUXOm+ZSWKU6PIoBxrjSprdLoM8xPYvzYVg= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.1 h1:9/GylMS45hGGFCcMrUZDVayQE1jYSIN6da9jo7RAYIw= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.1/go.mod h1:YjAPFn4kGFqKC54VsHs5fn5B6d+PCY2tziEa3U/GB5Y= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4 h1:Jux+gDDyi1Lruk+KHF91tK2KCuY61kzoCpvtvJJBtOE= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4/go.mod h1:mUYPBhaF2lGiukDEjJX2BLRRKTmoUSitGDUgM4tRxak= github.com/aws/aws-sdk-go-v2/service/sts v1.7.2/go.mod h1:8EzeIqfWt2wWT4rJVu3f21TfrhJ8AEMzVybRNSb/b4g= github.com/aws/aws-sdk-go-v2/service/sts v1.23.2/go.mod h1:Eows6e1uQEsc4ZaHANmsPRzAKcVDrcmjjWiih2+HUUQ= -github.com/aws/aws-sdk-go-v2/service/sts v1.28.1 h1:3I2cBEYgKhrWlwyZgfpSO2BpaMY1LHPqXYk/QGlu2ew= -github.com/aws/aws-sdk-go-v2/service/sts v1.28.1/go.mod h1:uQ7YYKZt3adCRrdCBREm1CD3efFLOUNH77MrUCvx5oA= +github.com/aws/aws-sdk-go-v2/service/sts v1.28.6 h1:cwIxeBttqPN3qkaAjcEcsh8NYr8n2HZPkcKgPAi1phU= +github.com/aws/aws-sdk-go-v2/service/sts v1.28.6/go.mod h1:FZf1/nKNEkHdGGJP/cI2MoIMquumuRK6ol3QQJNDxmw= github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/aws/smithy-go v1.15.0/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= -github.com/aws/smithy-go v1.20.1 h1:4SZlSlMr36UEqC7XOyRVb27XMeZubNcBNN+9IgEPIQw= -github.com/aws/smithy-go v1.20.1/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= +github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q= +github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= +github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20240409155312-26d1ea377073 h1:9XtHL16FtbSDAedz9AnboTDqfKacYqc5BmwtUxzwwD8= +github.com/awslabs/amazon-ecr-credential-helper/ecr-login 
v0.0.0-20240409155312-26d1ea377073/go.mod h1:2nlYPkG0rFrODp6R875pk/kOnB8Ivj3+onhzk2mO57g= github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk= github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg= github.com/beevik/ntp v0.3.0 h1:xzVrPrE4ziasFXgBVBZJDP0Wg/KpMwk2KHJ4Ba8GrDw= @@ -1082,8 +1088,8 @@ github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBi github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8= -github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40= +github.com/docker/docker-credential-helpers v0.8.1 h1:j/eKUktUltBtMzKqmfLB0PAgqYyMHOp5vfsD1807oKo= +github.com/docker/docker-credential-helpers v0.8.1/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= diff --git a/pkg/config/setup/config.go b/pkg/config/setup/config.go index 85419ff93ce6f..65e19221f41fa 100644 --- a/pkg/config/setup/config.go +++ b/pkg/config/setup/config.go @@ -1337,6 +1337,7 @@ func InitConfig(config pkgconfigmodel.Config) { // Updater configuration config.BindEnvAndSetDefault("updater.remote_updates", false) config.BindEnv("updater.registry") + config.BindEnvAndSetDefault("updater.registry_auth", "") } // LoadProxyFromEnv overrides the proxy settings with environment variables diff --git a/pkg/updater/download.go b/pkg/updater/download.go index 9387c600a4122..5d3c65f6c28dd 100644 --- a/pkg/updater/download.go +++ b/pkg/updater/download.go @@ -13,11 +13,15 @@ import ( "runtime" "strings" + "github.com/awslabs/amazon-ecr-credential-helper/ecr-login" + "github.com/google/go-containerregistry/pkg/authn" "github.com/google/go-containerregistry/pkg/name" oci "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/google" "github.com/google/go-containerregistry/pkg/v1/remote" httptrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http" + "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -39,13 +43,24 @@ type downloadedPackage struct { // downloader is the downloader used by the updater to download packages. type downloader struct { + keychain authn.Keychain client *http.Client remoteBaseURL string } // newDownloader returns a new Downloader. 
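The hunk below makes registry authentication configurable: newDownloader now reads the new updater.registry_auth setting and picks an authn.Keychain accordingly (google.Keychain for "gcr", the Amazon ECR credential helper for "ecr", authn.DefaultKeychain otherwise), and downloadRegistry passes that keychain to remote.Index through remote.WithAuthFromKeychain. As a rough standalone sketch of the same pattern, not the repository's code, the selection and its use with go-containerregistry could look like the following; the pickKeychain helper and the example registry reference are assumptions made only for illustration.

package main

import (
	"context"
	"fmt"
	"log"

	ecr "github.com/awslabs/amazon-ecr-credential-helper/ecr-login"
	"github.com/google/go-containerregistry/pkg/authn"
	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/google"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

// pickKeychain is a hypothetical helper mirroring the selection in the hunk
// below: "gcr" uses the GCP keychain, "ecr" wraps the Amazon ECR credential
// helper, and anything else falls back to the default docker-config keychain.
func pickKeychain(auth string) authn.Keychain {
	switch auth {
	case "gcr":
		return google.Keychain
	case "ecr":
		return authn.NewKeychainFromHelper(ecr.NewECRHelper())
	default:
		return authn.DefaultKeychain
	}
}

func main() {
	// Placeholder reference; any OCI registry path would do here.
	ref, err := name.ParseReference("example.registry.io/agent-package:7")
	if err != nil {
		log.Fatal(err)
	}
	// Resolve credentials through the chosen keychain when fetching the index.
	idx, err := remote.Index(ref,
		remote.WithContext(context.Background()),
		remote.WithAuthFromKeychain(pickKeychain("ecr")),
	)
	if err != nil {
		log.Fatal(err)
	}
	digest, err := idx.Digest()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("index digest:", digest)
}

In the patch itself the keychain is stored once on the downloader struct by newDownloader, so the configuration lookup happens a single time and every subsequent registry call reuses the same keychain.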
-func newDownloader(client *http.Client, remoteBaseURL string) *downloader { +func newDownloader(config config.Reader, client *http.Client, remoteBaseURL string) *downloader { + var keychain authn.Keychain + switch config.GetString("updater.registry_auth") { + case "gcr": + keychain = google.Keychain + case "ecr": + keychain = authn.NewKeychainFromHelper(ecr.NewECRHelper()) + default: + keychain = authn.DefaultKeychain + } return &downloader{ + keychain: keychain, client: client, remoteBaseURL: remoteBaseURL, } @@ -111,7 +126,7 @@ func (d *downloader) downloadRegistry(ctx context.Context, url string) (oci.Imag OS: runtime.GOOS, Architecture: runtime.GOARCH, } - index, err := remote.Index(ref, remote.WithContext(ctx), remote.WithTransport(httptrace.WrapRoundTripper(d.client.Transport))) + index, err := remote.Index(ref, remote.WithContext(ctx), remote.WithAuthFromKeychain(d.keychain), remote.WithTransport(httptrace.WrapRoundTripper(d.client.Transport))) if err != nil { return nil, fmt.Errorf("could not download image: %w", err) } diff --git a/pkg/updater/download_test.go b/pkg/updater/download_test.go index f897db79791f7..8003e2de10f9b 100644 --- a/pkg/updater/download_test.go +++ b/pkg/updater/download_test.go @@ -27,6 +27,8 @@ import ( "github.com/google/go-containerregistry/pkg/v1/remote" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/DataDog/datadog-agent/pkg/config/model" ) type fixture struct { @@ -107,11 +109,13 @@ func newTestFixturesServer(t *testing.T) *testFixturesServer { } func (s *testFixturesServer) Downloader() *downloader { - return newDownloader(s.s.Client(), "") + cfg := model.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) + return newDownloader(cfg, s.s.Client(), "") } func (s *testFixturesServer) DownloaderRegistryOverride() *downloader { - return newDownloader(s.s.Client(), "my.super/registry") + cfg := model.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) + return newDownloader(cfg, s.s.Client(), "my.super/registry") } func (s *testFixturesServer) Package(f fixture) Package { diff --git a/pkg/updater/updater.go b/pkg/updater/updater.go index b66a7f66d9c0f..76ca92b255d31 100644 --- a/pkg/updater/updater.go +++ b/pkg/updater/updater.go @@ -151,7 +151,7 @@ func newUpdater(rc *remoteConfig, repositoriesPath string, locksPath string, con remoteUpdates: config.GetBool("updater.remote_updates"), rc: rcClient, repositories: repositories, - downloader: newDownloader(http.DefaultClient, remoteRegistryOverride), + downloader: newDownloader(config, http.DefaultClient, remoteRegistryOverride), installer: newInstaller(repositories), telemetry: telemetry, requests: make(chan remoteAPIRequest, 32), From 0ba7f945968854166dd7bb0a6cd01246b476836e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9lian=20Raimbault?= <161456554+CelianR@users.noreply.github.com> Date: Fri, 12 Apr 2024 14:18:56 +0200 Subject: [PATCH 13/99] [gitlab-use-module] Use gitlab python module instead of raw http requests (#24070) Co-authored-by: usamasaqib --- .github/workflows/label-analysis.yml | 2 +- tasks/kernel_matrix_testing/ci.py | 55 ++- tasks/kmt.py | 2 +- tasks/libs/ciproviders/github_api.py | 3 - tasks/libs/ciproviders/gitlab.py | 545 --------------------------- tasks/libs/ciproviders/gitlab_api.py | 243 ++++++++++++ tasks/libs/common/remote_api.py | 123 ------ tasks/libs/pipeline/data.py | 59 +-- tasks/libs/pipeline/notifications.py | 19 +- tasks/libs/pipeline/stats.py | 6 +- tasks/libs/pipeline/tools.py | 177 +++++---- 
tasks/libs/types/types.py | 18 +- tasks/linter.py | 19 +- tasks/notify.py | 2 +- tasks/pipeline.py | 218 ++++++----- tasks/release.py | 11 +- tasks/unit-tests/gitlab_api_tests.py | 93 +---- tasks/unit-tests/notify_tests.py | 175 +++++---- 18 files changed, 653 insertions(+), 1117 deletions(-) delete mode 100644 tasks/libs/ciproviders/gitlab.py create mode 100644 tasks/libs/ciproviders/gitlab_api.py delete mode 100644 tasks/libs/common/remote_api.py diff --git a/.github/workflows/label-analysis.yml b/.github/workflows/label-analysis.yml index 7d97b83595f71..bbf262c9381bb 100644 --- a/.github/workflows/label-analysis.yml +++ b/.github/workflows/label-analysis.yml @@ -21,7 +21,7 @@ jobs: - name: Checkout repository uses: actions/checkout@v4 - name: Install Python dependencies - run: pip install -r tasks/requirements.txt + run: pip install -r requirements.txt -r tasks/requirements.txt - name: Auto assign team label run: inv -e github.assign-team-label --pr-id='${{ github.event.pull_request.number }}' fetch-labels: diff --git a/tasks/kernel_matrix_testing/ci.py b/tasks/kernel_matrix_testing/ci.py index 8e03b74f5a293..364bf9d3c2845 100644 --- a/tasks/kernel_matrix_testing/ci.py +++ b/tasks/kernel_matrix_testing/ci.py @@ -6,9 +6,11 @@ import re import tarfile import xml.etree.ElementTree as ET -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Tuple, Union, overload +from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple, Union, overload -from tasks.libs.ciproviders.gitlab import Gitlab, get_gitlab_token +from gitlab.v4.objects import ProjectJob + +from tasks.libs.ciproviders.gitlab_api import get_gitlab_repo if TYPE_CHECKING: from typing_extensions import Literal @@ -16,31 +18,27 @@ from tasks.kernel_matrix_testing.types import Arch, Component, StackOutput, VMConfig -def get_gitlab() -> Gitlab: - return Gitlab("DataDog/datadog-agent", str(get_gitlab_token())) - - class KMTJob: """Abstract class representing a Kernel Matrix Testing job, with common properties and methods for all job types""" - def __init__(self, job_data: Dict[str, Any]): - self.gitlab = get_gitlab() - self.job_data = job_data + def __init__(self, job: ProjectJob): + self.gitlab = get_gitlab_repo() + self.job = job def __str__(self): return f"" @property def id(self) -> int: - return self.job_data["id"] + return self.job.id @property def pipeline_id(self) -> int: - return self.job_data["pipeline"]["id"] + return self.job.pipeline["id"] @property def name(self) -> str: - return self.job_data.get("name", "") + return self.job.name @property def arch(self) -> Arch: @@ -52,11 +50,11 @@ def component(self) -> Component: @property def status(self) -> str: - return self.job_data['status'] + return self.job.status @property def failure_reason(self) -> str: - return self.job_data["failure_reason"] + return self.job.failure_reason @overload def artifact_file(self, file: str, ignore_not_found: Literal[True]) -> Optional[str]: # noqa: U100 @@ -90,16 +88,14 @@ def artifact_file_binary(self, file: str, ignore_not_found: bool = False) -> Opt ignore_not_found: if True, return None if the file is not found, otherwise raise an error """ try: - res = self.gitlab.artifact(self.id, file, ignore_not_found=ignore_not_found) - if res is None: - if not ignore_not_found: - raise RuntimeError("Invalid return value from gitlab.artifact") - else: - return None - res.raise_for_status() + res = self.gitlab.jobs.get(self.id, lazy=True).artifact(file) + + return res.content except Exception as e: + if ignore_not_found: + return None + 
raise RuntimeError(f"Could not retrieve artifact {file}") from e - return res.content class KMTSetupEnvJob(KMTJob): @@ -107,8 +103,8 @@ class KMTSetupEnvJob(KMTJob): the job name and output artifacts """ - def __init__(self, job_data: Dict[str, Any]): - super().__init__(job_data) + def __init__(self, job: ProjectJob): + super().__init__(job) self.associated_test_jobs: List[KMTTestRunJob] = [] @property @@ -165,8 +161,8 @@ class KMTTestRunJob(KMTJob): the job name and output artifacts """ - def __init__(self, job_data: Dict[str, Any]): - super().__init__(job_data) + def __init__(self, job: ProjectJob): + super().__init__(job) self.setup_job: Optional[KMTSetupEnvJob] = None @property @@ -231,9 +227,10 @@ def get_all_jobs_for_pipeline(pipeline_id: Union[int, str]) -> Tuple[List[KMTSet setup_jobs: List[KMTSetupEnvJob] = [] test_jobs: List[KMTTestRunJob] = [] - gitlab = get_gitlab() - for job in gitlab.all_jobs(pipeline_id): - name = job.get("name", "") + gitlab = get_gitlab_repo() + jobs = gitlab.pipelines.get(pipeline_id, lazy=True).jobs.list(per_page=100, all=True) + for job in jobs: + name = job.name if name.startswith("kmt_setup_env"): setup_jobs.append(KMTSetupEnvJob(job)) elif name.startswith("kmt_run_"): diff --git a/tasks/kmt.py b/tasks/kmt.py index d7614ccc73a36..a3abd373ebdd1 100644 --- a/tasks/kmt.py +++ b/tasks/kmt.py @@ -984,7 +984,7 @@ def explain_ci_failure(_, pipeline: str): failreason = testfail # By default, we assume it's a test failure # Now check the artifacts, we'll guess why the job failed based on the size - for artifact in job.job_data.get("artifacts", []): + for artifact in job.job.artifacts: if artifact.get("filename") == "artifacts.zip": fsize = artifact.get("size", 0) if fsize < 1500: diff --git a/tasks/libs/ciproviders/github_api.py b/tasks/libs/ciproviders/github_api.py index 1d0e12f760b08..b9a186287dc6d 100644 --- a/tasks/libs/ciproviders/github_api.py +++ b/tasks/libs/ciproviders/github_api.py @@ -1,7 +1,6 @@ import base64 import os import platform -import re import subprocess from typing import List @@ -15,8 +14,6 @@ __all__ = ["GithubAPI"] -errno_regex = re.compile(r".*\[Errno (\d+)\] (.*)") - class GithubAPI: """ diff --git a/tasks/libs/ciproviders/gitlab.py b/tasks/libs/ciproviders/gitlab.py deleted file mode 100644 index 6e79edca40939..0000000000000 --- a/tasks/libs/ciproviders/gitlab.py +++ /dev/null @@ -1,545 +0,0 @@ -import json -import os -import platform -import subprocess -from collections import UserList -from urllib.parse import quote - -import yaml -from invoke.exceptions import Exit - -from tasks.libs.common.remote_api import APIError, RemoteAPI - -__all__ = ["Gitlab"] - - -class Gitlab(RemoteAPI): - """ - Helper class to perform API calls against the Gitlab API, using a Gitlab PAT. - """ - - BASE_URL = "https://gitlab.ddbuild.io/api/v4" - - def __init__(self, project_name="DataDog/datadog-agent", api_token=""): - super(Gitlab, self).__init__("Gitlab") - self.api_token = api_token - self.project_name = project_name - self.authorization_error_message = ( - "HTTP 401: Your GITLAB_TOKEN may have expired. You can " - "check and refresh it at " - "https://gitlab.ddbuild.io/-/profile/personal_access_tokens" - ) - - def test_project_found(self): - """ - Checks if a project can be found. This is useful for testing access permissions to projects. 
- """ - result = self.project() - - # name is arbitrary, just need to check if something is in the result - if "name" in result: - return - - print(f"Cannot find GitLab project {self.project_name}") - print("If you cannot see it in the GitLab WebUI, you likely need permission.") - raise Exit(code=1) - - def project(self): - """ - Gets the project info. - """ - path = f"/projects/{quote(self.project_name, safe='')}" - return self.make_request(path, json_output=True) - - def create_pipeline(self, ref, variables=None): - """ - Create a pipeline targeting a given reference of a project. - ref must be a branch or a tag. - """ - if variables is None: - variables = {} - - path = f"/projects/{quote(self.project_name, safe='')}/pipeline" - headers = {"Content-Type": "application/json"} - data = json.dumps({"ref": ref, "variables": [{"key": k, "value": v} for (k, v) in variables.items()]}) - return self.make_request(path, headers=headers, data=data, json_output=True) - - def all_pipelines_for_ref(self, ref, sha=None): - """ - Gets all pipelines for a given reference (+ optionally git sha). - """ - page = 1 - - # Go through all pages - results = self.pipelines_for_ref(ref, sha=sha, page=page) - while results: - yield from results - page += 1 - results = self.pipelines_for_ref(ref, sha=sha, page=page) - - def pipelines_for_ref(self, ref, sha=None, page=1, per_page=100): - """ - Gets one page of pipelines for a given reference (+ optionally git sha). - """ - path = f"/projects/{quote(self.project_name, safe='')}/pipelines?ref={quote(ref, safe='')}&per_page={per_page}&page={page}" - if sha: - path = f"{path}&sha={sha}" - return self.make_request(path, json_output=True) - - def last_pipeline_for_ref(self, ref, per_page=100): - """ - Gets the last pipeline for a given reference. - per_page cannot exceed 100. - """ - pipelines = self.pipelines_for_ref(ref, per_page=per_page) - - if len(pipelines) == 0: - return None - - return sorted(pipelines, key=lambda pipeline: pipeline['created_at'], reverse=True)[0] - - def last_pipelines(self): - """ - Get the last 100 pipelines - """ - path = f"/projects/{quote(self.project_name, safe='')}/pipelines?per_page=100&page=1" - return self.make_request(path, json_output=True) - - def trigger_pipeline(self, data): - """ - Trigger a pipeline on a project using the trigger endpoint. - Requires a trigger token in the data object, in the 'token' field. - """ - path = f"/projects/{quote(self.project_name, safe='')}/trigger/pipeline" - - if 'token' not in data: - raise Exit("Missing 'token' field in data object to trigger child pipelines", 1) - - return self.make_request(path, data=data, json_input=True, json_output=True) - - def pipeline(self, pipeline_id): - """ - Gets info for a given pipeline. - """ - path = f"/projects/{quote(self.project_name, safe='')}/pipelines/{pipeline_id}" - return self.make_request(path, json_output=True) - - def cancel_pipeline(self, pipeline_id): - """ - Cancels a given pipeline. - """ - path = f"/projects/{quote(self.project_name, safe='')}/pipelines/{pipeline_id}/cancel" - return self.make_request(path, json_output=True, method="POST") - - def cancel_job(self, job_id): - """ - Cancels a given job - """ - path = f"/projects/{quote(self.project_name, safe='')}/jobs/{job_id}/cancel" - return self.make_request(path, json_output=True, method="POST") - - def commit(self, commit_sha): - """ - Gets info for a given commit sha. 
- """ - path = f"/projects/{quote(self.project_name, safe='')}/repository/commits/{commit_sha}" - return self.make_request(path, json_output=True) - - def artifact(self, job_id, artifact_name, ignore_not_found=False): - path = f"/projects/{quote(self.project_name, safe='')}/jobs/{job_id}/artifacts/{artifact_name}" - try: - response = self.make_request(path, stream_output=True) - return response - except APIError as e: - if e.status_code == 404 and ignore_not_found: - return None - raise e - - def all_jobs(self, pipeline_id): - """ - Gets all the jobs for a pipeline. - """ - page = 1 - - # Go through all pages - results = self.jobs(pipeline_id, page) - while results: - yield from results - page += 1 - results = self.jobs(pipeline_id, page) - - def jobs(self, pipeline_id, page=1, per_page=100): - """ - Gets one page of the jobs for a pipeline. - per_page cannot exceed 100. - """ - path = f"/projects/{quote(self.project_name, safe='')}/pipelines/{pipeline_id}/jobs?per_page={per_page}&page={page}" - return self.make_request(path, json_output=True) - - def job_log(self, job_id): - """ - Gets the log file for a given job. - """ - - path = f"/projects/{quote(self.project_name, safe='')}/jobs/{job_id}/trace" - return self.make_request(path) - - def all_pipeline_schedules(self): - """ - Gets all pipelines schedules for the given project. - """ - page = 1 - - # Go through all pages - results = self.pipeline_schedules(page) - while results: - yield from results - page += 1 - results = self.pipeline_schedules(page) - - def pipeline_schedules(self, page=1, per_page=100): - """ - Gets one page of the pipeline schedules for the given project. - per_page cannot exceed 100 - """ - path = f"/projects/{quote(self.project_name, safe='')}/pipeline_schedules?per_page={per_page}&page={page}" - return self.make_request(path, json_output=True) - - def pipeline_schedule(self, schedule_id): - """ - Gets a single pipeline schedule. - """ - path = f"/projects/{quote(self.project_name, safe='')}/pipeline_schedules/{schedule_id}" - return self.make_request(path, json_output=True) - - def create_pipeline_schedule(self, description, ref, cron, cron_timezone=None, active=None): - """ - Create a new pipeline schedule with given attributes. - """ - path = f"/projects/{quote(self.project_name, safe='')}/pipeline_schedules" - data = { - "description": description, - "ref": ref, - "cron": cron, - "cron_timezone": cron_timezone, - "active": active, - } - no_none_data = {k: v for k, v in data.items() if v is not None} - return self.make_request(path, data=no_none_data, json_output=True, json_input=True) - - def edit_pipeline_schedule( - self, schedule_id, description=None, ref=None, cron=None, cron_timezone=None, active=None - ): - """ - Edit an existing pipeline schedule with given attributes. - """ - path = f"/projects/{quote(self.project_name, safe='')}/pipeline_schedules/{schedule_id}" - data = { - "description": description, - "ref": ref, - "cron": cron, - "cron_timezone": cron_timezone, - "active": active, - } - no_none_data = {k: v for k, v in data.items() if v is not None} - return self.make_request(path, json_input=True, json_output=True, data=no_none_data, method="PUT") - - def delete_pipeline_schedule(self, schedule_id): - """ - Delete an existing pipeline schedule. 
- """ - path = f"/projects/{quote(self.project_name, safe='')}/pipeline_schedules/{schedule_id}" - # Gitlab API docs claim that this returns the JSON representation of the deleted schedule, - # but it actually returns an empty string - result = self.make_request(path, json_output=False, method="DELETE") - return f"Pipeline schedule deleted; result: {result if result else '(empty)'}" - - def create_pipeline_schedule_variable(self, schedule_id, key, value): - """ - Create a variable for an existing pipeline schedule. - """ - path = f"/projects/{quote(self.project_name, safe='')}/pipeline_schedules/{schedule_id}/variables" - data = { - "key": key, - "value": value, - } - return self.make_request(path, data=data, json_output=True, json_input=True) - - def edit_pipeline_schedule_variable(self, schedule_id, key, value): - """ - Edit an existing variable for a pipeline schedule. - """ - path = f"/projects/{quote(self.project_name, safe='')}/pipeline_schedules/{schedule_id}/variables/{key}" - return self.make_request(path, json_input=True, data={"value": value}, json_output=True, method="PUT") - - def delete_pipeline_schedule_variable(self, schedule_id, key): - """ - Delete an existing variable for a pipeline schedule. - """ - path = f"/projects/{quote(self.project_name, safe='')}/pipeline_schedules/{schedule_id}/variables/{key}" - return self.make_request(path, json_output=True, method="DELETE") - - def find_tag(self, tag_name): - """ - Look up a tag by its name. - """ - path = f"/projects/{quote(self.project_name, safe='')}/repository/tags/{tag_name}" - try: - response = self.make_request(path, json_output=True) - return response - except APIError as e: - # If Gitlab API returns a "404 not found" error we return an empty dict - if e.status_code == 404: - print( - f"Couldn't find the {tag_name} tag: Gitlab returned a 404 Not Found instead of a 200 empty response." - ) - return dict() - else: - raise e - - def lint(self, configuration): - """ - Lint a gitlab-ci configuration. - """ - path = f"/projects/{quote(self.project_name, safe='')}/ci/lint?dry_run=true&include_jobs=true" - headers = {"Content-Type": "application/json"} - data = {"content": configuration} - return self.make_request(path, headers=headers, data=data, json_input=True, json_output=True) - - def make_request( - self, path, headers=None, data=None, json_input=False, json_output=False, stream_output=False, method=None - ): - """ - Utility to make a request to the Gitlab API. - See RemoteAPI#request. - - Adds "PRIVATE-TOKEN: {self.api_token}" to the headers to be able to authenticate ourselves to GitLab. - """ - headers = dict(headers or []) - headers["PRIVATE-TOKEN"] = self.api_token - - return self.request( - path=path, - headers=headers, - data=data, - json_input=json_input, - json_output=json_output, - stream_output=stream_output, - raw_output=False, - method=method, - ) - - -def get_gitlab_token(): - if "GITLAB_TOKEN" not in os.environ: - print("GITLAB_TOKEN not found in env. Trying keychain...") - if platform.system() == "Darwin": - try: - output = subprocess.check_output( - ['security', 'find-generic-password', '-a', os.environ["USER"], '-s', 'GITLAB_TOKEN', '-w'] - ) - if len(output) > 0: - return output.strip() - except subprocess.CalledProcessError: - print("GITLAB_TOKEN not found in keychain...") - pass - print( - "Please create an 'api' access token at " - "https://gitlab.ddbuild.io/-/profile/personal_access_tokens and " - "add it as GITLAB_TOKEN in your keychain " - "or export it from your .bashrc or equivalent." 
- ) - raise Exit(code=1) - return os.environ["GITLAB_TOKEN"] - - -def get_gitlab_bot_token(): - if "GITLAB_BOT_TOKEN" not in os.environ: - print("GITLAB_BOT_TOKEN not found in env. Trying keychain...") - if platform.system() == "Darwin": - try: - output = subprocess.check_output( - ['security', 'find-generic-password', '-a', os.environ["USER"], '-s', 'GITLAB_BOT_TOKEN', '-w'] - ) - if output: - return output.strip() - except subprocess.CalledProcessError: - print("GITLAB_BOT_TOKEN not found in keychain...") - pass - print( - "Please make sure that the GITLAB_BOT_TOKEN is set or that " "the GITLAB_BOT_TOKEN keychain entry is set." - ) - raise Exit(code=1) - return os.environ["GITLAB_BOT_TOKEN"] - - -class ReferenceTag(yaml.YAMLObject): - """ - Custom yaml tag to handle references in gitlab-ci configuration - """ - - yaml_tag = u'!reference' - - def __init__(self, references): - self.references = references - - @classmethod - def from_yaml(cls, loader, node): - return UserList(loader.construct_sequence(node)) - - @classmethod - def to_yaml(cls, dumper, data): - return dumper.represent_sequence(cls.yaml_tag, data.data, flow_style=True) - - -def generate_gitlab_full_configuration(input_file, context=None, compare_to=None): - """ - Generate a full gitlab-ci configuration by resolving all includes - """ - # Update loader/dumper to handle !reference tag - yaml.SafeLoader.add_constructor(ReferenceTag.yaml_tag, ReferenceTag.from_yaml) - yaml.SafeDumper.add_representer(UserList, ReferenceTag.to_yaml) - - yaml_contents = [] - read_includes(input_file, yaml_contents) - full_configuration = {} - for yaml_file in yaml_contents: - full_configuration.update(yaml_file) - # Override some variables with a dedicated context - if context: - full_configuration["variables"].update(context) - if compare_to: - for value in full_configuration.values(): - if ( - isinstance(value, dict) - and "changes" in value - and isinstance(value["changes"], dict) - and "compare_to" in value["changes"] - ): - value["changes"]["compare_to"] = compare_to - elif isinstance(value, list): - for v in value: - if ( - isinstance(v, dict) - and "changes" in v - and isinstance(v["changes"], dict) - and "compare_to" in v["changes"] - ): - v["changes"]["compare_to"] = compare_to - return yaml.safe_dump(full_configuration) - - -def read_includes(yaml_file, includes): - """ - Recursive method to read all includes from yaml files and store them in a list - """ - current_file = read_content(yaml_file) - if 'include' not in current_file: - includes.append(current_file) - else: - for include in current_file['include']: - read_includes(include, includes) - del current_file['include'] - includes.append(current_file) - - -def read_content(file_path): - """ - Read the content of a file, either from a local file or from an http endpoint - """ - content = None - if file_path.startswith('http'): - import requests - - response = requests.get(file_path) - response.raise_for_status() - content = response.text - else: - with open(file_path) as f: - content = f.read() - return yaml.safe_load(content) - - -def get_preset_contexts(required_tests): - possible_tests = ["all", "main", "release", "mq"] - required_tests = required_tests.casefold().split(",") - if set(required_tests) | set(possible_tests) != set(possible_tests): - raise Exit(f"Invalid test required: {required_tests} must contain only values from {possible_tests}", 1) - main_contexts = [ - ("BUCKET_BRANCH", ["nightly"]), # ["dev", "nightly", "beta", "stable", "oldnightly"] - ("CI_COMMIT_BRANCH", 
["main"]), # ["main", "mq-working-branch-main", "7.42.x", "any/name"] - ("CI_COMMIT_TAG", [""]), # ["", "1.2.3-rc.4", "6.6.6"] - ("CI_PIPELINE_SOURCE", ["pipeline"]), # ["trigger", "pipeline", "schedule"] - ("DEPLOY_AGENT", ["true"]), - ("RUN_ALL_BUILDS", ["true"]), - ("RUN_E2E_TESTS", ["auto"]), - ("RUN_KMT_TESTS", ["on"]), - ("RUN_UNIT_TESTS", ["on"]), - ("TESTING_CLEANUP", ["true"]), - ] - release_contexts = [ - ("BUCKET_BRANCH", ["stable"]), - ("CI_COMMIT_BRANCH", ["7.42.x"]), - ("CI_COMMIT_TAG", ["3.2.1", "1.2.3-rc.4"]), - ("CI_PIPELINE_SOURCE", ["schedule"]), - ("DEPLOY_AGENT", ["true"]), - ("RUN_ALL_BUILDS", ["true"]), - ("RUN_E2E_TESTS", ["auto"]), - ("RUN_KMT_TESTS", ["on"]), - ("RUN_UNIT_TESTS", ["on"]), - ("TESTING_CLEANUP", ["true"]), - ] - mq_contexts = [ - ("BUCKET_BRANCH", ["dev"]), - ("CI_COMMIT_BRANCH", ["mq-working-branch-main"]), - ("CI_PIPELINE_SOURCE", ["pipeline"]), - ("DEPLOY_AGENT", ["false"]), - ("RUN_ALL_BUILDS", ["false"]), - ("RUN_E2E_TESTS", ["auto"]), - ("RUN_KMT_TESTS", ["off"]), - ("RUN_UNIT_TESTS", ["off"]), - ("TESTING_CLEANUP", ["false"]), - ] - all_contexts = [] - for test in required_tests: - if test in ["all", "main"]: - generate_contexts(main_contexts, [], all_contexts) - if test in ["all", "release"]: - generate_contexts(release_contexts, [], all_contexts) - if test in ["all", "mq"]: - generate_contexts(mq_contexts, [], all_contexts) - return all_contexts - - -def generate_contexts(contexts, context, all_contexts): - """ - Recursive method to generate all possible contexts from a list of tuples - """ - if len(contexts) == 0: - all_contexts.append(context[:]) - return - for value in contexts[0][1]: - context.append((contexts[0][0], value)) - generate_contexts(contexts[1:], context, all_contexts) - context.pop() - - -def load_context(context): - """ - Load a context either from a yaml file or from a json string - """ - if os.path.exists(context): - with open(context) as f: - y = yaml.safe_load(f) - if "variables" not in y: - raise Exit( - f"Invalid context file: {context}, missing 'variables' key. Input file must be similar to tasks/unit-tests/testdata/gitlab_main_context_template.yml", - 1, - ) - return [[(k, v) for k, v in y["variables"].items()]] - else: - try: - j = json.loads(context) - return [[(k, v) for k, v in j.items()]] - except json.JSONDecodeError: - raise Exit(f"Invalid context: {context}, must be a valid json, or a path to a yaml file", 1) diff --git a/tasks/libs/ciproviders/gitlab_api.py b/tasks/libs/ciproviders/gitlab_api.py new file mode 100644 index 0000000000000..74136486a6cf3 --- /dev/null +++ b/tasks/libs/ciproviders/gitlab_api.py @@ -0,0 +1,243 @@ +import json +import os +import platform +import subprocess +from collections import UserList + +import gitlab +import yaml +from gitlab.v4.objects import Project +from invoke.exceptions import Exit + +BASE_URL = "https://gitlab.ddbuild.io" + + +def get_gitlab_token(): + if "GITLAB_TOKEN" not in os.environ: + print("GITLAB_TOKEN not found in env. 
Trying keychain...") + if platform.system() == "Darwin": + try: + output = subprocess.check_output( + ['security', 'find-generic-password', '-a', os.environ["USER"], '-s', 'GITLAB_TOKEN', '-w'] + ) + if len(output) > 0: + return output.strip() + except subprocess.CalledProcessError: + print("GITLAB_TOKEN not found in keychain...") + pass + print( + "Please create an 'api' access token at " + "https://gitlab.ddbuild.io/-/profile/personal_access_tokens and " + "add it as GITLAB_TOKEN in your keychain " + "or export it from your .bashrc or equivalent." + ) + raise Exit(code=1) + return os.environ["GITLAB_TOKEN"] + + +def get_gitlab_bot_token(): + if "GITLAB_BOT_TOKEN" not in os.environ: + print("GITLAB_BOT_TOKEN not found in env. Trying keychain...") + if platform.system() == "Darwin": + try: + output = subprocess.check_output( + ['security', 'find-generic-password', '-a', os.environ["USER"], '-s', 'GITLAB_BOT_TOKEN', '-w'] + ) + if output: + return output.strip() + except subprocess.CalledProcessError: + print("GITLAB_BOT_TOKEN not found in keychain...") + pass + print( + "Please make sure that the GITLAB_BOT_TOKEN is set or that " "the GITLAB_BOT_TOKEN keychain entry is set." + ) + raise Exit(code=1) + return os.environ["GITLAB_BOT_TOKEN"] + + +def get_gitlab_api(token=None) -> gitlab.Gitlab: + """ + Returns the gitlab api object with the api token. + The token is the one of get_gitlab_token() by default. + """ + token = token or get_gitlab_token() + + return gitlab.Gitlab(BASE_URL, private_token=token) + + +def get_gitlab_repo(repo='DataDog/datadog-agent', token=None) -> Project: + api = get_gitlab_api(token) + repo = api.projects.get(repo) + + return repo + + +class ReferenceTag(yaml.YAMLObject): + """ + Custom yaml tag to handle references in gitlab-ci configuration + """ + + yaml_tag = u'!reference' + + def __init__(self, references): + self.references = references + + @classmethod + def from_yaml(cls, loader, node): + return UserList(loader.construct_sequence(node)) + + @classmethod + def to_yaml(cls, dumper, data): + return dumper.represent_sequence(cls.yaml_tag, data.data, flow_style=True) + + +def generate_gitlab_full_configuration(input_file, context=None, compare_to=None): + """ + Generate a full gitlab-ci configuration by resolving all includes + """ + # Update loader/dumper to handle !reference tag + yaml.SafeLoader.add_constructor(ReferenceTag.yaml_tag, ReferenceTag.from_yaml) + yaml.SafeDumper.add_representer(UserList, ReferenceTag.to_yaml) + yaml_contents = [] + read_includes(input_file, yaml_contents) + full_configuration = {} + for yaml_file in yaml_contents: + full_configuration.update(yaml_file) + # Override some variables with a dedicated context + if context: + full_configuration["variables"].update(context) + if compare_to: + for value in full_configuration.values(): + if ( + isinstance(value, dict) + and "changes" in value + and isinstance(value["changes"], dict) + and "compare_to" in value["changes"] + ): + value["changes"]["compare_to"] = compare_to + elif isinstance(value, list): + for v in value: + if ( + isinstance(v, dict) + and "changes" in v + and isinstance(v["changes"], dict) + and "compare_to" in v["changes"] + ): + v["changes"]["compare_to"] = compare_to + return yaml.safe_dump(full_configuration) + + +def read_includes(yaml_file, includes): + """ + Recursive method to read all includes from yaml files and store them in a list + """ + current_file = read_content(yaml_file) + if 'include' not in current_file: + includes.append(current_file) + else: + for 
include in current_file['include']: + read_includes(include, includes) + del current_file['include'] + includes.append(current_file) + + +def read_content(file_path): + """ + Read the content of a file, either from a local file or from an http endpoint + """ + content = None + if file_path.startswith('http'): + import requests + + response = requests.get(file_path) + response.raise_for_status() + content = response.text + else: + with open(file_path) as f: + content = f.read() + return yaml.safe_load(content) + + +def get_preset_contexts(required_tests): + possible_tests = ["all", "main", "release", "mq"] + required_tests = required_tests.casefold().split(",") + if set(required_tests) | set(possible_tests) != set(possible_tests): + raise Exit(f"Invalid test required: {required_tests} must contain only values from {possible_tests}", 1) + main_contexts = [ + ("BUCKET_BRANCH", ["nightly"]), # ["dev", "nightly", "beta", "stable", "oldnightly"] + ("CI_COMMIT_BRANCH", ["main"]), # ["main", "mq-working-branch-main", "7.42.x", "any/name"] + ("CI_COMMIT_TAG", [""]), # ["", "1.2.3-rc.4", "6.6.6"] + ("CI_PIPELINE_SOURCE", ["pipeline"]), # ["trigger", "pipeline", "schedule"] + ("DEPLOY_AGENT", ["true"]), + ("RUN_ALL_BUILDS", ["true"]), + ("RUN_E2E_TESTS", ["auto"]), + ("RUN_KMT_TESTS", ["on"]), + ("RUN_UNIT_TESTS", ["on"]), + ("TESTING_CLEANUP", ["true"]), + ] + release_contexts = [ + ("BUCKET_BRANCH", ["stable"]), + ("CI_COMMIT_BRANCH", ["7.42.x"]), + ("CI_COMMIT_TAG", ["3.2.1", "1.2.3-rc.4"]), + ("CI_PIPELINE_SOURCE", ["schedule"]), + ("DEPLOY_AGENT", ["true"]), + ("RUN_ALL_BUILDS", ["true"]), + ("RUN_E2E_TESTS", ["auto"]), + ("RUN_KMT_TESTS", ["on"]), + ("RUN_UNIT_TESTS", ["on"]), + ("TESTING_CLEANUP", ["true"]), + ] + mq_contexts = [ + ("BUCKET_BRANCH", ["dev"]), + ("CI_COMMIT_BRANCH", ["mq-working-branch-main"]), + ("CI_PIPELINE_SOURCE", ["pipeline"]), + ("DEPLOY_AGENT", ["false"]), + ("RUN_ALL_BUILDS", ["false"]), + ("RUN_E2E_TESTS", ["auto"]), + ("RUN_KMT_TESTS", ["off"]), + ("RUN_UNIT_TESTS", ["off"]), + ("TESTING_CLEANUP", ["false"]), + ] + all_contexts = [] + for test in required_tests: + if test in ["all", "main"]: + generate_contexts(main_contexts, [], all_contexts) + if test in ["all", "release"]: + generate_contexts(release_contexts, [], all_contexts) + if test in ["all", "mq"]: + generate_contexts(mq_contexts, [], all_contexts) + return all_contexts + + +def generate_contexts(contexts, context, all_contexts): + """ + Recursive method to generate all possible contexts from a list of tuples + """ + if len(contexts) == 0: + all_contexts.append(context[:]) + return + for value in contexts[0][1]: + context.append((contexts[0][0], value)) + generate_contexts(contexts[1:], context, all_contexts) + context.pop() + + +def load_context(context): + """ + Load a context either from a yaml file or from a json string + """ + if os.path.exists(context): + with open(context) as f: + y = yaml.safe_load(f) + if "variables" not in y: + raise Exit( + f"Invalid context file: {context}, missing 'variables' key. 
Input file must be similar to tasks/unit-tests/testdata/gitlab_main_context_template.yml", + 1, + ) + return [[(k, v) for k, v in y["variables"].items()]] + else: + try: + j = json.loads(context) + return [[(k, v) for k, v in j.items()]] + except json.JSONDecodeError: + raise Exit(f"Invalid context: {context}, must be a valid json, or a path to a yaml file", 1) diff --git a/tasks/libs/common/remote_api.py b/tasks/libs/common/remote_api.py deleted file mode 100644 index 20f4008abed1f..0000000000000 --- a/tasks/libs/common/remote_api.py +++ /dev/null @@ -1,123 +0,0 @@ -import errno -import re -import time - -from invoke.exceptions import Exit - -errno_regex = re.compile(r".*\[Errno (\d+)\] (.*)") - - -class APIError(Exception): - def __init__(self, request, api_name): - super(APIError, self).__init__(f"{api_name} says: {request.content}") - self.status_code = request.status_code - self.request = request - - -class RemoteAPI(object): - """ - Helper class to perform calls against a given remote API. - """ - - BASE_URL = "" - - def __init__(self, api_name, sleep_time=1, retry_count=5): - self.api_name = api_name - self.authorization_error_message = "HTTP 401 Unauthorized" - self.requests_sleep_time = sleep_time - self.requests_500_retry_count = retry_count - - def request( - self, - path, - headers=None, - data=None, - json_input=False, - json_output=False, - stream_output=False, - raw_output=False, - method=None, - ): - """ - Utility to make a request to a remote API. - - headers: A hash of headers to pass to the request. - data: An object containing the body of the request. - json_input: If set to true, data is passed with the json parameter of requests.post instead of the data parameter. - - By default, the request method is GET, or POST if data is not empty. - method: Can be set to GET, POST, PUT or DELETE to force the REST method used. - - By default, we return the text field of the response object. The following fields can alter this behavior: - json_output: the json field of the response object is returned. - stream_output: the request asks for a stream response, and the raw response object is returned. - raw_output: the content field of the resposne object is returned. 
- """ - import requests - - url = self.BASE_URL + path - - # TODO: Use the param argument of requests instead of handling URL params - # manually - try: - # If json_input is true, we specifically want to send data using the json - # parameter of requests.post / requests.put - for retry_count in range(self.requests_500_retry_count): - if method == "PUT": - if json_input: - r = requests.put(url, headers=headers, json=data, stream=stream_output) - else: - r = requests.put(url, headers=headers, data=data, stream=stream_output) - elif method == "DELETE": - r = requests.delete(url, headers=headers, stream=stream_output) - elif data or method == "POST": - if json_input: - r = requests.post(url, headers=headers, json=data, stream=stream_output) - else: - r = requests.post(url, headers=headers, data=data, stream=stream_output) - else: - r = requests.get(url, headers=headers, stream=stream_output) - if r.status_code >= 400: - if r.status_code == 401: - print(self.authorization_error_message) - elif 500 <= r.status_code < 600: - sleep_time = self.requests_sleep_time + retry_count * self.requests_sleep_time - if sleep_time > 0: - print( - f"Request failed with error {r.status_code}, retrying in {sleep_time} seconds (retry {retry_count}/{self.requests_500_retry_count}" - ) - time.sleep(sleep_time) - continue - raise APIError(r, self.api_name) - else: - break - except requests.exceptions.Timeout: - print(f"Connection to {self.api_name} ({url}) timed out.") - raise Exit(code=1) - except requests.exceptions.RequestException as e: - m = errno_regex.match(str(e)) - if not m: - print(f"Unknown error raised connecting to {self.api_name} ({url}): {e}") - raise e - - # Parse errno to give a better explanation - # Requests doesn't have granularity at the level we want: - # http://docs.python-requests.org/en/master/_modules/requests/exceptions/ - errno_code = int(m.group(1)) - message = m.group(2) - - if errno_code == errno.ENOEXEC: - exit_msg = f"Error resolving {url}: {message}" - elif errno_code == errno.ECONNREFUSED: - exit_msg = f"Connection to {self.api_name} ({url}) refused" - else: - exit_msg = f"Error while connecting to {url}: {str(e)}" - raise Exit(message=exit_msg, code=1) - - if json_output: - return r.json() - if raw_output: - return r.content - if stream_output: - return r - return r.text diff --git a/tasks/libs/pipeline/data.py b/tasks/libs/pipeline/data.py index 4e5b5fa1c9fe5..acaf9ccdff05b 100644 --- a/tasks/libs/pipeline/data.py +++ b/tasks/libs/pipeline/data.py @@ -1,6 +1,9 @@ import re +from collections import defaultdict -from tasks.libs.ciproviders.gitlab import Gitlab, get_gitlab_token +from gitlab.v4.objects import ProjectJob + +from tasks.libs.ciproviders.gitlab_api import get_gitlab_repo from tasks.libs.types.types import FailedJobReason, FailedJobs, FailedJobType @@ -8,47 +11,47 @@ def get_failed_jobs(project_name: str, pipeline_id: str) -> FailedJobs: """ Retrieves the list of failed jobs for a given pipeline id in a given project. 
""" + repo = get_gitlab_repo(project_name) + pipeline = repo.pipelines.get(pipeline_id) + jobs = pipeline.jobs.list(per_page=100, all=True) - gitlab = Gitlab(project_name=project_name, api_token=get_gitlab_token()) - - # gitlab.all_jobs yields a generator, it needs to be converted to a list to be able to - # go through it twice - jobs = list(gitlab.all_jobs(pipeline_id)) - - # Get instances of failed jobs - failed_jobs = {job["name"]: [] for job in jobs if job["status"] == "failed"} - - # Group jobs per name + # Get instances of failed jobs grouped by name + failed_jobs = defaultdict(list) for job in jobs: - if job["name"] in failed_jobs: - failed_jobs[job["name"]].append(job) + if job.status == "failed": + failed_jobs[job.name].append(job) # There, we now have the following map: # job name -> list of jobs with that name, including at least one failed job processed_failed_jobs = FailedJobs() for job_name, jobs in failed_jobs.items(): # We sort each list per creation date - jobs.sort(key=lambda x: x["created_at"]) + jobs.sort(key=lambda x: x.created_at) # We truncate the job name to increase readability job_name = truncate_job_name(job_name) + job = jobs[-1] # Check the final job in the list: it contains the current status of the job # This excludes jobs that were retried and succeeded - failure_type, failure_reason = get_job_failure_context(gitlab.job_log(jobs[-1]["id"])) - final_status = { - "name": job_name, - "id": jobs[-1]["id"], - "stage": jobs[-1]["stage"], - "status": jobs[-1]["status"], - "tag_list": jobs[-1]["tag_list"], - "allow_failure": jobs[-1]["allow_failure"], - "url": jobs[-1]["web_url"], - "retry_summary": [job["status"] for job in jobs], - "failure_type": failure_type, - "failure_reason": failure_reason, - } + trace = str(repo.jobs.get(job.id, lazy=True).trace(), 'utf-8') + failure_type, failure_reason = get_job_failure_context(trace) + final_status = ProjectJob( + repo.manager, + attrs={ + "name": job_name, + "id": job.id, + "stage": job.stage, + "status": job.status, + "tag_list": job.tag_list, + "allow_failure": job.allow_failure, + "web_url": job.web_url, + "retry_summary": [ijob.status for ijob in jobs], + "failure_type": failure_type, + "failure_reason": failure_reason, + }, + ) # Also exclude jobs allowed to fail - if final_status["status"] == "failed" and should_report_job(job_name, final_status["allow_failure"]): + if final_status.status == "failed" and should_report_job(job_name, final_status.allow_failure): processed_failed_jobs.add_failed_job(final_status) return processed_failed_jobs diff --git a/tasks/libs/pipeline/notifications.py b/tasks/libs/pipeline/notifications.py index 960eb5a283234..c35282f1cea94 100644 --- a/tasks/libs/pipeline/notifications.py +++ b/tasks/libs/pipeline/notifications.py @@ -6,10 +6,12 @@ from collections import defaultdict from typing import Dict +import gitlab import yaml +from gitlab.v4.objects import ProjectJob from invoke.context import Context -from tasks.libs.ciproviders.gitlab import Gitlab, get_gitlab_token +from tasks.libs.ciproviders.gitlab_api import get_gitlab_repo from tasks.libs.owners.parsing import read_owners from tasks.libs.types.types import FailedJobReason, FailedJobs, Test @@ -51,13 +53,16 @@ def check_for_missing_owners_slack_and_jira(print_missing_teams=True, owners_fil return error -def get_failed_tests(project_name, job, owners_file=".github/CODEOWNERS"): - gitlab = Gitlab(project_name=project_name, api_token=get_gitlab_token()) +def get_failed_tests(project_name, job: ProjectJob, 
owners_file=".github/CODEOWNERS"): + repo = get_gitlab_repo(project_name) owners = read_owners(owners_file) - test_output = gitlab.artifact(job["id"], "test_output.json", ignore_not_found=True) + try: + test_output = str(repo.jobs.get(job.id, lazy=True).artifact('test_output.json'), 'utf-8') + except gitlab.exceptions.GitlabGetError: + test_output = '' failed_tests = {} # type: dict[tuple[str, str], Test] if test_output: - for line in test_output.iter_lines(): + for line in test_output.splitlines(): json_test = json.loads(line) if 'Test' in json_test: name = json_test['Test'] @@ -86,11 +91,11 @@ def find_job_owners(failed_jobs: FailedJobs, owners_file: str = ".gitlab/JOBOWNE # For e2e test infrastructure errors, notify the agent-e2e-testing team for job in failed_jobs.mandatory_infra_job_failures: - if job["failure_type"] == FailedJobReason.E2E_INFRA_FAILURE: + if job.failure_type == FailedJobReason.E2E_INFRA_FAILURE: owners_to_notify["@datadog/agent-e2e-testing"].add_failed_job(job) for job in failed_jobs.all_non_infra_failures(): - job_owners = owners.of(job["name"]) + job_owners = owners.of(job.name) # job_owners is a list of tuples containing the type of owner (eg. USERNAME, TEAM) and the name of the owner # eg. [('TEAM', '@DataDog/agent-ci-experience')] diff --git a/tasks/libs/pipeline/stats.py b/tasks/libs/pipeline/stats.py index 46a862bfbb94e..8bc9e1b0f9113 100644 --- a/tasks/libs/pipeline/stats.py +++ b/tasks/libs/pipeline/stats.py @@ -31,10 +31,10 @@ def get_failed_jobs_stats(project_name, pipeline_id): global_failure_reason = FailedJobType.INFRA_FAILURE.name for job in failed_jobs.all_mandatory_failures(): - failure_type = job["failure_type"] - failure_reason = job["failure_reason"] + failure_type = job.failure_type + failure_reason = job.failure_reason - key = tuple(sorted(job["tag_list"] + [f"type:{failure_type.name}", f"reason:{failure_reason.name}"])) + key = tuple(sorted(job.tag_list + [f"type:{failure_type.name}", f"reason:{failure_reason.name}"])) job_failure_stats[key] += 1 return global_failure_reason, job_failure_stats diff --git a/tasks/libs/pipeline/tools.py b/tasks/libs/pipeline/tools.py index d026d61b5f6a6..513abfa14b85c 100644 --- a/tasks/libs/pipeline/tools.py +++ b/tasks/libs/pipeline/tools.py @@ -3,6 +3,10 @@ import platform import sys from time import sleep, time +from typing import List + +from gitlab import GitlabError +from gitlab.v4.objects import Project, ProjectJob, ProjectPipeline from tasks.libs.common.color import color_message from tasks.libs.common.user_interactions import yes_no_question @@ -15,11 +19,11 @@ class FilteredOutException(Exception): pass -def get_running_pipelines_on_same_ref(gitlab, ref, sha=None): - pipelines = gitlab.all_pipelines_for_ref(ref, sha=sha) +def get_running_pipelines_on_same_ref(repo: Project, ref, sha=None) -> List[ProjectPipeline]: + pipelines = repo.pipelines.list(ref=ref, sha=sha, per_page=100, all=True) RUNNING_STATUSES = ["created", "pending", "running"] - running_pipelines = [pipeline for pipeline in pipelines if pipeline["status"] in RUNNING_STATUSES] + running_pipelines = [pipeline for pipeline in pipelines if pipeline.status in RUNNING_STATUSES] return running_pipelines @@ -32,37 +36,37 @@ def parse_datetime(dt): return datetime.datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S.%f%z") -def cancel_pipelines_with_confirmation(gitlab, pipelines): +def cancel_pipelines_with_confirmation(repo: Project, pipelines: List[ProjectPipeline]): for pipeline in pipelines: - commit_author, commit_short_sha, commit_title = 
get_commit_for_pipeline(gitlab, pipeline['id']) + commit = repo.commits.get(pipeline.sha) print( color_message("Pipeline", "blue"), - color_message(pipeline['id'], "bold"), - color_message(f"(https://gitlab.ddbuild.io/{gitlab.project_name}/pipelines/{pipeline['id']})", "green"), + color_message(pipeline.id, "bold"), + color_message(f"({repo.web_url}/pipelines/{pipeline.id})", "green"), ) - pipeline_creation_date = pipeline['created_at'] + pipeline_creation_date = pipeline.created_at print( f"{color_message('Started at', 'blue')} {parse_datetime(pipeline_creation_date).astimezone():%c} ({pipeline_creation_date})" ) print( color_message("Commit:", "blue"), - color_message(commit_title, "green"), - color_message(f"({commit_short_sha})", "grey"), + color_message(commit.title, "green"), + color_message(f"({commit.short_id})", "grey"), color_message("by", "blue"), - color_message(commit_author, "bold"), + color_message(commit.author_name, "bold"), ) if yes_no_question("Do you want to cancel this pipeline?", color="orange", default=True): - gitlab.cancel_pipeline(pipeline['id']) - print(f"Pipeline {color_message(pipeline['id'], 'bold')} has been cancelled.\n") + pipeline.cancel() + print(f"Pipeline {color_message(pipeline.id, 'bold')} has been cancelled.\n") else: - print(f"Pipeline {color_message(pipeline['id'], 'bold')} will keep running.\n") + print(f"Pipeline {color_message(pipeline.id, 'bold')} will keep running.\n") -def gracefully_cancel_pipeline(gitlab, pipeline, force_cancel_stages): +def gracefully_cancel_pipeline(repo: Project, pipeline: ProjectPipeline, force_cancel_stages): """ Gracefully cancel pipeline - Cancel all the jobs that did not start to run yet @@ -70,17 +74,17 @@ def gracefully_cancel_pipeline(gitlab, pipeline, force_cancel_stages): - Jobs in the stages specified in 'force_cancel_stages' variables will always be canceled even if running """ - jobs = gitlab.all_jobs(pipeline["id"]) + jobs = pipeline.jobs.list(per_page=100, all=True) for job in jobs: - if job["stage"] in force_cancel_stages or ( - job["status"] not in ["running", "canceled"] and "cleanup" not in job["name"] + if job.stage in force_cancel_stages or ( + job.status not in ["running", "canceled"] and "cleanup" not in job.name ): - gitlab.cancel_job(job["id"]) + repo.jobs.get(job.id, lazy=True).cancel() def trigger_agent_pipeline( - gitlab, + repo: Project, ref=DEFAULT_BRANCH, release_version_6="nightly", release_version_7="nightly-a7", @@ -90,7 +94,7 @@ def trigger_agent_pipeline( e2e_tests=False, rc_build=False, rc_k8s_deployments=False, -): +) -> ProjectPipeline: """ Trigger a pipeline on the datadog-agent repositories. 
Multiple options are available: - run a pipeline with all builds (by default, a pipeline only runs a subset of all available builds), @@ -137,39 +141,40 @@ def trigger_agent_pipeline( ref, "\n".join(f" - {k}: {args[k]}" for k in args) ) ) - result = gitlab.create_pipeline(ref, args) + try: + variables = [{'key': key, 'value': value} for (key, value) in args.items()] - if result and "id" in result: - return result["id"] + return repo.pipelines.create({'ref': ref, 'variables': variables}) + except GitlabError as e: + if "filtered out by workflow rules" in e.error_message: + raise FilteredOutException - if result and "filtered out by workflow rules" in result.get("message", {}).get("base", [""])[0]: - raise FilteredOutException + raise RuntimeError(f"Invalid response from Gitlab API: {e}") - raise RuntimeError(f"Invalid response from Gitlab: {result}") - -def wait_for_pipeline(gitlab, pipeline_id, pipeline_finish_timeout_sec=PIPELINE_FINISH_TIMEOUT_SEC): +def wait_for_pipeline( + repo: Project, pipeline: ProjectPipeline, pipeline_finish_timeout_sec=PIPELINE_FINISH_TIMEOUT_SEC +): """ Follow a given pipeline, periodically checking the pipeline status and printing changes to the job statuses. """ - commit_author, commit_short_sha, commit_title = get_commit_for_pipeline(gitlab, pipeline_id) + commit = repo.commits.get(pipeline.sha) print( color_message( "Commit: " - + color_message(commit_title, "green") - + color_message(f" ({commit_short_sha})", "grey") + + color_message(commit.title, "green") + + color_message(f" ({commit.short_id})", "grey") + " by " - + color_message(commit_author, "bold"), + + color_message(commit.author_name, "bold"), "blue", ), flush=True, ) print( color_message( - "Pipeline Link: " - + color_message(f"https://gitlab.ddbuild.io/{gitlab.project_name}/pipelines/{pipeline_id}", "green"), + "Pipeline Link: " + color_message(pipeline.web_url, "green"), "blue", ), flush=True, @@ -177,19 +182,10 @@ def wait_for_pipeline(gitlab, pipeline_id, pipeline_finish_timeout_sec=PIPELINE_ print(color_message("Waiting for pipeline to finish. Exiting won't cancel it.", "blue"), flush=True) - f = functools.partial(pipeline_status, gitlab, pipeline_id) + f = functools.partial(pipeline_status, pipeline) loop_status(f, pipeline_finish_timeout_sec) - return pipeline_id - - -def get_commit_for_pipeline(gitlab, pipeline_id): - pipeline = gitlab.pipeline(pipeline_id) - sha = pipeline['sha'] - commit = gitlab.commit(sha) - return commit['author_name'], commit['short_id'], commit['title'] - def loop_status(callable, timeout_sec): """ @@ -206,50 +202,49 @@ def loop_status(callable, timeout_sec): sleep(10) -def pipeline_status(gitlab, pipeline_id, job_status): +def pipeline_status(pipeline: ProjectPipeline, job_status): """ Checks the pipeline status and updates job statuses. 
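    It is meant to be polled repeatedly: wait_for_pipeline above wraps it with
    functools.partial and hands it to loop_status. A rough sketch of a single call,
    assuming a ProjectPipeline fetched beforehand and an initially empty status map:

        finished, job_status = pipeline_status(pipeline, {})
        # finished is True once the pipeline reached success, failed or canceled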
""" - jobs = gitlab.all_jobs(pipeline_id) + jobs = pipeline.jobs.list(per_page=100, all=True) job_status = update_job_status(jobs, job_status) # Check pipeline status - pipeline = gitlab.pipeline(pipeline_id) - pipestatus = pipeline["status"].lower().strip() - ref = pipeline["ref"] + pipestatus = pipeline.status.lower().strip() + ref = pipeline.ref if pipestatus == "success": print( color_message( - f"Pipeline https://gitlab.ddbuild.io/{gitlab.project_name}/pipelines/{pipeline_id} for {ref} succeeded", + f"Pipeline {pipeline.web_url} for {ref} succeeded", "green", ), flush=True, ) - notify("Pipeline success", f"Pipeline {pipeline_id} for {ref} succeeded.") + notify("Pipeline success", f"Pipeline {pipeline.id} for {ref} succeeded.") return True, job_status if pipestatus == "failed": print( color_message( - f"Pipeline https://gitlab.ddbuild.io/{gitlab.project_name}/pipelines/{pipeline_id} for {ref} failed", + f"Pipeline {pipeline.web_url} for {ref} failed", "red", ), flush=True, ) - notify("Pipeline failure", f"Pipeline {pipeline_id} for {ref} failed.") + notify("Pipeline failure", f"Pipeline {pipeline.id} for {ref} failed.") return True, job_status if pipestatus == "canceled": print( color_message( - f"Pipeline https://gitlab.ddbuild.io/{gitlab.project_name}/pipelines/{pipeline_id} for {ref} was canceled", + f"Pipeline {pipeline.web_url} for {ref} was canceled", "grey", ), flush=True, ) - notify("Pipeline canceled", f"Pipeline {pipeline_id} for {ref} was canceled.") + notify("Pipeline canceled", f"Pipeline {pipeline.id} for {ref} was canceled.") return True, job_status if pipestatus not in ["created", "running", "pending"]: @@ -258,36 +253,36 @@ def pipeline_status(gitlab, pipeline_id, job_status): return False, job_status -def update_job_status(jobs, job_status): +def update_job_status(jobs: List[ProjectJob], job_status): """ Updates job statuses and notify on changes. """ notify = {} for job in jobs: - if job_status.get(job['name'], None) is None: - job_status[job['name']] = job - notify[job['id']] = job + if job_status.get(job.name, None) is None: + job_status[job.name] = job + notify[job.id] = job else: # There are two reasons why we want to notify: # - status change on job (when we refresh) # - another job with the same name exists (when a job is retried) # Check for id to see if we're in the first case. - old_job = job_status[job['name']] - if job['id'] == old_job['id'] and job['status'] != old_job['status']: - job_status[job['name']] = job - notify[job['id']] = job - if job['id'] != old_job['id'] and job['created_at'] > old_job['created_at']: - job_status[job['name']] = job + old_job = job_status[job.name] + if job.id == old_job.id and job.status != old_job.status: + job_status[job.name] = job + notify[job.id] = job + if job.id != old_job.id and job.created_at > old_job.created_at: + job_status[job.name] = job # Check if old job already in notification list, to append retry message - notify_old_job = notify.get(old_job['id'], None) + notify_old_job = notify.get(old_job.id, None) if notify_old_job is not None: - notify_old_job['retried_old'] = True # Add message to say the job got retried - notify_old_job['retried_created_at'] = job['created_at'] - notify[old_job['id']] = notify_old_job + notify_old_job.retried_old = True # Add message to say the job got retried + notify_old_job.retried_created_at = job.created_at + notify[old_job.id] = notify_old_job # If not (eg. 
previous job was notified in last refresh), add retry message to new job else: - job['retried_new'] = True - notify[job['id']] = job + job.retried_new = True + notify[job.id] = job for job in notify.values(): print_job_status(job) @@ -312,53 +307,49 @@ def print_job(name, stage, color, date, duration, status, link): def print_retry(name, date): print(color_message(f"[{date}] Job {name} was retried", "grey")) - name = job['name'] - stage = job['stage'] - allow_failure = job['allow_failure'] - duration = job['duration'] - date = job['finished_at'] # Date that is printed in the console log. In most cases, it's when the job finished. - status = job['status'] # Gitlab job status + duration = job.duration + date = job.finished_at # Date that is printed in the console log. In most cases, it's when the job finished. job_status = None # Status string printed in the console link = '' # Link to the pipeline. Only filled for failing jobs, to be able to quickly go to the failing job. color = 'grey' # Log output color # A None duration is set by Gitlab when the job gets canceled before it was started. # In that case, set a duration of 0s. - if duration is None: + if job.duration is None: duration = 0 - if status == 'success': + if job.status == 'success': job_status = 'succeeded' color = 'green' - elif status == 'failed': - if allow_failure: + elif job.status == 'failed': + if job.allow_failure: job_status = 'failed (allowed to fail)' color = 'orange' else: job_status = 'failed' color = 'red' - link = f"Link: {job['web_url']}" + link = f"Link: {job.web_url}" # Only notify on real (not retried) failures # Best-effort, as there can be situations where the retried # job didn't get created yet - if job.get('retried_old', None) is None: - notify("Job failure", f"Job {name} failed.") - elif status == 'canceled': + if getattr(job, 'retried_old', None) is None: + notify("Job failure", f"Job {job.name} failed.") + elif job.status == 'canceled': job_status = 'was canceled' color = 'grey' - elif status == 'running': + elif job.status == 'running': job_status = 'started running' - date = job['started_at'] + date = job.started_at color = 'blue' else: return # Some logic to print the retry message in the correct order (before the new job or after the old job) - if job.get('retried_new', None) is not None: - print_retry(name, job['created_at']) - print_job(name, stage, color, date, duration, job_status, link) - if job.get('retried_old', None) is not None: - print_retry(name, job['retried_created_at']) + if getattr(job, 'retried_new', None) is not None: + print_retry(job.name, job.created_at) + print_job(job.name, job.stage, color, date, duration, job_status, link) + if getattr(job, 'retried_old', None) is not None: + print_retry(job.name, job.retried_created_at) def notify(title, info_text, sound=True): diff --git a/tasks/libs/types/types.py b/tasks/libs/types/types.py index 3c0d4c103e565..10bc2eaeea10e 100644 --- a/tasks/libs/types/types.py +++ b/tasks/libs/types/types.py @@ -3,6 +3,8 @@ from collections import defaultdict from enum import Enum +from gitlab.v4.objects import ProjectJob + class Test: PACKAGE_PREFIX = "github.com/DataDog/datadog-agent/" @@ -60,12 +62,12 @@ def __init__(self): self.mandatory_infra_job_failures = [] self.optional_infra_job_failures = [] - def add_failed_job(self, job): - if job["failure_type"] == FailedJobType.INFRA_FAILURE and job["allow_failure"]: + def add_failed_job(self, job: ProjectJob): + if job.failure_type == FailedJobType.INFRA_FAILURE and job.allow_failure: 
self.optional_infra_job_failures.append(job) - elif job["failure_type"] == FailedJobType.INFRA_FAILURE and not job["allow_failure"]: + elif job.failure_type == FailedJobType.INFRA_FAILURE and not job.allow_failure: self.mandatory_infra_job_failures.append(job) - elif job["allow_failure"]: + elif job.allow_failure: self.optional_job_failures.append(job) else: self.mandatory_job_failures.append(job) @@ -111,13 +113,13 @@ def __render_jobs_section(self, header: str, jobs: list, buffer: io.StringIO): jobs_per_stage = defaultdict(list) for job in jobs: - jobs_per_stage[job["stage"]].append(job) + jobs_per_stage[job.stage].append(job) for stage, jobs in jobs_per_stage.items(): jobs_info = [] for job in jobs: - num_retries = len(job["retry_summary"]) - 1 - job_info = f"<{job['url']}|{job['name']}>" + num_retries = len(job.retry_summary) - 1 + job_info = f"<{job.web_url}|{job.name}>" if num_retries > 0: job_info += f" ({num_retries} retries)" @@ -131,7 +133,7 @@ def __render_jobs_section(self, header: str, jobs: list, buffer: io.StringIO): def __render_tests_section(self, buffer): print(self.TEST_SECTION_HEADER, file=buffer) for (test_name, test_package), jobs in self.failed_tests.items(): - job_list = ", ".join(f"<{job['url']}|{job['name']}>" for job in jobs[: self.MAX_JOBS_PER_TEST]) + job_list = ", ".join(f"<{job.web_url}|{job.name}>" for job in jobs[: self.MAX_JOBS_PER_TEST]) if len(jobs) > self.MAX_JOBS_PER_TEST: job_list += f" and {len(jobs) - self.MAX_JOBS_PER_TEST} more" print(f"- `{test_name}` from package `{test_package}` (in {job_list})", file=buffer) diff --git a/tasks/linter.py b/tasks/linter.py index 033c047bd787d..7c22d6dc60772 100644 --- a/tasks/linter.py +++ b/tasks/linter.py @@ -8,10 +8,9 @@ from tasks.build_tags import compute_build_tags_for_flavor from tasks.flavor import AgentFlavor from tasks.go import run_golangci_lint -from tasks.libs.ciproviders.gitlab import ( - Gitlab, +from tasks.libs.ciproviders.gitlab_api import ( generate_gitlab_full_configuration, - get_gitlab_token, + get_gitlab_repo, get_preset_contexts, load_context, ) @@ -371,15 +370,15 @@ def gitlab_ci(_, test="all", custom_context=None): else: all_contexts = get_preset_contexts(test) print(f"We will tests {len(all_contexts)} contexts.") + agent = get_gitlab_repo() for context in all_contexts: print("Test gitlab configuration with context: ", context) config = generate_gitlab_full_configuration(".gitlab-ci.yml", dict(context)) - gitlab = Gitlab(api_token=get_gitlab_token()) - res = gitlab.lint(config) - status = color_message("valid", "green") if res["valid"] else color_message("invalid", "red") + res = agent.ci_lint.create({"content": config}) + status = color_message("valid", "green") if res.valid else color_message("invalid", "red") print(f"Config is {status}") - if len(res["warnings"]) > 0: - print(color_message(f"Warnings: {res['warnings']}", "orange"), file=sys.stderr) - if not res["valid"]: - print(color_message(f"Errors: {res['errors']}", "red"), file=sys.stderr) + if len(res.warnings) > 0: + print(color_message(f"Warnings: {res.warnings}", "orange"), file=sys.stderr) + if not res.valid: + print(color_message(f"Errors: {res.errors}", "red"), file=sys.stderr) raise Exit(code=1) diff --git a/tasks/notify.py b/tasks/notify.py index 037f16be047a6..a5c8da6f26ce8 100644 --- a/tasks/notify.py +++ b/tasks/notify.py @@ -291,7 +291,7 @@ def update_statistics(job_executions): # Update statistics and collect consecutive failed jobs alert_jobs = {"consecutive": [], "cumulative": []} failed_jobs = 
get_failed_jobs(PROJECT_NAME, os.getenv("CI_PIPELINE_ID")) - failed_set = {job["name"] for job in failed_jobs.all_failures()} + failed_set = {job.name for job in failed_jobs.all_failures()} current_set = set(job_executions["jobs"].keys()) # Insert data for newly failing jobs new_failed_jobs = failed_set - current_set diff --git a/tasks/pipeline.py b/tasks/pipeline.py index 9a9478e7f9e8e..260d860f66f35 100644 --- a/tasks/pipeline.py +++ b/tasks/pipeline.py @@ -6,11 +6,13 @@ from datetime import datetime, timedelta, timezone import yaml +from gitlab import GitlabError +from gitlab.v4.objects import Project from invoke import task from invoke.exceptions import Exit from tasks.libs.ciproviders.github_api import GithubAPI -from tasks.libs.ciproviders.gitlab import Gitlab, get_gitlab_bot_token, get_gitlab_token +from tasks.libs.ciproviders.gitlab_api import get_gitlab_bot_token, get_gitlab_repo from tasks.libs.common.color import color_message from tasks.libs.common.utils import ( DEFAULT_BRANCH, @@ -54,7 +56,7 @@ def GitlabYamlLoader(): # Tasks to trigger pipelines -def check_deploy_pipeline(gitlab, git_ref, release_version_6, release_version_7, repo_branch): +def check_deploy_pipeline(repo: Project, git_ref, release_version_6, release_version_7, repo_branch): """ Run checks to verify a deploy pipeline is valid: - it targets a valid repo branch @@ -81,9 +83,9 @@ def check_deploy_pipeline(gitlab, git_ref, release_version_6, release_version_7, if release_version_6 and match: # release_version_6 is not empty and git_ref matches v7 pattern, construct v6 tag and check. tag_name = "6." + "".join(match.groups()) - gitlab_tag = gitlab.find_tag(tag_name) - - if ("name" not in gitlab_tag) or gitlab_tag["name"] != tag_name: + try: + repo.tags.get(tag_name) + except GitlabError: print(f"Cannot find GitLab v6 tag {tag_name} while trying to build git ref {git_ref}") raise Exit(code=1) @@ -94,9 +96,9 @@ def check_deploy_pipeline(gitlab, git_ref, release_version_6, release_version_7, if release_version_7 and match: # release_version_7 is not empty and git_ref matches v6 pattern, construct v7 tag and check. tag_name = "7." + "".join(match.groups()) - gitlab_tag = gitlab.find_tag(tag_name) - - if ("name" not in gitlab_tag) or gitlab_tag["name"] != tag_name: + try: + repo.tags.get(tag_name) + except GitlabError: print(f"Cannot find GitLab v7 tag {tag_name} while trying to build git ref {git_ref}") raise Exit(code=1) @@ -110,8 +112,7 @@ def clean_running_pipelines(ctx, git_ref=DEFAULT_BRANCH, here=False, use_latest_ should be cancelled. 
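    For example (the ref value is illustrative only):

        inv pipeline.clean-running-pipelines --git-ref 7.52.x
        inv pipeline.clean-running-pipelines --here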
""" - gitlab = Gitlab(api_token=get_gitlab_token()) - gitlab.test_project_found() + agent = get_gitlab_repo() if here: git_ref = ctx.run("git rev-parse --abbrev-ref HEAD", hide=True).stdout.strip() @@ -124,14 +125,14 @@ def clean_running_pipelines(ctx, git_ref=DEFAULT_BRANCH, here=False, use_latest_ elif not sha: print(f"Git sha not provided, fetching all running pipelines on {git_ref}") - pipelines = get_running_pipelines_on_same_ref(gitlab, git_ref, sha) + pipelines = get_running_pipelines_on_same_ref(agent, git_ref, sha) print( f"Found {len(pipelines)} running pipeline(s) matching the request.", "They are ordered from the newest one to the oldest one.\n", sep='\n', ) - cancel_pipelines_with_confirmation(gitlab, pipelines) + cancel_pipelines_with_confirmation(agent, pipelines) def workflow_rules(gitlab_file=".gitlab-ci.yml"): @@ -175,37 +176,33 @@ def auto_cancel_previous_pipelines(ctx): if not os.environ.get('GITLAB_TOKEN'): raise Exit("GITLAB_TOKEN variable needed to cancel pipelines on the same ref.", 1) - gitlab = Gitlab(api_token=get_gitlab_token()) - gitlab.test_project_found() - git_ref = os.getenv("CI_COMMIT_REF_NAME") git_sha = os.getenv("CI_COMMIT_SHA") - pipelines = get_running_pipelines_on_same_ref(gitlab, git_ref) - pipelines_without_current = [p for p in pipelines if p["sha"] != git_sha] + repo = get_gitlab_repo() + pipelines = get_running_pipelines_on_same_ref(repo, git_ref) + pipelines_without_current = [p for p in pipelines if p.sha != git_sha] for pipeline in pipelines_without_current: # We cancel pipeline only if it correspond to a commit that is an ancestor of the current commit - is_ancestor = ctx.run(f'git merge-base --is-ancestor {pipeline["sha"]} {git_sha}', warn=True, hide="both") + is_ancestor = ctx.run(f'git merge-base --is-ancestor {pipeline.sha} {git_sha}', warn=True, hide="both") if is_ancestor.exited == 0: - print( - f'Gracefully canceling jobs that are not canceled on pipeline {pipeline["id"]} ({pipeline["web_url"]})' - ) - gracefully_cancel_pipeline(gitlab, pipeline, force_cancel_stages=["package_build"]) + print(f'Gracefully canceling jobs that are not canceled on pipeline {pipeline.id} ({pipeline.web_url})') + gracefully_cancel_pipeline(repo, pipeline, force_cancel_stages=["package_build"]) elif is_ancestor.exited == 1: - print(f'{pipeline["sha"]} is not an ancestor of {git_sha}, not cancelling pipeline {pipeline["id"]}') + print(f'{pipeline.sha} is not an ancestor of {git_sha}, not cancelling pipeline {pipeline.id}') elif is_ancestor.exited == 128: min_time_before_cancel = 5 print( - f'Could not determine if {pipeline["sha"]} is an ancestor of {git_sha}, probably because it has been deleted from the history because of force push' + f'Could not determine if {pipeline.sha} is an ancestor of {git_sha}, probably because it has been deleted from the history because of force push' ) - if datetime.strptime(pipeline["created_at"], "%Y-%m-%dT%H:%M:%S.%fZ") < datetime.now() - timedelta( + if datetime.strptime(pipeline.created_at, "%Y-%m-%dT%H:%M:%S.%fZ") < datetime.now() - timedelta( minutes=min_time_before_cancel ): print( - f'Pipeline started earlier than {min_time_before_cancel} minutes ago, gracefully canceling pipeline {pipeline["id"]}' + f'Pipeline started earlier than {min_time_before_cancel} minutes ago, gracefully canceling pipeline {pipeline.id}' ) - gracefully_cancel_pipeline(gitlab, pipeline, force_cancel_stages=["package_build"]) + gracefully_cancel_pipeline(repo, pipeline, force_cancel_stages=["package_build"]) else: print(is_ancestor.stderr) raise 
Exit(1) @@ -266,8 +263,7 @@ def run( inv pipeline.run --deploy --use-release-entries --major-versions "6,7" --git-ref "7.32.0" --repo-branch "stable" """ - gitlab = Gitlab(api_token=get_gitlab_token()) - gitlab.test_project_found() + repo = get_gitlab_repo() if (not git_ref and not here) or (git_ref and here): raise Exit("ERROR: Exactly one of --here or --git-ref must be specified.", code=1) @@ -290,7 +286,7 @@ def run( if deploy: # Check the validity of the deploy pipeline - check_deploy_pipeline(gitlab, git_ref, release_version_6, release_version_7, repo_branch) + check_deploy_pipeline(repo, git_ref, release_version_6, release_version_7, repo_branch) # Force all builds and kitchen tests to be run if not all_builds: print( @@ -309,7 +305,7 @@ def run( ) e2e_tests = True - pipelines = get_running_pipelines_on_same_ref(gitlab, git_ref) + pipelines = get_running_pipelines_on_same_ref(repo, git_ref) if pipelines: print( @@ -319,11 +315,11 @@ def run( "They are ordered from the newest one to the oldest one.\n", sep='\n', ) - cancel_pipelines_with_confirmation(gitlab, pipelines) + cancel_pipelines_with_confirmation(repo, pipelines) try: - pipeline_id = trigger_agent_pipeline( - gitlab, + pipeline = trigger_agent_pipeline( + repo, git_ref, release_version_6, release_version_7, @@ -338,7 +334,7 @@ def run( print(color_message(f"ERROR: pipeline does not match any workflow rule. Rules:\n{workflow_rules()}", "red")) return - wait_for_pipeline(gitlab, pipeline_id) + wait_for_pipeline(repo, pipeline) @task @@ -356,8 +352,7 @@ def follow(ctx, id=None, git_ref=None, here=False, project_name="DataDog/datadog inv pipeline.follow --id 1234567 """ - gitlab = Gitlab(project_name=project_name, api_token=get_gitlab_token()) - gitlab.test_project_found() + repo = get_gitlab_repo(project_name) args_given = 0 if id is not None: @@ -373,22 +368,25 @@ def follow(ctx, id=None, git_ref=None, here=False, project_name="DataDog/datadog ) if id is not None: - wait_for_pipeline(gitlab, id) + pipeline = repo.pipelines.get(id) + wait_for_pipeline(repo, pipeline) elif git_ref is not None: - wait_for_pipeline_from_ref(gitlab, git_ref) + wait_for_pipeline_from_ref(repo, git_ref) elif here: git_ref = ctx.run("git rev-parse --abbrev-ref HEAD", hide=True).stdout.strip() - wait_for_pipeline_from_ref(gitlab, git_ref) + wait_for_pipeline_from_ref(repo, git_ref) -def wait_for_pipeline_from_ref(gitlab, ref): - pipeline = gitlab.last_pipeline_for_ref(ref) - if pipeline is not None: - wait_for_pipeline(gitlab, pipeline['id']) - else: +def wait_for_pipeline_from_ref(repo: Project, ref): + # Get last updated pipeline + pipelines = repo.pipelines.list(ref=ref, per_page=1, order_by='updated_at') + if len(pipelines) == 0: print(f"No pipelines found for {ref}") raise Exit(code=1) + pipeline = pipelines[0] + wait_for_pipeline(repo, pipeline) + @task(iterable=['variable']) def trigger_child_pipeline(_, git_ref, project_name, variable=None, follow=True): @@ -402,9 +400,9 @@ def trigger_child_pipeline(_, git_ref, project_name, variable=None, follow=True) Use --follow to make this task wait for the pipeline to finish, and return 1 if it fails. (requires GITLAB_TOKEN). 
Examples: - inv pipeline.trigger-child-pipeline --git-ref "master" --project-name "DataDog/agent-release-management" --variables "RELEASE_VERSION" + inv pipeline.trigger-child-pipeline --git-ref "main" --project-name "DataDog/agent-release-management" --variable "RELEASE_VERSION" - inv pipeline.trigger-child-pipeline --git-ref "master" --project-name "DataDog/agent-release-management" --variables "VAR1,VAR2,VAR3" + inv pipeline.trigger-child-pipeline --git-ref "main" --project-name "DataDog/agent-release-management" --variable "VAR1" --variable "VAR2" --variable "VAR3" """ if not os.environ.get('CI_JOB_TOKEN'): @@ -418,7 +416,7 @@ def trigger_child_pipeline(_, git_ref, project_name, variable=None, follow=True) # set, but trigger_pipeline doesn't use it os.environ["GITLAB_TOKEN"] = os.environ['CI_JOB_TOKEN'] - gitlab = Gitlab(project_name=project_name, api_token=get_gitlab_token()) + repo = get_gitlab_repo(project_name) data = {"token": os.environ['CI_JOB_TOKEN'], "ref": git_ref, "variables": {}} @@ -443,23 +441,22 @@ def trigger_child_pipeline(_, git_ref, project_name, variable=None, follow=True) flush=True, ) - res = gitlab.trigger_pipeline(data) + try: + data['variables'] = [{'key': key, 'value': value} for (key, value) in data['variables'].items()] - if 'id' not in res: - raise Exit(f"Failed to create child pipeline: {res}", code=1) + pipeline = repo.pipelines.create(data) + except GitlabError as e: + raise Exit(f"Failed to create child pipeline: {e}", code=1) - pipeline_id = res['id'] - pipeline_url = res['web_url'] - print(f"Created a child pipeline with id={pipeline_id}, url={pipeline_url}", flush=True) + print(f"Created a child pipeline with id={pipeline.id}, url={pipeline.web_url}", flush=True) if follow: print("Waiting for child pipeline to finish...", flush=True) - wait_for_pipeline(gitlab, pipeline_id) + wait_for_pipeline(repo, pipeline) # Check pipeline status - pipeline = gitlab.pipeline(pipeline_id) - pipestatus = pipeline["status"].lower().strip() + pipestatus = pipeline.status.lower().strip() if pipestatus != "success": raise Exit(f"Error: child pipeline status {pipestatus.title()}", code=1) @@ -582,21 +579,16 @@ def changelog(ctx, new_commit_sha): ) -def _init_pipeline_schedule_task(): - gitlab = Gitlab(api_token=get_gitlab_bot_token()) - gitlab.test_project_found() - return gitlab - - @task def get_schedules(_): """ Pretty-print all pipeline schedules on the repository. """ - gitlab = _init_pipeline_schedule_task() - for ps in gitlab.all_pipeline_schedules(): - pprint.pprint(ps) + repo = get_gitlab_repo(token=get_gitlab_bot_token()) + + for sched in repo.pipelineschedules.list(per_page=100, all=True): + sched.pprint() @task @@ -605,9 +597,11 @@ def get_schedule(_, schedule_id): Pretty-print a single pipeline schedule on the repository. """ - gitlab = _init_pipeline_schedule_task() - result = gitlab.pipeline_schedule(schedule_id) - pprint.pprint(result) + repo = get_gitlab_repo(token=get_gitlab_bot_token()) + + sched = repo.pipelineschedules.get(schedule_id) + + sched.pprint() @task @@ -618,9 +612,13 @@ def create_schedule(_, description, ref, cron, cron_timezone=None, active=False) Note that unless you explicitly specify the --active flag, the schedule will be created as inactive. 
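    For example (description, ref and cron values are illustrative only):

        inv pipeline.create-schedule --description "Nightly pipeline" --ref main --cron "0 4 * * *" --cron-timezone "UTC" --active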
""" - gitlab = _init_pipeline_schedule_task() - result = gitlab.create_pipeline_schedule(description, ref, cron, cron_timezone, active) - pprint.pprint(result) + repo = get_gitlab_repo(token=get_gitlab_bot_token()) + + sched = repo.pipelineschedules.create( + {'description': description, 'ref': ref, 'cron': cron, 'cron_timezone': cron_timezone, 'active': active} + ) + + sched.pprint() @task @@ -629,9 +627,14 @@ def edit_schedule(_, schedule_id, description=None, ref=None, cron=None, cron_ti Edit an existing pipeline schedule on the repository. """ - gitlab = _init_pipeline_schedule_task() - result = gitlab.edit_pipeline_schedule(schedule_id, description, ref, cron, cron_timezone) - pprint.pprint(result) + repo = get_gitlab_repo(token=get_gitlab_bot_token()) + + data = {'description': description, 'ref': ref, 'cron': cron, 'cron_timezone': cron_timezone} + data = {key: value for (key, value) in data.items() if value is not None} + + sched = repo.pipelineschedules.update(schedule_id, data) + + pprint.pprint(sched) @task @@ -640,9 +643,11 @@ def activate_schedule(_, schedule_id): Activate an existing pipeline schedule on the repository. """ - gitlab = _init_pipeline_schedule_task() - result = gitlab.edit_pipeline_schedule(schedule_id, active=True) - pprint.pprint(result) + repo = get_gitlab_repo(token=get_gitlab_bot_token()) + + sched = repo.pipelineschedules.update(schedule_id, {'active': True}) + + sched.pprint() @task @@ -651,9 +656,11 @@ def deactivate_schedule(_, schedule_id): Deactivate an existing pipeline schedule on the repository. """ - gitlab = _init_pipeline_schedule_task() - result = gitlab.edit_pipeline_schedule(schedule_id, active=False) - pprint.pprint(result) + repo = get_gitlab_repo(token=get_gitlab_bot_token()) + + sched = repo.pipelineschedules.update(schedule_id, {'active': False}) + + sched.pprint() @task @@ -662,9 +669,11 @@ def delete_schedule(_, schedule_id): Delete an existing pipeline schedule on the repository. """ - gitlab = _init_pipeline_schedule_task() - result = gitlab.delete_pipeline_schedule(schedule_id) - pprint.pprint(result) + repo = get_gitlab_repo(token=get_gitlab_bot_token()) + + repo.pipelineschedules.delete(schedule_id) + + print('Deleted schedule', schedule_id) @task @@ -673,9 +682,12 @@ def create_schedule_variable(_, schedule_id, key, value): Create a variable for an existing schedule on the repository. """ - gitlab = _init_pipeline_schedule_task() - result = gitlab.create_pipeline_schedule_variable(schedule_id, key, value) - pprint.pprint(result) + repo = get_gitlab_repo(token=get_gitlab_bot_token()) + + sched = repo.pipelineschedules.get(schedule_id) + sched.variables.create({'key': key, 'value': value}) + + sched.pprint() @task @@ -684,9 +696,12 @@ def edit_schedule_variable(_, schedule_id, key, value): Edit an existing variable for a schedule on the repository. """ - gitlab = _init_pipeline_schedule_task() - result = gitlab.edit_pipeline_schedule_variable(schedule_id, key, value) - pprint.pprint(result) + repo = get_gitlab_repo(token=get_gitlab_bot_token()) + + sched = repo.pipelineschedules.get(schedule_id) + sched.variables.update(key, {'value': value}) + + sched.pprint() @task @@ -695,9 +710,12 @@ def delete_schedule_variable(_, schedule_id, key): Delete an existing variable for a schedule on the repository. 
""" - gitlab = _init_pipeline_schedule_task() - result = gitlab.delete_pipeline_schedule_variable(schedule_id, key) - pprint.pprint(result) + repo = get_gitlab_repo(token=get_gitlab_bot_token()) + + sched = repo.pipelineschedules.get(schedule_id) + sched.variables.delete(key) + + sched.pprint() @task( @@ -908,28 +926,28 @@ def test_merge_queue(ctx): pr.create_issue_comment("/merge") # Search for the generated pipeline print(f"PR {pr.html_url} is waiting for MQ pipeline generation") - gitlab = Gitlab(api_token=get_gitlab_token()) + agent = get_gitlab_repo() max_attempts = 5 for attempt in range(max_attempts): time.sleep(30) - pipelines = gitlab.last_pipelines() + pipelines = agent.pipelines.list(per_page=100) try: - pipeline = next(p for p in pipelines if p["ref"].startswith(f"mq-working-branch-{test_main}")) - print(f"Pipeline found: {pipeline['web_url']}") + pipeline = next(p for p in pipelines if p.ref.startswith(f"mq-working-branch-{test_main}")) + print(f"Pipeline found: {pipeline.web_url}") break except StopIteration: if attempt == max_attempts - 1: raise RuntimeError("No pipeline found for the merge queue") continue - success = pipeline["status"] == "running" + success = pipeline.status == "running" if success: print("Pipeline correctly created, congrats") else: - print(f"[ERROR] Impossible to generate a pipeline for the merge queue, please check {pipeline['web_url']}") + print(f"[ERROR] Impossible to generate a pipeline for the merge queue, please check {pipeline.web_url}") # Clean up print("Cleaning up") if success: - gitlab.cancel_pipeline(pipeline["id"]) + pipeline.cancel() pr.edit(state="closed") ctx.run(f"git checkout {current_branch}", hide=True) ctx.run(f"git branch -D {test_main}", hide=True) diff --git a/tasks/release.py b/tasks/release.py index 9b882ccabdbb0..27eb8813640da 100644 --- a/tasks/release.py +++ b/tasks/release.py @@ -11,11 +11,12 @@ from datetime import date from time import sleep +from gitlab import GitlabError from invoke import Failure, task from invoke.exceptions import Exit from tasks.libs.ciproviders.github_api import GithubAPI -from tasks.libs.ciproviders.gitlab import Gitlab, get_gitlab_token +from tasks.libs.ciproviders.gitlab_api import get_gitlab_repo from tasks.libs.common.color import color_message from tasks.libs.common.user_interactions import yes_no_question from tasks.libs.common.utils import ( @@ -1332,7 +1333,7 @@ def build_rc(ctx, major_versions="6,7", patch_version=False, k8s_deployments=Fal if sys.version_info[0] < 3: return Exit(message="Must use Python 3 for this task", code=1) - gitlab = Gitlab(project_name=GITHUB_REPO_NAME, api_token=get_gitlab_token()) + datadog_agent = get_gitlab_repo() list_major_versions = parse_major_versions(major_versions) # Get the version of the highest major: needed for tag_version and to know @@ -1381,7 +1382,11 @@ def build_rc(ctx, major_versions="6,7", patch_version=False, k8s_deployments=Fal print(color_message(f"Waiting until the {new_version} tag appears in Gitlab", "bold")) gitlab_tag = None while not gitlab_tag: - gitlab_tag = gitlab.find_tag(str(new_version)).get("name", None) + try: + gitlab_tag = datadog_agent.tags.get(str(new_version)) + except GitlabError: + continue + sleep(5) print(color_message("Creating RC pipeline", "bold")) diff --git a/tasks/unit-tests/gitlab_api_tests.py b/tasks/unit-tests/gitlab_api_tests.py index ad618b0d380ff..24399f816c8bd 100644 --- a/tasks/unit-tests/gitlab_api_tests.py +++ b/tasks/unit-tests/gitlab_api_tests.py @@ -1,97 +1,6 @@ import unittest -from itertools 
import cycle -from unittest import mock -from invoke.exceptions import Exit - -from tasks.libs.ciproviders.gitlab import Gitlab, generate_gitlab_full_configuration, get_gitlab_token, read_includes -from tasks.libs.common.remote_api import APIError - - -class MockResponse: - def __init__(self, content, status_code): - self.content = content - self.status_code = status_code - - def json(self): - return self.content - - -#################### FAIL REQUEST ##################### - - -def fail_not_found_request(*_args, **_kwargs): - return MockResponse([], 404) - - -##################### MOCKED GITLAB ##################### - - -def mocked_502_gitlab_requests(*_args, **_kwargs): - return MockResponse( - "\r\n502 Bad Gateway\r\n\r\n

<center><h1>502 Bad Gateway</h1></center>
\r\n\r\n\r\n", - 502, - ) - - -def mocked_gitlab_project_request(*_args, **_kwargs): - return MockResponse("name", 200) - - -class SideEffect: - def __init__(self, *fargs): - self.functions = cycle(fargs) - - def __call__(self, *args, **kwargs): - func = next(self.functions) - return func(*args, **kwargs) - - -class TestStatusCode5XX(unittest.TestCase): - @mock.patch('requests.get', side_effect=SideEffect(mocked_502_gitlab_requests, mocked_gitlab_project_request)) - def test_gitlab_one_fail_one_success(self, _): - gitlab = Gitlab(api_token=get_gitlab_token()) - gitlab.requests_sleep_time = 0 - gitlab.test_project_found() - - @mock.patch( - 'requests.get', - side_effect=SideEffect( - mocked_502_gitlab_requests, - mocked_502_gitlab_requests, - mocked_502_gitlab_requests, - mocked_502_gitlab_requests, - mocked_gitlab_project_request, - ), - ) - def test_gitlab_last_one_success(self, _): - gitlab = Gitlab(api_token=get_gitlab_token()) - gitlab.requests_sleep_time = 0 - gitlab.test_project_found() - - @mock.patch('requests.get', side_effect=SideEffect(mocked_502_gitlab_requests)) - def test_gitlab_full_fail(self, _): - failed = False - try: - gitlab = Gitlab(api_token=get_gitlab_token()) - gitlab.requests_sleep_time = 0 - gitlab.test_project_found() - except Exit: - failed = True - if not failed: - Exit("GitlabAPI was expected to fail") - - @mock.patch('requests.get', side_effect=SideEffect(fail_not_found_request, mocked_gitlab_project_request)) - def test_gitlab_real_fail(self, _): - failed = False - try: - gitlab = Gitlab(api_token=get_gitlab_token()) - gitlab.requests_sleep_time = 0 - gitlab.test_project_found() - except APIError: - failed = True - if not failed: - Exit("GitlabAPI was expected to fail") +from tasks.libs.ciproviders.gitlab_api import generate_gitlab_full_configuration, read_includes class TestReadIncludes(unittest.TestCase): diff --git a/tasks/unit-tests/notify_tests.py b/tasks/unit-tests/notify_tests.py index 01b54f89170c3..8d3c5b7173d1a 100644 --- a/tasks/unit-tests/notify_tests.py +++ b/tasks/unit-tests/notify_tests.py @@ -2,8 +2,10 @@ import os import pathlib import unittest +from typing import List from unittest.mock import MagicMock, patch +from gitlab.v4.objects import ProjectJob from invoke import MockContext, Result from invoke.exceptions import UnexpectedExit @@ -11,67 +13,95 @@ from tasks.libs.types.types import FailedJobs, FailedJobType +def get_fake_jobs() -> List[ProjectJob]: + with open("tasks/unit-tests/testdata/jobs.json") as f: + jobs = json.load(f) + + return [ProjectJob(MagicMock(), attrs=job) for job in jobs] + + class TestSendMessage(unittest.TestCase): + @patch('tasks.libs.ciproviders.gitlab_api.get_gitlab_api') + def test_merge(self, api_mock): + repo_mock = api_mock.return_value.projects.get.return_value + repo_mock.jobs.get.return_value.trace.return_value = b"Log trace" + list_mock = repo_mock.pipelines.get.return_value.jobs.list + list_mock.side_effect = [get_fake_jobs(), []] + notify.send_message(MockContext(), notification_type="merge", print_to_stdout=True) + list_mock.assert_called() + @patch("tasks.notify.get_failed_jobs") def test_merge_without_get_failed_call(self, get_failed_jobs_mock): failed = FailedJobs() failed.add_failed_job( - { - "name": "job1", - "stage": "stage1", - "retry_summary": [], - "url": "http://www.job.com", - "failure_type": FailedJobType.INFRA_FAILURE, - "allow_failure": False, - } + ProjectJob( + MagicMock(), + attrs={ + "name": "job1", + "stage": "stage1", + "retry_summary": [], + "web_url": "http://www.job.com", + 
"failure_type": FailedJobType.INFRA_FAILURE, + "allow_failure": False, + }, + ) ) failed.add_failed_job( - { - "name": "job2", - "stage": "stage2", - "retry_summary": [], - "url": "http://www.job.com", - "failure_type": FailedJobType.INFRA_FAILURE, - "allow_failure": True, - } + ProjectJob( + MagicMock(), + attrs={ + "name": "job2", + "stage": "stage2", + "retry_summary": [], + "web_url": "http://www.job.com", + "failure_type": FailedJobType.INFRA_FAILURE, + "allow_failure": True, + }, + ) ) failed.add_failed_job( - { - "name": "job3", - "stage": "stage3", - "retry_summary": [], - "url": "http://www.job.com", - "failure_type": FailedJobType.JOB_FAILURE, - "allow_failure": False, - } + ProjectJob( + MagicMock(), + attrs={ + "name": "job3", + "stage": "stage3", + "retry_summary": [], + "web_url": "http://www.job.com", + "failure_type": FailedJobType.JOB_FAILURE, + "allow_failure": False, + }, + ) ) failed.add_failed_job( - { - "name": "job4", - "stage": "stage4", - "retry_summary": [], - "url": "http://www.job.com", - "failure_type": FailedJobType.JOB_FAILURE, - "allow_failure": True, - } + ProjectJob( + MagicMock(), + attrs={ + "name": "job4", + "stage": "stage4", + "retry_summary": [], + "web_url": "http://www.job.com", + "failure_type": FailedJobType.JOB_FAILURE, + "allow_failure": True, + }, + ) ) get_failed_jobs_mock.return_value = failed notify.send_message(MockContext(), notification_type="merge", print_to_stdout=True) get_failed_jobs_mock.assert_called() - @patch("requests.get") - def test_merge_with_get_failed_call(self, get_mock): - with open("tasks/unit-tests/testdata/jobs.json") as f: - jobs = json.load(f) - job_list = {"json.return_value": jobs} - no_jobs = {"json.return_value": ""} - get_mock.side_effect = [ - MagicMock(status_code=200, **job_list), - MagicMock(status_code=200, **no_jobs), - MagicMock(status_code=200, text="no basic auth credentials"), - ] + @patch('tasks.libs.ciproviders.gitlab_api.get_gitlab_api') + def test_merge_with_get_failed_call(self, api_mock): + repo_mock = api_mock.return_value.projects.get.return_value + trace_mock = repo_mock.jobs.get.return_value.trace + list_mock = repo_mock.pipelines.get.return_value.jobs.list + + trace_mock.return_value = b"no basic auth credentials" + list_mock.return_value = get_fake_jobs() + notify.send_message(MockContext(), notification_type="merge", print_to_stdout=True) - get_mock.assert_called() + + trace_mock.assert_called() + list_mock.assert_called() def test_post_to_channel1(self): self.assertTrue(notify._should_send_message_to_channel('main', default_branch='main')) @@ -102,39 +132,40 @@ def test_post_to_author5(self): class TestSendStats(unittest.TestCase): - @patch("requests.get") + @patch('tasks.libs.ciproviders.gitlab_api.get_gitlab_api') @patch("tasks.notify.create_count", new=MagicMock()) - def test_nominal(self, get_mock): - with open("tasks/unit-tests/testdata/jobs.json") as f: - jobs = json.load(f) - job_list = {"json.return_value": jobs} - no_jobs = {"json.return_value": ""} - get_mock.side_effect = [ - MagicMock(status_code=200, **job_list), - MagicMock(status_code=200, **no_jobs), - MagicMock(status_code=200, text="E2E INTERNAL ERROR"), - ] + def test_nominal(self, api_mock): + repo_mock = api_mock.return_value.projects.get.return_value + trace_mock = repo_mock.jobs.get.return_value.trace + list_mock = repo_mock.pipelines.get.return_value.jobs.list + + trace_mock.return_value = b"E2E INTERNAL ERROR" + list_mock.return_value = get_fake_jobs() + notify.send_stats(MockContext(), print_to_stdout=True) - 
get_mock.assert_called() + + trace_mock.assert_called() + list_mock.assert_called() class TestCheckConsistentFailures(unittest.TestCase): - @patch("requests.get") - def test_nominal(self, get_mock): + @patch('tasks.libs.ciproviders.gitlab_api.get_gitlab_api') + def test_nominal(self, api_mock): os.environ["CI_PIPELINE_ID"] = "456" - with open("tasks/unit-tests/testdata/jobs.json") as f: - jobs = json.load(f) - job_list = {"json.return_value": jobs} - no_jobs = {"json.return_value": ""} - get_mock.side_effect = [ - MagicMock(status_code=200, **job_list), - MagicMock(status_code=200, **no_jobs), - MagicMock(status_code=200, text="net/http: TLS handshake timeout"), - ] + + repo_mock = api_mock.return_value.projects.get.return_value + trace_mock = repo_mock.jobs.get.return_value.trace + list_mock = repo_mock.pipelines.get.return_value.jobs.list + + trace_mock.return_value = b"net/http: TLS handshake timeout" + list_mock.return_value = get_fake_jobs() + notify.check_consistent_failures( MockContext(run=Result("test")), "tasks/unit-tests/testdata/job_executions.json" ) - get_mock.assert_called() + + trace_mock.assert_called() + list_mock.assert_called() class TestRetrieveJobExecutionsCreated(unittest.TestCase): @@ -173,7 +204,9 @@ class TestUpdateStatistics(unittest.TestCase): @patch('tasks.notify.get_failed_jobs') def test_nominal(self, mock_get_failed): failed_jobs = mock_get_failed.return_value - failed_jobs.all_failures.return_value = [{"name": "nifnif"}, {"name": "nafnaf"}] + failed_jobs.all_failures.return_value = [ + ProjectJob(MagicMock(), attrs=a) for a in [{"name": "nifnif"}, {"name": "nafnaf"}] + ] j = { "jobs": { "nafnaf": {"consecutive_failures": 2, "cumulative_failures": [0, 0, 0, 0, 0, 0, 0, 0, 1, 1]}, @@ -195,7 +228,9 @@ def test_nominal(self, mock_get_failed): @patch('tasks.notify.get_failed_jobs') def test_multiple_failures(self, mock_get_failed): failed_jobs = mock_get_failed.return_value - failed_jobs.all_failures.return_value = [{"name": "poulidor"}, {"name": "virenque"}, {"name": "bardet"}] + failed_jobs.all_failures.return_value = [ + ProjectJob(MagicMock(), attrs=a) for a in [{"name": "poulidor"}, {"name": "virenque"}, {"name": "bardet"}] + ] j = { "jobs": { "poulidor": {"consecutive_failures": 8, "cumulative_failures": [0, 0, 1, 1, 1, 1, 1, 1, 1, 1]}, From c08f9fb447640dcdc0a6f4fadd552b95f65c7540 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9my=20Mathieu?= Date: Fri, 12 Apr 2024 14:34:35 +0200 Subject: [PATCH 14/99] tasks/gitlab: build the SDS library and use it for tests + ship it in the Agent (#24469) * tasks/gitlab: build the SDS library and use it for tests + ship it in the Agent. * tasks: linter on the sds task * tasks: use a namespace for the SDS task. * Use `copy` DSL instead of `cp` in a `command` call. * Python linter * ci: not building the sds shared library for the linter pass * tasks: include_sds flag in the "compute flags" function instead. 
* omnibus: better inclusion of the sds software * tasks: clearer message Windows unsupported * remove a race only happening in the unit tests detected --- .gitlab/source_test/linux.yml | 3 +- omnibus/config/software/agent-dependencies.rb | 4 ++ omnibus/config/software/datadog-agent.rb | 2 +- omnibus/config/software/sds.rb | 20 ++++++++++ pkg/logs/sds/scanner_test.go | 14 ++++--- tasks/__init__.py | 2 + tasks/agent.py | 4 ++ tasks/build_tags.py | 11 +++++- tasks/go_test.py | 10 ++++- tasks/linter.py | 12 +++++- tasks/sds.py | 38 +++++++++++++++++++ 11 files changed, 108 insertions(+), 12 deletions(-) create mode 100644 omnibus/config/software/sds.rb create mode 100644 tasks/sds.py diff --git a/.gitlab/source_test/linux.yml b/.gitlab/source_test/linux.yml index 6377e90f09362..5b55647979044 100644 --- a/.gitlab/source_test/linux.yml +++ b/.gitlab/source_test/linux.yml @@ -34,7 +34,8 @@ - popd - FAST_TESTS_FLAG="" - if [[ "$FAST_TESTS" == "true" ]]; then FAST_TESTS_FLAG="--only-impacted-packages"; fi - - inv -e test $FLAVORS --race --profile --rerun-fails=2 --python-runtimes "$PYTHON_RUNTIMES" --coverage --cpus $KUBERNETES_CPU_REQUEST $EXTRA_OPTS --save-result-json $TEST_OUTPUT_FILE --junit-tar "junit-${CI_JOB_NAME}.tgz" --build-stdlib $FAST_TESTS_FLAG + - inv -e sds.build-library + - inv -e test $FLAVORS --include-sds --race --profile --rerun-fails=2 --python-runtimes "$PYTHON_RUNTIMES" --coverage --cpus $KUBERNETES_CPU_REQUEST $EXTRA_OPTS --save-result-json $TEST_OUTPUT_FILE --junit-tar "junit-${CI_JOB_NAME}.tgz" --build-stdlib $FAST_TESTS_FLAG artifacts: expire_in: 2 weeks when: always diff --git a/omnibus/config/software/agent-dependencies.rb b/omnibus/config/software/agent-dependencies.rb index 8604878e0ae19..2a4809ad8d84f 100644 --- a/omnibus/config/software/agent-dependencies.rb +++ b/omnibus/config/software/agent-dependencies.rb @@ -12,6 +12,10 @@ # External agents dependency 'jmxfetch' +if linux_target? || osx_target? + dependency 'sds' +end + # version manifest file dependency 'version-manifest' diff --git a/omnibus/config/software/datadog-agent.rb b/omnibus/config/software/datadog-agent.rb index bc055acfe7ebb..d3b7f162915d2 100644 --- a/omnibus/config/software/datadog-agent.rb +++ b/omnibus/config/software/datadog-agent.rb @@ -80,7 +80,7 @@ command "inv -e rtloader.make --python-runtimes #{py_runtimes_arg} --install-prefix \"#{install_dir}/embedded\" --cmake-options '-DCMAKE_CXX_FLAGS:=\"-D_GLIBCXX_USE_CXX11_ABI=0 -I#{install_dir}/embedded/include\" -DCMAKE_C_FLAGS:=\"-I#{install_dir}/embedded/include\" -DCMAKE_INSTALL_LIBDIR=lib -DCMAKE_FIND_FRAMEWORK:STRING=NEVER'", :env => env command "inv -e rtloader.install" bundle_arg = bundled_agents ? bundled_agents.map { |k| "--bundle #{k}" }.join(" ") : "--bundle agent" - command "inv -e agent.build --exclude-rtloader --python-runtimes #{py_runtimes_arg} --major-version #{major_version_arg} --rebuild --no-development --install-path=#{install_dir} --embedded-path=#{install_dir}/embedded --python-home-2=#{install_dir}/embedded --python-home-3=#{install_dir}/embedded --flavor #{flavor_arg} #{bundle_arg}", env: env + command "inv -e agent.build --exclude-rtloader --include-sds --python-runtimes #{py_runtimes_arg} --major-version #{major_version_arg} --rebuild --no-development --install-path=#{install_dir} --embedded-path=#{install_dir}/embedded --python-home-2=#{install_dir}/embedded --python-home-3=#{install_dir}/embedded --flavor #{flavor_arg} #{bundle_arg}", env: env if heroku_target? 
command "inv -e agent.build --exclude-rtloader --python-runtimes #{py_runtimes_arg} --major-version #{major_version_arg} --rebuild --no-development --install-path=#{install_dir} --embedded-path=#{install_dir}/embedded --python-home-2=#{install_dir}/embedded --python-home-3=#{install_dir}/embedded --flavor #{flavor_arg} --agent-bin=bin/agent/core-agent --bundle agent", env: env end diff --git a/omnibus/config/software/sds.rb b/omnibus/config/software/sds.rb new file mode 100644 index 0000000000000..b8b6f21ff47f4 --- /dev/null +++ b/omnibus/config/software/sds.rb @@ -0,0 +1,20 @@ +name "sds" + +default_version "042de62f5a24fbceb4f4849256c3ee5c005b7057" +source git: 'https://github.com/DataDog/dd-sensitive-data-scanner' + +build do + license "Apache-2.0" + license_file "./LICENSE" + + # no Windows support for now. + if linux_target? || osx_target? + command "cargo build --release", cwd: "#{project_dir}/sds-go/rust" + if osx_target? + copy "sds-go/rust/target/release/libsds_go.dylib", "#{install_dir}/embedded/lib" + end + if linux_target? + copy "sds-go/rust/target/release/libsds_go.so", "#{install_dir}/embedded/lib" + end + end +end diff --git a/pkg/logs/sds/scanner_test.go b/pkg/logs/sds/scanner_test.go index 0b36e57e0286c..da825e221844e 100644 --- a/pkg/logs/sds/scanner_test.go +++ b/pkg/logs/sds/scanner_test.go @@ -435,16 +435,18 @@ func TestCloseCycleScan(t *testing.T) { }, } + // this test is about being over-cautious, making sure the Scan method + // will never cause a race when calling the Delete method at the same time. + // It can't happen with the current implementation / concurrency pattern + // used in processor.go, but I'm being over-cautious because if it happens + // in the future because of someone changing the processor implementation, + // it could lead to a panic and a hard crash of the Agent. + go func() { for { for k, _ := range tests { msg := message.Message{} - if s.IsReady() { - _, _, err := s.Scan([]byte(k), &msg) - require.NoError(err) - } else { - return - } + s.Scan([]byte(k), &msg) } } }() diff --git a/tasks/__init__.py b/tasks/__init__.py index ae5f016b0af65..99f726eb4c8b6 100644 --- a/tasks/__init__.py +++ b/tasks/__init__.py @@ -38,6 +38,7 @@ process_agent, release, rtloader, + sds, security_agent, selinux, system_probe, @@ -146,6 +147,7 @@ ns.add_collection(package) ns.add_collection(pipeline) ns.add_collection(notify) +ns.add_collection(sds) ns.add_collection(selinux) ns.add_collection(systray) ns.add_collection(release) diff --git a/tasks/agent.py b/tasks/agent.py index d6c5f048d8ec2..590a4a30b5920 100644 --- a/tasks/agent.py +++ b/tasks/agent.py @@ -119,6 +119,7 @@ def build( python_runtimes='3', arch='x64', exclude_rtloader=False, + include_sds=False, go_mod="mod", windows_sysprobe=False, cmake_options='', @@ -199,6 +200,9 @@ def build( if not agent_bin: agent_bin = os.path.join(BIN_PATH, bin_name("agent")) + if include_sds: + build_tags.append("sds") + cmd += "-o {agent_bin} -gcflags=\"{gcflags}\" -ldflags=\"{ldflags}\" {REPO_PATH}/cmd/{flavor}" args = { "go_mod": go_mod, diff --git a/tasks/build_tags.py b/tasks/build_tags.py index 484e30ea8472e..694dfc391d642 100644 --- a/tasks/build_tags.py +++ b/tasks/build_tags.py @@ -233,11 +233,11 @@ def compute_build_tags_for_flavor( build_include: List[str], build_exclude: List[str], flavor: AgentFlavor = AgentFlavor.base, + include_sds: bool = False, ): """ Given a flavor, an architecture, a list of tags to include and exclude, get the final list of tags that should be applied. 
- If the list of build tags to include is empty, take the default list of build tags for the flavor or arch. Otherwise, use the list of build tags to include, minus incompatible tags for the given architecture. @@ -249,8 +249,15 @@ def compute_build_tags_for_flavor( if build_include is None else filter_incompatible_tags(build_include.split(","), arch=arch) ) + build_exclude = [] if build_exclude is None else build_exclude.split(",") - return get_build_tags(build_include, build_exclude) + + list = get_build_tags(build_include, build_exclude) + + if include_sds: + list.append("sds") + + return list @task diff --git a/tasks/go_test.py b/tasks/go_test.py index c575082b1b2f1..d09d696d7ebd7 100644 --- a/tasks/go_test.py +++ b/tasks/go_test.py @@ -324,6 +324,7 @@ def test( junit_tar="", only_modified_packages=False, only_impacted_packages=False, + include_sds=False, skip_flakes=False, build_stdlib=False, ): @@ -349,7 +350,12 @@ def test( unit_tests_tags = { f: compute_build_tags_for_flavor( - flavor=f, build="unit-tests", arch=arch, build_include=build_include, build_exclude=build_exclude + flavor=f, + build="unit-tests", + arch=arch, + build_include=build_include, + build_exclude=build_exclude, + include_sds=include_sds, ) for f in flavors } @@ -928,6 +934,7 @@ def lint_go( timeout: int = None, golangci_lint_kwargs="", headless_mode=False, + include_sds=False, ): _lint_go( ctx, @@ -944,4 +951,5 @@ def lint_go( timeout, golangci_lint_kwargs, headless_mode, + include_sds, ) diff --git a/tasks/linter.py b/tasks/linter.py index 7c22d6dc60772..d79cf02a7aea7 100644 --- a/tasks/linter.py +++ b/tasks/linter.py @@ -104,6 +104,7 @@ def go( timeout: int = None, golangci_lint_kwargs="", headless_mode=False, + include_sds=False, ): """ Run go linters on the given module and targets. 
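A rough local-usage sketch with the new flag, assuming the task is exposed as linter.go (the sds.build-library task is added later in this same patch, in tasks/sds.py):

    inv -e sds.build-library
    inv -e linter.go --include-sds
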
@@ -137,6 +138,7 @@ def go( timeout=timeout, golangci_lint_kwargs=golangci_lint_kwargs, headless_mode=headless_mode, + include_sds=include_sds, ) @@ -156,6 +158,7 @@ def _lint_go( timeout, golangci_lint_kwargs, headless_mode, + include_sds, ): if not check_tools_version(ctx, ['go', 'golangci-lint']): print("Warning: If you have linter errors it might be due to version mismatches.", file=sys.stderr) @@ -184,6 +187,7 @@ def _lint_go( timeout=timeout, golangci_lint_kwargs=golangci_lint_kwargs, headless_mode=headless_mode, + include_sds=include_sds, ) success = process_module_results(modules_results_per_phase) @@ -211,13 +215,19 @@ def run_lint_go( timeout=None, golangci_lint_kwargs="", headless_mode=False, + include_sds=False, ): modules, flavors = process_input_args(module, targets, flavors, headless_mode) linter_tags = { f: build_tags or compute_build_tags_for_flavor( - flavor=f, build=build, arch=arch, build_include=build_include, build_exclude=build_exclude + flavor=f, + build=build, + arch=arch, + build_include=build_include, + build_exclude=build_exclude, + include_sds=include_sds, ) for f in flavors } diff --git a/tasks/sds.py b/tasks/sds.py new file mode 100644 index 0000000000000..1c8989dc7238b --- /dev/null +++ b/tasks/sds.py @@ -0,0 +1,38 @@ +import os +import sys +import tempfile + +from invoke import task + +from tasks.rtloader import get_dev_path + +is_windows = sys.platform == "win32" +is_darwin = sys.platform == "darwin" + + +@task +def build_library(ctx): + """ + Build the SDS shared library + """ + if is_windows: + print("Not building the SDS library: unsupported on Windows.", file=sys.stderr) + return + with tempfile.TemporaryDirectory() as temp_dir: + with ctx.cd(temp_dir): + ctx.run("git clone https://github.com/DataDog/dd-sensitive-data-scanner") + with ctx.cd("dd-sensitive-data-scanner/sds-go/rust"): + ctx.run("cargo build --release") + # write the lib besides rtloader libs + dev_path = get_dev_path() + lib_path = os.path.join(dev_path, "lib") + lib64_path = os.path.join(dev_path, "lib64") + # We do not support Windows for now. 
+ if is_darwin: + ctx.run(f"cp target/release/libsds_go.dylib {lib_path}") + if os.path.exists(lib64_path): + ctx.run(f"cp target/release/libsds_go.dylib {lib64_path}") + else: + ctx.run(f"cp target/release/libsds_go.so {lib_path}") + if os.path.exists(lib64_path): + ctx.run(f"cp target/release/libsds_go.so {lib64_path}") From e3e5873c48b01c88ea9a1a4c9f5b375b915805ac Mon Sep 17 00:00:00 2001 From: Kevin Fairise <132568982+KevinFairise2@users.noreply.github.com> Date: Fri, 12 Apr 2024 14:48:36 +0200 Subject: [PATCH 15/99] Use tasks in import tasks.libs import (#24627) * Use tasks in import tasks.libs import * Use tasks in import tasks.libs import --- tasks/devcontainer.py | 2 +- tasks/vscode.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tasks/devcontainer.py b/tasks/devcontainer.py index e81ccbaf089cc..dee75d4e56a80 100644 --- a/tasks/devcontainer.py +++ b/tasks/devcontainer.py @@ -10,10 +10,10 @@ from invoke import task from invoke.exceptions import Exit -from libs.common.color import color_message from tasks.build_tags import build_tags, filter_incompatible_tags, get_build_tags, get_default_build_tags from tasks.flavor import AgentFlavor +from tasks.libs.common.color import color_message DEVCONTAINER_DIR = ".devcontainer" DEVCONTAINER_FILE = "devcontainer.json" diff --git a/tasks/vscode.py b/tasks/vscode.py index b9984c1e73f6b..35a51e8deb319 100644 --- a/tasks/vscode.py +++ b/tasks/vscode.py @@ -8,10 +8,10 @@ from typing import OrderedDict from invoke import task -from libs.common.color import color_message from tasks.build_tags import build_tags, filter_incompatible_tags, get_build_tags, get_default_build_tags from tasks.flavor import AgentFlavor +from tasks.libs.common.color import color_message VSCODE_DIR = ".vscode" VSCODE_FILE = "settings.json" From 5d344a5c6f6ea037cead2f3d37b998d0d1be56e9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Aur=C3=A8le=20Oul=C3=A8s?= Date: Fri, 12 Apr 2024 14:54:44 +0200 Subject: [PATCH 16/99] Collect all pod limits and requests (#24609) * Collect all limits and requests * Add release notes --- .../cluster/orchestrator/transformers/k8s/pod.go | 13 +++++++++++++ ...ct-pod-limits-and-requests-5abaf77788cd411c.yaml | 4 ++++ 2 files changed, 17 insertions(+) create mode 100644 releasenotes/notes/collect-pod-limits-and-requests-5abaf77788cd411c.yaml diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/pod.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/pod.go index 71becf7062efa..d177be3533b64 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/pod.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/pod.go @@ -262,6 +262,19 @@ func convertResourceRequirements(rq corev1.ResourceRequirements, containerName s } } + // Fill non-default values (other than CPU and Memory) + for resourceName, quantity := range rq.Limits { + if _, found := limits[resourceName.String()]; !found { + limits[resourceName.String()] = quantity.Value() + } + } + + for resourceName, quantity := range rq.Requests { + if _, found := requests[resourceName.String()]; !found { + requests[resourceName.String()] = quantity.Value() + } + } + return &model.ResourceRequirements{ Limits: limits, Requests: requests, diff --git a/releasenotes/notes/collect-pod-limits-and-requests-5abaf77788cd411c.yaml b/releasenotes/notes/collect-pod-limits-and-requests-5abaf77788cd411c.yaml new file mode 100644 index 0000000000000..87bc24d9f4277 --- /dev/null +++ 
b/releasenotes/notes/collect-pod-limits-and-requests-5abaf77788cd411c.yaml @@ -0,0 +1,4 @@ +--- +enhancements: + - | + Collect pod limits and requests. From 7e34b6c6eddb7816901299dacad7ccdea2e0bdd3 Mon Sep 17 00:00:00 2001 From: Nicolas Schweitzer Date: Fri, 12 Apr 2024 15:13:04 +0200 Subject: [PATCH 17/99] feat(ci): Add coverage run for invoke task and set them aside go tests (#23700) * feat(ci): Add coverage run for invoke task and set them aside go tests * Force test execution on main * and add execution of the task in MQ --- .gitlab-ci.yml | 15 +++++++++++- .gitlab/source_test/include.yml | 1 + .gitlab/source_test/linux.yml | 5 ---- .gitlab/source_test/tooling_unit_tests.yml | 27 ++++++++++++++++++++++ tasks/winbuildscripts/unittests.ps1 | 17 -------------- 5 files changed, 42 insertions(+), 23 deletions(-) create mode 100644 .gitlab/source_test/tooling_unit_tests.yml diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index c5bd424543daa..baac10e636852 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1053,4 +1053,17 @@ workflow: - .gitlab-ci.yml - .gitlab/**/* compare_to: main # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916 - \ No newline at end of file + +.on_invoke_tasks_changes: + - <<: *if_main_branch + - changes: + paths: + - tasks/**/* + compare_to: main # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916 + +.on_kitchen_invoke_tasks_changes: + - <<: *if_main_branch + - changes: + paths: + - test/kitchen/tasks/**/* + compare_to: main # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916 diff --git a/.gitlab/source_test/include.yml b/.gitlab/source_test/include.yml index 7dce6c67c9a5d..60666c8d4f5a1 100644 --- a/.gitlab/source_test/include.yml +++ b/.gitlab/source_test/include.yml @@ -12,3 +12,4 @@ include: - .gitlab/source_test/slack.yml - .gitlab/source_test/golang_deps_diff.yml - .gitlab/source_test/notify.yml + - .gitlab/source_test/tooling_unit_tests.yml diff --git a/.gitlab/source_test/linux.yml b/.gitlab/source_test/linux.yml index 5b55647979044..5304729be7918 100644 --- a/.gitlab/source_test/linux.yml +++ b/.gitlab/source_test/linux.yml @@ -27,11 +27,6 @@ script: - !reference [.retrieve_linux_go_tools_deps] - inv -e install-tools - - python3 -m pip install -r tasks/libs/requirements-github.txt - - inv -e invoke-unit-tests - - pushd test/kitchen - - inv -e kitchen.invoke-unit-tests - - popd - FAST_TESTS_FLAG="" - if [[ "$FAST_TESTS" == "true" ]]; then FAST_TESTS_FLAG="--only-impacted-packages"; fi - inv -e sds.build-library diff --git a/.gitlab/source_test/tooling_unit_tests.yml b/.gitlab/source_test/tooling_unit_tests.yml new file mode 100644 index 0000000000000..192642b9631da --- /dev/null +++ b/.gitlab/source_test/tooling_unit_tests.yml @@ -0,0 +1,27 @@ +--- +# Unit test of internal python code +invoke_unit_tests: + stage: source_test + image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES + tags: ["arch:amd64"] + needs: [] + rules: + - !reference [.on_invoke_tasks_changes] + script: + - source /root/.bashrc + - python3 -m pip install -r tasks/libs/requirements-github.txt + - inv -e invoke-unit-tests + +kitchen_invoke_unit_tests: + stage: source_test + image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES + tags: ["arch:amd64"] + needs: [] + 
rules: + - !reference [.on_kitchen_invoke_tasks_changes] + script: + - source /root/.bashrc + - python3 -m pip install -r tasks/libs/requirements-github.txt + - pushd test/kitchen + - inv -e kitchen.invoke-unit-tests + - popd diff --git a/tasks/winbuildscripts/unittests.ps1 b/tasks/winbuildscripts/unittests.ps1 index f948bef117aea..a5e29249d9b51 100644 --- a/tasks/winbuildscripts/unittests.ps1 +++ b/tasks/winbuildscripts/unittests.ps1 @@ -16,23 +16,6 @@ $UT_BUILD_ROOT=(Get-Location).Path $Env:PATH="$UT_BUILD_ROOT\dev\lib;$Env:GOPATH\bin;$Env:Python3_ROOT_DIR;$Env:Python3_ROOT_DIR\Scripts;$Env:PATH" & pip install -r tasks/libs/requirements-github.txt -& inv -e invoke-unit-tests - -if ($LASTEXITCODE -ne 0) { - Write-Host "[Error]: Some unit tests failed" - exit $LASTEXITCODE -} - -& pushd "test\kitchen" - -& inv -e kitchen.invoke-unit-tests - -if ($LASTEXITCODE -ne 0) { - Write-Host "[Error]: Some kitchen unit tests failed" - exit $LASTEXITCODE -} - -& popd $archflag = "x64" if ($Env:TARGET_ARCH -eq "x86") { From f95330344ccdda257079e981b8dba9355c56baca Mon Sep 17 00:00:00 2001 From: "agent-platform-auto-pr[bot]" <153269286+agent-platform-auto-pr[bot]@users.noreply.github.com> Date: Fri, 12 Apr 2024 13:23:53 +0000 Subject: [PATCH 18/99] [test-infra-definitions][automated] Bump test-infra-definitions to 4ebac656 (#24632) Co-authored-by: agent-platform-auto-pr[bot] <153269286+agent-platform-auto-pr[bot]@users.noreply.github.com> --- .gitlab/common/test_infra_version.yml | 2 +- test/new-e2e/go.mod | 2 +- test/new-e2e/go.sum | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.gitlab/common/test_infra_version.yml b/.gitlab/common/test_infra_version.yml index bee1f6f6a952f..14ce1cf54cd9d 100644 --- a/.gitlab/common/test_infra_version.yml +++ b/.gitlab/common/test_infra_version.yml @@ -4,4 +4,4 @@ variables: # and check the job creating the image to make sure you have the right SHA prefix TEST_INFRA_DEFINITIONS_BUILDIMAGES_SUFFIX: "" # Make sure to update test-infra-definitions version in go.mod as well - TEST_INFRA_DEFINITIONS_BUILDIMAGES: e2e7d263e05c + TEST_INFRA_DEFINITIONS_BUILDIMAGES: 4ebac6561266 diff --git a/test/new-e2e/go.mod b/test/new-e2e/go.mod index 77e74d08ff6d0..6b5a8d0f5e5e3 100644 --- a/test/new-e2e/go.mod +++ b/test/new-e2e/go.mod @@ -28,7 +28,7 @@ require ( // `TEST_INFRA_DEFINITIONS_BUILDIMAGES` matches the commit sha in the module version // Example: github.com/DataDog/test-infra-definitions v0.0.0-YYYYMMDDHHmmSS-0123456789AB // => TEST_INFRA_DEFINITIONS_BUILDIMAGES: 0123456789AB - github.com/DataDog/test-infra-definitions v0.0.0-20240411084208-e2e7d263e05c + github.com/DataDog/test-infra-definitions v0.0.0-20240412104413-4ebac6561266 github.com/aws/aws-sdk-go-v2 v1.25.2 github.com/aws/aws-sdk-go-v2/config v1.27.6 github.com/aws/aws-sdk-go-v2/service/ec2 v1.138.1 diff --git a/test/new-e2e/go.sum b/test/new-e2e/go.sum index b2a260feb8b16..014a6532fcf5b 100644 --- a/test/new-e2e/go.sum +++ b/test/new-e2e/go.sum @@ -12,8 +12,8 @@ github.com/DataDog/datadog-api-client-go/v2 v2.19.0 h1:Wvz/63/q39EpVwSH1T8jVyRvP github.com/DataDog/datadog-api-client-go/v2 v2.19.0/go.mod h1:oD5Lx8Li3oPRa/BSBenkn4i48z+91gwYORF/+6ph71g= github.com/DataDog/mmh3 v0.0.0-20200805151601-30884ca2197a h1:m9REhmyaWD5YJ0P53ygRHxKKo+KM+nw+zz0hEdKztMo= github.com/DataDog/mmh3 v0.0.0-20200805151601-30884ca2197a/go.mod h1:SvsjzyJlSg0rKsqYgdcFxeEVflx3ZNAyFfkUHP0TxXg= -github.com/DataDog/test-infra-definitions v0.0.0-20240411084208-e2e7d263e05c h1:5AnfaCrbIezuQi/KiwCut8gpuYCMGwvJO/rJB4wthE8= 
-github.com/DataDog/test-infra-definitions v0.0.0-20240411084208-e2e7d263e05c/go.mod h1:KNF9SeKFoqxSSucHpuXQ1QDmpi7HFS9yr5kM2h9ls3c= +github.com/DataDog/test-infra-definitions v0.0.0-20240412104413-4ebac6561266 h1:k7mLs7RQUmiKSjYIQlY3Vnk/R26xJ/tMpnX+5bzH/NM= +github.com/DataDog/test-infra-definitions v0.0.0-20240412104413-4ebac6561266/go.mod h1:KNF9SeKFoqxSSucHpuXQ1QDmpi7HFS9yr5kM2h9ls3c= github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8= github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/DataDog/zstd_0 v0.0.0-20210310093942-586c1286621f h1:5Vuo4niPKFkfwW55jV4vY0ih3VQ9RaQqeqY67fvRn8A= From 700503ff66c49676cfae99faa91cc9037e1dc15f Mon Sep 17 00:00:00 2001 From: Pierre Gimalac Date: Fri, 12 Apr 2024 16:19:36 +0200 Subject: [PATCH 19/99] [ASCII-1400] Update Go experimental libraries in all modules (#24356) * chore: update all go experimental lib modules * fix: tidy --- cmd/agent/common/path/go.mod | 2 +- cmd/agent/common/path/go.sum | 4 +-- comp/core/config/go.mod | 6 ++-- comp/core/config/go.sum | 16 +++++----- comp/core/flare/types/go.mod | 2 +- comp/core/flare/types/go.sum | 4 +-- comp/core/hostname/hostnameinterface/go.mod | 2 +- comp/core/hostname/hostnameinterface/go.sum | 4 +-- comp/core/log/go.mod | 6 ++-- comp/core/log/go.sum | 20 ++++++------- comp/core/secrets/go.mod | 2 +- comp/core/secrets/go.sum | 4 +-- comp/core/status/go.mod | 7 +++-- comp/core/status/go.sum | 19 ++++++++---- comp/core/status/statusimpl/go.mod | 4 +-- comp/core/status/statusimpl/go.sum | 12 ++++---- comp/core/telemetry/go.mod | 2 +- comp/core/telemetry/go.sum | 4 +-- comp/forwarder/defaultforwarder/go.mod | 6 ++-- comp/forwarder/defaultforwarder/go.sum | 16 +++++----- .../orchestrator/orchestratorinterface/go.mod | 6 ++-- .../orchestrator/orchestratorinterface/go.sum | 16 +++++----- comp/logs/agent/config/go.mod | 6 ++-- comp/logs/agent/config/go.sum | 16 +++++----- comp/netflow/payload/go.sum | 0 .../exporter/logsagentexporter/go.mod | 8 ++--- .../exporter/logsagentexporter/go.sum | 20 ++++++------- .../exporter/serializerexporter/go.mod | 6 ++-- .../exporter/serializerexporter/go.sum | 16 +++++----- comp/otelcol/otlp/testutil/go.mod | 6 ++-- comp/otelcol/otlp/testutil/go.sum | 16 +++++----- comp/serializer/compression/go.mod | 6 ++-- comp/serializer/compression/go.sum | 16 +++++----- internal/tools/go.mod | 16 +++++----- internal/tools/go.sum | 30 ++++++++++--------- internal/tools/independent-lint/go.mod | 4 +-- internal/tools/independent-lint/go.sum | 6 ++-- internal/tools/modformatter/go.mod | 4 +-- internal/tools/modformatter/go.sum | 6 ++-- internal/tools/modparser/go.mod | 2 +- internal/tools/modparser/go.sum | 4 +-- internal/tools/proto/go.mod | 7 +++-- internal/tools/proto/go.sum | 15 +++++----- pkg/api/go.mod | 6 ++-- pkg/api/go.sum | 16 +++++----- pkg/config/env/go.mod | 6 ++-- pkg/config/env/go.sum | 15 +++++----- pkg/config/logs/go.mod | 4 +-- pkg/config/logs/go.sum | 8 ++--- pkg/config/model/go.mod | 4 +-- pkg/config/model/go.sum | 7 +++-- pkg/config/remote/go.mod | 4 +-- pkg/config/remote/go.sum | 11 +++---- pkg/config/setup/go.mod | 6 ++-- pkg/config/setup/go.sum | 16 +++++----- pkg/config/utils/go.mod | 6 ++-- pkg/config/utils/go.sum | 16 +++++----- pkg/gohai/go.mod | 2 +- pkg/gohai/go.sum | 4 +-- pkg/logs/auditor/go.mod | 6 ++-- pkg/logs/auditor/go.sum | 16 +++++----- pkg/logs/client/go.mod | 6 ++-- pkg/logs/client/go.sum | 16 +++++----- pkg/logs/diagnostic/go.mod | 6 ++-- pkg/logs/diagnostic/go.sum | 16 +++++----- 
pkg/logs/message/go.mod | 6 ++-- pkg/logs/message/go.sum | 16 +++++----- pkg/logs/metrics/go.mod | 2 +- pkg/logs/metrics/go.sum | 4 +-- pkg/logs/pipeline/go.mod | 6 ++-- pkg/logs/pipeline/go.sum | 16 +++++----- pkg/logs/processor/go.mod | 4 +-- pkg/logs/processor/go.sum | 12 ++++---- pkg/logs/sds/go.mod | 4 +-- pkg/logs/sds/go.sum | 12 ++++---- pkg/logs/sender/go.mod | 6 ++-- pkg/logs/sender/go.sum | 16 +++++----- pkg/logs/sources/go.mod | 6 ++-- pkg/logs/sources/go.sum | 16 +++++----- pkg/logs/util/testutils/go.mod | 6 ++-- pkg/logs/util/testutils/go.sum | 16 +++++----- pkg/metrics/go.mod | 4 +-- pkg/metrics/go.sum | 8 ++--- pkg/obfuscate/go.mod | 3 +- pkg/obfuscate/go.sum | 6 ++-- pkg/process/util/api/go.mod | 2 +- pkg/process/util/api/go.sum | 4 +-- pkg/proto/go.mod | 4 +-- pkg/proto/go.sum | 8 ++--- pkg/remoteconfig/state/go.mod | 1 + pkg/remoteconfig/state/go.sum | 8 ++--- pkg/security/secl/go.mod | 2 +- pkg/security/secl/go.sum | 3 +- pkg/serializer/go.mod | 6 ++-- pkg/serializer/go.sum | 16 +++++----- pkg/tagger/types/go.sum | 0 pkg/telemetry/go.mod | 2 +- pkg/telemetry/go.sum | 4 +-- pkg/trace/go.mod | 8 ++--- pkg/trace/go.sum | 20 ++++++------- pkg/util/cgroups/go.mod | 2 +- pkg/util/cgroups/go.sum | 4 +-- pkg/util/filesystem/go.mod | 2 +- pkg/util/filesystem/go.sum | 3 +- pkg/util/flavor/go.mod | 6 ++-- pkg/util/flavor/go.sum | 16 +++++----- pkg/util/fxutil/go.mod | 2 +- pkg/util/fxutil/go.sum | 4 +-- pkg/util/grpc/go.mod | 4 +-- pkg/util/grpc/go.sum | 8 ++--- pkg/util/http/go.mod | 4 +-- pkg/util/http/go.sum | 8 ++--- pkg/util/system/go.mod | 2 +- pkg/util/system/go.sum | 4 +-- pkg/util/system/socket/go.mod | 7 +++-- pkg/util/system/socket/go.sum | 16 +++++----- pkg/util/uuid/go.mod | 2 +- pkg/util/uuid/go.sum | 3 +- pkg/util/winutil/go.mod | 2 +- pkg/util/winutil/go.sum | 4 +-- test/e2e/containers/otlp_sender/go.mod | 4 +-- test/e2e/containers/otlp_sender/go.sum | 8 ++--- test/fakeintake/go.mod | 2 +- test/fakeintake/go.sum | 8 ++--- test/new-e2e/go.mod | 14 ++++----- test/new-e2e/go.sum | 28 ++++++++--------- 126 files changed, 507 insertions(+), 488 deletions(-) create mode 100644 comp/netflow/payload/go.sum create mode 100644 pkg/tagger/types/go.sum diff --git a/cmd/agent/common/path/go.mod b/cmd/agent/common/path/go.mod index bf95278f93840..5f2895af7abaf 100644 --- a/cmd/agent/common/path/go.mod +++ b/cmd/agent/common/path/go.mod @@ -13,7 +13,7 @@ require ( github.com/DataDog/datadog-agent/pkg/util/executable v0.53.0-rc.2 github.com/DataDog/datadog-agent/pkg/util/log v0.53.0-rc.2 github.com/DataDog/datadog-agent/pkg/util/winutil v0.53.0-rc.2 - golang.org/x/sys v0.14.0 + golang.org/x/sys v0.19.0 ) require ( diff --git a/cmd/agent/common/path/go.sum b/cmd/agent/common/path/go.sum index 0892d88b98792..97375b044626a 100644 --- a/cmd/agent/common/path/go.sum +++ b/cmd/agent/common/path/go.sum @@ -12,8 +12,8 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= -golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= diff --git a/comp/core/config/go.mod b/comp/core/config/go.mod index 61daa8eca11ef..d3d387094a2f6 100644 --- a/comp/core/config/go.mod +++ b/comp/core/config/go.mod @@ -99,9 +99,9 @@ require ( go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.23.0 // indirect golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect - golang.org/x/mod v0.15.0 // indirect - golang.org/x/sys v0.17.0 // indirect - golang.org/x/text v0.9.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.18.0 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/comp/core/config/go.sum b/comp/core/config/go.sum index 8740113ac582a..0b0165f78ca6d 100644 --- a/comp/core/config/go.sum +++ b/comp/core/config/go.sum @@ -268,8 +268,8 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -288,8 +288,8 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -305,11 +305,11 @@ golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/comp/core/flare/types/go.mod b/comp/core/flare/types/go.mod index 5162988e91202..4c24995d57052 100644 --- a/comp/core/flare/types/go.mod +++ b/comp/core/flare/types/go.mod @@ -9,5 +9,5 @@ require ( go.uber.org/dig v1.17.0 // indirect go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.23.0 // indirect - golang.org/x/sys v0.0.0-20220412211240-33da011f77ad // indirect + golang.org/x/sys v0.19.0 // indirect ) diff --git a/comp/core/flare/types/go.sum b/comp/core/flare/types/go.sum index c8da96dd2fa1b..ec32c30254d97 100644 --- a/comp/core/flare/types/go.sum +++ b/comp/core/flare/types/go.sum @@ -23,7 +23,7 @@ go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY= go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/comp/core/hostname/hostnameinterface/go.mod b/comp/core/hostname/hostnameinterface/go.mod index 4d13f42ea4b36..cb064be169ee3 100644 --- a/comp/core/hostname/hostnameinterface/go.mod +++ b/comp/core/hostname/hostnameinterface/go.mod @@ -20,6 +20,6 @@ require ( go.uber.org/dig v1.17.0 // indirect go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.23.0 // indirect - golang.org/x/sys v0.0.0-20220412211240-33da011f77ad // indirect + golang.org/x/sys v0.19.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/comp/core/hostname/hostnameinterface/go.sum b/comp/core/hostname/hostnameinterface/go.sum index 045181eb3f60b..0c92762b6beb3 100644 --- a/comp/core/hostname/hostnameinterface/go.sum +++ b/comp/core/hostname/hostnameinterface/go.sum @@ -31,8 +31,8 @@ go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY= 
go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/comp/core/log/go.mod b/comp/core/log/go.mod index 0fe3d4a247fb6..c4cba79a73ee2 100644 --- a/comp/core/log/go.mod +++ b/comp/core/log/go.mod @@ -121,9 +121,9 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect - golang.org/x/mod v0.15.0 // indirect - golang.org/x/net v0.21.0 // indirect - golang.org/x/sys v0.17.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/net v0.24.0 // indirect + golang.org/x/sys v0.19.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.18.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect diff --git a/comp/core/log/go.sum b/comp/core/log/go.sum index 9aa4bb543a067..71b255c293368 100644 --- a/comp/core/log/go.sum +++ b/comp/core/log/go.sum @@ -333,8 +333,8 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= +golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ= golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= @@ -348,8 +348,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -367,8 +367,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -379,8 +379,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -407,8 +407,8 @@ golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= diff --git a/comp/core/secrets/go.mod b/comp/core/secrets/go.mod index e8f9d2ff75d91..a09d77dda3fcf 100644 --- a/comp/core/secrets/go.mod +++ b/comp/core/secrets/go.mod @@ -24,7 +24,7 @@ require ( github.com/stretchr/testify v1.9.0 go.uber.org/fx v1.18.2 golang.org/x/exp v0.0.0-20231214170342-aacd6d4b4611 - golang.org/x/sys v0.14.0 + golang.org/x/sys v0.19.0 gopkg.in/yaml.v2 v2.4.0 ) diff --git 
a/comp/core/secrets/go.sum b/comp/core/secrets/go.sum index 98d2bf82b4d35..2f04ec33f191f 100644 --- a/comp/core/secrets/go.sum +++ b/comp/core/secrets/go.sum @@ -83,8 +83,8 @@ go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY= golang.org/x/exp v0.0.0-20231214170342-aacd6d4b4611 h1:qCEDpW1G+vcj3Y7Fy52pEM1AWm3abj8WimGYejI3SC4= golang.org/x/exp v0.0.0-20231214170342-aacd6d4b4611/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= -golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= diff --git a/comp/core/status/go.mod b/comp/core/status/go.mod index 0df89ff147e47..656370890b00f 100644 --- a/comp/core/status/go.mod +++ b/comp/core/status/go.mod @@ -7,7 +7,7 @@ require ( github.com/fatih/color v1.16.0 github.com/stretchr/testify v1.9.0 go.uber.org/fx v1.18.2 - golang.org/x/text v0.3.0 + golang.org/x/text v0.14.0 ) require ( @@ -19,6 +19,9 @@ require ( go.uber.org/dig v1.15.0 // indirect go.uber.org/multierr v1.5.0 // indirect go.uber.org/zap v1.16.0 // indirect - golang.org/x/sys v0.14.0 // indirect + golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/tools v0.13.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/comp/core/status/go.sum b/comp/core/status/go.sum index 010a73ec868dc..f4e75aa6168e5 100644 --- a/comp/core/status/go.sum +++ b/comp/core/status/go.sum @@ -47,9 +47,14 @@ go.uber.org/zap v1.16.0 h1:uFRZXykJGK9lLY4HtgSw44DnIcAM+kRBP7x5m+NpAOM= go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -58,16 +63,20 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= -golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/comp/core/status/statusimpl/go.mod b/comp/core/status/statusimpl/go.mod index fc0db45a237a0..dbee1499559c6 100644 --- a/comp/core/status/statusimpl/go.mod +++ b/comp/core/status/statusimpl/go.mod @@ -93,8 +93,8 @@ require ( go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.23.0 // indirect golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect - golang.org/x/mod v0.15.0 // indirect - golang.org/x/sys v0.17.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/sys v0.19.0 // indirect golang.org/x/tools v0.18.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/comp/core/status/statusimpl/go.sum b/comp/core/status/statusimpl/go.sum index adb3a050f4a7c..a65d0e854b7be 100644 --- a/comp/core/status/statusimpl/go.sum +++ b/comp/core/status/statusimpl/go.sum @@ -275,8 +275,8 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -295,8 +295,8 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -314,8 +314,8 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= diff --git a/comp/core/telemetry/go.mod b/comp/core/telemetry/go.mod index 97ea82a1815e9..83d6864fdfdd5 100644 --- a/comp/core/telemetry/go.mod +++ b/comp/core/telemetry/go.mod @@ -36,7 +36,7 @@ require ( go.uber.org/dig v1.17.0 // indirect go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.23.0 // indirect - golang.org/x/sys v0.14.0 // indirect + golang.org/x/sys v0.19.0 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/comp/core/telemetry/go.sum b/comp/core/telemetry/go.sum index 2dbbe9742a8c5..42bbfa45606f9 100644 --- a/comp/core/telemetry/go.sum +++ b/comp/core/telemetry/go.sum @@ -76,8 +76,8 @@ go.uber.org/multierr v1.6.0/go.mod 
h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9i go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY= go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= -golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= diff --git a/comp/forwarder/defaultforwarder/go.mod b/comp/forwarder/defaultforwarder/go.mod index 70f341653e551..4ed2dbed75b22 100644 --- a/comp/forwarder/defaultforwarder/go.mod +++ b/comp/forwarder/defaultforwarder/go.mod @@ -133,9 +133,9 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect - golang.org/x/mod v0.16.0 // indirect - golang.org/x/net v0.22.0 // indirect - golang.org/x/sys v0.18.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/net v0.24.0 // indirect + golang.org/x/sys v0.19.0 // indirect golang.org/x/tools v0.19.0 // indirect google.golang.org/protobuf v1.33.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/comp/forwarder/defaultforwarder/go.sum b/comp/forwarder/defaultforwarder/go.sum index c703865be41aa..8a0e53f75f425 100644 --- a/comp/forwarder/defaultforwarder/go.sum +++ b/comp/forwarder/defaultforwarder/go.sum @@ -291,8 +291,8 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic= -golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -305,16 +305,16 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= -golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.24.0 
h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -332,8 +332,8 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= diff --git a/comp/forwarder/orchestrator/orchestratorinterface/go.mod b/comp/forwarder/orchestrator/orchestratorinterface/go.mod index 20e7330fe1d26..820401b2ef372 100644 --- a/comp/forwarder/orchestrator/orchestratorinterface/go.mod +++ b/comp/forwarder/orchestrator/orchestratorinterface/go.mod @@ -141,9 +141,9 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect - golang.org/x/mod v0.16.0 // indirect - golang.org/x/net v0.22.0 // indirect - golang.org/x/sys v0.18.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/net v0.24.0 // indirect + golang.org/x/sys v0.19.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.19.0 // indirect google.golang.org/protobuf v1.33.0 // indirect diff --git a/comp/forwarder/orchestrator/orchestratorinterface/go.sum b/comp/forwarder/orchestrator/orchestratorinterface/go.sum index d99632ec4ee0e..1338f18c67be6 100644 --- a/comp/forwarder/orchestrator/orchestratorinterface/go.sum +++ b/comp/forwarder/orchestrator/orchestratorinterface/go.sum @@ -287,8 +287,8 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx golang.org/x/lint 
v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic= -golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -301,16 +301,16 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= -golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -328,8 +328,8 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= diff --git a/comp/logs/agent/config/go.mod b/comp/logs/agent/config/go.mod index 94eed71960137..2bfa2e52975f1 100644 --- a/comp/logs/agent/config/go.mod +++ b/comp/logs/agent/config/go.mod @@ -85,9 +85,9 @@ require ( go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.23.0 // indirect golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect - golang.org/x/mod v0.15.0 // indirect - golang.org/x/sys v0.17.0 // indirect - golang.org/x/text v0.9.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.18.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/comp/logs/agent/config/go.sum b/comp/logs/agent/config/go.sum index d1ac227abd21e..3409be6f4ebf4 100644 --- a/comp/logs/agent/config/go.sum +++ b/comp/logs/agent/config/go.sum @@ -266,8 +266,8 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -286,8 +286,8 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -303,11 +303,11 @@ golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/comp/netflow/payload/go.sum b/comp/netflow/payload/go.sum new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/comp/otelcol/otlp/components/exporter/logsagentexporter/go.mod b/comp/otelcol/otlp/components/exporter/logsagentexporter/go.mod index d5a03597a3a05..fb7c0020d5f86 100644 --- a/comp/otelcol/otlp/components/exporter/logsagentexporter/go.mod +++ b/comp/otelcol/otlp/components/exporter/logsagentexporter/go.mod @@ -137,11 +137,11 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect - golang.org/x/mod v0.15.0 // indirect - golang.org/x/net v0.21.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/net v0.24.0 // indirect golang.org/x/oauth2 v0.16.0 // indirect - golang.org/x/sys v0.18.0 // indirect - golang.org/x/term v0.17.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/term v0.19.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.18.0 // indirect google.golang.org/appengine v1.6.8 // indirect diff --git a/comp/otelcol/otlp/components/exporter/logsagentexporter/go.sum b/comp/otelcol/otlp/components/exporter/logsagentexporter/go.sum index 8f375be2a9383..bb67fbf3bfffe 100644 --- a/comp/otelcol/otlp/components/exporter/logsagentexporter/go.sum +++ b/comp/otelcol/otlp/components/exporter/logsagentexporter/go.sum @@ -337,8 +337,8 @@ golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKG golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -354,8 +354,8 @@ 
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= @@ -366,8 +366,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -390,12 +390,12 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= +golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= diff --git a/comp/otelcol/otlp/components/exporter/serializerexporter/go.mod 
b/comp/otelcol/otlp/components/exporter/serializerexporter/go.mod index 7341e81753a95..693c342608757 100644 --- a/comp/otelcol/otlp/components/exporter/serializerexporter/go.mod +++ b/comp/otelcol/otlp/components/exporter/serializerexporter/go.mod @@ -192,9 +192,9 @@ require ( go.uber.org/fx v1.18.2 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect - golang.org/x/mod v0.16.0 // indirect - golang.org/x/net v0.22.0 // indirect - golang.org/x/sys v0.18.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/net v0.24.0 // indirect + golang.org/x/sys v0.19.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.19.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect diff --git a/comp/otelcol/otlp/components/exporter/serializerexporter/go.sum b/comp/otelcol/otlp/components/exporter/serializerexporter/go.sum index 1df7049d105d7..4914fd2bbb0b1 100644 --- a/comp/otelcol/otlp/components/exporter/serializerexporter/go.sum +++ b/comp/otelcol/otlp/components/exporter/serializerexporter/go.sum @@ -395,8 +395,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic= -golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -413,8 +413,8 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= -golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -424,8 +424,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -449,8 +449,8 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= diff --git a/comp/otelcol/otlp/testutil/go.mod b/comp/otelcol/otlp/testutil/go.mod index c00b30268a2c5..db00bbeb4719e 100644 --- a/comp/otelcol/otlp/testutil/go.mod +++ b/comp/otelcol/otlp/testutil/go.mod @@ -75,9 +75,9 @@ require ( go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect - golang.org/x/mod v0.15.0 // indirect - golang.org/x/net v0.21.0 // indirect - golang.org/x/sys v0.17.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/net v0.24.0 // indirect + golang.org/x/sys v0.19.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.18.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect diff --git a/comp/otelcol/otlp/testutil/go.sum b/comp/otelcol/otlp/testutil/go.sum index e378c6d33459d..548a167da57d9 100644 --- a/comp/otelcol/otlp/testutil/go.sum +++ b/comp/otelcol/otlp/testutil/go.sum @@ -278,8 +278,8 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -293,8 +293,8 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -302,8 +302,8 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -320,8 +320,8 @@ golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= diff --git a/comp/serializer/compression/go.mod b/comp/serializer/compression/go.mod index f04933993fa37..6f47571d42136 100644 --- a/comp/serializer/compression/go.mod +++ b/comp/serializer/compression/go.mod @@ -82,9 +82,9 @@ require ( go.uber.org/multierr v1.10.0 // indirect go.uber.org/zap v1.26.0 // indirect golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect - golang.org/x/mod v0.15.0 // indirect - golang.org/x/sys v0.17.0 // indirect - golang.org/x/text v0.9.0 // indirect + 
golang.org/x/mod v0.17.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.18.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/comp/serializer/compression/go.sum b/comp/serializer/compression/go.sum index 53d25067b10ef..1d6b6e40b33e7 100644 --- a/comp/serializer/compression/go.sum +++ b/comp/serializer/compression/go.sum @@ -266,8 +266,8 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -286,8 +286,8 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -303,11 +303,11 @@ golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod 
h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/internal/tools/go.mod b/internal/tools/go.mod index 12c5fb5f7ee05..1bbdc432532a8 100644 --- a/internal/tools/go.mod +++ b/internal/tools/go.mod @@ -212,16 +212,16 @@ require ( go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect go.uber.org/zap v1.24.0 // indirect - golang.org/x/crypto v0.12.0 // indirect + golang.org/x/crypto v0.22.0 // indirect golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea // indirect golang.org/x/exp/typeparams v0.0.0-20230307190834-24139beb5833 // indirect - golang.org/x/mod v0.12.0 // indirect - golang.org/x/net v0.14.0 // indirect - golang.org/x/sync v0.3.0 // indirect - golang.org/x/sys v0.11.0 // indirect - golang.org/x/term v0.11.0 // indirect - golang.org/x/text v0.12.0 // indirect - golang.org/x/tools v0.12.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/net v0.24.0 // indirect + golang.org/x/sync v0.7.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/term v0.19.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/tools v0.13.0 // indirect gonum.org/v1/gonum v0.7.0 // indirect google.golang.org/protobuf v1.28.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/internal/tools/go.sum b/internal/tools/go.sum index 367d2e2c926c9..93db9f02dcd28 100644 --- a/internal/tools/go.sum +++ b/internal/tools/go.sum @@ -743,8 +743,8 @@ golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= -golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= -golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= +golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= +golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -800,8 +800,9 @@ golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net 
v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -847,8 +848,8 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= -golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= -golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.0.0-20170207211851-4464e7848382/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -875,8 +876,9 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -945,8 +947,8 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -956,8 +958,8 @@ golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= -golang.org/x/term v0.11.0 
h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0= -golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= +golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= +golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -972,8 +974,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= -golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1049,8 +1051,8 @@ golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8= -golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= -golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= +golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/internal/tools/independent-lint/go.mod b/internal/tools/independent-lint/go.mod index a908ef5f59188..183e9e46f449b 100644 --- a/internal/tools/independent-lint/go.mod +++ b/internal/tools/independent-lint/go.mod @@ -2,6 +2,4 @@ module github.com/DataDog/datadog-agent/cmd/independent-lint go 1.21.9 -require golang.org/x/mod v0.5.1 - -require golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 // indirect +require golang.org/x/mod v0.17.0 diff --git a/internal/tools/independent-lint/go.sum b/internal/tools/independent-lint/go.sum index 2db7d927f3e59..e69cc3746b639 100644 --- a/internal/tools/independent-lint/go.sum +++ b/internal/tools/independent-lint/go.sum @@ -1,4 +1,2 @@ -golang.org/x/mod v0.5.1 h1:OJxoQ/rynoF0dcCdI7cLPktw/hR2cueqYfjm43oqK38= -golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA= -golang.org/x/xerrors 
v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= diff --git a/internal/tools/modformatter/go.mod b/internal/tools/modformatter/go.mod index c7ea8047d8571..e019d65377f77 100644 --- a/internal/tools/modformatter/go.mod +++ b/internal/tools/modformatter/go.mod @@ -2,6 +2,4 @@ module github.com/DataDog/datadog-agent/internal/tools/modformatter go 1.21.9 -require golang.org/x/mod v0.5.1 - -require golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 // indirect +require golang.org/x/mod v0.17.0 diff --git a/internal/tools/modformatter/go.sum b/internal/tools/modformatter/go.sum index 2db7d927f3e59..e69cc3746b639 100644 --- a/internal/tools/modformatter/go.sum +++ b/internal/tools/modformatter/go.sum @@ -1,4 +1,2 @@ -golang.org/x/mod v0.5.1 h1:OJxoQ/rynoF0dcCdI7cLPktw/hR2cueqYfjm43oqK38= -golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= diff --git a/internal/tools/modparser/go.mod b/internal/tools/modparser/go.mod index 46c210d8c8919..51cae2491b1a4 100644 --- a/internal/tools/modparser/go.mod +++ b/internal/tools/modparser/go.mod @@ -4,7 +4,7 @@ go 1.21.9 require ( github.com/stretchr/testify v1.9.0 - golang.org/x/mod v0.16.0 + golang.org/x/mod v0.17.0 ) require ( diff --git a/internal/tools/modparser/go.sum b/internal/tools/modparser/go.sum index 66bcc651e9807..0749a08cb9fc1 100644 --- a/internal/tools/modparser/go.sum +++ b/internal/tools/modparser/go.sum @@ -4,8 +4,8 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic= -golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/internal/tools/proto/go.mod b/internal/tools/proto/go.mod index bb8b650acc2c8..aac71a0f32cf0 100644 --- a/internal/tools/proto/go.mod +++ b/internal/tools/proto/go.mod @@ -16,11 +16,12 @@ require ( github.com/ghodss/yaml v1.0.0 // indirect github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect github.com/philhofer/fwd v1.1.2 // indirect - golang.org/x/mod v0.14.0 // indirect - golang.org/x/net v0.19.0 // indirect - golang.org/x/sys v0.15.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/net v0.24.0 // indirect + golang.org/x/sys v0.19.0 // indirect golang.org/x/text v0.14.0 
// indirect golang.org/x/tools v0.16.1 // indirect + golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c // indirect google.golang.org/protobuf v1.28.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/internal/tools/proto/go.sum b/internal/tools/proto/go.sum index f8a4401ae0cf5..2a87f05a9b39c 100644 --- a/internal/tools/proto/go.sum +++ b/internal/tools/proto/go.sum @@ -42,8 +42,8 @@ golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTk golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= -golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -52,8 +52,8 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -63,8 +63,8 @@ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= @@ -81,8 +81,9 @@ golang.org/x/tools 
v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= diff --git a/pkg/api/go.mod b/pkg/api/go.mod index cfff48da1d838..17b46ac826373 100644 --- a/pkg/api/go.mod +++ b/pkg/api/go.mod @@ -86,9 +86,9 @@ require ( go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.23.0 // indirect golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect - golang.org/x/mod v0.15.0 // indirect - golang.org/x/sys v0.17.0 // indirect - golang.org/x/text v0.9.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.18.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/pkg/api/go.sum b/pkg/api/go.sum index d1ac227abd21e..3409be6f4ebf4 100644 --- a/pkg/api/go.sum +++ b/pkg/api/go.sum @@ -266,8 +266,8 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -286,8 +286,8 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync 
v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -303,11 +303,11 @@ golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/pkg/config/env/go.mod b/pkg/config/env/go.mod index d8b7bbddd78f0..89b4651e65b7e 100644 --- a/pkg/config/env/go.mod +++ b/pkg/config/env/go.mod @@ -41,9 +41,9 @@ require ( github.com/yusufpapurcu/wmi v1.2.3 // indirect go.uber.org/atomic v1.11.0 // indirect golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect - golang.org/x/mod v0.15.0 // indirect - golang.org/x/sys v0.17.0 // indirect - golang.org/x/text v0.3.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.18.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/pkg/config/env/go.sum b/pkg/config/env/go.sum index a15fe793e7686..2d60d59ae6990 100644 --- a/pkg/config/env/go.sum +++ b/pkg/config/env/go.sum @@ -196,8 +196,8 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net 
v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -216,8 +216,8 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -233,10 +233,11 @@ golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/pkg/config/logs/go.mod b/pkg/config/logs/go.mod index 41ea1af6d1fd3..ec494d2d6e72d 100644 --- a/pkg/config/logs/go.mod +++ b/pkg/config/logs/go.mod @@ -32,8 +32,8 @@ require ( github.com/spf13/pflag v1.0.5 // indirect go.uber.org/atomic v1.11.0 // indirect golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect - golang.org/x/sys v0.17.0 // indirect - golang.org/x/text v0.9.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/text v0.14.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/pkg/config/logs/go.sum b/pkg/config/logs/go.sum index 3d2cd0423cbde..9b9c2ec9fe177 100644 --- a/pkg/config/logs/go.sum +++ b/pkg/config/logs/go.sum @@ -210,11 +210,11 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/pkg/config/model/go.mod b/pkg/config/model/go.mod index 627c9ec584569..c5d5ede864838 100644 --- a/pkg/config/model/go.mod +++ b/pkg/config/model/go.mod @@ -30,8 +30,8 @@ require ( github.com/spf13/cast v1.3.0 // indirect github.com/spf13/jwalterweatherman v1.0.0 // indirect go.uber.org/atomic v1.11.0 // indirect - golang.org/x/sys v0.5.0 // indirect - golang.org/x/text v0.3.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/text v0.14.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/pkg/config/model/go.sum b/pkg/config/model/go.sum index 1fb40d129f861..620036b03d2c9 100644 --- a/pkg/config/model/go.sum +++ b/pkg/config/model/go.sum @@ -199,10 +199,11 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/pkg/config/remote/go.mod b/pkg/config/remote/go.mod index 297a0a5a5c94d..760b32c0c337d 100644 --- a/pkg/config/remote/go.mod +++ b/pkg/config/remote/go.mod @@ -75,8 +75,8 @@ require ( github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/objx v0.5.2 // indirect 
github.com/tinylib/msgp v1.1.8 // indirect - golang.org/x/net v0.19.0 // indirect - golang.org/x/sys v0.16.0 // indirect + golang.org/x/net v0.24.0 // indirect + golang.org/x/sys v0.19.0 // indirect golang.org/x/text v0.14.0 // indirect google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect diff --git a/pkg/config/remote/go.sum b/pkg/config/remote/go.sum index 17fa5893d7bae..96ddf5b29106e 100644 --- a/pkg/config/remote/go.sum +++ b/pkg/config/remote/go.sum @@ -355,8 +355,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY= -golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= +golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -433,8 +433,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -505,8 +505,9 @@ golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= 
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= diff --git a/pkg/config/setup/go.mod b/pkg/config/setup/go.mod index e4e23230da11f..ea0cf2df84fc3 100644 --- a/pkg/config/setup/go.mod +++ b/pkg/config/setup/go.mod @@ -99,9 +99,9 @@ require ( go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.23.0 // indirect golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect - golang.org/x/mod v0.15.0 // indirect - golang.org/x/sys v0.17.0 // indirect - golang.org/x/text v0.9.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.18.0 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/pkg/config/setup/go.sum b/pkg/config/setup/go.sum index 41a7c87c55a72..e4edfd3a68455 100644 --- a/pkg/config/setup/go.sum +++ b/pkg/config/setup/go.sum @@ -265,8 +265,8 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -285,8 +285,8 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -302,11 +302,11 @@ golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 
+golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/pkg/config/utils/go.mod b/pkg/config/utils/go.mod index 9442823abb086..d8a9a62896428 100644 --- a/pkg/config/utils/go.mod +++ b/pkg/config/utils/go.mod @@ -73,9 +73,9 @@ require ( github.com/yusufpapurcu/wmi v1.2.3 // indirect go.uber.org/atomic v1.11.0 // indirect golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect - golang.org/x/mod v0.15.0 // indirect - golang.org/x/sys v0.17.0 // indirect - golang.org/x/text v0.9.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.18.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/pkg/config/utils/go.sum b/pkg/config/utils/go.sum index ec178f18589da..520bc0fe906eb 100644 --- a/pkg/config/utils/go.sum +++ b/pkg/config/utils/go.sum @@ -258,8 +258,8 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -278,8 +278,8 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys 
v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -295,11 +295,11 @@ golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/pkg/gohai/go.mod b/pkg/gohai/go.mod index 51ead1371b7fc..c3e2b130ce526 100644 --- a/pkg/gohai/go.mod +++ b/pkg/gohai/go.mod @@ -10,7 +10,7 @@ require ( github.com/moby/sys/mountinfo v0.7.1 github.com/shirou/gopsutil/v3 v3.24.1 github.com/stretchr/testify v1.9.0 - golang.org/x/sys v0.17.0 + golang.org/x/sys v0.19.0 ) require ( diff --git a/pkg/gohai/go.sum b/pkg/gohai/go.sum index 66d068292b2ca..532ecfe470b9b 100644 --- a/pkg/gohai/go.sum +++ b/pkg/gohai/go.sum @@ -61,8 +61,8 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/pkg/logs/auditor/go.mod b/pkg/logs/auditor/go.mod index 1806b8a5f46f8..5074acbe3df95 100644 --- a/pkg/logs/auditor/go.mod +++ b/pkg/logs/auditor/go.mod @@ -88,9 +88,9 @@ require ( github.com/yusufpapurcu/wmi v1.2.3 // indirect go.uber.org/atomic v1.11.0 // indirect golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect - golang.org/x/mod v0.15.0 // indirect - golang.org/x/sys v0.17.0 // indirect - 
golang.org/x/text v0.9.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.18.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/pkg/logs/auditor/go.sum b/pkg/logs/auditor/go.sum index ec178f18589da..520bc0fe906eb 100644 --- a/pkg/logs/auditor/go.sum +++ b/pkg/logs/auditor/go.sum @@ -258,8 +258,8 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -278,8 +278,8 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -295,11 +295,11 @@ golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod 
h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/pkg/logs/client/go.mod b/pkg/logs/client/go.mod index 09541e1cb8f67..cd9a945d9c9f6 100644 --- a/pkg/logs/client/go.mod +++ b/pkg/logs/client/go.mod @@ -54,7 +54,7 @@ require ( github.com/DataDog/datadog-agent/pkg/util/log v0.53.0-rc.2 github.com/DataDog/datadog-agent/pkg/version v0.53.0-rc.2 github.com/stretchr/testify v1.9.0 - golang.org/x/net v0.21.0 + golang.org/x/net v0.24.0 ) require ( @@ -124,8 +124,8 @@ require ( go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.23.0 // indirect golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect - golang.org/x/mod v0.15.0 // indirect - golang.org/x/sys v0.17.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/sys v0.19.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.18.0 // indirect google.golang.org/protobuf v1.31.0 // indirect diff --git a/pkg/logs/client/go.sum b/pkg/logs/client/go.sum index a34efd9030a51..0e73b1e4a6f61 100644 --- a/pkg/logs/client/go.sum +++ b/pkg/logs/client/go.sum @@ -415,8 +415,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -454,8 +454,8 @@ golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -475,8 +475,8 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync 
v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -523,8 +523,8 @@ golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/pkg/logs/diagnostic/go.mod b/pkg/logs/diagnostic/go.mod index 6c927ca84efee..0e478e9ad225d 100644 --- a/pkg/logs/diagnostic/go.mod +++ b/pkg/logs/diagnostic/go.mod @@ -97,9 +97,9 @@ require ( go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.23.0 // indirect golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect - golang.org/x/mod v0.15.0 // indirect - golang.org/x/sys v0.17.0 // indirect - golang.org/x/text v0.9.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.18.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/pkg/logs/diagnostic/go.sum b/pkg/logs/diagnostic/go.sum index d1ac227abd21e..3409be6f4ebf4 100644 --- a/pkg/logs/diagnostic/go.sum +++ b/pkg/logs/diagnostic/go.sum @@ -266,8 +266,8 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -286,8 +286,8 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -303,11 +303,11 @@ golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/pkg/logs/message/go.mod b/pkg/logs/message/go.mod index eacab60663f42..a5b363b6bac68 100644 --- a/pkg/logs/message/go.mod +++ b/pkg/logs/message/go.mod @@ -84,9 +84,9 @@ require ( github.com/yusufpapurcu/wmi v1.2.3 // indirect go.uber.org/atomic v1.11.0 // indirect golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect - golang.org/x/mod v0.15.0 // indirect - golang.org/x/sys v0.17.0 // indirect - golang.org/x/text v0.9.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.18.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/pkg/logs/message/go.sum b/pkg/logs/message/go.sum index ec178f18589da..520bc0fe906eb 100644 --- a/pkg/logs/message/go.sum +++ b/pkg/logs/message/go.sum @@ -258,8 
+258,8 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -278,8 +278,8 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -295,11 +295,11 @@ golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/pkg/logs/metrics/go.mod b/pkg/logs/metrics/go.mod index 
8e63edfdc6f94..73956abf1e20e 100644 --- a/pkg/logs/metrics/go.mod +++ b/pkg/logs/metrics/go.mod @@ -42,7 +42,7 @@ require ( go.uber.org/fx v1.18.2 // indirect go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.23.0 // indirect - golang.org/x/sys v0.14.0 // indirect + golang.org/x/sys v0.19.0 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/pkg/logs/metrics/go.sum b/pkg/logs/metrics/go.sum index 2dbbe9742a8c5..42bbfa45606f9 100644 --- a/pkg/logs/metrics/go.sum +++ b/pkg/logs/metrics/go.sum @@ -76,8 +76,8 @@ go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9i go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY= go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= -golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= diff --git a/pkg/logs/pipeline/go.mod b/pkg/logs/pipeline/go.mod index dceb81f320493..db7e455239538 100644 --- a/pkg/logs/pipeline/go.mod +++ b/pkg/logs/pipeline/go.mod @@ -143,9 +143,9 @@ require ( go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.23.0 // indirect golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect - golang.org/x/mod v0.15.0 // indirect - golang.org/x/net v0.21.0 // indirect - golang.org/x/sys v0.17.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/net v0.24.0 // indirect + golang.org/x/sys v0.19.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.18.0 // indirect google.golang.org/protobuf v1.31.0 // indirect diff --git a/pkg/logs/pipeline/go.sum b/pkg/logs/pipeline/go.sum index 2866ec45ae429..767ad8add737c 100644 --- a/pkg/logs/pipeline/go.sum +++ b/pkg/logs/pipeline/go.sum @@ -422,8 +422,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -461,8 +461,8 @@ golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod 
h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -482,8 +482,8 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -530,8 +530,8 @@ golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/pkg/logs/processor/go.mod b/pkg/logs/processor/go.mod index b7420f86f638d..f9f3a2b35b5a1 100644 --- a/pkg/logs/processor/go.mod +++ b/pkg/logs/processor/go.mod @@ -124,8 +124,8 @@ require ( go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.23.0 // indirect golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect - golang.org/x/mod v0.15.0 // indirect - golang.org/x/sys v0.17.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/sys v0.19.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.18.0 // indirect google.golang.org/protobuf v1.31.0 // indirect diff --git 
a/pkg/logs/processor/go.sum b/pkg/logs/processor/go.sum index 7977a37f19395..fa86f84653ba0 100644 --- a/pkg/logs/processor/go.sum +++ b/pkg/logs/processor/go.sum @@ -422,8 +422,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -480,8 +480,8 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -528,8 +528,8 @@ golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/pkg/logs/sds/go.mod b/pkg/logs/sds/go.mod index 3f18220799c45..b34ba8cc38d7d 100644 --- a/pkg/logs/sds/go.mod +++ b/pkg/logs/sds/go.mod @@ -96,8 +96,8 @@ require ( github.com/yusufpapurcu/wmi v1.2.3 // indirect go.uber.org/atomic v1.11.0 // indirect golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect - golang.org/x/mod v0.15.0 // indirect - golang.org/x/sys v0.17.0 // indirect + golang.org/x/mod 
v0.17.0 // indirect + golang.org/x/sys v0.19.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.18.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect diff --git a/pkg/logs/sds/go.sum b/pkg/logs/sds/go.sum index 34c7ea5566408..04497ddd2ba92 100644 --- a/pkg/logs/sds/go.sum +++ b/pkg/logs/sds/go.sum @@ -408,8 +408,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -466,8 +466,8 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -514,8 +514,8 @@ golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/pkg/logs/sender/go.mod b/pkg/logs/sender/go.mod index bfe24e93b3f60..62758f81e1345 100644 --- a/pkg/logs/sender/go.mod +++ b/pkg/logs/sender/go.mod @@ -125,9 +125,9 @@ require ( go.uber.org/multierr v1.6.0 // indirect 
go.uber.org/zap v1.23.0 // indirect golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect - golang.org/x/mod v0.15.0 // indirect - golang.org/x/net v0.21.0 // indirect - golang.org/x/sys v0.17.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/net v0.24.0 // indirect + golang.org/x/sys v0.19.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.18.0 // indirect google.golang.org/protobuf v1.31.0 // indirect diff --git a/pkg/logs/sender/go.sum b/pkg/logs/sender/go.sum index 8bd2a31813d6f..fc4cd6833c202 100644 --- a/pkg/logs/sender/go.sum +++ b/pkg/logs/sender/go.sum @@ -415,8 +415,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -454,8 +454,8 @@ golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -475,8 +475,8 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -523,8 +523,8 @@ golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/pkg/logs/sources/go.mod b/pkg/logs/sources/go.mod index 636c5587bc455..cb8275cfdfd1a 100644 --- a/pkg/logs/sources/go.mod +++ b/pkg/logs/sources/go.mod @@ -82,9 +82,9 @@ require ( github.com/yusufpapurcu/wmi v1.2.3 // indirect go.uber.org/atomic v1.11.0 // indirect golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect - golang.org/x/mod v0.15.0 // indirect - golang.org/x/sys v0.17.0 // indirect - golang.org/x/text v0.9.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.18.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/pkg/logs/sources/go.sum b/pkg/logs/sources/go.sum index ec178f18589da..520bc0fe906eb 100644 --- a/pkg/logs/sources/go.sum +++ b/pkg/logs/sources/go.sum @@ -258,8 +258,8 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -278,8 +278,8 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod 
h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -295,11 +295,11 @@ golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/pkg/logs/util/testutils/go.mod b/pkg/logs/util/testutils/go.mod index ba6e408640d8c..bd766110ef3a7 100644 --- a/pkg/logs/util/testutils/go.mod +++ b/pkg/logs/util/testutils/go.mod @@ -81,9 +81,9 @@ require ( github.com/yusufpapurcu/wmi v1.2.3 // indirect go.uber.org/atomic v1.11.0 // indirect golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect - golang.org/x/mod v0.15.0 // indirect - golang.org/x/sys v0.17.0 // indirect - golang.org/x/text v0.9.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.18.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect ) diff --git a/pkg/logs/util/testutils/go.sum b/pkg/logs/util/testutils/go.sum index ec178f18589da..520bc0fe906eb 100644 --- a/pkg/logs/util/testutils/go.sum +++ b/pkg/logs/util/testutils/go.sum @@ -258,8 +258,8 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net 
v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -278,8 +278,8 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -295,11 +295,11 @@ golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/pkg/metrics/go.mod b/pkg/metrics/go.mod index c7ebbb9039441..4340c30ad7cc8 100644 --- a/pkg/metrics/go.mod +++ b/pkg/metrics/go.mod @@ -74,8 +74,8 @@ require ( go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.23.0 // indirect golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect - golang.org/x/sys v0.14.0 // indirect - golang.org/x/text v0.9.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/text v0.14.0 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/pkg/metrics/go.sum b/pkg/metrics/go.sum index 3f34bb7ce21c4..de11b4bfbe89b 100644 --- a/pkg/metrics/go.sum +++ b/pkg/metrics/go.sum @@ -489,8 +489,8 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod 
h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= -golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -500,8 +500,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= diff --git a/pkg/obfuscate/go.mod b/pkg/obfuscate/go.mod index 3e5cac5195fcc..6124fef34129d 100644 --- a/pkg/obfuscate/go.mod +++ b/pkg/obfuscate/go.mod @@ -17,6 +17,7 @@ require ( github.com/dustin/go-humanize v1.0.0 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b // indirect + golang.org/x/net v0.24.0 // indirect + golang.org/x/sys v0.19.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/pkg/obfuscate/go.sum b/pkg/obfuscate/go.sum index ca6903d7bc746..7fd71e9a17250 100644 --- a/pkg/obfuscate/go.sum +++ b/pkg/obfuscate/go.sum @@ -44,8 +44,9 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 h1:4nGaVu0QrbjT/AK2PRLuQfQuh6DJve+pELhqTdAj3x0= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -55,8 +56,9 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b h1:2n253B2r0pYSmEV+UNCQoPfU/FiaizQEK5Gu4Bq4JE8= golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= diff --git a/pkg/process/util/api/go.mod b/pkg/process/util/api/go.mod index e65a0d2fffd7d..93b3b82257e49 100644 --- a/pkg/process/util/api/go.mod +++ b/pkg/process/util/api/go.mod @@ -47,7 +47,7 @@ require ( go.uber.org/fx v1.18.2 // indirect go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.23.0 // indirect - golang.org/x/sys v0.14.0 // indirect + golang.org/x/sys v0.19.0 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/pkg/process/util/api/go.sum b/pkg/process/util/api/go.sum index 7de43e79fd8d6..57b48183b6483 100644 --- a/pkg/process/util/api/go.sum +++ b/pkg/process/util/api/go.sum @@ -105,8 +105,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= -golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/pkg/proto/go.mod b/pkg/proto/go.mod index 58337a440e9a8..b60edeafc332d 100644 --- a/pkg/proto/go.mod +++ b/pkg/proto/go.mod @@ -22,8 +22,8 @@ require ( github.com/philhofer/fwd v1.1.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/vmihailenco/tagparser v0.1.1 // indirect - golang.org/x/net v0.19.0 // indirect - golang.org/x/sys v0.15.0 // indirect + golang.org/x/net v0.24.0 // indirect + golang.org/x/sys v0.19.0 // indirect golang.org/x/text v0.14.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect diff --git a/pkg/proto/go.sum b/pkg/proto/go.sum index 02ae2293b314b..cb6adf5576124 100644 --- 
a/pkg/proto/go.sum +++ b/pkg/proto/go.sum @@ -80,8 +80,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -102,8 +102,8 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= diff --git a/pkg/remoteconfig/state/go.mod b/pkg/remoteconfig/state/go.mod index 8076ebbc8c3ea..4404211ba6e95 100644 --- a/pkg/remoteconfig/state/go.mod +++ b/pkg/remoteconfig/state/go.mod @@ -12,5 +12,6 @@ require ( require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect + golang.org/x/crypto v0.22.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/pkg/remoteconfig/state/go.sum b/pkg/remoteconfig/state/go.sum index a2699cfa06f27..e58605d3ae908 100644 --- a/pkg/remoteconfig/state/go.sum +++ b/pkg/remoteconfig/state/go.sum @@ -16,10 +16,10 @@ github.com/secure-systems-lab/go-securesystemslib v0.7.0 h1:OwvJ5jQf9LnIAS83waAj github.com/secure-systems-lab/go-securesystemslib v0.7.0/go.mod h1:/2gYnlnHVQ6xeGtfIqFy7Do03K4cdCY0A/GlJLDKLHI= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= +golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= +golang.org/x/sys v0.19.0 
h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/pkg/security/secl/go.mod b/pkg/security/secl/go.mod index a96d4b40b1345..abbccb621beda 100644 --- a/pkg/security/secl/go.mod +++ b/pkg/security/secl/go.mod @@ -32,7 +32,7 @@ require ( github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/shopspring/decimal v1.2.0 // indirect - golang.org/x/crypto v0.3.0 // indirect + golang.org/x/crypto v0.22.0 // indirect golang.org/x/mod v0.17.0 // indirect golang.org/x/sync v0.7.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect diff --git a/pkg/security/secl/go.sum b/pkg/security/secl/go.sum index 75fe7a3813b6d..0e546c360cca7 100644 --- a/pkg/security/secl/go.sum +++ b/pkg/security/secl/go.sum @@ -66,8 +66,9 @@ github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.3.0 h1:a06MkbcxBrEFc0w0QIZWXrH/9cCX6KJyWbBOIwAn+7A= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= +golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= diff --git a/pkg/serializer/go.mod b/pkg/serializer/go.mod index d7fd66f31b470..66c45e5f262e2 100644 --- a/pkg/serializer/go.mod +++ b/pkg/serializer/go.mod @@ -166,9 +166,9 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect - golang.org/x/mod v0.16.0 // indirect - golang.org/x/net v0.22.0 // indirect - golang.org/x/sys v0.18.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/net v0.24.0 // indirect + golang.org/x/sys v0.19.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.19.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/pkg/serializer/go.sum b/pkg/serializer/go.sum index 4d70de12701bb..a194bcf77dc2e 100644 --- a/pkg/serializer/go.sum +++ b/pkg/serializer/go.sum @@ -331,8 +331,8 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic= -golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 
h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -346,8 +346,8 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= -golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -355,8 +355,8 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -375,8 +375,8 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= diff --git a/pkg/tagger/types/go.sum b/pkg/tagger/types/go.sum new file mode 100644 index 
0000000000000..e69de29bb2d1d diff --git a/pkg/telemetry/go.mod b/pkg/telemetry/go.mod index 2824b193e0720..71cf51c3ea382 100644 --- a/pkg/telemetry/go.mod +++ b/pkg/telemetry/go.mod @@ -40,7 +40,7 @@ require ( go.uber.org/fx v1.18.2 // indirect go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.23.0 // indirect - golang.org/x/sys v0.14.0 // indirect + golang.org/x/sys v0.19.0 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/pkg/telemetry/go.sum b/pkg/telemetry/go.sum index 2dbbe9742a8c5..42bbfa45606f9 100644 --- a/pkg/telemetry/go.sum +++ b/pkg/telemetry/go.sum @@ -76,8 +76,8 @@ go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9i go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY= go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= -golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= diff --git a/pkg/trace/go.mod b/pkg/trace/go.mod index 12d17538bc8c0..8aad757225bbd 100644 --- a/pkg/trace/go.mod +++ b/pkg/trace/go.mod @@ -38,8 +38,8 @@ require ( go.opentelemetry.io/otel v1.24.0 go.opentelemetry.io/otel/metric v1.24.0 go.uber.org/atomic v1.11.0 - golang.org/x/sys v0.17.0 - golang.org/x/time v0.3.0 + golang.org/x/sys v0.19.0 + golang.org/x/time v0.5.0 google.golang.org/grpc v1.62.1 google.golang.org/protobuf v1.33.0 k8s.io/apimachinery v0.25.5 @@ -95,8 +95,8 @@ require ( go.opentelemetry.io/otel/trace v1.24.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/mod v0.14.0 // indirect - golang.org/x/net v0.20.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/net v0.24.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.16.1 // indirect google.golang.org/appengine v1.6.8 // indirect diff --git a/pkg/trace/go.sum b/pkg/trace/go.sum index 6f234e0f60611..8d6bc7b6a71b8 100644 --- a/pkg/trace/go.sum +++ b/pkg/trace/go.sum @@ -195,15 +195,15 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= -golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= +golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= -golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -214,8 +214,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -242,8 +242,8 @@ golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= @@ -255,8 +255,8 @@ golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools 
v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= diff --git a/pkg/util/cgroups/go.mod b/pkg/util/cgroups/go.mod index 8c504bf421c09..918565623aa93 100644 --- a/pkg/util/cgroups/go.mod +++ b/pkg/util/cgroups/go.mod @@ -27,7 +27,7 @@ require ( github.com/opencontainers/runtime-spec v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.uber.org/atomic v1.11.0 // indirect - golang.org/x/sys v0.2.0 // indirect + golang.org/x/sys v0.19.0 // indirect google.golang.org/protobuf v1.27.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/pkg/util/cgroups/go.sum b/pkg/util/cgroups/go.sum index 555765b2c8ca6..8b40a597a4a9b 100644 --- a/pkg/util/cgroups/go.sum +++ b/pkg/util/cgroups/go.sum @@ -24,8 +24,8 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= diff --git a/pkg/util/filesystem/go.mod b/pkg/util/filesystem/go.mod index 5790ce95784f3..32ba611e1dab8 100644 --- a/pkg/util/filesystem/go.mod +++ b/pkg/util/filesystem/go.mod @@ -13,7 +13,7 @@ require ( github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 github.com/shirou/gopsutil/v3 v3.23.9 github.com/stretchr/testify v1.9.0 - golang.org/x/sys v0.12.0 + golang.org/x/sys v0.19.0 ) require ( diff --git a/pkg/util/filesystem/go.sum b/pkg/util/filesystem/go.sum index 6b7f10f6047e1..dc00f37090513 100644 --- a/pkg/util/filesystem/go.sum +++ b/pkg/util/filesystem/go.sum @@ -37,8 +37,9 @@ golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git 
a/pkg/util/flavor/go.mod b/pkg/util/flavor/go.mod index 82a2bf098a6f3..ff3c9a6ae5e77 100644 --- a/pkg/util/flavor/go.mod +++ b/pkg/util/flavor/go.mod @@ -71,9 +71,9 @@ require ( github.com/yusufpapurcu/wmi v1.2.3 // indirect go.uber.org/atomic v1.11.0 // indirect golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect - golang.org/x/mod v0.15.0 // indirect - golang.org/x/sys v0.17.0 // indirect - golang.org/x/text v0.9.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.18.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/pkg/util/flavor/go.sum b/pkg/util/flavor/go.sum index ec178f18589da..520bc0fe906eb 100644 --- a/pkg/util/flavor/go.sum +++ b/pkg/util/flavor/go.sum @@ -258,8 +258,8 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -278,8 +278,8 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -295,11 +295,11 @@ golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/pkg/util/fxutil/go.mod b/pkg/util/fxutil/go.mod index efd6dc8905976..47a2281dc6922 100644 --- a/pkg/util/fxutil/go.mod +++ b/pkg/util/fxutil/go.mod @@ -17,6 +17,6 @@ require ( go.uber.org/dig v1.17.0 // indirect go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.23.0 // indirect - golang.org/x/sys v0.0.0-20220412211240-33da011f77ad // indirect + golang.org/x/sys v0.19.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/pkg/util/fxutil/go.sum b/pkg/util/fxutil/go.sum index 045181eb3f60b..0c92762b6beb3 100644 --- a/pkg/util/fxutil/go.sum +++ b/pkg/util/fxutil/go.sum @@ -31,8 +31,8 @@ go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY= go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/pkg/util/grpc/go.mod b/pkg/util/grpc/go.mod index d50e623f87b78..764eaa99ef3f7 100644 --- a/pkg/util/grpc/go.mod +++ b/pkg/util/grpc/go.mod @@ -14,7 +14,7 @@ require ( github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 github.com/grpc-ecosystem/go-grpc-middleware v1.2.0 github.com/stretchr/testify v1.9.0 - golang.org/x/net v0.19.0 + golang.org/x/net v0.24.0 google.golang.org/grpc v1.59.0 ) @@ -28,7 +28,7 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/tinylib/msgp v1.1.8 // indirect go.uber.org/atomic v1.11.0 // indirect - golang.org/x/sys v0.15.0 // indirect + golang.org/x/sys v0.19.0 // indirect golang.org/x/text v0.14.0 // indirect google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect diff --git a/pkg/util/grpc/go.sum b/pkg/util/grpc/go.sum index f9d8e2f5c3ca0..1259fb3045842 100644 --- a/pkg/util/grpc/go.sum +++ b/pkg/util/grpc/go.sum @@ -94,8 +94,8 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= 
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= @@ -116,8 +116,8 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= diff --git a/pkg/util/http/go.mod b/pkg/util/http/go.mod index 9d1801f1f1728..7adadee34ba95 100644 --- a/pkg/util/http/go.mod +++ b/pkg/util/http/go.mod @@ -13,7 +13,7 @@ require ( github.com/DataDog/datadog-agent/pkg/config/model v0.53.0-rc.2 github.com/DataDog/datadog-agent/pkg/util/log v0.53.0-rc.2 github.com/stretchr/testify v1.9.0 - golang.org/x/net v0.19.0 + golang.org/x/net v0.24.0 ) require ( @@ -33,7 +33,7 @@ require ( github.com/spf13/pflag v1.0.5 // indirect go.uber.org/atomic v1.11.0 // indirect golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect - golang.org/x/sys v0.15.0 // indirect + golang.org/x/sys v0.19.0 // indirect golang.org/x/text v0.14.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/pkg/util/http/go.sum b/pkg/util/http/go.sum index 73c8eb3b50b74..84ed6703d9daa 100644 --- a/pkg/util/http/go.sum +++ b/pkg/util/http/go.sum @@ -357,8 +357,8 @@ golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= 
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -418,8 +418,8 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/pkg/util/system/go.mod b/pkg/util/system/go.mod index 57946026caabe..4f6eab7316552 100644 --- a/pkg/util/system/go.mod +++ b/pkg/util/system/go.mod @@ -20,7 +20,7 @@ require ( github.com/shirou/gopsutil/v3 v3.23.12 github.com/stretchr/testify v1.9.0 go.uber.org/atomic v1.11.0 - golang.org/x/sys v0.16.0 + golang.org/x/sys v0.19.0 ) require ( diff --git a/pkg/util/system/go.sum b/pkg/util/system/go.sum index 736eb0f117493..f38e13d31eefe 100644 --- a/pkg/util/system/go.sum +++ b/pkg/util/system/go.sum @@ -45,8 +45,8 @@ golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/pkg/util/system/socket/go.mod b/pkg/util/system/socket/go.mod index d344e81d16e2f..d102d2b4a9a50 100644 --- a/pkg/util/system/socket/go.mod +++ b/pkg/util/system/socket/go.mod @@ -5,7 +5,8 @@ go 1.21.9 require github.com/Microsoft/go-winio v0.6.1 require ( - golang.org/x/mod v0.8.0 // indirect - golang.org/x/sys v0.5.0 // indirect - golang.org/x/tools v0.6.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/sync v0.7.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/tools v0.13.0 // indirect ) diff --git a/pkg/util/system/socket/go.sum b/pkg/util/system/socket/go.sum index fa01f4f8a17f5..91cabb909ab66 100644 --- a/pkg/util/system/socket/go.sum +++ b/pkg/util/system/socket/go.sum @@ -1,10 +1,10 @@ github.com/Microsoft/go-winio v0.6.1 
h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= diff --git a/pkg/util/uuid/go.mod b/pkg/util/uuid/go.mod index de1936c7b6765..2affa61657e4c 100644 --- a/pkg/util/uuid/go.mod +++ b/pkg/util/uuid/go.mod @@ -12,7 +12,7 @@ require ( github.com/DataDog/datadog-agent/pkg/util/cache v0.53.0-rc.2 github.com/DataDog/datadog-agent/pkg/util/log v0.53.0-rc.2 github.com/shirou/gopsutil/v3 v3.24.1 - golang.org/x/sys v0.16.0 + golang.org/x/sys v0.19.0 ) require ( diff --git a/pkg/util/uuid/go.sum b/pkg/util/uuid/go.sum index 9b83c417c1830..0ebc1013a5c7f 100644 --- a/pkg/util/uuid/go.sum +++ b/pkg/util/uuid/go.sum @@ -43,8 +43,9 @@ golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/pkg/util/winutil/go.mod b/pkg/util/winutil/go.mod index 01cf7cc96d088..4d89a54b37b48 100644 --- a/pkg/util/winutil/go.mod +++ b/pkg/util/winutil/go.mod @@ -13,7 +13,7 @@ require ( github.com/fsnotify/fsnotify v1.7.0 github.com/stretchr/testify v1.9.0 go.uber.org/atomic v1.11.0 - golang.org/x/sys v0.14.0 + golang.org/x/sys v0.19.0 ) require ( diff --git a/pkg/util/winutil/go.sum b/pkg/util/winutil/go.sum index 8aa2b601edd00..66f0d92f6d110 100644 --- a/pkg/util/winutil/go.sum +++ b/pkg/util/winutil/go.sum @@ -10,8 +10,8 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT 
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= -golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= diff --git a/test/e2e/containers/otlp_sender/go.mod b/test/e2e/containers/otlp_sender/go.mod index 660a7fb138369..658eca844fa52 100644 --- a/test/e2e/containers/otlp_sender/go.mod +++ b/test/e2e/containers/otlp_sender/go.mod @@ -103,8 +103,8 @@ require ( go.opentelemetry.io/otel/sdk/metric v1.24.0 // indirect go.opentelemetry.io/otel/trace v1.24.0 // indirect go.opentelemetry.io/proto/otlp v1.1.0 // indirect - golang.org/x/net v0.22.0 // indirect - golang.org/x/sys v0.18.0 // indirect + golang.org/x/net v0.24.0 // indirect + golang.org/x/sys v0.19.0 // indirect golang.org/x/text v0.14.0 // indirect gonum.org/v1/gonum v0.15.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 // indirect diff --git a/test/e2e/containers/otlp_sender/go.sum b/test/e2e/containers/otlp_sender/go.sum index d327eaba2444f..38ecf57be8661 100644 --- a/test/e2e/containers/otlp_sender/go.sum +++ b/test/e2e/containers/otlp_sender/go.sum @@ -285,8 +285,8 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= -golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -302,8 +302,8 @@ golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= diff --git a/test/fakeintake/go.mod b/test/fakeintake/go.mod index c0ec41ba8a7d1..4ca925712cecd 100644 --- a/test/fakeintake/go.mod +++ b/test/fakeintake/go.mod @@ -45,6 +45,6 @@ require ( github.com/rogpeppe/go-internal v1.10.0 // indirect github.com/spf13/pflag v1.0.5 // indirect golang.org/x/exp v0.0.0-20220303212507-bbda1eaf7a17 // indirect - golang.org/x/sys v0.15.0 // indirect + golang.org/x/sys v0.19.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/test/fakeintake/go.sum b/test/fakeintake/go.sum index ef2c773d71c66..6949b6a094fd5 100644 --- a/test/fakeintake/go.sum +++ b/test/fakeintake/go.sum @@ -94,8 +94,8 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -110,8 +110,8 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= diff --git a/test/new-e2e/go.mod b/test/new-e2e/go.mod index 6b5a8d0f5e5e3..92e933588f69b 100644 --- a/test/new-e2e/go.mod +++ b/test/new-e2e/go.mod @@ -45,9 +45,9 @@ require ( github.com/samber/lo v1.39.0 github.com/sethvargo/go-retry v0.2.4 github.com/stretchr/testify v1.9.0 - golang.org/x/crypto v0.21.0 - golang.org/x/sys v0.18.0 - golang.org/x/term v0.18.0 + golang.org/x/crypto v0.22.0 + golang.org/x/sys v0.19.0 + golang.org/x/term v0.19.0 gopkg.in/yaml.v2 v2.4.0 gopkg.in/zorkian/go-datadog-api.v2 v2.30.0 k8s.io/api v0.28.4 @@ -209,12 +209,12 @@ require ( go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect go.uber.org/atomic v1.11.0 // indirect golang.org/x/exp 
v0.0.0-20240222234643-814bf88cf225 // indirect - golang.org/x/mod v0.16.0 // indirect - golang.org/x/net v0.22.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/net v0.24.0 // indirect golang.org/x/oauth2 v0.16.0 // indirect - golang.org/x/sync v0.6.0 // indirect + golang.org/x/sync v0.7.0 // indirect golang.org/x/text v0.14.0 - golang.org/x/time v0.3.0 // indirect + golang.org/x/time v0.5.0 // indirect golang.org/x/tools v0.19.0 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240304212257-790db918fca8 // indirect diff --git a/test/new-e2e/go.sum b/test/new-e2e/go.sum index 014a6532fcf5b..68ef27a905c42 100644 --- a/test/new-e2e/go.sum +++ b/test/new-e2e/go.sum @@ -473,8 +473,8 @@ golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= +golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ= golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= @@ -488,8 +488,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic= -golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -507,8 +507,8 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= -golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.16.0 
h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= @@ -519,8 +519,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -546,8 +546,8 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -556,8 +556,8 @@ golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= +golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -569,8 +569,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 
h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= From 6b9438c0d21d9702794bed4c72cf3b03e190c0fb Mon Sep 17 00:00:00 2001 From: Raphael Gavache Date: Fri, 12 Apr 2024 16:43:14 +0200 Subject: [PATCH 20/99] Update updater_test.go (#24637) --- pkg/updater/updater_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/updater/updater_test.go b/pkg/updater/updater_test.go index f39273828eb86..8f663f6ce31db 100644 --- a/pkg/updater/updater_test.go +++ b/pkg/updater/updater_test.go @@ -199,7 +199,8 @@ func TestBootstrapWithRC(t *testing.T) { assertEqualFS(t, s.PackageFS(fixtureSimpleV2), r.StableFS()) } -func TestBootstrapCatalogUpdate(t *testing.T) { +// hacky name to avoid hitting https://github.com/golang/go/issues/62614 +func TestBootUpd(t *testing.T) { s := newTestFixturesServer(t) defer s.Close() rc := newTestRemoteConfigClient() From 479eafbc161ffc28edcf8e192c097d527c7dd026 Mon Sep 17 00:00:00 2001 From: Hasan Mahmood <6599778+hmahmood@users.noreply.github.com> Date: Fri, 12 Apr 2024 09:49:36 -0500 Subject: [PATCH 21/99] Add retries for connection query (#24620) --- pkg/network/tracer/tracer_linux_test.go | 26 ++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/pkg/network/tracer/tracer_linux_test.go b/pkg/network/tracer/tracer_linux_test.go index be169d5ddcc7b..43965358754b1 100644 --- a/pkg/network/tracer/tracer_linux_test.go +++ b/pkg/network/tracer/tracer_linux_test.go @@ -2224,18 +2224,26 @@ LOOP: // get connections, the client connection will still // not be in the closed state, so duration will the // timestamp of when it was created - conns := getConnections(t, tr) - conn, found := findConnection(c.LocalAddr(), srv.ln.Addr(), conns) - require.True(t, found) + var conn *network.ConnectionStats + require.EventuallyWithT(t, func(collect *assert.CollectT) { + conns := getConnections(t, tr) + var found bool + conn, found = findConnection(c.LocalAddr(), srv.ln.Addr(), conns) + assert.True(collect, found, "could not find connection") + + }, 3*time.Second, 100*time.Millisecond, "could not find connection") // all we can do is verify it is > 0 assert.Greater(t, conn.Duration, time.Duration(0)) require.NoError(t, c.Close(), "error closing client connection") + require.EventuallyWithT(t, func(collect *assert.CollectT) { + var found bool + conn, found = findConnection(c.LocalAddr(), srv.ln.Addr(), getConnections(t, tr)) + assert.True(collect, found, "could not find closed connection") + }, 3*time.Second, 100*time.Millisecond, "could not find closed connection") + // after closing the client connection, the duration should be - // updated to a value between 1s and 1.1s - conn, found = findConnection(c.LocalAddr(), srv.ln.Addr(), getConnections(t, tr)) - require.True(t, found) - t.Log(conn.Duration) - assert.GreaterOrEqual(t, conn.Duration, time.Second, "connection duration should be between 1 and 1.1 seconds") - assert.Less(t, conn.Duration, 1100*time.Millisecond, "connection duration should be between 1 and 1.1 seconds") + // updated to a value between 1s and 2s + assert.Greater(t, conn.Duration, time.Second, "connection duration should 
be between 1 and 2 seconds") + assert.Less(t, conn.Duration, 2*time.Second, "connection duration should be between 1 and 2 seconds") } From 89a3834fc6a3537359ce1c78d784f7b36dc9875c Mon Sep 17 00:00:00 2001 From: Adel Haj Hassan <41540817+adel121@users.noreply.github.com> Date: Fri, 12 Apr 2024 16:49:42 +0200 Subject: [PATCH 22/99] Validate annotations before patching them and only patch if annotations are valid (#24619) --- pkg/clusteragent/languagedetection/patcher.go | 41 +++++++--- .../languagedetection/patcher_test.go | 78 ++++++++++++++++--- 2 files changed, 97 insertions(+), 22 deletions(-) diff --git a/pkg/clusteragent/languagedetection/patcher.go b/pkg/clusteragent/languagedetection/patcher.go index 778d69a385f8f..64f7258dfbcb7 100644 --- a/pkg/clusteragent/languagedetection/patcher.go +++ b/pkg/clusteragent/languagedetection/patcher.go @@ -11,12 +11,15 @@ package languagedetection import ( "context" "encoding/json" + "errors" "fmt" "strings" "sync" + "k8s.io/apimachinery/pkg/api/validation" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/client-go/dynamic" "k8s.io/client-go/util/retry" @@ -207,11 +210,10 @@ func (lp *languagePatcher) handleDeploymentEvent(event workloadmeta.Event) { if len(deployment.InjectableLanguages) > 0 { // If some annotations still exist, remove them annotationsPatch := lp.generateAnnotationsPatch(deployment.InjectableLanguages, langUtil.ContainersLanguages{}) - lp.patchOwner(&owner, annotationsPatch) - return + err = lp.patchOwner(&owner, annotationsPatch) + } else { + Patches.Inc(owner.Kind, owner.Name, owner.Namespace, statusSkip) } - - Patches.Inc(owner.Kind, owner.Name, owner.Namespace, statusSkip) } else if event.Type == workloadmeta.EventTypeSet { detectedLanguages := deployment.DetectedLanguages injectableLanguages := deployment.InjectableLanguages @@ -219,21 +221,40 @@ func (lp *languagePatcher) handleDeploymentEvent(event workloadmeta.Event) { // Calculate annotations patch annotationsPatch := lp.generateAnnotationsPatch(injectableLanguages, detectedLanguages) if len(annotationsPatch) > 0 { - lp.patchOwner(&owner, annotationsPatch) + err = lp.patchOwner(&owner, annotationsPatch) } else { Patches.Inc(owner.Kind, owner.Name, owner.Namespace, statusSkip) } } + + if err != nil { + lp.logger.Errorf("failed to handle deployment event: %v", err) + } + } // patches the owner with the corresponding language annotations -func (lp *languagePatcher) patchOwner(namespacedOwnerRef *langUtil.NamespacedOwnerReference, annotationsPatch map[string]interface{}) { +func (lp *languagePatcher) patchOwner(namespacedOwnerRef *langUtil.NamespacedOwnerReference, annotationsPatch map[string]interface{}) error { + + setAnnotations := map[string]string{} + for k, v := range annotationsPatch { + if v != nil { + setAnnotations[k] = fmt.Sprintf("%v", v) + } + } + + errs := validation.ValidateAnnotations(setAnnotations, field.NewPath("annotations")) + + if len(errs) > 0 { + Patches.Inc(namespacedOwnerRef.Kind, namespacedOwnerRef.Name, namespacedOwnerRef.Namespace, statusError) + return errors.New(errs.ToAggregate().Error()) + } + ownerGVR, err := langUtil.GetGVR(namespacedOwnerRef) if err != nil { - lp.logger.Errorf("failed to update owner: %v", err) Patches.Inc(namespacedOwnerRef.Kind, namespacedOwnerRef.Name, namespacedOwnerRef.Namespace, statusError) - return + return err } retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { @@ -257,9 +278,9 @@ func (lp *languagePatcher) 
patchOwner(namespacedOwnerRef *langUtil.NamespacedOwn if retryErr != nil { Patches.Inc(namespacedOwnerRef.Kind, namespacedOwnerRef.Name, namespacedOwnerRef.Namespace, statusError) - lp.logger.Errorf("failed to update owner: %v", retryErr) - return + return retryErr } Patches.Inc(namespacedOwnerRef.Kind, namespacedOwnerRef.Name, namespacedOwnerRef.Namespace, statusSuccess) + return nil } diff --git a/pkg/clusteragent/languagedetection/patcher_test.go b/pkg/clusteragent/languagedetection/patcher_test.go index 846c318a5f5cd..c4d83917e84dd 100644 --- a/pkg/clusteragent/languagedetection/patcher_test.go +++ b/pkg/clusteragent/languagedetection/patcher_test.go @@ -10,12 +10,11 @@ package languagedetection import ( "context" "fmt" - "github.com/DataDog/datadog-agent/comp/core" - "github.com/DataDog/datadog-agent/comp/core/log" - "github.com/DataDog/datadog-agent/comp/core/log/logimpl" - "github.com/DataDog/datadog-agent/comp/core/workloadmeta" - langUtil "github.com/DataDog/datadog-agent/pkg/languagedetection/util" - "github.com/DataDog/datadog-agent/pkg/util/fxutil" + "reflect" + "strings" + "testing" + "time" + "github.com/stretchr/testify/assert" "go.uber.org/fx" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -24,9 +23,13 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic" dynamicfake "k8s.io/client-go/dynamic/fake" - "reflect" - "testing" - "time" + + "github.com/DataDog/datadog-agent/comp/core" + "github.com/DataDog/datadog-agent/comp/core/log" + "github.com/DataDog/datadog-agent/comp/core/log/logimpl" + "github.com/DataDog/datadog-agent/comp/core/workloadmeta" + langUtil "github.com/DataDog/datadog-agent/pkg/languagedetection/util" + "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) const ( @@ -463,6 +466,7 @@ func TestRun(t *testing.T) { defer lp.cancel() deploymentName := "test-deployment" + longContNameDeploymentName := "test-deployment-long-cont-name" ns := "test-namespace" // Create target deployment @@ -483,10 +487,27 @@ func TestRun(t *testing.T) { "spec": map[string]interface{}{}, }, } + + // Create long container name deployment + longContNameDeployment := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": longContNameDeploymentName, + "namespace": ns, + "annotations": map[string]interface{}{}, + }, + "spec": map[string]interface{}{}, + }, + } gvr := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"} _, err := mockK8sClient.Resource(gvr).Namespace(ns).Create(context.TODO(), deploymentObject, metav1.CreateOptions{}) assert.NoError(t, err) + _, err = mockK8sClient.Resource(gvr).Namespace(ns).Create(context.TODO(), longContNameDeployment, metav1.CreateOptions{}) + assert.NoError(t, err) + //////////////////////////////// // // // Handling Set Event // @@ -506,7 +527,26 @@ func TestRun(t *testing.T) { }, }}) - mockDeploymentEvent := workloadmeta.Event{ + mockDeploymentEventToFail := workloadmeta.Event{ + Type: workloadmeta.EventTypeSet, + Entity: &workloadmeta.KubernetesDeployment{ + EntityID: workloadmeta.EntityID{ + Kind: workloadmeta.KindKubernetesDeployment, + ID: "test-namespace/" + longContNameDeploymentName, + }, + DetectedLanguages: map[langUtil.Container]langUtil.LanguageSet{ + *langUtil.NewContainer("some-cont"): {"java": {}, "python": {}}, + *langUtil.NewInitContainer("python-ruby-init"): {"ruby": {}, "python": {}}, + // The max allowed annotation key name length in kubernetes is 63 + // To test that 
validation works, we are using a container name of length 69 + *langUtil.NewInitContainer(strings.Repeat("x", 69)): {"ruby": {}, "python": {}}, + }, + }, + } + + mockStore.Push(workloadmeta.SourceLanguageDetectionServer, mockDeploymentEventToFail) + + mockDeploymentEventToSucceed := workloadmeta.Event{ Type: workloadmeta.EventTypeSet, Entity: &workloadmeta.KubernetesDeployment{ EntityID: workloadmeta.EntityID{ @@ -520,7 +560,7 @@ func TestRun(t *testing.T) { }, } - mockStore.Push(workloadmeta.SourceLanguageDetectionServer, mockDeploymentEvent) + mockStore.Push(workloadmeta.SourceLanguageDetectionServer, mockDeploymentEventToSucceed) expectedAnnotations := map[string]string{ "internal.dd.datadoghq.com/some-cont.detected_langs": "java,python", @@ -551,8 +591,22 @@ func TestRun(t *testing.T) { "deployment should be patched with the correct annotations", ) + // Check that the deployment with long container name was not patched + // This is correct since workloadmeta events are processed sequentially, which means that since the second event has been asserted first + // the first event has already been processed and its side-effect can be asserted instantly + assert.Truef(t, func() bool { + // Check the patch + got, err := lp.k8sClient.Resource(gvr).Namespace(ns).Get(context.TODO(), longContNameDeploymentName, metav1.GetOptions{}) + if err != nil { + return false + } + annotations := got.GetAnnotations() + + return len(annotations) == 0 + }(), "Deployment should not be patched with language annotations since one of the containers has a very long name") + // Simulate kubeapiserver collector (i.e. update injectable languages in wlm) - mockDeploymentEvent = workloadmeta.Event{ + mockDeploymentEvent := workloadmeta.Event{ Type: workloadmeta.EventTypeSet, Entity: &workloadmeta.KubernetesDeployment{ EntityID: workloadmeta.EntityID{ From 3215c0d5be802e3e123518fb67499e570797aefa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9lian=20Raimbault?= <161456554+CelianR@users.noreply.github.com> Date: Fri, 12 Apr 2024 17:37:31 +0200 Subject: [PATCH 23/99] Revert "[gitlab-use-module] Use gitlab python module instead of raw http requests (#24070)" (#24651) This reverts commit 0ba7f945968854166dd7bb0a6cd01246b476836e. This is causing timeouts in jobs that trigger child pipelines. 
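For context, the raw-HTTP helper restored by this revert is typically driven as in the minimal sketch below; the ref, the pipeline variable, and the trigger token are placeholder values, not values taken from this patch:

    from tasks.libs.ciproviders.gitlab import Gitlab, get_gitlab_token

    # Authenticate against GitLab with a personal access token (read from the env or the keychain).
    gitlab = Gitlab(project_name="DataDog/datadog-agent", api_token=str(get_gitlab_token()))

    # Start a pipeline on a ref with custom variables through the plain REST wrapper.
    pipeline = gitlab.create_pipeline("main", variables={"RUN_KMT_TESTS": "on"})
    print(f"created pipeline {pipeline['id']}")

    # Child pipelines go through the trigger endpoint; the payload must carry a CI trigger token.
    gitlab.trigger_pipeline({"token": "<trigger-token>", "ref": "main"})
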
--- .github/workflows/label-analysis.yml | 2 +- tasks/kernel_matrix_testing/ci.py | 55 +-- tasks/kmt.py | 2 +- tasks/libs/ciproviders/github_api.py | 3 + tasks/libs/ciproviders/gitlab.py | 545 +++++++++++++++++++++++++++ tasks/libs/ciproviders/gitlab_api.py | 243 ------------ tasks/libs/common/remote_api.py | 123 ++++++ tasks/libs/pipeline/data.py | 59 ++- tasks/libs/pipeline/notifications.py | 19 +- tasks/libs/pipeline/stats.py | 6 +- tasks/libs/pipeline/tools.py | 177 ++++----- tasks/libs/types/types.py | 18 +- tasks/linter.py | 19 +- tasks/notify.py | 2 +- tasks/pipeline.py | 218 +++++------ tasks/release.py | 11 +- tasks/unit-tests/gitlab_api_tests.py | 93 ++++- tasks/unit-tests/notify_tests.py | 175 ++++----- 18 files changed, 1117 insertions(+), 653 deletions(-) create mode 100644 tasks/libs/ciproviders/gitlab.py delete mode 100644 tasks/libs/ciproviders/gitlab_api.py create mode 100644 tasks/libs/common/remote_api.py diff --git a/.github/workflows/label-analysis.yml b/.github/workflows/label-analysis.yml index bbf262c9381bb..7d97b83595f71 100644 --- a/.github/workflows/label-analysis.yml +++ b/.github/workflows/label-analysis.yml @@ -21,7 +21,7 @@ jobs: - name: Checkout repository uses: actions/checkout@v4 - name: Install Python dependencies - run: pip install -r requirements.txt -r tasks/requirements.txt + run: pip install -r tasks/requirements.txt - name: Auto assign team label run: inv -e github.assign-team-label --pr-id='${{ github.event.pull_request.number }}' fetch-labels: diff --git a/tasks/kernel_matrix_testing/ci.py b/tasks/kernel_matrix_testing/ci.py index 364bf9d3c2845..8e03b74f5a293 100644 --- a/tasks/kernel_matrix_testing/ci.py +++ b/tasks/kernel_matrix_testing/ci.py @@ -6,11 +6,9 @@ import re import tarfile import xml.etree.ElementTree as ET -from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple, Union, overload +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Tuple, Union, overload -from gitlab.v4.objects import ProjectJob - -from tasks.libs.ciproviders.gitlab_api import get_gitlab_repo +from tasks.libs.ciproviders.gitlab import Gitlab, get_gitlab_token if TYPE_CHECKING: from typing_extensions import Literal @@ -18,27 +16,31 @@ from tasks.kernel_matrix_testing.types import Arch, Component, StackOutput, VMConfig +def get_gitlab() -> Gitlab: + return Gitlab("DataDog/datadog-agent", str(get_gitlab_token())) + + class KMTJob: """Abstract class representing a Kernel Matrix Testing job, with common properties and methods for all job types""" - def __init__(self, job: ProjectJob): - self.gitlab = get_gitlab_repo() - self.job = job + def __init__(self, job_data: Dict[str, Any]): + self.gitlab = get_gitlab() + self.job_data = job_data def __str__(self): return f"" @property def id(self) -> int: - return self.job.id + return self.job_data["id"] @property def pipeline_id(self) -> int: - return self.job.pipeline["id"] + return self.job_data["pipeline"]["id"] @property def name(self) -> str: - return self.job.name + return self.job_data.get("name", "") @property def arch(self) -> Arch: @@ -50,11 +52,11 @@ def component(self) -> Component: @property def status(self) -> str: - return self.job.status + return self.job_data['status'] @property def failure_reason(self) -> str: - return self.job.failure_reason + return self.job_data["failure_reason"] @overload def artifact_file(self, file: str, ignore_not_found: Literal[True]) -> Optional[str]: # noqa: U100 @@ -88,14 +90,16 @@ def artifact_file_binary(self, file: str, ignore_not_found: bool = False) -> Opt 
ignore_not_found: if True, return None if the file is not found, otherwise raise an error """ try: - res = self.gitlab.jobs.get(self.id, lazy=True).artifact(file) - - return res.content + res = self.gitlab.artifact(self.id, file, ignore_not_found=ignore_not_found) + if res is None: + if not ignore_not_found: + raise RuntimeError("Invalid return value from gitlab.artifact") + else: + return None + res.raise_for_status() except Exception as e: - if ignore_not_found: - return None - raise RuntimeError(f"Could not retrieve artifact {file}") from e + return res.content class KMTSetupEnvJob(KMTJob): @@ -103,8 +107,8 @@ class KMTSetupEnvJob(KMTJob): the job name and output artifacts """ - def __init__(self, job: ProjectJob): - super().__init__(job) + def __init__(self, job_data: Dict[str, Any]): + super().__init__(job_data) self.associated_test_jobs: List[KMTTestRunJob] = [] @property @@ -161,8 +165,8 @@ class KMTTestRunJob(KMTJob): the job name and output artifacts """ - def __init__(self, job: ProjectJob): - super().__init__(job) + def __init__(self, job_data: Dict[str, Any]): + super().__init__(job_data) self.setup_job: Optional[KMTSetupEnvJob] = None @property @@ -227,10 +231,9 @@ def get_all_jobs_for_pipeline(pipeline_id: Union[int, str]) -> Tuple[List[KMTSet setup_jobs: List[KMTSetupEnvJob] = [] test_jobs: List[KMTTestRunJob] = [] - gitlab = get_gitlab_repo() - jobs = gitlab.pipelines.get(pipeline_id, lazy=True).jobs.list(per_page=100, all=True) - for job in jobs: - name = job.name + gitlab = get_gitlab() + for job in gitlab.all_jobs(pipeline_id): + name = job.get("name", "") if name.startswith("kmt_setup_env"): setup_jobs.append(KMTSetupEnvJob(job)) elif name.startswith("kmt_run_"): diff --git a/tasks/kmt.py b/tasks/kmt.py index a3abd373ebdd1..d7614ccc73a36 100644 --- a/tasks/kmt.py +++ b/tasks/kmt.py @@ -984,7 +984,7 @@ def explain_ci_failure(_, pipeline: str): failreason = testfail # By default, we assume it's a test failure # Now check the artifacts, we'll guess why the job failed based on the size - for artifact in job.job.artifacts: + for artifact in job.job_data.get("artifacts", []): if artifact.get("filename") == "artifacts.zip": fsize = artifact.get("size", 0) if fsize < 1500: diff --git a/tasks/libs/ciproviders/github_api.py b/tasks/libs/ciproviders/github_api.py index b9a186287dc6d..1d0e12f760b08 100644 --- a/tasks/libs/ciproviders/github_api.py +++ b/tasks/libs/ciproviders/github_api.py @@ -1,6 +1,7 @@ import base64 import os import platform +import re import subprocess from typing import List @@ -14,6 +15,8 @@ __all__ = ["GithubAPI"] +errno_regex = re.compile(r".*\[Errno (\d+)\] (.*)") + class GithubAPI: """ diff --git a/tasks/libs/ciproviders/gitlab.py b/tasks/libs/ciproviders/gitlab.py new file mode 100644 index 0000000000000..6e79edca40939 --- /dev/null +++ b/tasks/libs/ciproviders/gitlab.py @@ -0,0 +1,545 @@ +import json +import os +import platform +import subprocess +from collections import UserList +from urllib.parse import quote + +import yaml +from invoke.exceptions import Exit + +from tasks.libs.common.remote_api import APIError, RemoteAPI + +__all__ = ["Gitlab"] + + +class Gitlab(RemoteAPI): + """ + Helper class to perform API calls against the Gitlab API, using a Gitlab PAT. 
+ """ + + BASE_URL = "https://gitlab.ddbuild.io/api/v4" + + def __init__(self, project_name="DataDog/datadog-agent", api_token=""): + super(Gitlab, self).__init__("Gitlab") + self.api_token = api_token + self.project_name = project_name + self.authorization_error_message = ( + "HTTP 401: Your GITLAB_TOKEN may have expired. You can " + "check and refresh it at " + "https://gitlab.ddbuild.io/-/profile/personal_access_tokens" + ) + + def test_project_found(self): + """ + Checks if a project can be found. This is useful for testing access permissions to projects. + """ + result = self.project() + + # name is arbitrary, just need to check if something is in the result + if "name" in result: + return + + print(f"Cannot find GitLab project {self.project_name}") + print("If you cannot see it in the GitLab WebUI, you likely need permission.") + raise Exit(code=1) + + def project(self): + """ + Gets the project info. + """ + path = f"/projects/{quote(self.project_name, safe='')}" + return self.make_request(path, json_output=True) + + def create_pipeline(self, ref, variables=None): + """ + Create a pipeline targeting a given reference of a project. + ref must be a branch or a tag. + """ + if variables is None: + variables = {} + + path = f"/projects/{quote(self.project_name, safe='')}/pipeline" + headers = {"Content-Type": "application/json"} + data = json.dumps({"ref": ref, "variables": [{"key": k, "value": v} for (k, v) in variables.items()]}) + return self.make_request(path, headers=headers, data=data, json_output=True) + + def all_pipelines_for_ref(self, ref, sha=None): + """ + Gets all pipelines for a given reference (+ optionally git sha). + """ + page = 1 + + # Go through all pages + results = self.pipelines_for_ref(ref, sha=sha, page=page) + while results: + yield from results + page += 1 + results = self.pipelines_for_ref(ref, sha=sha, page=page) + + def pipelines_for_ref(self, ref, sha=None, page=1, per_page=100): + """ + Gets one page of pipelines for a given reference (+ optionally git sha). + """ + path = f"/projects/{quote(self.project_name, safe='')}/pipelines?ref={quote(ref, safe='')}&per_page={per_page}&page={page}" + if sha: + path = f"{path}&sha={sha}" + return self.make_request(path, json_output=True) + + def last_pipeline_for_ref(self, ref, per_page=100): + """ + Gets the last pipeline for a given reference. + per_page cannot exceed 100. + """ + pipelines = self.pipelines_for_ref(ref, per_page=per_page) + + if len(pipelines) == 0: + return None + + return sorted(pipelines, key=lambda pipeline: pipeline['created_at'], reverse=True)[0] + + def last_pipelines(self): + """ + Get the last 100 pipelines + """ + path = f"/projects/{quote(self.project_name, safe='')}/pipelines?per_page=100&page=1" + return self.make_request(path, json_output=True) + + def trigger_pipeline(self, data): + """ + Trigger a pipeline on a project using the trigger endpoint. + Requires a trigger token in the data object, in the 'token' field. + """ + path = f"/projects/{quote(self.project_name, safe='')}/trigger/pipeline" + + if 'token' not in data: + raise Exit("Missing 'token' field in data object to trigger child pipelines", 1) + + return self.make_request(path, data=data, json_input=True, json_output=True) + + def pipeline(self, pipeline_id): + """ + Gets info for a given pipeline. + """ + path = f"/projects/{quote(self.project_name, safe='')}/pipelines/{pipeline_id}" + return self.make_request(path, json_output=True) + + def cancel_pipeline(self, pipeline_id): + """ + Cancels a given pipeline. 
+ """ + path = f"/projects/{quote(self.project_name, safe='')}/pipelines/{pipeline_id}/cancel" + return self.make_request(path, json_output=True, method="POST") + + def cancel_job(self, job_id): + """ + Cancels a given job + """ + path = f"/projects/{quote(self.project_name, safe='')}/jobs/{job_id}/cancel" + return self.make_request(path, json_output=True, method="POST") + + def commit(self, commit_sha): + """ + Gets info for a given commit sha. + """ + path = f"/projects/{quote(self.project_name, safe='')}/repository/commits/{commit_sha}" + return self.make_request(path, json_output=True) + + def artifact(self, job_id, artifact_name, ignore_not_found=False): + path = f"/projects/{quote(self.project_name, safe='')}/jobs/{job_id}/artifacts/{artifact_name}" + try: + response = self.make_request(path, stream_output=True) + return response + except APIError as e: + if e.status_code == 404 and ignore_not_found: + return None + raise e + + def all_jobs(self, pipeline_id): + """ + Gets all the jobs for a pipeline. + """ + page = 1 + + # Go through all pages + results = self.jobs(pipeline_id, page) + while results: + yield from results + page += 1 + results = self.jobs(pipeline_id, page) + + def jobs(self, pipeline_id, page=1, per_page=100): + """ + Gets one page of the jobs for a pipeline. + per_page cannot exceed 100. + """ + path = f"/projects/{quote(self.project_name, safe='')}/pipelines/{pipeline_id}/jobs?per_page={per_page}&page={page}" + return self.make_request(path, json_output=True) + + def job_log(self, job_id): + """ + Gets the log file for a given job. + """ + + path = f"/projects/{quote(self.project_name, safe='')}/jobs/{job_id}/trace" + return self.make_request(path) + + def all_pipeline_schedules(self): + """ + Gets all pipelines schedules for the given project. + """ + page = 1 + + # Go through all pages + results = self.pipeline_schedules(page) + while results: + yield from results + page += 1 + results = self.pipeline_schedules(page) + + def pipeline_schedules(self, page=1, per_page=100): + """ + Gets one page of the pipeline schedules for the given project. + per_page cannot exceed 100 + """ + path = f"/projects/{quote(self.project_name, safe='')}/pipeline_schedules?per_page={per_page}&page={page}" + return self.make_request(path, json_output=True) + + def pipeline_schedule(self, schedule_id): + """ + Gets a single pipeline schedule. + """ + path = f"/projects/{quote(self.project_name, safe='')}/pipeline_schedules/{schedule_id}" + return self.make_request(path, json_output=True) + + def create_pipeline_schedule(self, description, ref, cron, cron_timezone=None, active=None): + """ + Create a new pipeline schedule with given attributes. + """ + path = f"/projects/{quote(self.project_name, safe='')}/pipeline_schedules" + data = { + "description": description, + "ref": ref, + "cron": cron, + "cron_timezone": cron_timezone, + "active": active, + } + no_none_data = {k: v for k, v in data.items() if v is not None} + return self.make_request(path, data=no_none_data, json_output=True, json_input=True) + + def edit_pipeline_schedule( + self, schedule_id, description=None, ref=None, cron=None, cron_timezone=None, active=None + ): + """ + Edit an existing pipeline schedule with given attributes. 
+ """ + path = f"/projects/{quote(self.project_name, safe='')}/pipeline_schedules/{schedule_id}" + data = { + "description": description, + "ref": ref, + "cron": cron, + "cron_timezone": cron_timezone, + "active": active, + } + no_none_data = {k: v for k, v in data.items() if v is not None} + return self.make_request(path, json_input=True, json_output=True, data=no_none_data, method="PUT") + + def delete_pipeline_schedule(self, schedule_id): + """ + Delete an existing pipeline schedule. + """ + path = f"/projects/{quote(self.project_name, safe='')}/pipeline_schedules/{schedule_id}" + # Gitlab API docs claim that this returns the JSON representation of the deleted schedule, + # but it actually returns an empty string + result = self.make_request(path, json_output=False, method="DELETE") + return f"Pipeline schedule deleted; result: {result if result else '(empty)'}" + + def create_pipeline_schedule_variable(self, schedule_id, key, value): + """ + Create a variable for an existing pipeline schedule. + """ + path = f"/projects/{quote(self.project_name, safe='')}/pipeline_schedules/{schedule_id}/variables" + data = { + "key": key, + "value": value, + } + return self.make_request(path, data=data, json_output=True, json_input=True) + + def edit_pipeline_schedule_variable(self, schedule_id, key, value): + """ + Edit an existing variable for a pipeline schedule. + """ + path = f"/projects/{quote(self.project_name, safe='')}/pipeline_schedules/{schedule_id}/variables/{key}" + return self.make_request(path, json_input=True, data={"value": value}, json_output=True, method="PUT") + + def delete_pipeline_schedule_variable(self, schedule_id, key): + """ + Delete an existing variable for a pipeline schedule. + """ + path = f"/projects/{quote(self.project_name, safe='')}/pipeline_schedules/{schedule_id}/variables/{key}" + return self.make_request(path, json_output=True, method="DELETE") + + def find_tag(self, tag_name): + """ + Look up a tag by its name. + """ + path = f"/projects/{quote(self.project_name, safe='')}/repository/tags/{tag_name}" + try: + response = self.make_request(path, json_output=True) + return response + except APIError as e: + # If Gitlab API returns a "404 not found" error we return an empty dict + if e.status_code == 404: + print( + f"Couldn't find the {tag_name} tag: Gitlab returned a 404 Not Found instead of a 200 empty response." + ) + return dict() + else: + raise e + + def lint(self, configuration): + """ + Lint a gitlab-ci configuration. + """ + path = f"/projects/{quote(self.project_name, safe='')}/ci/lint?dry_run=true&include_jobs=true" + headers = {"Content-Type": "application/json"} + data = {"content": configuration} + return self.make_request(path, headers=headers, data=data, json_input=True, json_output=True) + + def make_request( + self, path, headers=None, data=None, json_input=False, json_output=False, stream_output=False, method=None + ): + """ + Utility to make a request to the Gitlab API. + See RemoteAPI#request. + + Adds "PRIVATE-TOKEN: {self.api_token}" to the headers to be able to authenticate ourselves to GitLab. + """ + headers = dict(headers or []) + headers["PRIVATE-TOKEN"] = self.api_token + + return self.request( + path=path, + headers=headers, + data=data, + json_input=json_input, + json_output=json_output, + stream_output=stream_output, + raw_output=False, + method=method, + ) + + +def get_gitlab_token(): + if "GITLAB_TOKEN" not in os.environ: + print("GITLAB_TOKEN not found in env. 
Trying keychain...") + if platform.system() == "Darwin": + try: + output = subprocess.check_output( + ['security', 'find-generic-password', '-a', os.environ["USER"], '-s', 'GITLAB_TOKEN', '-w'] + ) + if len(output) > 0: + return output.strip() + except subprocess.CalledProcessError: + print("GITLAB_TOKEN not found in keychain...") + pass + print( + "Please create an 'api' access token at " + "https://gitlab.ddbuild.io/-/profile/personal_access_tokens and " + "add it as GITLAB_TOKEN in your keychain " + "or export it from your .bashrc or equivalent." + ) + raise Exit(code=1) + return os.environ["GITLAB_TOKEN"] + + +def get_gitlab_bot_token(): + if "GITLAB_BOT_TOKEN" not in os.environ: + print("GITLAB_BOT_TOKEN not found in env. Trying keychain...") + if platform.system() == "Darwin": + try: + output = subprocess.check_output( + ['security', 'find-generic-password', '-a', os.environ["USER"], '-s', 'GITLAB_BOT_TOKEN', '-w'] + ) + if output: + return output.strip() + except subprocess.CalledProcessError: + print("GITLAB_BOT_TOKEN not found in keychain...") + pass + print( + "Please make sure that the GITLAB_BOT_TOKEN is set or that " "the GITLAB_BOT_TOKEN keychain entry is set." + ) + raise Exit(code=1) + return os.environ["GITLAB_BOT_TOKEN"] + + +class ReferenceTag(yaml.YAMLObject): + """ + Custom yaml tag to handle references in gitlab-ci configuration + """ + + yaml_tag = u'!reference' + + def __init__(self, references): + self.references = references + + @classmethod + def from_yaml(cls, loader, node): + return UserList(loader.construct_sequence(node)) + + @classmethod + def to_yaml(cls, dumper, data): + return dumper.represent_sequence(cls.yaml_tag, data.data, flow_style=True) + + +def generate_gitlab_full_configuration(input_file, context=None, compare_to=None): + """ + Generate a full gitlab-ci configuration by resolving all includes + """ + # Update loader/dumper to handle !reference tag + yaml.SafeLoader.add_constructor(ReferenceTag.yaml_tag, ReferenceTag.from_yaml) + yaml.SafeDumper.add_representer(UserList, ReferenceTag.to_yaml) + + yaml_contents = [] + read_includes(input_file, yaml_contents) + full_configuration = {} + for yaml_file in yaml_contents: + full_configuration.update(yaml_file) + # Override some variables with a dedicated context + if context: + full_configuration["variables"].update(context) + if compare_to: + for value in full_configuration.values(): + if ( + isinstance(value, dict) + and "changes" in value + and isinstance(value["changes"], dict) + and "compare_to" in value["changes"] + ): + value["changes"]["compare_to"] = compare_to + elif isinstance(value, list): + for v in value: + if ( + isinstance(v, dict) + and "changes" in v + and isinstance(v["changes"], dict) + and "compare_to" in v["changes"] + ): + v["changes"]["compare_to"] = compare_to + return yaml.safe_dump(full_configuration) + + +def read_includes(yaml_file, includes): + """ + Recursive method to read all includes from yaml files and store them in a list + """ + current_file = read_content(yaml_file) + if 'include' not in current_file: + includes.append(current_file) + else: + for include in current_file['include']: + read_includes(include, includes) + del current_file['include'] + includes.append(current_file) + + +def read_content(file_path): + """ + Read the content of a file, either from a local file or from an http endpoint + """ + content = None + if file_path.startswith('http'): + import requests + + response = requests.get(file_path) + response.raise_for_status() + content = response.text + 
else: + with open(file_path) as f: + content = f.read() + return yaml.safe_load(content) + + +def get_preset_contexts(required_tests): + possible_tests = ["all", "main", "release", "mq"] + required_tests = required_tests.casefold().split(",") + if set(required_tests) | set(possible_tests) != set(possible_tests): + raise Exit(f"Invalid test required: {required_tests} must contain only values from {possible_tests}", 1) + main_contexts = [ + ("BUCKET_BRANCH", ["nightly"]), # ["dev", "nightly", "beta", "stable", "oldnightly"] + ("CI_COMMIT_BRANCH", ["main"]), # ["main", "mq-working-branch-main", "7.42.x", "any/name"] + ("CI_COMMIT_TAG", [""]), # ["", "1.2.3-rc.4", "6.6.6"] + ("CI_PIPELINE_SOURCE", ["pipeline"]), # ["trigger", "pipeline", "schedule"] + ("DEPLOY_AGENT", ["true"]), + ("RUN_ALL_BUILDS", ["true"]), + ("RUN_E2E_TESTS", ["auto"]), + ("RUN_KMT_TESTS", ["on"]), + ("RUN_UNIT_TESTS", ["on"]), + ("TESTING_CLEANUP", ["true"]), + ] + release_contexts = [ + ("BUCKET_BRANCH", ["stable"]), + ("CI_COMMIT_BRANCH", ["7.42.x"]), + ("CI_COMMIT_TAG", ["3.2.1", "1.2.3-rc.4"]), + ("CI_PIPELINE_SOURCE", ["schedule"]), + ("DEPLOY_AGENT", ["true"]), + ("RUN_ALL_BUILDS", ["true"]), + ("RUN_E2E_TESTS", ["auto"]), + ("RUN_KMT_TESTS", ["on"]), + ("RUN_UNIT_TESTS", ["on"]), + ("TESTING_CLEANUP", ["true"]), + ] + mq_contexts = [ + ("BUCKET_BRANCH", ["dev"]), + ("CI_COMMIT_BRANCH", ["mq-working-branch-main"]), + ("CI_PIPELINE_SOURCE", ["pipeline"]), + ("DEPLOY_AGENT", ["false"]), + ("RUN_ALL_BUILDS", ["false"]), + ("RUN_E2E_TESTS", ["auto"]), + ("RUN_KMT_TESTS", ["off"]), + ("RUN_UNIT_TESTS", ["off"]), + ("TESTING_CLEANUP", ["false"]), + ] + all_contexts = [] + for test in required_tests: + if test in ["all", "main"]: + generate_contexts(main_contexts, [], all_contexts) + if test in ["all", "release"]: + generate_contexts(release_contexts, [], all_contexts) + if test in ["all", "mq"]: + generate_contexts(mq_contexts, [], all_contexts) + return all_contexts + + +def generate_contexts(contexts, context, all_contexts): + """ + Recursive method to generate all possible contexts from a list of tuples + """ + if len(contexts) == 0: + all_contexts.append(context[:]) + return + for value in contexts[0][1]: + context.append((contexts[0][0], value)) + generate_contexts(contexts[1:], context, all_contexts) + context.pop() + + +def load_context(context): + """ + Load a context either from a yaml file or from a json string + """ + if os.path.exists(context): + with open(context) as f: + y = yaml.safe_load(f) + if "variables" not in y: + raise Exit( + f"Invalid context file: {context}, missing 'variables' key. 
Input file must be similar to tasks/unit-tests/testdata/gitlab_main_context_template.yml", + 1, + ) + return [[(k, v) for k, v in y["variables"].items()]] + else: + try: + j = json.loads(context) + return [[(k, v) for k, v in j.items()]] + except json.JSONDecodeError: + raise Exit(f"Invalid context: {context}, must be a valid json, or a path to a yaml file", 1) diff --git a/tasks/libs/ciproviders/gitlab_api.py b/tasks/libs/ciproviders/gitlab_api.py deleted file mode 100644 index 74136486a6cf3..0000000000000 --- a/tasks/libs/ciproviders/gitlab_api.py +++ /dev/null @@ -1,243 +0,0 @@ -import json -import os -import platform -import subprocess -from collections import UserList - -import gitlab -import yaml -from gitlab.v4.objects import Project -from invoke.exceptions import Exit - -BASE_URL = "https://gitlab.ddbuild.io" - - -def get_gitlab_token(): - if "GITLAB_TOKEN" not in os.environ: - print("GITLAB_TOKEN not found in env. Trying keychain...") - if platform.system() == "Darwin": - try: - output = subprocess.check_output( - ['security', 'find-generic-password', '-a', os.environ["USER"], '-s', 'GITLAB_TOKEN', '-w'] - ) - if len(output) > 0: - return output.strip() - except subprocess.CalledProcessError: - print("GITLAB_TOKEN not found in keychain...") - pass - print( - "Please create an 'api' access token at " - "https://gitlab.ddbuild.io/-/profile/personal_access_tokens and " - "add it as GITLAB_TOKEN in your keychain " - "or export it from your .bashrc or equivalent." - ) - raise Exit(code=1) - return os.environ["GITLAB_TOKEN"] - - -def get_gitlab_bot_token(): - if "GITLAB_BOT_TOKEN" not in os.environ: - print("GITLAB_BOT_TOKEN not found in env. Trying keychain...") - if platform.system() == "Darwin": - try: - output = subprocess.check_output( - ['security', 'find-generic-password', '-a', os.environ["USER"], '-s', 'GITLAB_BOT_TOKEN', '-w'] - ) - if output: - return output.strip() - except subprocess.CalledProcessError: - print("GITLAB_BOT_TOKEN not found in keychain...") - pass - print( - "Please make sure that the GITLAB_BOT_TOKEN is set or that " "the GITLAB_BOT_TOKEN keychain entry is set." - ) - raise Exit(code=1) - return os.environ["GITLAB_BOT_TOKEN"] - - -def get_gitlab_api(token=None) -> gitlab.Gitlab: - """ - Returns the gitlab api object with the api token. - The token is the one of get_gitlab_token() by default. 
- """ - token = token or get_gitlab_token() - - return gitlab.Gitlab(BASE_URL, private_token=token) - - -def get_gitlab_repo(repo='DataDog/datadog-agent', token=None) -> Project: - api = get_gitlab_api(token) - repo = api.projects.get(repo) - - return repo - - -class ReferenceTag(yaml.YAMLObject): - """ - Custom yaml tag to handle references in gitlab-ci configuration - """ - - yaml_tag = u'!reference' - - def __init__(self, references): - self.references = references - - @classmethod - def from_yaml(cls, loader, node): - return UserList(loader.construct_sequence(node)) - - @classmethod - def to_yaml(cls, dumper, data): - return dumper.represent_sequence(cls.yaml_tag, data.data, flow_style=True) - - -def generate_gitlab_full_configuration(input_file, context=None, compare_to=None): - """ - Generate a full gitlab-ci configuration by resolving all includes - """ - # Update loader/dumper to handle !reference tag - yaml.SafeLoader.add_constructor(ReferenceTag.yaml_tag, ReferenceTag.from_yaml) - yaml.SafeDumper.add_representer(UserList, ReferenceTag.to_yaml) - yaml_contents = [] - read_includes(input_file, yaml_contents) - full_configuration = {} - for yaml_file in yaml_contents: - full_configuration.update(yaml_file) - # Override some variables with a dedicated context - if context: - full_configuration["variables"].update(context) - if compare_to: - for value in full_configuration.values(): - if ( - isinstance(value, dict) - and "changes" in value - and isinstance(value["changes"], dict) - and "compare_to" in value["changes"] - ): - value["changes"]["compare_to"] = compare_to - elif isinstance(value, list): - for v in value: - if ( - isinstance(v, dict) - and "changes" in v - and isinstance(v["changes"], dict) - and "compare_to" in v["changes"] - ): - v["changes"]["compare_to"] = compare_to - return yaml.safe_dump(full_configuration) - - -def read_includes(yaml_file, includes): - """ - Recursive method to read all includes from yaml files and store them in a list - """ - current_file = read_content(yaml_file) - if 'include' not in current_file: - includes.append(current_file) - else: - for include in current_file['include']: - read_includes(include, includes) - del current_file['include'] - includes.append(current_file) - - -def read_content(file_path): - """ - Read the content of a file, either from a local file or from an http endpoint - """ - content = None - if file_path.startswith('http'): - import requests - - response = requests.get(file_path) - response.raise_for_status() - content = response.text - else: - with open(file_path) as f: - content = f.read() - return yaml.safe_load(content) - - -def get_preset_contexts(required_tests): - possible_tests = ["all", "main", "release", "mq"] - required_tests = required_tests.casefold().split(",") - if set(required_tests) | set(possible_tests) != set(possible_tests): - raise Exit(f"Invalid test required: {required_tests} must contain only values from {possible_tests}", 1) - main_contexts = [ - ("BUCKET_BRANCH", ["nightly"]), # ["dev", "nightly", "beta", "stable", "oldnightly"] - ("CI_COMMIT_BRANCH", ["main"]), # ["main", "mq-working-branch-main", "7.42.x", "any/name"] - ("CI_COMMIT_TAG", [""]), # ["", "1.2.3-rc.4", "6.6.6"] - ("CI_PIPELINE_SOURCE", ["pipeline"]), # ["trigger", "pipeline", "schedule"] - ("DEPLOY_AGENT", ["true"]), - ("RUN_ALL_BUILDS", ["true"]), - ("RUN_E2E_TESTS", ["auto"]), - ("RUN_KMT_TESTS", ["on"]), - ("RUN_UNIT_TESTS", ["on"]), - ("TESTING_CLEANUP", ["true"]), - ] - release_contexts = [ - ("BUCKET_BRANCH", ["stable"]), - 
("CI_COMMIT_BRANCH", ["7.42.x"]), - ("CI_COMMIT_TAG", ["3.2.1", "1.2.3-rc.4"]), - ("CI_PIPELINE_SOURCE", ["schedule"]), - ("DEPLOY_AGENT", ["true"]), - ("RUN_ALL_BUILDS", ["true"]), - ("RUN_E2E_TESTS", ["auto"]), - ("RUN_KMT_TESTS", ["on"]), - ("RUN_UNIT_TESTS", ["on"]), - ("TESTING_CLEANUP", ["true"]), - ] - mq_contexts = [ - ("BUCKET_BRANCH", ["dev"]), - ("CI_COMMIT_BRANCH", ["mq-working-branch-main"]), - ("CI_PIPELINE_SOURCE", ["pipeline"]), - ("DEPLOY_AGENT", ["false"]), - ("RUN_ALL_BUILDS", ["false"]), - ("RUN_E2E_TESTS", ["auto"]), - ("RUN_KMT_TESTS", ["off"]), - ("RUN_UNIT_TESTS", ["off"]), - ("TESTING_CLEANUP", ["false"]), - ] - all_contexts = [] - for test in required_tests: - if test in ["all", "main"]: - generate_contexts(main_contexts, [], all_contexts) - if test in ["all", "release"]: - generate_contexts(release_contexts, [], all_contexts) - if test in ["all", "mq"]: - generate_contexts(mq_contexts, [], all_contexts) - return all_contexts - - -def generate_contexts(contexts, context, all_contexts): - """ - Recursive method to generate all possible contexts from a list of tuples - """ - if len(contexts) == 0: - all_contexts.append(context[:]) - return - for value in contexts[0][1]: - context.append((contexts[0][0], value)) - generate_contexts(contexts[1:], context, all_contexts) - context.pop() - - -def load_context(context): - """ - Load a context either from a yaml file or from a json string - """ - if os.path.exists(context): - with open(context) as f: - y = yaml.safe_load(f) - if "variables" not in y: - raise Exit( - f"Invalid context file: {context}, missing 'variables' key. Input file must be similar to tasks/unit-tests/testdata/gitlab_main_context_template.yml", - 1, - ) - return [[(k, v) for k, v in y["variables"].items()]] - else: - try: - j = json.loads(context) - return [[(k, v) for k, v in j.items()]] - except json.JSONDecodeError: - raise Exit(f"Invalid context: {context}, must be a valid json, or a path to a yaml file", 1) diff --git a/tasks/libs/common/remote_api.py b/tasks/libs/common/remote_api.py new file mode 100644 index 0000000000000..20f4008abed1f --- /dev/null +++ b/tasks/libs/common/remote_api.py @@ -0,0 +1,123 @@ +import errno +import re +import time + +from invoke.exceptions import Exit + +errno_regex = re.compile(r".*\[Errno (\d+)\] (.*)") + + +class APIError(Exception): + def __init__(self, request, api_name): + super(APIError, self).__init__(f"{api_name} says: {request.content}") + self.status_code = request.status_code + self.request = request + + +class RemoteAPI(object): + """ + Helper class to perform calls against a given remote API. + """ + + BASE_URL = "" + + def __init__(self, api_name, sleep_time=1, retry_count=5): + self.api_name = api_name + self.authorization_error_message = "HTTP 401 Unauthorized" + self.requests_sleep_time = sleep_time + self.requests_500_retry_count = retry_count + + def request( + self, + path, + headers=None, + data=None, + json_input=False, + json_output=False, + stream_output=False, + raw_output=False, + method=None, + ): + """ + Utility to make a request to a remote API. + + headers: A hash of headers to pass to the request. + data: An object containing the body of the request. + json_input: If set to true, data is passed with the json parameter of requests.post instead of the data parameter. + + By default, the request method is GET, or POST if data is not empty. + method: Can be set to GET, POST, PUT or DELETE to force the REST method used. + + By default, we return the text field of the response object. 
The following fields can alter this behavior: + json_output: the json field of the response object is returned. + stream_output: the request asks for a stream response, and the raw response object is returned. + raw_output: the content field of the resposne object is returned. + """ + import requests + + url = self.BASE_URL + path + + # TODO: Use the param argument of requests instead of handling URL params + # manually + try: + # If json_input is true, we specifically want to send data using the json + # parameter of requests.post / requests.put + for retry_count in range(self.requests_500_retry_count): + if method == "PUT": + if json_input: + r = requests.put(url, headers=headers, json=data, stream=stream_output) + else: + r = requests.put(url, headers=headers, data=data, stream=stream_output) + elif method == "DELETE": + r = requests.delete(url, headers=headers, stream=stream_output) + elif data or method == "POST": + if json_input: + r = requests.post(url, headers=headers, json=data, stream=stream_output) + else: + r = requests.post(url, headers=headers, data=data, stream=stream_output) + else: + r = requests.get(url, headers=headers, stream=stream_output) + if r.status_code >= 400: + if r.status_code == 401: + print(self.authorization_error_message) + elif 500 <= r.status_code < 600: + sleep_time = self.requests_sleep_time + retry_count * self.requests_sleep_time + if sleep_time > 0: + print( + f"Request failed with error {r.status_code}, retrying in {sleep_time} seconds (retry {retry_count}/{self.requests_500_retry_count}" + ) + time.sleep(sleep_time) + continue + raise APIError(r, self.api_name) + else: + break + except requests.exceptions.Timeout: + print(f"Connection to {self.api_name} ({url}) timed out.") + raise Exit(code=1) + except requests.exceptions.RequestException as e: + m = errno_regex.match(str(e)) + if not m: + print(f"Unknown error raised connecting to {self.api_name} ({url}): {e}") + raise e + + # Parse errno to give a better explanation + # Requests doesn't have granularity at the level we want: + # http://docs.python-requests.org/en/master/_modules/requests/exceptions/ + errno_code = int(m.group(1)) + message = m.group(2) + + if errno_code == errno.ENOEXEC: + exit_msg = f"Error resolving {url}: {message}" + elif errno_code == errno.ECONNREFUSED: + exit_msg = f"Connection to {self.api_name} ({url}) refused" + else: + exit_msg = f"Error while connecting to {url}: {str(e)}" + raise Exit(message=exit_msg, code=1) + + if json_output: + return r.json() + if raw_output: + return r.content + if stream_output: + return r + return r.text diff --git a/tasks/libs/pipeline/data.py b/tasks/libs/pipeline/data.py index acaf9ccdff05b..4e5b5fa1c9fe5 100644 --- a/tasks/libs/pipeline/data.py +++ b/tasks/libs/pipeline/data.py @@ -1,9 +1,6 @@ import re -from collections import defaultdict -from gitlab.v4.objects import ProjectJob - -from tasks.libs.ciproviders.gitlab_api import get_gitlab_repo +from tasks.libs.ciproviders.gitlab import Gitlab, get_gitlab_token from tasks.libs.types.types import FailedJobReason, FailedJobs, FailedJobType @@ -11,47 +8,47 @@ def get_failed_jobs(project_name: str, pipeline_id: str) -> FailedJobs: """ Retrieves the list of failed jobs for a given pipeline id in a given project. 
""" - repo = get_gitlab_repo(project_name) - pipeline = repo.pipelines.get(pipeline_id) - jobs = pipeline.jobs.list(per_page=100, all=True) - # Get instances of failed jobs grouped by name - failed_jobs = defaultdict(list) + gitlab = Gitlab(project_name=project_name, api_token=get_gitlab_token()) + + # gitlab.all_jobs yields a generator, it needs to be converted to a list to be able to + # go through it twice + jobs = list(gitlab.all_jobs(pipeline_id)) + + # Get instances of failed jobs + failed_jobs = {job["name"]: [] for job in jobs if job["status"] == "failed"} + + # Group jobs per name for job in jobs: - if job.status == "failed": - failed_jobs[job.name].append(job) + if job["name"] in failed_jobs: + failed_jobs[job["name"]].append(job) # There, we now have the following map: # job name -> list of jobs with that name, including at least one failed job processed_failed_jobs = FailedJobs() for job_name, jobs in failed_jobs.items(): # We sort each list per creation date - jobs.sort(key=lambda x: x.created_at) + jobs.sort(key=lambda x: x["created_at"]) # We truncate the job name to increase readability job_name = truncate_job_name(job_name) - job = jobs[-1] # Check the final job in the list: it contains the current status of the job # This excludes jobs that were retried and succeeded - trace = str(repo.jobs.get(job.id, lazy=True).trace(), 'utf-8') - failure_type, failure_reason = get_job_failure_context(trace) - final_status = ProjectJob( - repo.manager, - attrs={ - "name": job_name, - "id": job.id, - "stage": job.stage, - "status": job.status, - "tag_list": job.tag_list, - "allow_failure": job.allow_failure, - "web_url": job.web_url, - "retry_summary": [ijob.status for ijob in jobs], - "failure_type": failure_type, - "failure_reason": failure_reason, - }, - ) + failure_type, failure_reason = get_job_failure_context(gitlab.job_log(jobs[-1]["id"])) + final_status = { + "name": job_name, + "id": jobs[-1]["id"], + "stage": jobs[-1]["stage"], + "status": jobs[-1]["status"], + "tag_list": jobs[-1]["tag_list"], + "allow_failure": jobs[-1]["allow_failure"], + "url": jobs[-1]["web_url"], + "retry_summary": [job["status"] for job in jobs], + "failure_type": failure_type, + "failure_reason": failure_reason, + } # Also exclude jobs allowed to fail - if final_status.status == "failed" and should_report_job(job_name, final_status.allow_failure): + if final_status["status"] == "failed" and should_report_job(job_name, final_status["allow_failure"]): processed_failed_jobs.add_failed_job(final_status) return processed_failed_jobs diff --git a/tasks/libs/pipeline/notifications.py b/tasks/libs/pipeline/notifications.py index c35282f1cea94..960eb5a283234 100644 --- a/tasks/libs/pipeline/notifications.py +++ b/tasks/libs/pipeline/notifications.py @@ -6,12 +6,10 @@ from collections import defaultdict from typing import Dict -import gitlab import yaml -from gitlab.v4.objects import ProjectJob from invoke.context import Context -from tasks.libs.ciproviders.gitlab_api import get_gitlab_repo +from tasks.libs.ciproviders.gitlab import Gitlab, get_gitlab_token from tasks.libs.owners.parsing import read_owners from tasks.libs.types.types import FailedJobReason, FailedJobs, Test @@ -53,16 +51,13 @@ def check_for_missing_owners_slack_and_jira(print_missing_teams=True, owners_fil return error -def get_failed_tests(project_name, job: ProjectJob, owners_file=".github/CODEOWNERS"): - repo = get_gitlab_repo(project_name) +def get_failed_tests(project_name, job, owners_file=".github/CODEOWNERS"): + gitlab = 
Gitlab(project_name=project_name, api_token=get_gitlab_token()) owners = read_owners(owners_file) - try: - test_output = str(repo.jobs.get(job.id, lazy=True).artifact('test_output.json'), 'utf-8') - except gitlab.exceptions.GitlabGetError: - test_output = '' + test_output = gitlab.artifact(job["id"], "test_output.json", ignore_not_found=True) failed_tests = {} # type: dict[tuple[str, str], Test] if test_output: - for line in test_output.splitlines(): + for line in test_output.iter_lines(): json_test = json.loads(line) if 'Test' in json_test: name = json_test['Test'] @@ -91,11 +86,11 @@ def find_job_owners(failed_jobs: FailedJobs, owners_file: str = ".gitlab/JOBOWNE # For e2e test infrastructure errors, notify the agent-e2e-testing team for job in failed_jobs.mandatory_infra_job_failures: - if job.failure_type == FailedJobReason.E2E_INFRA_FAILURE: + if job["failure_type"] == FailedJobReason.E2E_INFRA_FAILURE: owners_to_notify["@datadog/agent-e2e-testing"].add_failed_job(job) for job in failed_jobs.all_non_infra_failures(): - job_owners = owners.of(job.name) + job_owners = owners.of(job["name"]) # job_owners is a list of tuples containing the type of owner (eg. USERNAME, TEAM) and the name of the owner # eg. [('TEAM', '@DataDog/agent-ci-experience')] diff --git a/tasks/libs/pipeline/stats.py b/tasks/libs/pipeline/stats.py index 8bc9e1b0f9113..46a862bfbb94e 100644 --- a/tasks/libs/pipeline/stats.py +++ b/tasks/libs/pipeline/stats.py @@ -31,10 +31,10 @@ def get_failed_jobs_stats(project_name, pipeline_id): global_failure_reason = FailedJobType.INFRA_FAILURE.name for job in failed_jobs.all_mandatory_failures(): - failure_type = job.failure_type - failure_reason = job.failure_reason + failure_type = job["failure_type"] + failure_reason = job["failure_reason"] - key = tuple(sorted(job.tag_list + [f"type:{failure_type.name}", f"reason:{failure_reason.name}"])) + key = tuple(sorted(job["tag_list"] + [f"type:{failure_type.name}", f"reason:{failure_reason.name}"])) job_failure_stats[key] += 1 return global_failure_reason, job_failure_stats diff --git a/tasks/libs/pipeline/tools.py b/tasks/libs/pipeline/tools.py index 513abfa14b85c..d026d61b5f6a6 100644 --- a/tasks/libs/pipeline/tools.py +++ b/tasks/libs/pipeline/tools.py @@ -3,10 +3,6 @@ import platform import sys from time import sleep, time -from typing import List - -from gitlab import GitlabError -from gitlab.v4.objects import Project, ProjectJob, ProjectPipeline from tasks.libs.common.color import color_message from tasks.libs.common.user_interactions import yes_no_question @@ -19,11 +15,11 @@ class FilteredOutException(Exception): pass -def get_running_pipelines_on_same_ref(repo: Project, ref, sha=None) -> List[ProjectPipeline]: - pipelines = repo.pipelines.list(ref=ref, sha=sha, per_page=100, all=True) +def get_running_pipelines_on_same_ref(gitlab, ref, sha=None): + pipelines = gitlab.all_pipelines_for_ref(ref, sha=sha) RUNNING_STATUSES = ["created", "pending", "running"] - running_pipelines = [pipeline for pipeline in pipelines if pipeline.status in RUNNING_STATUSES] + running_pipelines = [pipeline for pipeline in pipelines if pipeline["status"] in RUNNING_STATUSES] return running_pipelines @@ -36,37 +32,37 @@ def parse_datetime(dt): return datetime.datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S.%f%z") -def cancel_pipelines_with_confirmation(repo: Project, pipelines: List[ProjectPipeline]): +def cancel_pipelines_with_confirmation(gitlab, pipelines): for pipeline in pipelines: - commit = repo.commits.get(pipeline.sha) + commit_author, 
commit_short_sha, commit_title = get_commit_for_pipeline(gitlab, pipeline['id']) print( color_message("Pipeline", "blue"), - color_message(pipeline.id, "bold"), - color_message(f"({repo.web_url}/pipelines/{pipeline.id})", "green"), + color_message(pipeline['id'], "bold"), + color_message(f"(https://gitlab.ddbuild.io/{gitlab.project_name}/pipelines/{pipeline['id']})", "green"), ) - pipeline_creation_date = pipeline.created_at + pipeline_creation_date = pipeline['created_at'] print( f"{color_message('Started at', 'blue')} {parse_datetime(pipeline_creation_date).astimezone():%c} ({pipeline_creation_date})" ) print( color_message("Commit:", "blue"), - color_message(commit.title, "green"), - color_message(f"({commit.short_id})", "grey"), + color_message(commit_title, "green"), + color_message(f"({commit_short_sha})", "grey"), color_message("by", "blue"), - color_message(commit.author_name, "bold"), + color_message(commit_author, "bold"), ) if yes_no_question("Do you want to cancel this pipeline?", color="orange", default=True): - pipeline.cancel() - print(f"Pipeline {color_message(pipeline.id, 'bold')} has been cancelled.\n") + gitlab.cancel_pipeline(pipeline['id']) + print(f"Pipeline {color_message(pipeline['id'], 'bold')} has been cancelled.\n") else: - print(f"Pipeline {color_message(pipeline.id, 'bold')} will keep running.\n") + print(f"Pipeline {color_message(pipeline['id'], 'bold')} will keep running.\n") -def gracefully_cancel_pipeline(repo: Project, pipeline: ProjectPipeline, force_cancel_stages): +def gracefully_cancel_pipeline(gitlab, pipeline, force_cancel_stages): """ Gracefully cancel pipeline - Cancel all the jobs that did not start to run yet @@ -74,17 +70,17 @@ def gracefully_cancel_pipeline(repo: Project, pipeline: ProjectPipeline, force_c - Jobs in the stages specified in 'force_cancel_stages' variables will always be canceled even if running """ - jobs = pipeline.jobs.list(per_page=100, all=True) + jobs = gitlab.all_jobs(pipeline["id"]) for job in jobs: - if job.stage in force_cancel_stages or ( - job.status not in ["running", "canceled"] and "cleanup" not in job.name + if job["stage"] in force_cancel_stages or ( + job["status"] not in ["running", "canceled"] and "cleanup" not in job["name"] ): - repo.jobs.get(job.id, lazy=True).cancel() + gitlab.cancel_job(job["id"]) def trigger_agent_pipeline( - repo: Project, + gitlab, ref=DEFAULT_BRANCH, release_version_6="nightly", release_version_7="nightly-a7", @@ -94,7 +90,7 @@ def trigger_agent_pipeline( e2e_tests=False, rc_build=False, rc_k8s_deployments=False, -) -> ProjectPipeline: +): """ Trigger a pipeline on the datadog-agent repositories. 
Multiple options are available: - run a pipeline with all builds (by default, a pipeline only runs a subset of all available builds), @@ -141,40 +137,39 @@ def trigger_agent_pipeline( ref, "\n".join(f" - {k}: {args[k]}" for k in args) ) ) - try: - variables = [{'key': key, 'value': value} for (key, value) in args.items()] + result = gitlab.create_pipeline(ref, args) - return repo.pipelines.create({'ref': ref, 'variables': variables}) - except GitlabError as e: - if "filtered out by workflow rules" in e.error_message: - raise FilteredOutException + if result and "id" in result: + return result["id"] - raise RuntimeError(f"Invalid response from Gitlab API: {e}") + if result and "filtered out by workflow rules" in result.get("message", {}).get("base", [""])[0]: + raise FilteredOutException + raise RuntimeError(f"Invalid response from Gitlab: {result}") -def wait_for_pipeline( - repo: Project, pipeline: ProjectPipeline, pipeline_finish_timeout_sec=PIPELINE_FINISH_TIMEOUT_SEC -): + +def wait_for_pipeline(gitlab, pipeline_id, pipeline_finish_timeout_sec=PIPELINE_FINISH_TIMEOUT_SEC): """ Follow a given pipeline, periodically checking the pipeline status and printing changes to the job statuses. """ - commit = repo.commits.get(pipeline.sha) + commit_author, commit_short_sha, commit_title = get_commit_for_pipeline(gitlab, pipeline_id) print( color_message( "Commit: " - + color_message(commit.title, "green") - + color_message(f" ({commit.short_id})", "grey") + + color_message(commit_title, "green") + + color_message(f" ({commit_short_sha})", "grey") + " by " - + color_message(commit.author_name, "bold"), + + color_message(commit_author, "bold"), "blue", ), flush=True, ) print( color_message( - "Pipeline Link: " + color_message(pipeline.web_url, "green"), + "Pipeline Link: " + + color_message(f"https://gitlab.ddbuild.io/{gitlab.project_name}/pipelines/{pipeline_id}", "green"), "blue", ), flush=True, @@ -182,10 +177,19 @@ def wait_for_pipeline( print(color_message("Waiting for pipeline to finish. Exiting won't cancel it.", "blue"), flush=True) - f = functools.partial(pipeline_status, pipeline) + f = functools.partial(pipeline_status, gitlab, pipeline_id) loop_status(f, pipeline_finish_timeout_sec) + return pipeline_id + + +def get_commit_for_pipeline(gitlab, pipeline_id): + pipeline = gitlab.pipeline(pipeline_id) + sha = pipeline['sha'] + commit = gitlab.commit(sha) + return commit['author_name'], commit['short_id'], commit['title'] + def loop_status(callable, timeout_sec): """ @@ -202,49 +206,50 @@ def loop_status(callable, timeout_sec): sleep(10) -def pipeline_status(pipeline: ProjectPipeline, job_status): +def pipeline_status(gitlab, pipeline_id, job_status): """ Checks the pipeline status and updates job statuses. 
""" - jobs = pipeline.jobs.list(per_page=100, all=True) + jobs = gitlab.all_jobs(pipeline_id) job_status = update_job_status(jobs, job_status) # Check pipeline status - pipestatus = pipeline.status.lower().strip() - ref = pipeline.ref + pipeline = gitlab.pipeline(pipeline_id) + pipestatus = pipeline["status"].lower().strip() + ref = pipeline["ref"] if pipestatus == "success": print( color_message( - f"Pipeline {pipeline.web_url} for {ref} succeeded", + f"Pipeline https://gitlab.ddbuild.io/{gitlab.project_name}/pipelines/{pipeline_id} for {ref} succeeded", "green", ), flush=True, ) - notify("Pipeline success", f"Pipeline {pipeline.id} for {ref} succeeded.") + notify("Pipeline success", f"Pipeline {pipeline_id} for {ref} succeeded.") return True, job_status if pipestatus == "failed": print( color_message( - f"Pipeline {pipeline.web_url} for {ref} failed", + f"Pipeline https://gitlab.ddbuild.io/{gitlab.project_name}/pipelines/{pipeline_id} for {ref} failed", "red", ), flush=True, ) - notify("Pipeline failure", f"Pipeline {pipeline.id} for {ref} failed.") + notify("Pipeline failure", f"Pipeline {pipeline_id} for {ref} failed.") return True, job_status if pipestatus == "canceled": print( color_message( - f"Pipeline {pipeline.web_url} for {ref} was canceled", + f"Pipeline https://gitlab.ddbuild.io/{gitlab.project_name}/pipelines/{pipeline_id} for {ref} was canceled", "grey", ), flush=True, ) - notify("Pipeline canceled", f"Pipeline {pipeline.id} for {ref} was canceled.") + notify("Pipeline canceled", f"Pipeline {pipeline_id} for {ref} was canceled.") return True, job_status if pipestatus not in ["created", "running", "pending"]: @@ -253,36 +258,36 @@ def pipeline_status(pipeline: ProjectPipeline, job_status): return False, job_status -def update_job_status(jobs: List[ProjectJob], job_status): +def update_job_status(jobs, job_status): """ Updates job statuses and notify on changes. """ notify = {} for job in jobs: - if job_status.get(job.name, None) is None: - job_status[job.name] = job - notify[job.id] = job + if job_status.get(job['name'], None) is None: + job_status[job['name']] = job + notify[job['id']] = job else: # There are two reasons why we want to notify: # - status change on job (when we refresh) # - another job with the same name exists (when a job is retried) # Check for id to see if we're in the first case. - old_job = job_status[job.name] - if job.id == old_job.id and job.status != old_job.status: - job_status[job.name] = job - notify[job.id] = job - if job.id != old_job.id and job.created_at > old_job.created_at: - job_status[job.name] = job + old_job = job_status[job['name']] + if job['id'] == old_job['id'] and job['status'] != old_job['status']: + job_status[job['name']] = job + notify[job['id']] = job + if job['id'] != old_job['id'] and job['created_at'] > old_job['created_at']: + job_status[job['name']] = job # Check if old job already in notification list, to append retry message - notify_old_job = notify.get(old_job.id, None) + notify_old_job = notify.get(old_job['id'], None) if notify_old_job is not None: - notify_old_job.retried_old = True # Add message to say the job got retried - notify_old_job.retried_created_at = job.created_at - notify[old_job.id] = notify_old_job + notify_old_job['retried_old'] = True # Add message to say the job got retried + notify_old_job['retried_created_at'] = job['created_at'] + notify[old_job['id']] = notify_old_job # If not (eg. 
previous job was notified in last refresh), add retry message to new job else: - job.retried_new = True - notify[job.id] = job + job['retried_new'] = True + notify[job['id']] = job for job in notify.values(): print_job_status(job) @@ -307,49 +312,53 @@ def print_job(name, stage, color, date, duration, status, link): def print_retry(name, date): print(color_message(f"[{date}] Job {name} was retried", "grey")) - duration = job.duration - date = job.finished_at # Date that is printed in the console log. In most cases, it's when the job finished. + name = job['name'] + stage = job['stage'] + allow_failure = job['allow_failure'] + duration = job['duration'] + date = job['finished_at'] # Date that is printed in the console log. In most cases, it's when the job finished. + status = job['status'] # Gitlab job status job_status = None # Status string printed in the console link = '' # Link to the pipeline. Only filled for failing jobs, to be able to quickly go to the failing job. color = 'grey' # Log output color # A None duration is set by Gitlab when the job gets canceled before it was started. # In that case, set a duration of 0s. - if job.duration is None: + if duration is None: duration = 0 - if job.status == 'success': + if status == 'success': job_status = 'succeeded' color = 'green' - elif job.status == 'failed': - if job.allow_failure: + elif status == 'failed': + if allow_failure: job_status = 'failed (allowed to fail)' color = 'orange' else: job_status = 'failed' color = 'red' - link = f"Link: {job.web_url}" + link = f"Link: {job['web_url']}" # Only notify on real (not retried) failures # Best-effort, as there can be situations where the retried # job didn't get created yet - if getattr(job, 'retried_old', None) is None: - notify("Job failure", f"Job {job.name} failed.") - elif job.status == 'canceled': + if job.get('retried_old', None) is None: + notify("Job failure", f"Job {name} failed.") + elif status == 'canceled': job_status = 'was canceled' color = 'grey' - elif job.status == 'running': + elif status == 'running': job_status = 'started running' - date = job.started_at + date = job['started_at'] color = 'blue' else: return # Some logic to print the retry message in the correct order (before the new job or after the old job) - if getattr(job, 'retried_new', None) is not None: - print_retry(job.name, job.created_at) - print_job(job.name, job.stage, color, date, duration, job_status, link) - if getattr(job, 'retried_old', None) is not None: - print_retry(job.name, job.retried_created_at) + if job.get('retried_new', None) is not None: + print_retry(name, job['created_at']) + print_job(name, stage, color, date, duration, job_status, link) + if job.get('retried_old', None) is not None: + print_retry(name, job['retried_created_at']) def notify(title, info_text, sound=True): diff --git a/tasks/libs/types/types.py b/tasks/libs/types/types.py index 10bc2eaeea10e..3c0d4c103e565 100644 --- a/tasks/libs/types/types.py +++ b/tasks/libs/types/types.py @@ -3,8 +3,6 @@ from collections import defaultdict from enum import Enum -from gitlab.v4.objects import ProjectJob - class Test: PACKAGE_PREFIX = "github.com/DataDog/datadog-agent/" @@ -62,12 +60,12 @@ def __init__(self): self.mandatory_infra_job_failures = [] self.optional_infra_job_failures = [] - def add_failed_job(self, job: ProjectJob): - if job.failure_type == FailedJobType.INFRA_FAILURE and job.allow_failure: + def add_failed_job(self, job): + if job["failure_type"] == FailedJobType.INFRA_FAILURE and job["allow_failure"]: 
self.optional_infra_job_failures.append(job) - elif job.failure_type == FailedJobType.INFRA_FAILURE and not job.allow_failure: + elif job["failure_type"] == FailedJobType.INFRA_FAILURE and not job["allow_failure"]: self.mandatory_infra_job_failures.append(job) - elif job.allow_failure: + elif job["allow_failure"]: self.optional_job_failures.append(job) else: self.mandatory_job_failures.append(job) @@ -113,13 +111,13 @@ def __render_jobs_section(self, header: str, jobs: list, buffer: io.StringIO): jobs_per_stage = defaultdict(list) for job in jobs: - jobs_per_stage[job.stage].append(job) + jobs_per_stage[job["stage"]].append(job) for stage, jobs in jobs_per_stage.items(): jobs_info = [] for job in jobs: - num_retries = len(job.retry_summary) - 1 - job_info = f"<{job.web_url}|{job.name}>" + num_retries = len(job["retry_summary"]) - 1 + job_info = f"<{job['url']}|{job['name']}>" if num_retries > 0: job_info += f" ({num_retries} retries)" @@ -133,7 +131,7 @@ def __render_jobs_section(self, header: str, jobs: list, buffer: io.StringIO): def __render_tests_section(self, buffer): print(self.TEST_SECTION_HEADER, file=buffer) for (test_name, test_package), jobs in self.failed_tests.items(): - job_list = ", ".join(f"<{job.web_url}|{job.name}>" for job in jobs[: self.MAX_JOBS_PER_TEST]) + job_list = ", ".join(f"<{job['url']}|{job['name']}>" for job in jobs[: self.MAX_JOBS_PER_TEST]) if len(jobs) > self.MAX_JOBS_PER_TEST: job_list += f" and {len(jobs) - self.MAX_JOBS_PER_TEST} more" print(f"- `{test_name}` from package `{test_package}` (in {job_list})", file=buffer) diff --git a/tasks/linter.py b/tasks/linter.py index d79cf02a7aea7..c6906ffaceeb1 100644 --- a/tasks/linter.py +++ b/tasks/linter.py @@ -8,9 +8,10 @@ from tasks.build_tags import compute_build_tags_for_flavor from tasks.flavor import AgentFlavor from tasks.go import run_golangci_lint -from tasks.libs.ciproviders.gitlab_api import ( +from tasks.libs.ciproviders.gitlab import ( + Gitlab, generate_gitlab_full_configuration, - get_gitlab_repo, + get_gitlab_token, get_preset_contexts, load_context, ) @@ -380,15 +381,15 @@ def gitlab_ci(_, test="all", custom_context=None): else: all_contexts = get_preset_contexts(test) print(f"We will tests {len(all_contexts)} contexts.") - agent = get_gitlab_repo() for context in all_contexts: print("Test gitlab configuration with context: ", context) config = generate_gitlab_full_configuration(".gitlab-ci.yml", dict(context)) - res = agent.ci_lint.create({"content": config}) - status = color_message("valid", "green") if res.valid else color_message("invalid", "red") + gitlab = Gitlab(api_token=get_gitlab_token()) + res = gitlab.lint(config) + status = color_message("valid", "green") if res["valid"] else color_message("invalid", "red") print(f"Config is {status}") - if len(res.warnings) > 0: - print(color_message(f"Warnings: {res.warnings}", "orange"), file=sys.stderr) - if not res.valid: - print(color_message(f"Errors: {res.errors}", "red"), file=sys.stderr) + if len(res["warnings"]) > 0: + print(color_message(f"Warnings: {res['warnings']}", "orange"), file=sys.stderr) + if not res["valid"]: + print(color_message(f"Errors: {res['errors']}", "red"), file=sys.stderr) raise Exit(code=1) diff --git a/tasks/notify.py b/tasks/notify.py index a5c8da6f26ce8..037f16be047a6 100644 --- a/tasks/notify.py +++ b/tasks/notify.py @@ -291,7 +291,7 @@ def update_statistics(job_executions): # Update statistics and collect consecutive failed jobs alert_jobs = {"consecutive": [], "cumulative": []} failed_jobs = 
get_failed_jobs(PROJECT_NAME, os.getenv("CI_PIPELINE_ID")) - failed_set = {job.name for job in failed_jobs.all_failures()} + failed_set = {job["name"] for job in failed_jobs.all_failures()} current_set = set(job_executions["jobs"].keys()) # Insert data for newly failing jobs new_failed_jobs = failed_set - current_set diff --git a/tasks/pipeline.py b/tasks/pipeline.py index 260d860f66f35..9a9478e7f9e8e 100644 --- a/tasks/pipeline.py +++ b/tasks/pipeline.py @@ -6,13 +6,11 @@ from datetime import datetime, timedelta, timezone import yaml -from gitlab import GitlabError -from gitlab.v4.objects import Project from invoke import task from invoke.exceptions import Exit from tasks.libs.ciproviders.github_api import GithubAPI -from tasks.libs.ciproviders.gitlab_api import get_gitlab_bot_token, get_gitlab_repo +from tasks.libs.ciproviders.gitlab import Gitlab, get_gitlab_bot_token, get_gitlab_token from tasks.libs.common.color import color_message from tasks.libs.common.utils import ( DEFAULT_BRANCH, @@ -56,7 +54,7 @@ def GitlabYamlLoader(): # Tasks to trigger pipelines -def check_deploy_pipeline(repo: Project, git_ref, release_version_6, release_version_7, repo_branch): +def check_deploy_pipeline(gitlab, git_ref, release_version_6, release_version_7, repo_branch): """ Run checks to verify a deploy pipeline is valid: - it targets a valid repo branch @@ -83,9 +81,9 @@ def check_deploy_pipeline(repo: Project, git_ref, release_version_6, release_ver if release_version_6 and match: # release_version_6 is not empty and git_ref matches v7 pattern, construct v6 tag and check. tag_name = "6." + "".join(match.groups()) - try: - repo.tags.get(tag_name) - except GitlabError: + gitlab_tag = gitlab.find_tag(tag_name) + + if ("name" not in gitlab_tag) or gitlab_tag["name"] != tag_name: print(f"Cannot find GitLab v6 tag {tag_name} while trying to build git ref {git_ref}") raise Exit(code=1) @@ -96,9 +94,9 @@ def check_deploy_pipeline(repo: Project, git_ref, release_version_6, release_ver if release_version_7 and match: # release_version_7 is not empty and git_ref matches v6 pattern, construct v7 tag and check. tag_name = "7." + "".join(match.groups()) - try: - repo.tags.get(tag_name) - except GitlabError: + gitlab_tag = gitlab.find_tag(tag_name) + + if ("name" not in gitlab_tag) or gitlab_tag["name"] != tag_name: print(f"Cannot find GitLab v7 tag {tag_name} while trying to build git ref {git_ref}") raise Exit(code=1) @@ -112,7 +110,8 @@ def clean_running_pipelines(ctx, git_ref=DEFAULT_BRANCH, here=False, use_latest_ should be cancelled. 
""" - agent = get_gitlab_repo() + gitlab = Gitlab(api_token=get_gitlab_token()) + gitlab.test_project_found() if here: git_ref = ctx.run("git rev-parse --abbrev-ref HEAD", hide=True).stdout.strip() @@ -125,14 +124,14 @@ def clean_running_pipelines(ctx, git_ref=DEFAULT_BRANCH, here=False, use_latest_ elif not sha: print(f"Git sha not provided, fetching all running pipelines on {git_ref}") - pipelines = get_running_pipelines_on_same_ref(agent, git_ref, sha) + pipelines = get_running_pipelines_on_same_ref(gitlab, git_ref, sha) print( f"Found {len(pipelines)} running pipeline(s) matching the request.", "They are ordered from the newest one to the oldest one.\n", sep='\n', ) - cancel_pipelines_with_confirmation(agent, pipelines) + cancel_pipelines_with_confirmation(gitlab, pipelines) def workflow_rules(gitlab_file=".gitlab-ci.yml"): @@ -176,33 +175,37 @@ def auto_cancel_previous_pipelines(ctx): if not os.environ.get('GITLAB_TOKEN'): raise Exit("GITLAB_TOKEN variable needed to cancel pipelines on the same ref.", 1) + gitlab = Gitlab(api_token=get_gitlab_token()) + gitlab.test_project_found() + git_ref = os.getenv("CI_COMMIT_REF_NAME") git_sha = os.getenv("CI_COMMIT_SHA") - repo = get_gitlab_repo() - pipelines = get_running_pipelines_on_same_ref(repo, git_ref) - pipelines_without_current = [p for p in pipelines if p.sha != git_sha] + pipelines = get_running_pipelines_on_same_ref(gitlab, git_ref) + pipelines_without_current = [p for p in pipelines if p["sha"] != git_sha] for pipeline in pipelines_without_current: # We cancel pipeline only if it correspond to a commit that is an ancestor of the current commit - is_ancestor = ctx.run(f'git merge-base --is-ancestor {pipeline.sha} {git_sha}', warn=True, hide="both") + is_ancestor = ctx.run(f'git merge-base --is-ancestor {pipeline["sha"]} {git_sha}', warn=True, hide="both") if is_ancestor.exited == 0: - print(f'Gracefully canceling jobs that are not canceled on pipeline {pipeline.id} ({pipeline.web_url})') - gracefully_cancel_pipeline(repo, pipeline, force_cancel_stages=["package_build"]) + print( + f'Gracefully canceling jobs that are not canceled on pipeline {pipeline["id"]} ({pipeline["web_url"]})' + ) + gracefully_cancel_pipeline(gitlab, pipeline, force_cancel_stages=["package_build"]) elif is_ancestor.exited == 1: - print(f'{pipeline.sha} is not an ancestor of {git_sha}, not cancelling pipeline {pipeline.id}') + print(f'{pipeline["sha"]} is not an ancestor of {git_sha}, not cancelling pipeline {pipeline["id"]}') elif is_ancestor.exited == 128: min_time_before_cancel = 5 print( - f'Could not determine if {pipeline.sha} is an ancestor of {git_sha}, probably because it has been deleted from the history because of force push' + f'Could not determine if {pipeline["sha"]} is an ancestor of {git_sha}, probably because it has been deleted from the history because of force push' ) - if datetime.strptime(pipeline.created_at, "%Y-%m-%dT%H:%M:%S.%fZ") < datetime.now() - timedelta( + if datetime.strptime(pipeline["created_at"], "%Y-%m-%dT%H:%M:%S.%fZ") < datetime.now() - timedelta( minutes=min_time_before_cancel ): print( - f'Pipeline started earlier than {min_time_before_cancel} minutes ago, gracefully canceling pipeline {pipeline.id}' + f'Pipeline started earlier than {min_time_before_cancel} minutes ago, gracefully canceling pipeline {pipeline["id"]}' ) - gracefully_cancel_pipeline(repo, pipeline, force_cancel_stages=["package_build"]) + gracefully_cancel_pipeline(gitlab, pipeline, force_cancel_stages=["package_build"]) else: print(is_ancestor.stderr) raise 
Exit(1) @@ -263,7 +266,8 @@ def run( inv pipeline.run --deploy --use-release-entries --major-versions "6,7" --git-ref "7.32.0" --repo-branch "stable" """ - repo = get_gitlab_repo() + gitlab = Gitlab(api_token=get_gitlab_token()) + gitlab.test_project_found() if (not git_ref and not here) or (git_ref and here): raise Exit("ERROR: Exactly one of --here or --git-ref must be specified.", code=1) @@ -286,7 +290,7 @@ def run( if deploy: # Check the validity of the deploy pipeline - check_deploy_pipeline(repo, git_ref, release_version_6, release_version_7, repo_branch) + check_deploy_pipeline(gitlab, git_ref, release_version_6, release_version_7, repo_branch) # Force all builds and kitchen tests to be run if not all_builds: print( @@ -305,7 +309,7 @@ def run( ) e2e_tests = True - pipelines = get_running_pipelines_on_same_ref(repo, git_ref) + pipelines = get_running_pipelines_on_same_ref(gitlab, git_ref) if pipelines: print( @@ -315,11 +319,11 @@ def run( "They are ordered from the newest one to the oldest one.\n", sep='\n', ) - cancel_pipelines_with_confirmation(repo, pipelines) + cancel_pipelines_with_confirmation(gitlab, pipelines) try: - pipeline = trigger_agent_pipeline( - repo, + pipeline_id = trigger_agent_pipeline( + gitlab, git_ref, release_version_6, release_version_7, @@ -334,7 +338,7 @@ def run( print(color_message(f"ERROR: pipeline does not match any workflow rule. Rules:\n{workflow_rules()}", "red")) return - wait_for_pipeline(repo, pipeline) + wait_for_pipeline(gitlab, pipeline_id) @task @@ -352,7 +356,8 @@ def follow(ctx, id=None, git_ref=None, here=False, project_name="DataDog/datadog inv pipeline.follow --id 1234567 """ - repo = get_gitlab_repo(project_name) + gitlab = Gitlab(project_name=project_name, api_token=get_gitlab_token()) + gitlab.test_project_found() args_given = 0 if id is not None: @@ -368,25 +373,22 @@ def follow(ctx, id=None, git_ref=None, here=False, project_name="DataDog/datadog ) if id is not None: - pipeline = repo.pipelines.get(id) - wait_for_pipeline(repo, pipeline) + wait_for_pipeline(gitlab, id) elif git_ref is not None: - wait_for_pipeline_from_ref(repo, git_ref) + wait_for_pipeline_from_ref(gitlab, git_ref) elif here: git_ref = ctx.run("git rev-parse --abbrev-ref HEAD", hide=True).stdout.strip() - wait_for_pipeline_from_ref(repo, git_ref) + wait_for_pipeline_from_ref(gitlab, git_ref) -def wait_for_pipeline_from_ref(repo: Project, ref): - # Get last updated pipeline - pipelines = repo.pipelines.list(ref=ref, per_page=1, order_by='updated_at') - if len(pipelines) == 0: +def wait_for_pipeline_from_ref(gitlab, ref): + pipeline = gitlab.last_pipeline_for_ref(ref) + if pipeline is not None: + wait_for_pipeline(gitlab, pipeline['id']) + else: print(f"No pipelines found for {ref}") raise Exit(code=1) - pipeline = pipelines[0] - wait_for_pipeline(repo, pipeline) - @task(iterable=['variable']) def trigger_child_pipeline(_, git_ref, project_name, variable=None, follow=True): @@ -400,9 +402,9 @@ def trigger_child_pipeline(_, git_ref, project_name, variable=None, follow=True) Use --follow to make this task wait for the pipeline to finish, and return 1 if it fails. (requires GITLAB_TOKEN). 
Examples: - inv pipeline.trigger-child-pipeline --git-ref "main" --project-name "DataDog/agent-release-management" --variable "RELEASE_VERSION" + inv pipeline.trigger-child-pipeline --git-ref "master" --project-name "DataDog/agent-release-management" --variables "RELEASE_VERSION" - inv pipeline.trigger-child-pipeline --git-ref "main" --project-name "DataDog/agent-release-management" --variable "VAR1" --variable "VAR2" --variable "VAR3" + inv pipeline.trigger-child-pipeline --git-ref "master" --project-name "DataDog/agent-release-management" --variables "VAR1,VAR2,VAR3" """ if not os.environ.get('CI_JOB_TOKEN'): @@ -416,7 +418,7 @@ def trigger_child_pipeline(_, git_ref, project_name, variable=None, follow=True) # set, but trigger_pipeline doesn't use it os.environ["GITLAB_TOKEN"] = os.environ['CI_JOB_TOKEN'] - repo = get_gitlab_repo(project_name) + gitlab = Gitlab(project_name=project_name, api_token=get_gitlab_token()) data = {"token": os.environ['CI_JOB_TOKEN'], "ref": git_ref, "variables": {}} @@ -441,22 +443,23 @@ def trigger_child_pipeline(_, git_ref, project_name, variable=None, follow=True) flush=True, ) - try: - data['variables'] = [{'key': key, 'value': value} for (key, value) in data['variables'].items()] + res = gitlab.trigger_pipeline(data) - pipeline = repo.pipelines.create(data) - except GitlabError as e: - raise Exit(f"Failed to create child pipeline: {e}", code=1) + if 'id' not in res: + raise Exit(f"Failed to create child pipeline: {res}", code=1) - print(f"Created a child pipeline with id={pipeline.id}, url={pipeline.web_url}", flush=True) + pipeline_id = res['id'] + pipeline_url = res['web_url'] + print(f"Created a child pipeline with id={pipeline_id}, url={pipeline_url}", flush=True) if follow: print("Waiting for child pipeline to finish...", flush=True) - wait_for_pipeline(repo, pipeline) + wait_for_pipeline(gitlab, pipeline_id) # Check pipeline status - pipestatus = pipeline.status.lower().strip() + pipeline = gitlab.pipeline(pipeline_id) + pipestatus = pipeline["status"].lower().strip() if pipestatus != "success": raise Exit(f"Error: child pipeline status {pipestatus.title()}", code=1) @@ -579,16 +582,21 @@ def changelog(ctx, new_commit_sha): ) +def _init_pipeline_schedule_task(): + gitlab = Gitlab(api_token=get_gitlab_bot_token()) + gitlab.test_project_found() + return gitlab + + @task def get_schedules(_): """ Pretty-print all pipeline schedules on the repository. """ - repo = get_gitlab_repo(token=get_gitlab_bot_token()) - - for sched in repo.pipelineschedules.list(per_page=100, all=True): - sched.pprint() + gitlab = _init_pipeline_schedule_task() + for ps in gitlab.all_pipeline_schedules(): + pprint.pprint(ps) @task @@ -597,11 +605,9 @@ def get_schedule(_, schedule_id): Pretty-print a single pipeline schedule on the repository. """ - repo = get_gitlab_repo(token=get_gitlab_bot_token()) - - sched = repo.pipelineschedules.get(schedule_id) - - sched.pprint() + gitlab = _init_pipeline_schedule_task() + result = gitlab.pipeline_schedule(schedule_id) + pprint.pprint(result) @task @@ -612,13 +618,9 @@ def create_schedule(_, description, ref, cron, cron_timezone=None, active=False) Note that unless you explicitly specify the --active flag, the schedule will be created as inactive. 
""" - repo = get_gitlab_repo(token=get_gitlab_bot_token()) - - sched = repo.pipelineschedules.create( - {'description': description, 'ref': ref, 'cron': cron, 'cron_timezone': cron_timezone, 'active': active} - ) - - sched.pprint() + gitlab = _init_pipeline_schedule_task() + result = gitlab.create_pipeline_schedule(description, ref, cron, cron_timezone, active) + pprint.pprint(result) @task @@ -627,14 +629,9 @@ def edit_schedule(_, schedule_id, description=None, ref=None, cron=None, cron_ti Edit an existing pipeline schedule on the repository. """ - repo = get_gitlab_repo(token=get_gitlab_bot_token()) - - data = {'description': description, 'ref': ref, 'cron': cron, 'cron_timezone': cron_timezone} - data = {key: value for (key, value) in data.items() if value is not None} - - sched = repo.pipelineschedules.update(schedule_id, data) - - pprint.pprint(sched) + gitlab = _init_pipeline_schedule_task() + result = gitlab.edit_pipeline_schedule(schedule_id, description, ref, cron, cron_timezone) + pprint.pprint(result) @task @@ -643,11 +640,9 @@ def activate_schedule(_, schedule_id): Activate an existing pipeline schedule on the repository. """ - repo = get_gitlab_repo(token=get_gitlab_bot_token()) - - sched = repo.pipelineschedules.update(schedule_id, {'active': True}) - - sched.pprint() + gitlab = _init_pipeline_schedule_task() + result = gitlab.edit_pipeline_schedule(schedule_id, active=True) + pprint.pprint(result) @task @@ -656,11 +651,9 @@ def deactivate_schedule(_, schedule_id): Deactivate an existing pipeline schedule on the repository. """ - repo = get_gitlab_repo(token=get_gitlab_bot_token()) - - sched = repo.pipelineschedules.update(schedule_id, {'active': False}) - - sched.pprint() + gitlab = _init_pipeline_schedule_task() + result = gitlab.edit_pipeline_schedule(schedule_id, active=False) + pprint.pprint(result) @task @@ -669,11 +662,9 @@ def delete_schedule(_, schedule_id): Delete an existing pipeline schedule on the repository. """ - repo = get_gitlab_repo(token=get_gitlab_bot_token()) - - repo.pipelineschedules.delete(schedule_id) - - print('Deleted schedule', schedule_id) + gitlab = _init_pipeline_schedule_task() + result = gitlab.delete_pipeline_schedule(schedule_id) + pprint.pprint(result) @task @@ -682,12 +673,9 @@ def create_schedule_variable(_, schedule_id, key, value): Create a variable for an existing schedule on the repository. """ - repo = get_gitlab_repo(token=get_gitlab_bot_token()) - - sched = repo.pipelineschedules.get(schedule_id) - sched.variables.create({'key': key, 'value': value}) - - sched.pprint() + gitlab = _init_pipeline_schedule_task() + result = gitlab.create_pipeline_schedule_variable(schedule_id, key, value) + pprint.pprint(result) @task @@ -696,12 +684,9 @@ def edit_schedule_variable(_, schedule_id, key, value): Edit an existing variable for a schedule on the repository. """ - repo = get_gitlab_repo(token=get_gitlab_bot_token()) - - sched = repo.pipelineschedules.get(schedule_id) - sched.variables.update(key, {'value': value}) - - sched.pprint() + gitlab = _init_pipeline_schedule_task() + result = gitlab.edit_pipeline_schedule_variable(schedule_id, key, value) + pprint.pprint(result) @task @@ -710,12 +695,9 @@ def delete_schedule_variable(_, schedule_id, key): Delete an existing variable for a schedule on the repository. 
""" - repo = get_gitlab_repo(token=get_gitlab_bot_token()) - - sched = repo.pipelineschedules.get(schedule_id) - sched.variables.delete(key) - - sched.pprint() + gitlab = _init_pipeline_schedule_task() + result = gitlab.delete_pipeline_schedule_variable(schedule_id, key) + pprint.pprint(result) @task( @@ -926,28 +908,28 @@ def test_merge_queue(ctx): pr.create_issue_comment("/merge") # Search for the generated pipeline print(f"PR {pr.html_url} is waiting for MQ pipeline generation") - agent = get_gitlab_repo() + gitlab = Gitlab(api_token=get_gitlab_token()) max_attempts = 5 for attempt in range(max_attempts): time.sleep(30) - pipelines = agent.pipelines.list(per_page=100) + pipelines = gitlab.last_pipelines() try: - pipeline = next(p for p in pipelines if p.ref.startswith(f"mq-working-branch-{test_main}")) - print(f"Pipeline found: {pipeline.web_url}") + pipeline = next(p for p in pipelines if p["ref"].startswith(f"mq-working-branch-{test_main}")) + print(f"Pipeline found: {pipeline['web_url']}") break except StopIteration: if attempt == max_attempts - 1: raise RuntimeError("No pipeline found for the merge queue") continue - success = pipeline.status == "running" + success = pipeline["status"] == "running" if success: print("Pipeline correctly created, congrats") else: - print(f"[ERROR] Impossible to generate a pipeline for the merge queue, please check {pipeline.web_url}") + print(f"[ERROR] Impossible to generate a pipeline for the merge queue, please check {pipeline['web_url']}") # Clean up print("Cleaning up") if success: - pipeline.cancel() + gitlab.cancel_pipeline(pipeline["id"]) pr.edit(state="closed") ctx.run(f"git checkout {current_branch}", hide=True) ctx.run(f"git branch -D {test_main}", hide=True) diff --git a/tasks/release.py b/tasks/release.py index 27eb8813640da..9b882ccabdbb0 100644 --- a/tasks/release.py +++ b/tasks/release.py @@ -11,12 +11,11 @@ from datetime import date from time import sleep -from gitlab import GitlabError from invoke import Failure, task from invoke.exceptions import Exit from tasks.libs.ciproviders.github_api import GithubAPI -from tasks.libs.ciproviders.gitlab_api import get_gitlab_repo +from tasks.libs.ciproviders.gitlab import Gitlab, get_gitlab_token from tasks.libs.common.color import color_message from tasks.libs.common.user_interactions import yes_no_question from tasks.libs.common.utils import ( @@ -1333,7 +1332,7 @@ def build_rc(ctx, major_versions="6,7", patch_version=False, k8s_deployments=Fal if sys.version_info[0] < 3: return Exit(message="Must use Python 3 for this task", code=1) - datadog_agent = get_gitlab_repo() + gitlab = Gitlab(project_name=GITHUB_REPO_NAME, api_token=get_gitlab_token()) list_major_versions = parse_major_versions(major_versions) # Get the version of the highest major: needed for tag_version and to know @@ -1382,11 +1381,7 @@ def build_rc(ctx, major_versions="6,7", patch_version=False, k8s_deployments=Fal print(color_message(f"Waiting until the {new_version} tag appears in Gitlab", "bold")) gitlab_tag = None while not gitlab_tag: - try: - gitlab_tag = datadog_agent.tags.get(str(new_version)) - except GitlabError: - continue - + gitlab_tag = gitlab.find_tag(str(new_version)).get("name", None) sleep(5) print(color_message("Creating RC pipeline", "bold")) diff --git a/tasks/unit-tests/gitlab_api_tests.py b/tasks/unit-tests/gitlab_api_tests.py index 24399f816c8bd..ad618b0d380ff 100644 --- a/tasks/unit-tests/gitlab_api_tests.py +++ b/tasks/unit-tests/gitlab_api_tests.py @@ -1,6 +1,97 @@ import unittest +from itertools 
import cycle +from unittest import mock -from tasks.libs.ciproviders.gitlab_api import generate_gitlab_full_configuration, read_includes +from invoke.exceptions import Exit + +from tasks.libs.ciproviders.gitlab import Gitlab, generate_gitlab_full_configuration, get_gitlab_token, read_includes +from tasks.libs.common.remote_api import APIError + + +class MockResponse: + def __init__(self, content, status_code): + self.content = content + self.status_code = status_code + + def json(self): + return self.content + + +#################### FAIL REQUEST ##################### + + +def fail_not_found_request(*_args, **_kwargs): + return MockResponse([], 404) + + +##################### MOCKED GITLAB ##################### + + +def mocked_502_gitlab_requests(*_args, **_kwargs): + return MockResponse( + "\r\n502 Bad Gateway\r\n\r\n

\r\n\r\n\r\n", + 502, + ) + + +def mocked_gitlab_project_request(*_args, **_kwargs): + return MockResponse("name", 200) + + +class SideEffect: + def __init__(self, *fargs): + self.functions = cycle(fargs) + + def __call__(self, *args, **kwargs): + func = next(self.functions) + return func(*args, **kwargs) + + +class TestStatusCode5XX(unittest.TestCase): + @mock.patch('requests.get', side_effect=SideEffect(mocked_502_gitlab_requests, mocked_gitlab_project_request)) + def test_gitlab_one_fail_one_success(self, _): + gitlab = Gitlab(api_token=get_gitlab_token()) + gitlab.requests_sleep_time = 0 + gitlab.test_project_found() + + @mock.patch( + 'requests.get', + side_effect=SideEffect( + mocked_502_gitlab_requests, + mocked_502_gitlab_requests, + mocked_502_gitlab_requests, + mocked_502_gitlab_requests, + mocked_gitlab_project_request, + ), + ) + def test_gitlab_last_one_success(self, _): + gitlab = Gitlab(api_token=get_gitlab_token()) + gitlab.requests_sleep_time = 0 + gitlab.test_project_found() + + @mock.patch('requests.get', side_effect=SideEffect(mocked_502_gitlab_requests)) + def test_gitlab_full_fail(self, _): + failed = False + try: + gitlab = Gitlab(api_token=get_gitlab_token()) + gitlab.requests_sleep_time = 0 + gitlab.test_project_found() + except Exit: + failed = True + if not failed: + Exit("GitlabAPI was expected to fail") + + @mock.patch('requests.get', side_effect=SideEffect(fail_not_found_request, mocked_gitlab_project_request)) + def test_gitlab_real_fail(self, _): + failed = False + try: + gitlab = Gitlab(api_token=get_gitlab_token()) + gitlab.requests_sleep_time = 0 + gitlab.test_project_found() + except APIError: + failed = True + if not failed: + Exit("GitlabAPI was expected to fail") class TestReadIncludes(unittest.TestCase): diff --git a/tasks/unit-tests/notify_tests.py b/tasks/unit-tests/notify_tests.py index 8d3c5b7173d1a..01b54f89170c3 100644 --- a/tasks/unit-tests/notify_tests.py +++ b/tasks/unit-tests/notify_tests.py @@ -2,10 +2,8 @@ import os import pathlib import unittest -from typing import List from unittest.mock import MagicMock, patch -from gitlab.v4.objects import ProjectJob from invoke import MockContext, Result from invoke.exceptions import UnexpectedExit @@ -13,95 +11,67 @@ from tasks.libs.types.types import FailedJobs, FailedJobType -def get_fake_jobs() -> List[ProjectJob]: - with open("tasks/unit-tests/testdata/jobs.json") as f: - jobs = json.load(f) - - return [ProjectJob(MagicMock(), attrs=job) for job in jobs] - - class TestSendMessage(unittest.TestCase): - @patch('tasks.libs.ciproviders.gitlab_api.get_gitlab_api') - def test_merge(self, api_mock): - repo_mock = api_mock.return_value.projects.get.return_value - repo_mock.jobs.get.return_value.trace.return_value = b"Log trace" - list_mock = repo_mock.pipelines.get.return_value.jobs.list - list_mock.side_effect = [get_fake_jobs(), []] - notify.send_message(MockContext(), notification_type="merge", print_to_stdout=True) - list_mock.assert_called() - @patch("tasks.notify.get_failed_jobs") def test_merge_without_get_failed_call(self, get_failed_jobs_mock): failed = FailedJobs() failed.add_failed_job( - ProjectJob( - MagicMock(), - attrs={ - "name": "job1", - "stage": "stage1", - "retry_summary": [], - "web_url": "http://www.job.com", - "failure_type": FailedJobType.INFRA_FAILURE, - "allow_failure": False, - }, - ) + { + "name": "job1", + "stage": "stage1", + "retry_summary": [], + "url": "http://www.job.com", + "failure_type": FailedJobType.INFRA_FAILURE, + "allow_failure": False, + } ) 
failed.add_failed_job( - ProjectJob( - MagicMock(), - attrs={ - "name": "job2", - "stage": "stage2", - "retry_summary": [], - "web_url": "http://www.job.com", - "failure_type": FailedJobType.INFRA_FAILURE, - "allow_failure": True, - }, - ) + { + "name": "job2", + "stage": "stage2", + "retry_summary": [], + "url": "http://www.job.com", + "failure_type": FailedJobType.INFRA_FAILURE, + "allow_failure": True, + } ) failed.add_failed_job( - ProjectJob( - MagicMock(), - attrs={ - "name": "job3", - "stage": "stage3", - "retry_summary": [], - "web_url": "http://www.job.com", - "failure_type": FailedJobType.JOB_FAILURE, - "allow_failure": False, - }, - ) + { + "name": "job3", + "stage": "stage3", + "retry_summary": [], + "url": "http://www.job.com", + "failure_type": FailedJobType.JOB_FAILURE, + "allow_failure": False, + } ) failed.add_failed_job( - ProjectJob( - MagicMock(), - attrs={ - "name": "job4", - "stage": "stage4", - "retry_summary": [], - "web_url": "http://www.job.com", - "failure_type": FailedJobType.JOB_FAILURE, - "allow_failure": True, - }, - ) + { + "name": "job4", + "stage": "stage4", + "retry_summary": [], + "url": "http://www.job.com", + "failure_type": FailedJobType.JOB_FAILURE, + "allow_failure": True, + } ) get_failed_jobs_mock.return_value = failed notify.send_message(MockContext(), notification_type="merge", print_to_stdout=True) get_failed_jobs_mock.assert_called() - @patch('tasks.libs.ciproviders.gitlab_api.get_gitlab_api') - def test_merge_with_get_failed_call(self, api_mock): - repo_mock = api_mock.return_value.projects.get.return_value - trace_mock = repo_mock.jobs.get.return_value.trace - list_mock = repo_mock.pipelines.get.return_value.jobs.list - - trace_mock.return_value = b"no basic auth credentials" - list_mock.return_value = get_fake_jobs() - + @patch("requests.get") + def test_merge_with_get_failed_call(self, get_mock): + with open("tasks/unit-tests/testdata/jobs.json") as f: + jobs = json.load(f) + job_list = {"json.return_value": jobs} + no_jobs = {"json.return_value": ""} + get_mock.side_effect = [ + MagicMock(status_code=200, **job_list), + MagicMock(status_code=200, **no_jobs), + MagicMock(status_code=200, text="no basic auth credentials"), + ] notify.send_message(MockContext(), notification_type="merge", print_to_stdout=True) - - trace_mock.assert_called() - list_mock.assert_called() + get_mock.assert_called() def test_post_to_channel1(self): self.assertTrue(notify._should_send_message_to_channel('main', default_branch='main')) @@ -132,40 +102,39 @@ def test_post_to_author5(self): class TestSendStats(unittest.TestCase): - @patch('tasks.libs.ciproviders.gitlab_api.get_gitlab_api') + @patch("requests.get") @patch("tasks.notify.create_count", new=MagicMock()) - def test_nominal(self, api_mock): - repo_mock = api_mock.return_value.projects.get.return_value - trace_mock = repo_mock.jobs.get.return_value.trace - list_mock = repo_mock.pipelines.get.return_value.jobs.list - - trace_mock.return_value = b"E2E INTERNAL ERROR" - list_mock.return_value = get_fake_jobs() - + def test_nominal(self, get_mock): + with open("tasks/unit-tests/testdata/jobs.json") as f: + jobs = json.load(f) + job_list = {"json.return_value": jobs} + no_jobs = {"json.return_value": ""} + get_mock.side_effect = [ + MagicMock(status_code=200, **job_list), + MagicMock(status_code=200, **no_jobs), + MagicMock(status_code=200, text="E2E INTERNAL ERROR"), + ] notify.send_stats(MockContext(), print_to_stdout=True) - - trace_mock.assert_called() - list_mock.assert_called() + get_mock.assert_called() 
class TestCheckConsistentFailures(unittest.TestCase): - @patch('tasks.libs.ciproviders.gitlab_api.get_gitlab_api') - def test_nominal(self, api_mock): + @patch("requests.get") + def test_nominal(self, get_mock): os.environ["CI_PIPELINE_ID"] = "456" - - repo_mock = api_mock.return_value.projects.get.return_value - trace_mock = repo_mock.jobs.get.return_value.trace - list_mock = repo_mock.pipelines.get.return_value.jobs.list - - trace_mock.return_value = b"net/http: TLS handshake timeout" - list_mock.return_value = get_fake_jobs() - + with open("tasks/unit-tests/testdata/jobs.json") as f: + jobs = json.load(f) + job_list = {"json.return_value": jobs} + no_jobs = {"json.return_value": ""} + get_mock.side_effect = [ + MagicMock(status_code=200, **job_list), + MagicMock(status_code=200, **no_jobs), + MagicMock(status_code=200, text="net/http: TLS handshake timeout"), + ] notify.check_consistent_failures( MockContext(run=Result("test")), "tasks/unit-tests/testdata/job_executions.json" ) - - trace_mock.assert_called() - list_mock.assert_called() + get_mock.assert_called() class TestRetrieveJobExecutionsCreated(unittest.TestCase): @@ -204,9 +173,7 @@ class TestUpdateStatistics(unittest.TestCase): @patch('tasks.notify.get_failed_jobs') def test_nominal(self, mock_get_failed): failed_jobs = mock_get_failed.return_value - failed_jobs.all_failures.return_value = [ - ProjectJob(MagicMock(), attrs=a) for a in [{"name": "nifnif"}, {"name": "nafnaf"}] - ] + failed_jobs.all_failures.return_value = [{"name": "nifnif"}, {"name": "nafnaf"}] j = { "jobs": { "nafnaf": {"consecutive_failures": 2, "cumulative_failures": [0, 0, 0, 0, 0, 0, 0, 0, 1, 1]}, @@ -228,9 +195,7 @@ def test_nominal(self, mock_get_failed): @patch('tasks.notify.get_failed_jobs') def test_multiple_failures(self, mock_get_failed): failed_jobs = mock_get_failed.return_value - failed_jobs.all_failures.return_value = [ - ProjectJob(MagicMock(), attrs=a) for a in [{"name": "poulidor"}, {"name": "virenque"}, {"name": "bardet"}] - ] + failed_jobs.all_failures.return_value = [{"name": "poulidor"}, {"name": "virenque"}, {"name": "bardet"}] j = { "jobs": { "poulidor": {"consecutive_failures": 8, "cumulative_failures": [0, 0, 1, 1, 1, 1, 1, 1, 1, 1]}, From 57f095ff9b1dc470e840a5b0471013cf7c60655e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Hugo=20Beauz=C3=A9e-Luyssen?= Date: Fri, 12 Apr 2024 17:44:49 +0200 Subject: [PATCH 24/99] CI: kitchen_deploy: reduce contention when deploying debian packages (#24610) The kitchen_deploy jobs are publishing to a dedicated repo for each pipeline, meaning there's no need to protect against corruption coming from another pipeline --- .gitlab/kitchen_deploy/kitchen_deploy.yml | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/.gitlab/kitchen_deploy/kitchen_deploy.yml b/.gitlab/kitchen_deploy/kitchen_deploy.yml index 246701370d1a5..bd34481a63dde 100644 --- a/.gitlab/kitchen_deploy/kitchen_deploy.yml +++ b/.gitlab/kitchen_deploy/kitchen_deploy.yml @@ -33,21 +33,10 @@ - filename=$(ls datadog-signing-keys*.deb); mv $filename datadog-signing-keys_${DD_PIPELINE_ID}.deb - popd -# Avoid simultaneous writes on the repo metadata file that made kitchen tests fail before -.deploy_deb_resource_group-a6: &deploy_deb_resource_group-a6 - resource_group: deploy_deb_a6 - -.deploy_deb_resource_group-a7: &deploy_deb_resource_group-a7 - resource_group: deploy_deb_a7 - -.deploy_deb_resource_group-i7: &deploy_deb_resource_group-i7 - resource_group: deploy_deb_i7 - .deploy_deb_testing-a6: stage: kitchen_deploy image: 
486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-builders/gitlab_agent_deploy:$DATADOG_AGENT_BUILDERS tags: ["arch:amd64"] - <<: *deploy_deb_resource_group-a6 variables: DD_PIPELINE_ID: $CI_PIPELINE_ID-a6 before_script: @@ -58,7 +47,6 @@ stage: kitchen_deploy image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-builders/gitlab_agent_deploy:$DATADOG_AGENT_BUILDERS tags: ["arch:amd64"] - <<: *deploy_deb_resource_group-i7 variables: DD_PIPELINE_ID: $CI_PIPELINE_ID-i7 before_script: @@ -104,7 +92,6 @@ deploy_deb_testing-a6_arm64: stage: kitchen_deploy image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-builders/gitlab_agent_deploy:$DATADOG_AGENT_BUILDERS tags: ["arch:amd64"] - <<: *deploy_deb_resource_group-a7 variables: DD_PIPELINE_ID: $CI_PIPELINE_ID-a7 before_script: From 1359713c94a521e71a95b3193cf5fc2b6c4fe363 Mon Sep 17 00:00:00 2001 From: Paul Cacheux Date: Fri, 12 Apr 2024 17:54:29 +0200 Subject: [PATCH 25/99] fix `inv -e security-agent.sync-secl-win-pkg` on macOS (#24646) --- tasks/security_agent.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tasks/security_agent.py b/tasks/security_agent.py index a938b55323fa8..0af5fc538de7e 100644 --- a/tasks/security_agent.py +++ b/tasks/security_agent.py @@ -945,5 +945,8 @@ def sync_secl_win_pkg(ctx): fto = ffrom ctx.run(f"cp pkg/security/secl/model/{ffrom} pkg/security/seclwin/model/{fto}") - ctx.run(f"sed -i '/^\\/\\/go:build/d' pkg/security/seclwin/model/{fto}") + if sys.platform == "darwin": + ctx.run(f"sed -i '' '/^\\/\\/go:build/d' pkg/security/seclwin/model/{fto}") + else: + ctx.run(f"sed -i '/^\\/\\/go:build/d' pkg/security/seclwin/model/{fto}") ctx.run(f"gofmt -s -w pkg/security/seclwin/model/{fto}") From 911bd7d0c8574dcc5fc206c70d1bdf08afabe918 Mon Sep 17 00:00:00 2001 From: Nicolas Schweitzer Date: Fri, 12 Apr 2024 17:57:39 +0200 Subject: [PATCH 26/99] feat(ci): Remove usage of unused image (#24639) * feat(ci): Remove usage of unused image * Remove reference on unused variables --- .gitlab-ci.yml | 6 +----- .gitlab/check_merge/do_not_merge.yml | 2 -- .gitlab/source_test/linux.yml | 4 ++-- 3 files changed, 3 insertions(+), 9 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index baac10e636852..748a75f3fa7f5 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -170,10 +170,6 @@ variables: DATADOG_AGENT_ARMBUILDIMAGES: v31802788-2dee8fe9 DATADOG_AGENT_SYSPROBE_BUILDIMAGES_SUFFIX: "" DATADOG_AGENT_SYSPROBE_BUILDIMAGES: v31802788-2dee8fe9 - DATADOG_AGENT_KERNEL_MATRIX_TESTING_BUILDIMAGES_SUFFIX: "" - DATADOG_AGENT_KERNEL_MATRIX_TESTING_BUILDIMAGES: v31802788-2dee8fe9 - DATADOG_AGENT_NIKOS_BUILDIMAGES_SUFFIX: "" - DATADOG_AGENT_NIKOS_BUILDIMAGES: v31802788-2dee8fe9 DATADOG_AGENT_BTF_GEN_BUILDIMAGES_SUFFIX: "" DATADOG_AGENT_BTF_GEN_BUILDIMAGES: v31802788-2dee8fe9 DATADOG_AGENT_BUILDERS: v28719426-b6a4fd9 @@ -367,7 +363,7 @@ variables: - .gitlab/container_build/fakeintake.yml - .gitlab/dev_container_deploy/fakeintake.yml compare_to: main # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916 - + # # Workflow rules # Rules used to define whether a pipeline should run, and with which variables diff --git a/.gitlab/check_merge/do_not_merge.yml b/.gitlab/check_merge/do_not_merge.yml index 877698f1d2881..b0405b10dd467 100644 --- a/.gitlab/check_merge/do_not_merge.yml +++ b/.gitlab/check_merge/do_not_merge.yml @@ -16,8 +16,6 @@ do-not-merge: [ ! -z "$DATADOG_AGENT_WINBUILDIMAGES_SUFFIX" ] || [ ! 
-z "$DATADOG_AGENT_ARMBUILDIMAGES_SUFFIX" ] || [ ! -z "$DATADOG_AGENT_SYSPROBE_BUILDIMAGES_SUFFIX" ] || - [ ! -z "$DATADOG_AGENT_KERNEL_MATRIX_TESTING_BUILDIMAGES_SUFFIX" ] || - [ ! -z "$DATADOG_AGENT_NIKOS_BUILDIMAGES_SUFFIX" ] || [ ! -z "$DATADOG_AGENT_BTF_GEN_BUILDIMAGES_SUFFIX" ] || [ ! -z "$TEST_INFRA_DEFINITIONS_BUILDIMAGES_SUFFIX" ]; then echo "Pull request uses non-empty BUILDIMAGES_SUFFIX variable" diff --git a/.gitlab/source_test/linux.yml b/.gitlab/source_test/linux.yml index 5304729be7918..561eb1a201077 100644 --- a/.gitlab/source_test/linux.yml +++ b/.gitlab/source_test/linux.yml @@ -157,7 +157,7 @@ tests_rpm-x64-py2: - !reference [.except_disable_unit_tests] - !reference [.except_mergequeue] - when: on_success - image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/rpm_x64_testing$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES + image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/rpm_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES tags: ["arch:amd64"] variables: PYTHON_RUNTIMES: '2' @@ -168,7 +168,7 @@ tests_rpm-x64-py3: extends: - .rtloader_tests - .linux_tests_with_upload - image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/rpm_x64_testing$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES + image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/rpm_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES tags: ["arch:amd64"] variables: PYTHON_RUNTIMES: '3' From c0e41ecacc17ae029f928f1051962f9cbd42bf5c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9lian=20Raimbault?= <161456554+CelianR@users.noreply.github.com> Date: Fri, 12 Apr 2024 18:27:39 +0200 Subject: [PATCH 27/99] [fix] Benchmark gitlab import error (#24647) * [fix-benchmark-gitlab-import] Test * [fix-benchmark-gitlab-import] Test * [fix-benchmark-gitlab-import] Cleaned code --- .gitlab/benchmarks/benchmarks.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitlab/benchmarks/benchmarks.yml b/.gitlab/benchmarks/benchmarks.yml index 7152e9c0e2d86..b38fe9b56d894 100644 --- a/.gitlab/benchmarks/benchmarks.yml +++ b/.gitlab/benchmarks/benchmarks.yml @@ -10,6 +10,7 @@ benchmark: tags: ["team:apm-k8s-tweaked-metal-datadog-agent", "specific:true"] script: - export ARTIFACTS_DIR="$(pwd)/artifacts" && mkdir -p $ARTIFACTS_DIR + - pip install -r requirements.txt - ./test/benchmarks/apm_scripts/capture-hardware-software-info.sh - ./test/benchmarks/apm_scripts/run-benchmarks.sh - ./test/benchmarks/apm_scripts/analyze-results.sh From d4a7be27b1ca8e9fbad533d590682f793fbd429d Mon Sep 17 00:00:00 2001 From: Gustavo Caso Date: Fri, 12 Apr 2024 18:33:53 +0200 Subject: [PATCH 28/99] fix jmx and check command to have a valid settings component (#24635) --- cmd/agent/subcommands/jmx/command.go | 6 +++++- pkg/cli/subcommands/check/command.go | 7 +++++-- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/cmd/agent/subcommands/jmx/command.go b/cmd/agent/subcommands/jmx/command.go index 239e1ea7392d1..1f29b02025a64 100644 --- a/cmd/agent/subcommands/jmx/command.go +++ b/cmd/agent/subcommands/jmx/command.go @@ -43,6 +43,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/log/logimpl" "github.com/DataDog/datadog-agent/comp/core/secrets" "github.com/DataDog/datadog-agent/comp/core/settings" + "github.com/DataDog/datadog-agent/comp/core/settings/settingsimpl" "github.com/DataDog/datadog-agent/comp/core/status" "github.com/DataDog/datadog-agent/comp/core/tagger" 
"github.com/DataDog/datadog-agent/comp/core/workloadmeta" @@ -141,6 +142,10 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { workloadmeta.Module(), apiimpl.Module(), authtokenimpl.Module(), + // The jmx command do not have settings that change are runtime + // still, we need to pass it to ensure the API server is proprely initialized + settingsimpl.Module(), + fx.Supply(settings.Settings{}), // TODO(components): this is a temporary hack as the StartServer() method of the API package was previously called with nil arguments // This highlights the fact that the API Server created by JMX (through ExecJmx... function) should be different from the ones created // in others commands such as run. @@ -153,7 +158,6 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { fx.Provide(func() inventoryagent.Component { return nil }), fx.Provide(func() inventoryhost.Component { return nil }), fx.Provide(func() demultiplexer.Component { return nil }), - fx.Provide(func() settings.Component { return nil }), fx.Provide(func() inventorychecks.Component { return nil }), fx.Provide(func() packagesigning.Component { return nil }), fx.Provide(func() optional.Option[rcservice.Component] { return optional.NewNoneOption[rcservice.Component]() }), diff --git a/pkg/cli/subcommands/check/command.go b/pkg/cli/subcommands/check/command.go index 79870a8ad1bae..55dfdfb18b07e 100644 --- a/pkg/cli/subcommands/check/command.go +++ b/pkg/cli/subcommands/check/command.go @@ -44,6 +44,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/log/logimpl" "github.com/DataDog/datadog-agent/comp/core/secrets" "github.com/DataDog/datadog-agent/comp/core/settings" + "github.com/DataDog/datadog-agent/comp/core/settings/settingsimpl" "github.com/DataDog/datadog-agent/comp/core/status" "github.com/DataDog/datadog-agent/comp/core/status/statusimpl" "github.com/DataDog/datadog-agent/comp/core/sysprobeconfig/sysprobeconfigimpl" @@ -208,7 +209,10 @@ func MakeCommand(globalParamsGetter func() GlobalParams) *cobra.Command { }, ), statusimpl.Module(), - + // The check command do not have settings that change are runtime + // still, we need to pass it to ensure the API server is proprely initialized + settingsimpl.Module(), + fx.Supply(settings.Settings{}), // TODO(components): this is a temporary hack as the StartServer() method of the API package was previously called with nil arguments // This highlights the fact that the API Server created by JMX (through ExecJmx... function) should be different from the ones created // in others commands such as run. 
@@ -217,7 +221,6 @@ func MakeCommand(globalParamsGetter func() GlobalParams) *cobra.Command { fx.Provide(func() replay.Component { return nil }), fx.Provide(func() pidmap.Component { return nil }), fx.Provide(func() serverdebug.Component { return nil }), - fx.Provide(func() settings.Component { return nil }), fx.Provide(func() host.Component { return nil }), fx.Provide(func() inventoryagent.Component { return nil }), fx.Provide(func() inventoryhost.Component { return nil }), From 44fb1ec7e8c4b0009ddc549b78af4c9217e8ddc6 Mon Sep 17 00:00:00 2001 From: Paul Cacheux Date: Fri, 12 Apr 2024 19:10:47 +0200 Subject: [PATCH 29/99] [CWS] make MacroEvaluator use the cached fields (#24652) --- pkg/security/secl/compiler/eval/macro.go | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/pkg/security/secl/compiler/eval/macro.go b/pkg/security/secl/compiler/eval/macro.go index b01e5e4e218d2..2135c8f178fba 100644 --- a/pkg/security/secl/compiler/eval/macro.go +++ b/pkg/security/secl/compiler/eval/macro.go @@ -169,11 +169,5 @@ func (m *Macro) GetFields() []Field { // GetFields - Returns all the Field that the MacroEvaluator handles func (m *MacroEvaluator) GetFields() []Field { - fields := make([]Field, len(m.fieldValues)) - i := 0 - for key := range m.fieldValues { - fields[i] = key - i++ - } - return fields + return m.fields } From c4d753e27ea247e0cbb665b80cc9bf1ff29f9a25 Mon Sep 17 00:00:00 2001 From: Baptiste Foy Date: Fri, 12 Apr 2024 19:35:40 +0200 Subject: [PATCH 30/99] upgrade(installer): Add APM injector package installation support (#24372) * fix(errors): Clearer errors * upgrade(updater): Add injector support * fix(updater): Use privileged command to write * fix(updater): Update catalog and support ld.so.preload not existing * upgrade(updater): Add injector docker support * chore(updater): Make writing to ld.so.preload safer and remove experiment * remove catalog changes for less conflicts * fix(installer): Cleanup APM injector on setup failure * fix(updater): Remove APM injector on purge * test(installer): Add E2E test for injector installation * fix(installer): Add agent config support for apm injector & fix test * fix(installer): Add error message to helper commands and tentatively fix e2e * fix(installer): Fix e2e tests * refactor(installer): Refactor injector installation * refactor(installer): Manipulate files in go instead of string manipulation * feat(updater): Add lock to package installation commands * fix(tests): Version is not resolved anymore in docker's daemon.json * fix(installer): Allow installation of the injector before the agent * address part of the review * chore(installer): Add more tests * fix(tests): Skip some tests to be able to merge --- pkg/updater/install.go | 34 ++- pkg/updater/service/apm_inject.go | 356 ++++++++++++++++++++++ pkg/updater/service/apm_inject_test.go | 155 ++++++++++ pkg/updater/service/apm_inject_windows.go | 19 ++ pkg/updater/service/datadog_agent.go | 20 ++ pkg/updater/service/docker.go | 196 ++++++++++++ pkg/updater/service/docker_test.go | 137 +++++++++ pkg/updater/service/helper/main.go | 30 +- pkg/updater/service/systemd.go | 58 +++- pkg/updater/service/systemd_test.go | 13 +- pkg/updater/updater.go | 7 +- test/new-e2e/tests/updater/docker.go | 91 ++++++ test/new-e2e/tests/updater/linux_test.go | 141 ++++++++- 13 files changed, 1233 insertions(+), 24 deletions(-) create mode 100644 pkg/updater/service/apm_inject.go create mode 100644 pkg/updater/service/apm_inject_test.go create mode 100644 
pkg/updater/service/apm_inject_windows.go create mode 100644 pkg/updater/service/docker.go create mode 100644 pkg/updater/service/docker_test.go create mode 100644 test/new-e2e/tests/updater/docker.go diff --git a/pkg/updater/install.go b/pkg/updater/install.go index b18b000a4a354..092013a087f67 100644 --- a/pkg/updater/install.go +++ b/pkg/updater/install.go @@ -13,6 +13,7 @@ import ( "os" "path/filepath" "strings" + "sync" oci "github.com/google/go-containerregistry/pkg/v1" "github.com/google/go-containerregistry/pkg/v1/types" @@ -27,11 +28,15 @@ const ( datadogPackageConfigLayerMediaType types.MediaType = "application/vnd.datadog.package.config.layer.v1.tar+zstd" datadogPackageMaxSize = 3 << 30 // 3GiB defaultConfigsDir = "/etc" + + packageDatadogAgent = "datadog-agent" + packageAPMInjector = "datadog-apm-inject" ) type installer struct { repositories *repository.Repositories configsDir string + installLock sync.Mutex } func newInstaller(repositories *repository.Repositories) *installer { @@ -56,10 +61,17 @@ func (i *installer) installStable(pkg string, version string, image oci.Image) e if err != nil { return fmt.Errorf("could not create repository: %w", err) } - if pkg == "datadog-agent" { + + i.installLock.Lock() + defer i.installLock.Unlock() + switch pkg { + case packageDatadogAgent: return service.SetupAgentUnits() + case packageAPMInjector: + return service.SetupAPMInjector() + default: + return nil } - return nil } func (i *installer) installExperiment(pkg string, version string, image oci.Image) error { @@ -100,19 +112,25 @@ func (i *installer) uninstallExperiment(pkg string) error { } func (i *installer) startExperiment(pkg string) error { - // TODO(arthur): currently we only support the datadog-agent package - if pkg != "datadog-agent" { + i.installLock.Lock() + defer i.installLock.Unlock() + switch pkg { + case packageDatadogAgent: + return service.StartAgentExperiment() + default: return nil } - return service.StartAgentExperiment() } func (i *installer) stopExperiment(pkg string) error { - // TODO(arthur): currently we only support the datadog-agent package - if pkg != "datadog-agent" { + i.installLock.Lock() + defer i.installLock.Unlock() + switch pkg { + case packageDatadogAgent: + return service.StopAgentExperiment() + default: return nil } - return service.StopAgentExperiment() } func extractPackageLayers(image oci.Image, configDir string, packageDir string) error { diff --git a/pkg/updater/service/apm_inject.go b/pkg/updater/service/apm_inject.go new file mode 100644 index 0000000000000..4982b61c2a826 --- /dev/null +++ b/pkg/updater/service/apm_inject.go @@ -0,0 +1,356 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build !windows + +// Package service provides a way to interact with os services +package service + +import ( + "bytes" + "fmt" + "os" + "path" + "strings" + + "github.com/DataDog/datadog-agent/pkg/util/log" +) + +var ( + injectorConfigPrefix = []byte("# BEGIN LD PRELOAD CONFIG") + injectorConfigSuffix = []byte("# END LD PRELOAD CONFIG") +) + +const ( + injectorConfigTemplate = ` +apm_config: + receiver_socket: %s +use_dogstatsd: true +dogstatsd_socket: %s +` + datadogConfigPath = "/etc/datadog-agent/datadog.yaml" + ldSoPreloadPath = "/etc/ld.so.preload" +) + +// SetupAPMInjector sets up the injector at bootstrap +func SetupAPMInjector() error { + // Enforce dd-installer is in the dd-agent group + if err := setInstallerAgentGroup(); err != nil { + return err + } + + installer := &apmInjectorInstaller{ + installPath: "/opt/datadog-packages/datadog-apm-inject/stable", + } + return installer.Setup() +} + +// RemoveAPMInjector removes the APM injector +func RemoveAPMInjector() error { + installer := &apmInjectorInstaller{ + installPath: "/opt/datadog-packages/datadog-apm-inject/stable", + } + return installer.Remove() +} + +type apmInjectorInstaller struct { + installPath string +} + +// Setup sets up the APM injector +func (a *apmInjectorInstaller) Setup() error { + var err error + defer func() { + if err != nil { + removeErr := a.Remove() + if removeErr != nil { + log.Warnf("Failed to remove APM injector: %v", removeErr) + } + } + }() + if err := a.setAgentConfig(); err != nil { + return err + } + if err := a.setRunPermissions(); err != nil { + return err + } + if err := a.setLDPreloadConfig(); err != nil { + return err + } + if err := a.setDockerConfig(); err != nil { + return err + } + return nil +} + +func (a *apmInjectorInstaller) Remove() error { + if err := a.deleteAgentConfig(); err != nil { + return err + } + if err := a.deleteLDPreloadConfig(); err != nil { + return err + } + if err := a.deleteDockerConfig(); err != nil { + return err + } + return nil +} + +func (a *apmInjectorInstaller) setRunPermissions() error { + return os.Chmod(path.Join(a.installPath, "inject", "run"), 0777) +} + +// setLDPreloadConfig adds preload options on /etc/ld.so.preload, overriding existing ones +func (a *apmInjectorInstaller) setLDPreloadConfig() error { + var ldSoPreload []byte + stat, err := os.Stat(ldSoPreloadPath) + if err == nil { + ldSoPreload, err = os.ReadFile(ldSoPreloadPath) + if err != nil { + return err + } + } else if !os.IsNotExist(err) { + return err + } + + newLdSoPreload, err := a.setLDPreloadConfigContent(ldSoPreload) + if err != nil { + return err + } + if bytes.Equal(ldSoPreload, newLdSoPreload) { + // No changes needed + return nil + } + + perms := os.FileMode(0644) + if stat != nil { + perms = stat.Mode() + } + err = os.WriteFile("/tmp/ld.so.preload.tmp", newLdSoPreload, perms) + if err != nil { + return err + } + + return executeCommand(string(replaceLDPreloadCommand)) +} + +// setLDPreloadConfigContent sets the content of the LD preload configuration +func (a *apmInjectorInstaller) setLDPreloadConfigContent(ldSoPreload []byte) ([]byte, error) { + launcherPreloadPath := path.Join(a.installPath, "inject", "launcher.preload.so") + + if strings.Contains(string(ldSoPreload), launcherPreloadPath) { + // If the line of interest is already in /etc/ld.so.preload, return fast + return ldSoPreload, nil + } + + // Append the launcher preload path to the file + if len(ldSoPreload) > 0 && ldSoPreload[len(ldSoPreload)-1] != '\n' { + ldSoPreload = append(ldSoPreload, '\n') + } + 
ldSoPreload = append(ldSoPreload, []byte(launcherPreloadPath+"\n")...) + return ldSoPreload, nil +} + +// deleteLDPreloadConfig removes the preload options from /etc/ld.so.preload +func (a *apmInjectorInstaller) deleteLDPreloadConfig() error { + var ldSoPreload []byte + stat, err := os.Stat(ldSoPreloadPath) + if err == nil { + ldSoPreload, err = os.ReadFile(ldSoPreloadPath) + if err != nil { + return err + } + } else if !os.IsNotExist(err) { + return err + } else { + return nil + } + + newLdSoPreload, err := a.deleteLDPreloadConfigContent(ldSoPreload) + if err != nil { + return err + } + if bytes.Equal(ldSoPreload, newLdSoPreload) { + // No changes needed + return nil + } + + perms := os.FileMode(0644) + if stat != nil { + perms = stat.Mode() + } + err = os.WriteFile("/tmp/ld.so.preload.tmp", newLdSoPreload, perms) + if err != nil { + return err + } + + return executeCommand(string(replaceLDPreloadCommand)) +} + +// deleteLDPreloadConfigContent deletes the content of the LD preload configuration +func (a *apmInjectorInstaller) deleteLDPreloadConfigContent(ldSoPreload []byte) ([]byte, error) { + launcherPreloadPath := path.Join(a.installPath, "inject", "launcher.preload.so") + + if !strings.Contains(string(ldSoPreload), launcherPreloadPath) { + // If the line of interest isn't there, return fast + return ldSoPreload, nil + } + + // Possible configurations of the preload path, order matters + replacementsToTest := [][]byte{ + []byte(launcherPreloadPath + "\n"), + []byte("\n" + launcherPreloadPath), + []byte(launcherPreloadPath + " "), + []byte(" " + launcherPreloadPath), + } + for _, replacement := range replacementsToTest { + ldSoPreloadNew := bytes.Replace(ldSoPreload, replacement, []byte{}, 1) + if !bytes.Equal(ldSoPreloadNew, ldSoPreload) { + return ldSoPreloadNew, nil + } + } + if bytes.Equal(ldSoPreload, []byte(launcherPreloadPath)) { + // If the line is the only one in the file without newlines, return an empty file + return []byte{}, nil + } + + return nil, fmt.Errorf("failed to remove %s from %s", launcherPreloadPath, ldSoPreloadPath) +} + +// setAgentConfig adds the agent configuration for the APM injector if it is not there already +// We assume that the agent file has been created by the installer's postinst script +// +// Note: This is not safe, as it assumes there were no changes to the agent configuration made without +// restart by the user. This means that the agent can crash on restart. This is a limitation of the current +// installer system and this will be replaced by a proper experiment when available. 
This is a temporary
+// solution to allow the APM injector to be installed, and if the agent crashes, we try to detect it and
+// restore the previous configuration
+func (a *apmInjectorInstaller) setAgentConfig() (err error) {
+	err = backupAgentConfig()
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err != nil {
+			restoreErr := restoreAgentConfig()
+			if restoreErr != nil {
+				log.Warnf("Failed to restore agent config: %v", restoreErr)
+			}
+		}
+	}()
+
+	content, err := os.ReadFile(datadogConfigPath)
+	if err != nil {
+		return err
+	}
+
+	newContent := a.setAgentConfigContent(content)
+	if bytes.Equal(content, newContent) {
+		// No changes needed
+		return nil
+	}
+
+	err = os.WriteFile(datadogConfigPath, newContent, 0644)
+	if err != nil {
+		return err
+	}
+
+	err = restartTraceAgent()
+	return
+}
+
+func (a *apmInjectorInstaller) setAgentConfigContent(content []byte) []byte {
+	runPath := path.Join(a.installPath, "inject", "run")
+	apmSocketPath := path.Join(runPath, "apm.socket")
+	dsdSocketPath := path.Join(runPath, "dsd.socket")
+
+	if !bytes.Contains(content, injectorConfigPrefix) {
+		content = append(content, []byte("\n")...)
+		content = append(content, injectorConfigPrefix...)
+		content = append(content, []byte(
+			fmt.Sprintf(injectorConfigTemplate, apmSocketPath, dsdSocketPath),
+		)...)
+		content = append(content, injectorConfigSuffix...)
+		content = append(content, []byte("\n")...)
+	}
+	return content
+}
+
+// deleteAgentConfig removes the agent configuration for the APM injector
+func (a *apmInjectorInstaller) deleteAgentConfig() (err error) {
+	err = backupAgentConfig()
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err != nil {
+			restoreErr := restoreAgentConfig()
+			if restoreErr != nil {
+				log.Warnf("Failed to restore agent config: %v", restoreErr)
+			}
+		}
+	}()
+
+	content, err := os.ReadFile(datadogConfigPath)
+	if err != nil {
+		return err
+	}
+
+	newContent := a.deleteAgentConfigContent(content)
+	if bytes.Equal(content, newContent) {
+		// No changes needed
+		return nil
+	}
+
+	err = os.WriteFile(datadogConfigPath, newContent, 0644)
+	if err != nil {
+		return err
+	}
+
+	return restartTraceAgent()
+}
+
+// deleteAgentConfigContent deletes the agent configuration for the APM injector
+func (a *apmInjectorInstaller) deleteAgentConfigContent(content []byte) []byte {
+	start := bytes.Index(content, injectorConfigPrefix)
+	end := bytes.Index(content, injectorConfigSuffix) + len(injectorConfigSuffix)
+	if start == -1 || end == -1 || start >= end {
+		// Config not found
+		return content
+	}
+
+	return append(content[:start], content[end:]...)
+} + +// backupAgentConfig backs up the agent configuration +func backupAgentConfig() error { + return executeCommandStruct(privilegeCommand{ + Command: string(backupCommand), + Path: datadogConfigPath, + }) +} + +// restoreAgentConfig restores the agent configuration & restarts the agent +func restoreAgentConfig() error { + err := executeCommandStruct(privilegeCommand{ + Command: string(restoreCommand), + Path: datadogConfigPath, + }) + if err != nil { + return err + } + return restartTraceAgent() +} + +// restartTraceAgent restarts the trace agent, both stable and experimental +func restartTraceAgent() error { + if err := restartUnit("datadog-agent-trace.service"); err != nil { + return err + } + if err := restartUnit("datadog-agent-trace-exp.service"); err != nil { + return err + } + return nil +} diff --git a/pkg/updater/service/apm_inject_test.go b/pkg/updater/service/apm_inject_test.go new file mode 100644 index 0000000000000..813f800a0ee74 --- /dev/null +++ b/pkg/updater/service/apm_inject_test.go @@ -0,0 +1,155 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build !windows + +// Package service provides a way to interact with os services +package service + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSetLDPreloadConfig(t *testing.T) { + a := &apmInjectorInstaller{ + installPath: "/tmp/stable", + } + + for input, expected := range map[string]string{ + // File doesn't exist + "": "/tmp/stable/inject/launcher.preload.so\n", + // File contains unrelated entries + "/abc/def/preload.so\n": "/abc/def/preload.so\n/tmp/stable/inject/launcher.preload.so\n", + // File contains unrelated entries with no newline + "/abc/def/preload.so": "/abc/def/preload.so\n/tmp/stable/inject/launcher.preload.so\n", + } { + output, err := a.setLDPreloadConfigContent([]byte(input)) + assert.Nil(t, err) + assert.Equal(t, expected, string(output)) + } +} + +func TestRemoveLDPreloadConfig(t *testing.T) { + a := &apmInjectorInstaller{ + installPath: "/tmp/stable", + } + + for input, expected := range map[string]string{ + // File doesn't exist + "": "", + // File only contains the entry to remove + "/tmp/stable/inject/launcher.preload.so\n": "", + // File only contains the entry to remove without newline + "/tmp/stable/inject/launcher.preload.so": "", + // File contains unrelated entries + "/abc/def/preload.so\n/tmp/stable/inject/launcher.preload.so\n": "/abc/def/preload.so\n", + // File contains unrelated entries at the end + "/tmp/stable/inject/launcher.preload.so\n/def/abc/preload.so": "/def/abc/preload.so", + // File contains multiple unrelated entries + "/abc/def/preload.so\n/tmp/stable/inject/launcher.preload.so\n/def/abc/preload.so": "/abc/def/preload.so\n/def/abc/preload.so", + // File contains unrelated entries with no newline (reformatted by customer?) + "/abc/def/preload.so /tmp/stable/inject/launcher.preload.so": "/abc/def/preload.so", + // File contains unrelated entries with no newline (reformatted by customer?) + "/abc/def/preload.so /tmp/stable/inject/launcher.preload.so /def/abc/preload.so": "/abc/def/preload.so /def/abc/preload.so", + // File contains unrelated entries with no newline (reformatted by customer?) 
+ "/tmp/stable/inject/launcher.preload.so /def/abc/preload.so": "/def/abc/preload.so", + // File doesn't contain the entry to remove (removed by customer?) + "/abc/def/preload.so /def/abc/preload.so": "/abc/def/preload.so /def/abc/preload.so", + } { + output, err := a.deleteLDPreloadConfigContent([]byte(input)) + assert.Nil(t, err) + assert.Equal(t, expected, string(output)) + } + + // File is badly formatted (non-breaking space instead of space) + input := "/tmp/stable/inject/launcher.preload.so\u00a0/def/abc/preload.so" + output, err := a.deleteLDPreloadConfigContent([]byte(input)) + assert.NotNil(t, err) + assert.Equal(t, "", string(output)) +} + +func TestSetAgentConfig(t *testing.T) { + a := &apmInjectorInstaller{ + installPath: "/tmp/stable", + } + + for input, expected := range map[string]string{ + // File doesn't exist + "": ` +# BEGIN LD PRELOAD CONFIG +apm_config: + receiver_socket: /tmp/stable/inject/run/apm.socket +use_dogstatsd: true +dogstatsd_socket: /tmp/stable/inject/run/dsd.socket +# END LD PRELOAD CONFIG +`, + // File contains unrelated entries + `api_key: 000000000 +site: datad0g.com`: `api_key: 000000000 +site: datad0g.com +# BEGIN LD PRELOAD CONFIG +apm_config: + receiver_socket: /tmp/stable/inject/run/apm.socket +use_dogstatsd: true +dogstatsd_socket: /tmp/stable/inject/run/dsd.socket +# END LD PRELOAD CONFIG +`, + // File already contains the agent config + `# BEGIN LD PRELOAD CONFIG +apm_config: + receiver_socket: /tmp/stable/inject/run/apm.socket +use_dogstatsd: true +dogstatsd_socket: /tmp/stable/inject/run/dsd.socket +# END LD PRELOAD CONFIG`: `# BEGIN LD PRELOAD CONFIG +apm_config: + receiver_socket: /tmp/stable/inject/run/apm.socket +use_dogstatsd: true +dogstatsd_socket: /tmp/stable/inject/run/dsd.socket +# END LD PRELOAD CONFIG`, + } { + output := a.setAgentConfigContent([]byte(input)) + assert.Equal(t, expected, string(output)) + } +} + +func TestRemoveAgentConfig(t *testing.T) { + a := &apmInjectorInstaller{ + installPath: "/tmp/stable", + } + + for input, expected := range map[string]string{ + // File doesn't exist + "": "", + // File only contains the agent config + `# BEGIN LD PRELOAD CONFIG + apm_config: + receiver_socket: /tmp/stable/inject/run/apm.socket + use_dogstatsd: true + dogstatsd_socket: /tmp/stable/inject/run/dsd.socket + # END LD PRELOAD CONFIG`: "", + // File contains unrelated entries + `api_key: 000000000 +site: datad0g.com +# BEGIN LD PRELOAD CONFIG +apm_config: + receiver_socket: /tmp/stable/inject/run/apm.socket +use_dogstatsd: true +dogstatsd_socket: /tmp/stable/inject/run/dsd.socket +# END LD PRELOAD CONFIG +`: `api_key: 000000000 +site: datad0g.com + +`, + // File **only** contains unrelated entries somehow + `api_key: 000000000 +site: datad0g.com`: `api_key: 000000000 +site: datad0g.com`, + } { + output := a.deleteAgentConfigContent([]byte(input)) + assert.Equal(t, expected, string(output)) + } +} diff --git a/pkg/updater/service/apm_inject_windows.go b/pkg/updater/service/apm_inject_windows.go new file mode 100644 index 0000000000000..8bbb49c5c7095 --- /dev/null +++ b/pkg/updater/service/apm_inject_windows.go @@ -0,0 +1,19 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
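The TestSetAgentConfig and TestRemoveAgentConfig cases above exercise the marker-delimited block that setAgentConfigContent appends to datadog.yaml and deleteAgentConfigContent strips out again. Below is a standalone sketch of that idempotent add/remove pattern using the same BEGIN/END markers; addBlock and removeBlock are illustrative names, and the file handling that surrounds them in the patch is omitted.

package main

import (
	"bytes"
	"fmt"
)

var (
	blockPrefix = []byte("# BEGIN LD PRELOAD CONFIG")
	blockSuffix = []byte("# END LD PRELOAD CONFIG")
)

// addBlock appends the managed block once; calling it again is a no-op.
func addBlock(content, block []byte) []byte {
	if bytes.Contains(content, blockPrefix) {
		return content
	}
	content = append(content, '\n')
	content = append(content, blockPrefix...)
	content = append(content, block...)
	content = append(content, blockSuffix...)
	return append(content, '\n')
}

// removeBlock strips everything between the markers, inclusive.
func removeBlock(content []byte) []byte {
	start := bytes.Index(content, blockPrefix)
	end := bytes.Index(content, blockSuffix)
	if start == -1 || end == -1 || start >= end {
		return content // no managed block present
	}
	return append(content[:start], content[end+len(blockSuffix):]...)
}

func main() {
	cfg := []byte("api_key: 000000000\n")
	cfg = addBlock(cfg, []byte("\nuse_dogstatsd: true\n"))
	fmt.Printf("with block:\n%s\n", cfg)
	fmt.Printf("without block:\n%s\n", removeBlock(cfg))
}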
+ +//go:build windows + +// Package service provides a way to interact with os services +package service + +// SetupAPMInjector noop +func SetupAPMInjector() error { + return nil +} + +// RemoveAPMInjector noop +func RemoveAPMInjector() error { + return nil +} diff --git a/pkg/updater/service/datadog_agent.go b/pkg/updater/service/datadog_agent.go index e183f9f5f5229..8767e7a20d864 100644 --- a/pkg/updater/service/datadog_agent.go +++ b/pkg/updater/service/datadog_agent.go @@ -9,6 +9,9 @@ package service import ( + "os/exec" + "strings" + "github.com/DataDog/datadog-agent/pkg/util/installinfo" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -52,6 +55,10 @@ func SetupAgentUnits() (err error) { } }() + if err = setInstallerAgentGroup(); err != nil { + return + } + for _, unit := range stableUnits { if err = loadUnit(unit); err != nil { return @@ -132,3 +139,16 @@ func StartAgentExperiment() error { func StopAgentExperiment() error { return startUnit(agentUnit) } + +// setInstallerAgentGroup adds the dd-installer to the dd-agent group if it's not already in it +func setInstallerAgentGroup() error { + // Get groups of dd-installer + out, err := exec.Command("id", "-Gn", "dd-installer").Output() + if err != nil { + return err + } + if strings.Contains(string(out), "dd-agent") { + return nil + } + return executeCommand(string(addInstallerToAgentGroup)) +} diff --git a/pkg/updater/service/docker.go b/pkg/updater/service/docker.go new file mode 100644 index 0000000000000..c4cdb3fc0de20 --- /dev/null +++ b/pkg/updater/service/docker.go @@ -0,0 +1,196 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
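setInstallerAgentGroup above only escalates to usermod when `id -Gn dd-installer` does not already report dd-agent. A small sketch of that check in isolation; ensureGroupMembership is an illustrative name, and the setuid-helper indirection the patch uses is replaced by a direct command here.

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// ensureGroupMembership adds user to group only when `id -Gn` does not
// already list that group for the user.
func ensureGroupMembership(user, group string) error {
	out, err := exec.Command("id", "-Gn", user).Output()
	if err != nil {
		return err
	}
	for _, g := range strings.Fields(string(out)) {
		if g == group {
			return nil // already a member, nothing to do
		}
	}
	// The patch routes this through the privileged helper; a direct call is used here.
	return exec.Command("usermod", "-aG", group, user).Run()
}

func main() {
	if err := ensureGroupMembership("dd-installer", "dd-agent"); err != nil {
		fmt.Println("error:", err)
	}
}

Matching whole fields avoids the substring false positive that a plain strings.Contains check could hit, for example against a group literally named dd-agent-extra.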
+ +//go:build !windows + +// Package service provides a way to interact with os services +package service + +import ( + "bytes" + "encoding/json" + "os" + "os/exec" + "path" + + "github.com/DataDog/datadog-agent/pkg/util/log" +) + +type dockerDaemonConfig map[string]interface{} + +const ( + tmpDockerDaemonPath = "/tmp/daemon.json.tmp" + dockerDaemonPath = "/etc/docker/daemon.json" +) + +// setDockerConfig sets up the docker daemon to use the APM injector +// even if docker isn't installed, to prepare for if it is installed +// later +func (a *apmInjectorInstaller) setDockerConfig() error { + // Create docker dir if it doesn't exist + err := executeCommand(createDockerDirCommand) + if err != nil { + return err + } + + var file []byte + stat, err := os.Stat(dockerDaemonPath) + if err == nil { + // Read the existing configuration + file, err = os.ReadFile(dockerDaemonPath) + if err != nil { + return err + } + } else if !os.IsNotExist(err) { + return err + } + + dockerConfigJSON, err := a.setDockerConfigContent(file) + if err != nil { + return err + } + + // Write the new configuration to a temporary file + perms := os.FileMode(0644) + if stat != nil { + perms = stat.Mode() + } + err = os.WriteFile(tmpDockerDaemonPath, dockerConfigJSON, perms) + if err != nil { + return err + } + + // Move the temporary file to the final location + err = executeCommand(string(replaceDockerCommand)) + if err != nil { + return err + } + + return restartDocker() +} + +// setDockerConfigContent sets the content of the docker daemon configuration +func (a *apmInjectorInstaller) setDockerConfigContent(previousContent []byte) ([]byte, error) { + dockerConfig := dockerDaemonConfig{} + + if len(previousContent) > 0 { + err := json.Unmarshal(previousContent, &dockerConfig) + if err != nil { + return nil, err + } + } + + if _, ok := dockerConfig["default-runtime"]; ok { + dockerConfig["default-runtime-backup"] = dockerConfig["default-runtime"] + } + dockerConfig["default-runtime"] = "dd-shim" + runtimes, ok := dockerConfig["runtimes"].(map[string]interface{}) + if !ok { + runtimes = map[string]interface{}{} + } + runtimes["dd-shim"] = map[string]interface{}{ + "path": path.Join(a.installPath, "inject", "auto_inject_runc"), + } + dockerConfig["runtimes"] = runtimes + + dockerConfigJSON, err := json.MarshalIndent(dockerConfig, "", " ") + if err != nil { + return nil, err + } + + return dockerConfigJSON, nil +} + +// deleteDockerConfig restores the docker daemon configuration +func (a *apmInjectorInstaller) deleteDockerConfig() error { + var file []byte + stat, err := os.Stat(dockerDaemonPath) + if err == nil { + // Read the existing configuration + file, err = os.ReadFile(dockerDaemonPath) + if err != nil { + return err + } + } else if os.IsNotExist(err) { + // If the file doesn't exist, there's nothing to do + return nil + } + + dockerConfigJSON, err := a.deleteDockerConfigContent(file) + if err != nil { + return err + } + + // Write the new configuration to a temporary file + perms := os.FileMode(0644) + if stat != nil { + perms = stat.Mode() + } + err = os.WriteFile(tmpDockerDaemonPath, dockerConfigJSON, perms) + if err != nil { + return err + } + + // Move the temporary file to the final location + err = executeCommand(string(replaceDockerCommand)) + if err != nil { + return err + } + return restartDocker() +} + +// deleteDockerConfigContent restores the content of the docker daemon configuration +func (a *apmInjectorInstaller) deleteDockerConfigContent(previousContent []byte) ([]byte, error) { + dockerConfig := 
dockerDaemonConfig{} + + if len(previousContent) > 0 { + err := json.Unmarshal(previousContent, &dockerConfig) + if err != nil { + return nil, err + } + } + + if _, ok := dockerConfig["default-runtime-backup"]; ok { + dockerConfig["default-runtime"] = dockerConfig["default-runtime-backup"] + delete(dockerConfig, "default-runtime-backup") + } else { + dockerConfig["default-runtime"] = "runc" + } + runtimes, ok := dockerConfig["runtimes"].(map[string]interface{}) + if !ok { + runtimes = map[string]interface{}{} + } + delete(runtimes, "dd-shim") + dockerConfig["runtimes"] = runtimes + + dockerConfigJSON, err := json.MarshalIndent(dockerConfig, "", " ") + if err != nil { + return nil, err + } + + return dockerConfigJSON, nil +} + +// restartDocker reloads the docker daemon if it exists +func restartDocker() error { + if !isDockerInstalled() { + log.Info("updater: docker is not installed, skipping reload") + return nil + } + return executeCommand(restartDockerCommand) +} + +// isDockerInstalled checks if docker is installed on the system +func isDockerInstalled() bool { + cmd := exec.Command("which", "docker") + var outb bytes.Buffer + cmd.Stdout = &outb + err := cmd.Run() + if err != nil { + log.Warn("updater: failed to check if docker is installed, assuming it isn't: ", err) + return false + } + return len(outb.String()) != 0 +} diff --git a/pkg/updater/service/docker_test.go b/pkg/updater/service/docker_test.go new file mode 100644 index 0000000000000..912a4d680a606 --- /dev/null +++ b/pkg/updater/service/docker_test.go @@ -0,0 +1,137 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
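Both directions of the daemon.json edit above follow the same shape: unmarshal into a generic map, move default-runtime aside or back, and re-marshal with indentation. Here is a reduced sketch of the install-side transformation; enableShim is an illustrative name, and the docker_test.go cases below show the full before/after documents the real code produces.

package main

import (
	"encoding/json"
	"fmt"
)

// enableShim mirrors the install-side edit: back up any existing default
// runtime, point default-runtime at dd-shim, and register the shim runtime.
func enableShim(daemonJSON []byte, shimPath string) ([]byte, error) {
	cfg := map[string]interface{}{}
	if len(daemonJSON) > 0 {
		if err := json.Unmarshal(daemonJSON, &cfg); err != nil {
			return nil, err
		}
	}
	if prev, ok := cfg["default-runtime"]; ok {
		cfg["default-runtime-backup"] = prev
	}
	cfg["default-runtime"] = "dd-shim"
	runtimes, ok := cfg["runtimes"].(map[string]interface{})
	if !ok {
		runtimes = map[string]interface{}{}
	}
	runtimes["dd-shim"] = map[string]interface{}{"path": shimPath}
	cfg["runtimes"] = runtimes
	return json.MarshalIndent(cfg, "", "    ")
}

func main() {
	in := []byte(`{"default-runtime": "containerd", "runtimes": {"containerd": {"path": "/usr/bin/containerd"}}}`)
	out, err := enableShim(in, "/opt/datadog-packages/datadog-apm-inject/stable/inject/auto_inject_runc")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}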
+ +//go:build !windows + +// Package service provides a way to interact with os services +package service + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSetDockerConfig(t *testing.T) { + a := &apmInjectorInstaller{ + installPath: "/tmp/stable", + } + + for input, expected := range map[string]string{ + // File doesn't exist + "": `{ + "default-runtime": "dd-shim", + "runtimes": { + "dd-shim": { + "path": "/tmp/stable/inject/auto_inject_runc" + } + } +}`, + // File contains unrelated entries + `{ + "cgroup-parent": "abc", + "raw-logs": false +}`: `{ + "cgroup-parent": "abc", + "default-runtime": "dd-shim", + "raw-logs": false, + "runtimes": { + "dd-shim": { + "path": "/tmp/stable/inject/auto_inject_runc" + } + } +}`, + // File has already overridden the default runtime + `{ + "default-runtime": "containerd", + "runtimes": { + "containerd": { + "path": "/usr/bin/containerd" + } + } +}`: `{ + "default-runtime": "dd-shim", + "default-runtime-backup": "containerd", + "runtimes": { + "containerd": { + "path": "/usr/bin/containerd" + }, + "dd-shim": { + "path": "/tmp/stable/inject/auto_inject_runc" + } + } +}`, + } { + output, err := a.setDockerConfigContent([]byte(input)) + assert.Nil(t, err) + assert.Equal(t, expected, string(output)) + } +} + +func TestRemoveDockerConfig(t *testing.T) { + a := &apmInjectorInstaller{ + installPath: "/tmp/stable", + } + + for input, expected := range map[string]string{ + // Empty file, shouldn't happen but still tested + "": `{ + "default-runtime": "runc", + "runtimes": {} +}`, + // File only contains the injected content + `{ + "default-runtime": "dd-shim", + "runtimes": { + "dd-shim": { + "path": "/tmp/stable/inject/auto_inject_runc" + } + } + }`: `{ + "default-runtime": "runc", + "runtimes": {} +}`, + // File contained unrelated entries + `{ + "cgroup-parent": "abc", + "default-runtime": "dd-shim", + "raw-logs": false, + "runtimes": { + "dd-shim": { + "path": "/tmp/stable/inject/auto_inject_runc" + } + } +}`: `{ + "cgroup-parent": "abc", + "default-runtime": "runc", + "raw-logs": false, + "runtimes": {} +}`, + // File had already overridden the default runtime + `{ + "default-runtime": "dd-shim", + "default-runtime-backup": "containerd", + "runtimes": { + "containerd": { + "path": "/usr/bin/containerd" + }, + "dd-shim": { + "path": "/tmp/stable/inject/auto_inject_runc" + } + } +}`: `{ + "default-runtime": "containerd", + "runtimes": { + "containerd": { + "path": "/usr/bin/containerd" + } + } +}`, + } { + output, err := a.deleteDockerConfigContent([]byte(input)) + assert.Nil(t, err) + assert.Equal(t, expected, string(output)) + } +} diff --git a/pkg/updater/service/helper/main.go b/pkg/updater/service/helper/main.go index 37f9ac13a06c6..a20a1de7a99c1 100644 --- a/pkg/updater/service/helper/main.go +++ b/pkg/updater/service/helper/main.go @@ -8,6 +8,7 @@ package main import ( + "bytes" "encoding/json" "fmt" "log" @@ -25,6 +26,8 @@ var ( installPath string systemdPath = "/lib/systemd/system" // todo load it at build time from omnibus pkgDir = "/opt/datadog-packages" + agentDir = "/etc/datadog-agent" + dockerDir = "/etc/docker" testSkipUID = "" ) @@ -36,6 +39,7 @@ type privilegeCommand struct { Command string `json:"command,omitempty"` Unit string `json:"unit,omitempty"` Path string `json:"path,omitempty"` + Content string `json:"content,omitempty"` } func isValidUnitChar(c rune) bool { @@ -66,6 +70,16 @@ func buildCommand(inputCommand privilegeCommand) (*exec.Cmd, error) { return exec.Command("ln", "-sf", 
"/opt/datadog-packages/datadog-agent/stable/bin/agent/agent", "/usr/bin/datadog-agent"), nil case "rm-agent-symlink": return exec.Command("rm", "-f", "/usr/bin/datadog-agent"), nil + case "create-docker-dir": + return exec.Command("mkdir", "-p", "/etc/docker"), nil + case "replace-docker": + return exec.Command("mv", "/tmp/daemon.json.tmp", "/etc/docker/daemon.json"), nil + case "restart-docker": + return exec.Command("systemctl", "restart", "docker"), nil + case "replace-ld-preload": + return exec.Command("mv", "/tmp/ld.so.preload.tmp", "/etc/ld.so.preload"), nil + case "add-installer-to-agent-group": + return exec.Command("usermod", "-aG", "dd-agent", "dd-installer"), nil default: return nil, fmt.Errorf("invalid command") } @@ -99,7 +113,7 @@ func buildPathCommand(inputCommand privilegeCommand) (*exec.Cmd, error) { if absPath != path || err != nil { return nil, fmt.Errorf("invalid path") } - if !strings.HasPrefix(path, pkgDir) { + if !strings.HasPrefix(path, pkgDir) && !strings.HasPrefix(path, agentDir) { return nil, fmt.Errorf("invalid path") } switch inputCommand.Command { @@ -107,6 +121,10 @@ func buildPathCommand(inputCommand privilegeCommand) (*exec.Cmd, error) { return exec.Command("chown", "-R", "dd-agent:dd-agent", path), nil case "rm": return exec.Command("rm", "-rf", path), nil + case "backup-file": + return exec.Command("cp", "-f", path, path+".bak"), nil + case "restore-file": + return exec.Command("mv", path+".bak", path), nil default: return nil, fmt.Errorf("invalid command") } @@ -121,7 +139,7 @@ func executeCommand() error { var pc privilegeCommand err := json.Unmarshal([]byte(inputCommand), &pc) if err != nil { - return fmt.Errorf("decoding command") + return fmt.Errorf("decoding command %s", inputCommand) } currentUser := syscall.Getuid() @@ -150,8 +168,14 @@ func executeCommand() error { }() } + commandErr := new(bytes.Buffer) + command.Stderr = commandErr log.Printf("Running command: %s", command.String()) - return command.Run() + err = command.Run() + if err != nil { + return fmt.Errorf("running command (%s): %s", err.Error(), commandErr.String()) + } + return nil } func main() { diff --git a/pkg/updater/service/systemd.go b/pkg/updater/service/systemd.go index 21f70d94b0fef..2f384b010ad2e 100644 --- a/pkg/updater/service/systemd.go +++ b/pkg/updater/service/systemd.go @@ -10,25 +10,58 @@ package service import ( "encoding/json" + "os" + "path" + + "github.com/DataDog/datadog-agent/pkg/util/log" ) type unitCommand string +var ( + systemdPath = "/lib/systemd/system" // todo load it at build time from omnibus +) + const ( - startCommand unitCommand = "start" - stopCommand unitCommand = "stop" - enableCommand unitCommand = "enable" - disableCommand unitCommand = "disable" - loadCommand unitCommand = "load-unit" - removeCommand unitCommand = "remove-unit" - systemdReloadCommand = `{"command":"systemd-reload"}` - adminExecutor = "datadog-updater-admin.service" + startCommand unitCommand = "start" + stopCommand unitCommand = "stop" + enableCommand unitCommand = "enable" + disableCommand unitCommand = "disable" + loadCommand unitCommand = "load-unit" + removeCommand unitCommand = "remove-unit" + addInstallerToAgentGroup unitCommand = "add-installer-to-agent-group" + backupCommand unitCommand = `backup-file` + restoreCommand unitCommand = `restore-file` + replaceDockerCommand = `{"command":"replace-docker"}` + restartDockerCommand = `{"command":"restart-docker"}` + createDockerDirCommand = `{"command":"create-docker-dir"}` + replaceLDPreloadCommand = 
`{"command":"replace-ld-preload"}` + systemdReloadCommand = `{"command":"systemd-reload"}` + adminExecutor = "datadog-updater-admin.service" ) type privilegeCommand struct { Command string `json:"command,omitempty"` Unit string `json:"unit,omitempty"` Path string `json:"path,omitempty"` + Content string `json:"content,omitempty"` +} + +// restartUnit restarts a systemd unit +func restartUnit(unit string) error { + // check that the unit exists first + if _, err := os.Stat(path.Join(systemdPath, unit)); os.IsNotExist(err) { + log.Infof("Unit %s does not exist, skipping restart", unit) + return nil + } + + if err := stopUnit(unit); err != nil { + return err + } + if err := startUnit(unit); err != nil { + return err + } + return nil } func stopUnit(unit string) error { @@ -68,3 +101,12 @@ func wrapUnitCommand(command unitCommand, unit string) string { } return string(rawJSON) } + +func executeCommandStruct(command privilegeCommand) error { + rawJSON, err := json.Marshal(command) + if err != nil { + return err + } + privilegeCommandJSON := string(rawJSON) + return executeCommand(privilegeCommandJSON) +} diff --git a/pkg/updater/service/systemd_test.go b/pkg/updater/service/systemd_test.go index 85f48151561e4..51212f6caa015 100644 --- a/pkg/updater/service/systemd_test.go +++ b/pkg/updater/service/systemd_test.go @@ -26,8 +26,8 @@ func TestInvalidCommands(t *testing.T) { // assert wrong commands for input, expected := range map[string]string{ // fail assert_command characters assertion - ";": "error: decoding command\n", - "&": "error: decoding command\n", + ";": "error: decoding command ;\n", + "&": "error: decoding command &\n", `{"command":"start", "unit":"does-not-exist"}`: "error: invalid unit\n", `{"command":"start", "unit":"datadog-//"}`: "error: invalid unit\n", `{"command":"does-not-exist", "unit":"datadog-"}`: "error: invalid command\n", @@ -55,4 +55,13 @@ func TestAssertWorkingCommands(t *testing.T) { assert.Equal(t, successErr, removeUnit("datadog-agent").Error()) assert.Equal(t, successErr, createAgentSymlink().Error()) assert.Equal(t, successErr, rmAgentSymlink().Error()) + assert.Equal(t, successErr, backupAgentConfig().Error()) + assert.Equal(t, successErr, restoreAgentConfig().Error()) + + a := &apmInjectorInstaller{ + installPath: "/tmp/stable", + } + assert.Equal(t, successErr, a.setLDPreloadConfig().Error()) + assert.Equal(t, successErr, a.setAgentConfig().Error()) + assert.Equal(t, successErr, a.setDockerConfig().Error()) } diff --git a/pkg/updater/updater.go b/pkg/updater/updater.go index 76ca92b255d31..766952a8ab57b 100644 --- a/pkg/updater/updater.go +++ b/pkg/updater/updater.go @@ -109,6 +109,9 @@ func Purge() { func purge(locksPath, repositoryPath string) { service.RemoveAgentUnits() + if err := service.RemoveAPMInjector(); err != nil { + log.Warnf("updater: could not remove APM injector: %v", err) + } cleanDir(locksPath, os.RemoveAll) cleanDir(repositoryPath, service.RemoveAll) } @@ -220,7 +223,7 @@ func (u *updaterImpl) BootstrapDefault(ctx context.Context, pkg string) (err err stablePackage, ok := u.catalog.getDefaultPackage(u.bootstrapVersions, pkg, runtime.GOARCH, runtime.GOOS) if !ok { - return fmt.Errorf("could not get default package %s for %s, %s", pkg, runtime.GOARCH, runtime.GOOS) + return fmt.Errorf("could not get default package '%s' for arch '%s' and platform '%s'", pkg, runtime.GOARCH, runtime.GOOS) } return u.boostrapPackage(ctx, stablePackage.URL, stablePackage.Name, stablePackage.Version) } @@ -236,7 +239,7 @@ func (u *updaterImpl) 
BootstrapVersion(ctx context.Context, pkg string, version stablePackage, ok := u.catalog.getPackage(pkg, version, runtime.GOARCH, runtime.GOOS) if !ok { - return fmt.Errorf("could not get package %s version %s for %s, %s", pkg, version, runtime.GOARCH, runtime.GOOS) + return fmt.Errorf("could not get package '%s' version '%s' for arch '%s' and platform '%s'", pkg, version, runtime.GOARCH, runtime.GOOS) } return u.boostrapPackage(ctx, stablePackage.URL, stablePackage.Name, stablePackage.Version) } diff --git a/test/new-e2e/tests/updater/docker.go b/test/new-e2e/tests/updater/docker.go new file mode 100644 index 0000000000000..3762f53f589d4 --- /dev/null +++ b/test/new-e2e/tests/updater/docker.go @@ -0,0 +1,91 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package updater contains tests for the updater package +package updater + +import ( + "testing" + "time" + + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/components" + "github.com/DataDog/test-infra-definitions/components/os" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// installDocker installs docker on the host +func installDocker(distro os.Descriptor, t *testing.T, host *components.RemoteHost) { + switch distro { + case os.UbuntuDefault: + _, err := host.WriteFile("/tmp/install-docker.sh", []byte(` +sudo apt-get update +sudo apt-get install ca-certificates curl +sudo install -m 0755 -d /etc/apt/keyrings +sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc +sudo chmod a+r /etc/apt/keyrings/docker.asc +echo \ + "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \ + $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \ + sudo tee /etc/apt/sources.list.d/docker.list > /dev/null +sudo apt-get update +sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin + `)) + require.Nil(t, err) + host.MustExecute(`sudo chmod +x /tmp/install-docker.sh`) + host.MustExecute(`sudo /tmp/install-docker.sh`) + err = host.Remove("/tmp/install-docker.sh") + require.Nil(t, err) + case os.DebianDefault: + _, err := host.WriteFile("/tmp/install-docker.sh", []byte(` +sudo apt-get update +sudo apt-get install ca-certificates curl +sudo install -m 0755 -d /etc/apt/keyrings +sudo curl -fsSL https://download.docker.com/linux/debian/gpg -o /etc/apt/keyrings/docker.asc +sudo chmod a+r /etc/apt/keyrings/docker.asc + +# Add the repository to Apt sources: +echo \ + "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/debian \ + $(. 
/etc/os-release && echo "$VERSION_CODENAME") stable" | \ + sudo tee /etc/apt/sources.list.d/docker.list > /dev/null +sudo apt-get update +sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin + `)) + require.Nil(t, err) + host.MustExecute(`sudo chmod +x /tmp/install-docker.sh`) + host.MustExecute(`sudo /tmp/install-docker.sh`) + err = host.Remove("/tmp/install-docker.sh") + require.Nil(t, err) + case os.CentOSDefault, os.RedHatDefault: + _, err := host.WriteFile("/tmp/install-docker.sh", []byte(` +sudo yum install -y yum-utils +sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo +sudo yum install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin +sudo systemctl start docker + `)) + require.Nil(t, err) + host.MustExecute(`sudo chmod +x /tmp/install-docker.sh`) + host.MustExecute(`sudo /tmp/install-docker.sh`) + err = host.Remove("/tmp/install-docker.sh") + require.Nil(t, err) + default: + t.Fatalf("unsupported distro: %s", distro.String()) + } +} + +// launchJavaDockerContainer launches a small Java HTTP server in a docker container +// and make a call to it +func launchJavaDockerContainer(t *testing.T, host *components.RemoteHost) { + host.MustExecute(`sudo docker run -d -p8887:8888 baptistefoy702/message-server:latest`) + // for i := 0; i < 10; i++ { + assert.Eventually(t, + func() bool { + _, err := host.Execute(`curl -m 1 localhost:8887/messages`) + return err == nil + }, 10*time.Second, 100*time.Millisecond, + ) + // } +} diff --git a/test/new-e2e/tests/updater/linux_test.go b/test/new-e2e/tests/updater/linux_test.go index 4d49c168ef955..59280eaf1f0f7 100644 --- a/test/new-e2e/tests/updater/linux_test.go +++ b/test/new-e2e/tests/updater/linux_test.go @@ -12,6 +12,7 @@ import ( "regexp" "strings" "testing" + "time" "github.com/DataDog/test-infra-definitions/components/os" "github.com/DataDog/test-infra-definitions/scenarios/aws/ec2" @@ -37,12 +38,14 @@ const ( type vmUpdaterSuite struct { e2e.BaseSuite[environments.Host] packageManager string + distro os.Descriptor + arch os.Architecture } func runTest(t *testing.T, pkgManager string, arch os.Architecture, distro os.Descriptor) { reg := regexp.MustCompile(`[^a-zA-Z0-9_\-.]`) testName := reg.ReplaceAllString(distro.String()+"-"+string(arch), "_") - e2e.Run(t, &vmUpdaterSuite{packageManager: pkgManager}, e2e.WithProvisioner(awshost.ProvisionerNoFakeIntake( + e2e.Run(t, &vmUpdaterSuite{packageManager: pkgManager, distro: distro, arch: arch}, e2e.WithProvisioner(awshost.ProvisionerNoFakeIntake( awshost.WithUpdater(), awshost.WithEC2InstanceOptions(ec2.WithOSArch(distro, arch)), )), @@ -202,6 +205,142 @@ func (v *vmUpdaterSuite) TestPurgeAndInstallAgent() { } } +func (v *vmUpdaterSuite) TestPurgeAndInstallAPMInjector() { + // Temporarily disable CentOS & Redhat, as there is a bug in the APM injector + if v.distro == os.CentOSDefault || v.distro == os.RedHatDefault { + v.T().Skip("APM injector not available for CentOS or RedHat yet") + } + if v.distro == os.DebianDefault || v.distro == os.UbuntuDefault && v.arch == os.AMD64Arch { + // TODO (baptiste): Fix test + v.T().Skip("Test has been temporarily disabled") + } + + host := v.Env().RemoteHost + + /////////////////// + // Setup machine // + /////////////////// + + host.MustExecute(fmt.Sprintf("sudo %v/bin/installer/installer purge", bootUpdaterDir)) + // Install docker + installDocker(v.distro, v.T(), host) + defer func() { + // Best effort to stop any running container 
at the end of the test + host.Execute(`sudo docker ps -aq | xargs sudo docker stop | xargs sudo docker rm`) + }() + + ///////////////////////// + // Check initial state // + ///////////////////////// + + // packages dir exists; but there are no packages installed + host.MustExecute(`test -d /opt/datadog-packages`) + _, err := host.Execute(`test -d /opt/datadog-packages/datadog-apm-inject`) + require.NotNil(v.T(), err) + _, err = host.Execute(`test -d /opt/datadog-packages/datadog-agent`) + require.NotNil(v.T(), err) + _, err = host.Execute(`test -d /opt/datadog-packages/datadog-apm-library-java`) + require.NotNil(v.T(), err) + + // /etc/ld.so.preload does not contain the injector + _, err = host.Execute(`grep "/opt/datadog-packages/datadog-apm-inject" /etc/ld.so.preload`) + require.NotNil(v.T(), err) + + // docker daemon does not contain the injector + _, err = host.Execute(`grep "/opt/datadog-packages/datadog-apm-inject" /etc/docker/daemon.json`) + require.NotNil(v.T(), err) + + //////////////////////// + // Bootstrap packages // + //////////////////////// + + host.MustExecute(fmt.Sprintf(`sudo %v/bin/installer/installer bootstrap --url "oci://docker.io/datadog/agent-package-dev:7.54.0-devel.git.247.f92fbc1.pipeline.31778392-1"`, bootUpdaterDir)) + host.MustExecute(fmt.Sprintf(`sudo %v/bin/installer/installer bootstrap --url "oci://docker.io/datadog/apm-library-java-package-dev:1.32.0-SNAPSHOT-8708864e8e-pipeline.30373268.beta.8708864e-1"`, bootUpdaterDir)) + host.MustExecute(fmt.Sprintf(`sudo %v/bin/installer/installer bootstrap --url "oci://docker.io/datadog/apm-inject-package-dev:0.12.3-dev.bddec85.glci481808135.g8acdc698-1"`, bootUpdaterDir)) + + //////////////////////////////// + // Check post-bootstrap state // + //////////////////////////////// + + // assert packages dir exist + host.MustExecute(`test -L /opt/datadog-packages/datadog-agent/stable`) + host.MustExecute(`test -L /opt/datadog-packages/datadog-apm-library-java/stable`) + host.MustExecute(`test -L /opt/datadog-packages/datadog-apm-inject/stable`) + + // assert /etc/ld.so.preload contains the injector + res, err := host.Execute(`grep "/opt/datadog-packages/datadog-apm-inject" /etc/ld.so.preload`) + require.Nil(v.T(), err) + require.Equal(v.T(), "/opt/datadog-packages/datadog-apm-inject/stable/inject/launcher.preload.so\n", res) + + // assert docker daemon contains the injector (removing blank spaces for easier comparison) + res, err = host.Execute(`grep "/opt/datadog-packages/datadog-apm-inject" /etc/docker/daemon.json | sed -re 's/^[[:blank:]]+|[[:blank:]]+$//g' -e 's/[[:blank:]]+/ /g'`) + require.Nil(v.T(), err) + require.Equal(v.T(), "\"path\": \"/opt/datadog-packages/datadog-apm-inject/stable/inject/auto_inject_runc\"\n", res) + + // assert agent config has been changed + raw, err := host.ReadFile("/etc/datadog-agent/datadog.yaml") + require.Nil(v.T(), err) + require.True(v.T(), strings.Contains(string(raw), "# BEGIN LD PRELOAD CONFIG"), "missing LD_PRELOAD config, config:\n%s", string(raw)) + + // assert agent is running + host.MustExecute("sudo systemctl status datadog-agent.service") + + _, err = host.Execute("sudo systemctl status datadog-agent-trace.service") + require.Nil(v.T(), err) + + // assert required files exist + requiredFiles := []string{ + "auto_inject_runc", + "launcher.preload.so", + "ld.so.preload", + "musl-launcher.preload.so", + "process", + } + for _, file := range requiredFiles { + host.MustExecute(fmt.Sprintf("test -f /opt/datadog-packages/datadog-apm-inject/stable/inject/%s", file)) + } + + // 
assert file ownerships + injectorDir := "/opt/datadog-packages/datadog-apm-inject" + require.Equal(v.T(), "dd-installer\n", host.MustExecute(`stat -c "%U" `+injectorDir)) + require.Equal(v.T(), "dd-installer\n", host.MustExecute(`stat -c "%G" `+injectorDir)) + require.Equal(v.T(), "drwxr-xr-x\n", host.MustExecute(`stat -c "%A" `+injectorDir)) + require.Equal(v.T(), "1\n", host.MustExecute(`sudo ls -l /opt/datadog-packages/datadog-apm-inject | awk '$9 != "stable" && $3 == "dd-installer" && $4 == "dd-installer"' | wc -l`)) + + ///////////////////////////////////// + // Check injection with a real app // + ///////////////////////////////////// + + launchJavaDockerContainer(v.T(), host) + + // check "Dropping Payload due to non-retryable error" in trace agent logs + // as we don't have an API key the payloads can't be flushed successfully, + // but this log indicates that the trace agent managed to receive the payload + require.Eventually(v.T(), func() bool { + _, err := host.Execute(`cat /var/log/datadog/trace-agent.log | grep "Dropping Payload due to non-retryable error"`) + return err == nil + }, 30*time.Second, 100*time.Millisecond) + + /////////////////////// + // Check purge state // + /////////////////////// + + host.MustExecute(fmt.Sprintf("sudo %v/bin/installer/installer purge", bootUpdaterDir)) + + _, err = host.Execute(`test -d /opt/datadog-packages/datadog-apm-inject`) + require.NotNil(v.T(), err) + _, err = host.Execute(`test -d /opt/datadog-packages/datadog-agent`) + require.NotNil(v.T(), err) + _, err = host.Execute(`test -d /opt/datadog-packages/datadog-apm-library-java`) + require.NotNil(v.T(), err) + _, err = host.Execute(`grep "/opt/datadog-packages/datadog-apm-inject" /etc/ld.so.preload`) + require.NotNil(v.T(), err) + _, err = host.Execute(`grep "/opt/datadog-packages/datadog-apm-inject" /etc/docker/daemon.json`) + require.NotNil(v.T(), err) + _, err = host.Execute(`test -f /etc/docker/daemon.json.bak`) + require.NotNil(v.T(), err) +} + func assertInstallMethod(v *vmUpdaterSuite, t *testing.T, host *components.RemoteHost) { rawYaml, err := host.ReadFile(filepath.Join(confDir, "install_info")) assert.Nil(t, err) From 1f33b617699c27397ea2b6f668c686f85d83f9c6 Mon Sep 17 00:00:00 2001 From: Dylan Yang Date: Fri, 12 Apr 2024 16:03:46 -0400 Subject: [PATCH 31/99] [SVLS-4142] Create a Lambda span on timeouts (#21481) * create a Lambda span on timeouts * don't create a cold start span when the runtime restarts during timeouts * fix linting * fix test * lint: rename name variables * lint again * small fixes * refactor timeout span logic * add mutexes * fix span completed check * revert refactor * remove cold start span changes * use mutex over rwmutex * test routes * add comment + update tests * test endExecutionSpan * add serverless.go test * add test /hello for route * only set span incomplete when /startInvocation has been hit * time out -> timeout Co-authored-by: Duncan Harvey <35278470+duncanpharvey@users.noreply.github.com> --------- Co-authored-by: Duncan Harvey <35278470+duncanpharvey@users.noreply.github.com> --- cmd/serverless/main.go | 2 +- pkg/serverless/daemon/daemon.go | 30 +++++ pkg/serverless/daemon/routes.go | 4 + pkg/serverless/daemon/routes_test.go | 54 ++++++++ .../invocationlifecycle/invocation_details.go | 1 + .../invocationlifecycle/lifecycle.go | 53 ++++---- .../invocationlifecycle/lifecycle_test.go | 117 ++++++++++++++++++ pkg/serverless/invocationlifecycle/trace.go | 43 +++++-- .../invocationlifecycle/trace_test.go | 48 +++++++ pkg/serverless/serverless.go 
| 22 ++++ pkg/serverless/serverless_test.go | 40 ++++++ 11 files changed, 379 insertions(+), 35 deletions(-) diff --git a/cmd/serverless/main.go b/cmd/serverless/main.go index b974114acbe6e..dd13654bc35be 100644 --- a/cmd/serverless/main.go +++ b/cmd/serverless/main.go @@ -287,7 +287,7 @@ func runAgent() { ExtraTags: serverlessDaemon.ExtraTags, Demux: serverlessDaemon.MetricAgent.Demux, ProcessTrace: ta.Process, - DetectLambdaLibrary: func() bool { return serverlessDaemon.LambdaLibraryDetected }, + DetectLambdaLibrary: serverlessDaemon.IsLambdaLibraryDetected, InferredSpansEnabled: inferredspan.IsInferredSpansEnabled(), } diff --git a/pkg/serverless/daemon/daemon.go b/pkg/serverless/daemon/daemon.go index 21386b9653449..58bc1ac85190e 100644 --- a/pkg/serverless/daemon/daemon.go +++ b/pkg/serverless/daemon/daemon.go @@ -66,6 +66,15 @@ type Daemon struct { // LambdaLibraryDetected represents whether the Datadog Lambda Library was detected in the environment LambdaLibraryDetected bool + // LambdaLibraryStateLock keeps track of whether the Datadog Lambda Library was detected in the environment + LambdaLibraryStateLock sync.Mutex + + // executionSpanIncomplete indicates whether the Lambda span has been completed by the Extension + executionSpanIncomplete bool + + // ExecutionSpanStateLock keeps track of whether the serverless Invocation routes have been hit to complete the execution span + ExecutionSpanStateLock sync.Mutex + // runtimeStateMutex is used to ensure that modifying the state of the runtime is thread-safe runtimeStateMutex sync.Mutex @@ -435,3 +444,24 @@ func (d *Daemon) setTraceTags(tagMap map[string]string) bool { } return false } + +// IsLambdaLibraryDetected returns if the Lambda Library is in use +func (d *Daemon) IsLambdaLibraryDetected() bool { + d.LambdaLibraryStateLock.Lock() + defer d.LambdaLibraryStateLock.Unlock() + return d.LambdaLibraryDetected +} + +// IsExecutionSpanIncomplete checks if the Lambda execution span was finished +func (d *Daemon) IsExecutionSpanIncomplete() bool { + d.ExecutionSpanStateLock.Lock() + defer d.ExecutionSpanStateLock.Unlock() + return d.executionSpanIncomplete +} + +// SetExecutionSpanIncomplete keeps track of whether the Extension completed the Lambda execution span +func (d *Daemon) SetExecutionSpanIncomplete(spanIncomplete bool) { + d.ExecutionSpanStateLock.Lock() + defer d.ExecutionSpanStateLock.Unlock() + d.executionSpanIncomplete = spanIncomplete +} diff --git a/pkg/serverless/daemon/routes.go b/pkg/serverless/daemon/routes.go index 1b2379d8e1822..93e113782dbb8 100644 --- a/pkg/serverless/daemon/routes.go +++ b/pkg/serverless/daemon/routes.go @@ -26,6 +26,8 @@ type Hello struct { //nolint:revive // TODO(SERV) Fix revive linter func (h *Hello) ServeHTTP(w http.ResponseWriter, r *http.Request) { log.Debug("Hit on the serverless.Hello route.") + h.daemon.LambdaLibraryStateLock.Lock() + defer h.daemon.LambdaLibraryStateLock.Unlock() h.daemon.LambdaLibraryDetected = true } @@ -53,6 +55,7 @@ type StartInvocation struct { func (s *StartInvocation) ServeHTTP(w http.ResponseWriter, r *http.Request) { log.Debug("Hit on the serverless.StartInvocation route.") + s.daemon.SetExecutionSpanIncomplete(true) startTime := time.Now() reqBody, err := io.ReadAll(r.Body) if err != nil { @@ -86,6 +89,7 @@ type EndInvocation struct { func (e *EndInvocation) ServeHTTP(w http.ResponseWriter, r *http.Request) { log.Debug("Hit on the serverless.EndInvocation route.") + e.daemon.SetExecutionSpanIncomplete(false) endTime := time.Now() ecs := 
e.daemon.ExecutionContext.GetCurrentState() coldStartTags := e.daemon.ExecutionContext.GetColdStartTagsForRequestID(ecs.LastRequestID) diff --git a/pkg/serverless/daemon/routes_test.go b/pkg/serverless/daemon/routes_test.go index eab3e09e6be02..0cdae0c594057 100644 --- a/pkg/serverless/daemon/routes_test.go +++ b/pkg/serverless/daemon/routes_test.go @@ -161,6 +161,30 @@ func TestTraceContext(t *testing.T) { } } +func TestHello(t *testing.T) { + assert := assert.New(t) + + port := testutil.FreeTCPPort(t) + d := StartDaemon(fmt.Sprintf("127.0.0.1:%d", port)) + time.Sleep(100 * time.Millisecond) + defer d.Stop() + d.InvocationProcessor = &invocationlifecycle.LifecycleProcessor{ + ExtraTags: d.ExtraTags, + Demux: nil, + ProcessTrace: nil, + DetectLambdaLibrary: d.IsLambdaLibraryDetected, + } + client := &http.Client{} + body := bytes.NewBuffer([]byte(`{}`)) + request, err := http.NewRequest(http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/lambda/hello", port), body) + assert.Nil(err) + assert.False(d.IsLambdaLibraryDetected()) + response, err := client.Do(request) + assert.Nil(err) + response.Body.Close() + assert.True(d.IsLambdaLibraryDetected()) +} + func TestStartEndInvocationSpanParenting(t *testing.T) { port := testutil.FreeTCPPort(t) d := StartDaemon(fmt.Sprintf("127.0.0.1:%d", port)) @@ -332,6 +356,36 @@ func TestStartEndInvocationSpanParenting(t *testing.T) { } } +func TestStartEndInvocationIsExecutionSpanIncomplete(t *testing.T) { + assert := assert.New(t) + port := testutil.FreeTCPPort(t) + d := StartDaemon(fmt.Sprintf("127.0.0.1:%d", port)) + time.Sleep(100 * time.Millisecond) + defer d.Stop() + + m := &mockLifecycleProcessor{} + d.InvocationProcessor = m + + client := &http.Client{} + body := bytes.NewBuffer([]byte(`{"key": "value"}`)) + startReq, err := http.NewRequest(http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/lambda/start-invocation", port), body) + assert.Nil(err) + startResp, err := client.Do(startReq) + assert.Nil(err) + startResp.Body.Close() + assert.True(m.OnInvokeStartCalled) + assert.True(d.IsExecutionSpanIncomplete()) + + body = bytes.NewBuffer([]byte(`{}`)) + endReq, err := http.NewRequest(http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/lambda/end-invocation", port), body) + assert.Nil(err) + endResp, err := client.Do(endReq) + assert.Nil(err) + endResp.Body.Close() + assert.True(m.OnInvokeEndCalled) + assert.False(d.IsExecutionSpanIncomplete()) +} + // Helper function for reading test file func getEventFromFile(filename string) string { event, err := os.ReadFile("../trace/testdata/event_samples/" + filename) diff --git a/pkg/serverless/invocationlifecycle/invocation_details.go b/pkg/serverless/invocationlifecycle/invocation_details.go index bd0e285f8d377..0ad7d0a98b8ea 100644 --- a/pkg/serverless/invocationlifecycle/invocation_details.go +++ b/pkg/serverless/invocationlifecycle/invocation_details.go @@ -27,6 +27,7 @@ type InvocationStartDetails struct { type InvocationEndDetails struct { EndTime time.Time IsError bool + IsTimeout bool RequestID string ResponseRawPayload []byte ColdStart bool diff --git a/pkg/serverless/invocationlifecycle/lifecycle.go b/pkg/serverless/invocationlifecycle/lifecycle.go index d8c470b187db5..90e931767cef1 100644 --- a/pkg/serverless/invocationlifecycle/lifecycle.go +++ b/pkg/serverless/invocationlifecycle/lifecycle.go @@ -281,32 +281,14 @@ func (lp *LifecycleProcessor) OnInvokeEnd(endDetails *InvocationEndDetails) { spans = append(spans, span) if lp.InferredSpansEnabled { - log.Debug("[lifecycle] Attempting to complete the 
inferred span") - log.Debugf("[lifecycle] Inferred span context: %+v", lp.GetInferredSpan().Span) - if lp.GetInferredSpan().Span.Start != 0 { - span0, span1 := lp.requestHandler.inferredSpans[0], lp.requestHandler.inferredSpans[1] - if span1 != nil { - log.Debug("[lifecycle] Completing a secondary inferred span") - lp.setParentIDForMultipleInferredSpans() - span1.AddTagToInferredSpan("http.status_code", statusCode) - span1.AddTagToInferredSpan("peer.service", lp.GetServiceName()) - span := lp.completeInferredSpan(span1, lp.getInferredSpanStart(), endDetails.IsError) - spans = append(spans, span) - log.Debug("[lifecycle] The secondary inferred span attributes are %v", lp.requestHandler.inferredSpans[1]) - } - span0.AddTagToInferredSpan("http.status_code", statusCode) - span0.AddTagToInferredSpan("peer.service", lp.GetServiceName()) - span := lp.completeInferredSpan(span0, endDetails.EndTime, endDetails.IsError) - spans = append(spans, span) - log.Debugf("[lifecycle] The inferred span attributes are: %v", lp.GetInferredSpan()) - } else { - log.Debug("[lifecyle] Failed to complete inferred span due to a missing start time. Please check that the event payload was received with the appropriate data") - } + inferredSpans := lp.endInferredSpan(statusCode, endDetails.EndTime, endDetails.IsError) + spans = append(spans, inferredSpans...) } lp.processTrace(spans) } - if endDetails.IsError { + // We don't submit an error metric on timeouts since it should have already been submitted when the Extension receives a SHUTDOWN event + if endDetails.IsError && !endDetails.IsTimeout { serverlessMetrics.SendErrorsEnhancedMetric( lp.ExtraTags.Tags, endDetails.EndTime, lp.Demux, ) @@ -385,3 +367,30 @@ func (lp *LifecycleProcessor) setParentIDForMultipleInferredSpans() { lp.requestHandler.inferredSpans[1].Span.ParentID = lp.requestHandler.inferredSpans[0].Span.ParentID lp.requestHandler.inferredSpans[0].Span.ParentID = lp.requestHandler.inferredSpans[1].Span.SpanID } + +// endInferredSpan attempts to complete any inferred spans and send them to intake +func (lp *LifecycleProcessor) endInferredSpan(statusCode string, endTime time.Time, isError bool) []*pb.Span { + spans := make([]*pb.Span, 0, 2) + log.Debug("[lifecycle] Attempting to complete the inferred span") + log.Debugf("[lifecycle] Inferred span context: %+v", lp.GetInferredSpan().Span) + if lp.GetInferredSpan().Span.Start != 0 { + span0, span1 := lp.requestHandler.inferredSpans[0], lp.requestHandler.inferredSpans[1] + if span1 != nil { + log.Debug("[lifecycle] Completing a secondary inferred span") + lp.setParentIDForMultipleInferredSpans() + span1.AddTagToInferredSpan("http.status_code", statusCode) + span1.AddTagToInferredSpan("peer.service", lp.GetServiceName()) + span := lp.completeInferredSpan(span1, lp.getInferredSpanStart(), isError) + spans = append(spans, span) + log.Debug("[lifecycle] The secondary inferred span attributes are %v", lp.requestHandler.inferredSpans[1]) + } + span0.AddTagToInferredSpan("http.status_code", statusCode) + span0.AddTagToInferredSpan("peer.service", lp.GetServiceName()) + span := lp.completeInferredSpan(span0, endTime, isError) + spans = append(spans, span) + log.Debugf("[lifecycle] The inferred span attributes are: %v", lp.GetInferredSpan()) + } else { + log.Debug("[lifecyle] Failed to complete inferred span due to a missing start time. 
Please check that the event payload was received with the appropriate data") + } + return spans +} diff --git a/pkg/serverless/invocationlifecycle/lifecycle_test.go b/pkg/serverless/invocationlifecycle/lifecycle_test.go index e33d574035dd7..b7ee5aaa3057d 100644 --- a/pkg/serverless/invocationlifecycle/lifecycle_test.go +++ b/pkg/serverless/invocationlifecycle/lifecycle_test.go @@ -379,6 +379,123 @@ func TestCompleteInferredSpanWithOutStartTime(t *testing.T) { completedInferredSpan := tracePayload.TracerPayload.Chunks[0].Spans[0] assert.Equal(t, startInvocationTime.UnixNano(), completedInferredSpan.Start) } + +func TestTimeoutExecutionSpan(t *testing.T) { + t.Setenv(functionNameEnvVar, "my-function") + t.Setenv("DD_SERVICE", "mock-lambda-service") + + extraTags := &logs.Tags{ + Tags: []string{"functionname:test-function"}, + } + demux := createDemultiplexer(t) + defer demux.Stop(false) + mockDetectLambdaLibrary := func() bool { return false } + + var tracePayload *api.Payload + mockProcessTrace := func(payload *api.Payload) { + tracePayload = payload + } + + testProcessor := LifecycleProcessor{ + ExtraTags: extraTags, + ProcessTrace: mockProcessTrace, + DetectLambdaLibrary: mockDetectLambdaLibrary, + Demux: demux, + InferredSpansEnabled: true, + } + startTime := time.Now() + duration := 1 * time.Second + endTime := startTime.Add(duration) + startDetails := InvocationStartDetails{ + StartTime: time.Now(), + InvokeEventRawPayload: []byte(`{}`), + InvokedFunctionARN: "arn:aws:lambda:us-east-1:123456789012:function:my-function", + } + testProcessor.OnInvokeStart(&startDetails) + + timeoutCtx := &InvocationEndDetails{ + RequestID: "test-request-id", + Runtime: "java11", + ColdStart: false, + ProactiveInit: false, + EndTime: endTime, + IsError: true, + IsTimeout: true, + ResponseRawPayload: nil, + } + testProcessor.OnInvokeEnd(timeoutCtx) + + spans := tracePayload.TracerPayload.Chunks[0].Spans + assert.Equal(t, 1, len(spans)) + // No trace context passed + assert.NotZero(t, testProcessor.GetExecutionInfo().TraceID) + assert.Equal(t, uint64(0), testProcessor.GetExecutionInfo().SpanID) + assert.Equal(t, int32(-128), tracePayload.TracerPayload.Chunks[0].Priority) + // New trace ID and span ID has been created + assert.NotEqual(t, uint64(0), spans[0].TraceID) + assert.NotEqual(t, uint64(0), spans[0].SpanID) + assert.Equal(t, spans[0].TraceID, testProcessor.GetExecutionInfo().TraceID) + assert.Equal(t, spans[0].Error, int32(1)) + assert.Equal(t, spans[0].GetMeta()["request_id"], "test-request-id") + assert.Equal(t, spans[0].GetMeta()["language"], "java") +} + +func TestTimeoutExecutionSpanWithTraceContext(t *testing.T) { + t.Setenv(functionNameEnvVar, "my-function") + t.Setenv("DD_SERVICE", "mock-lambda-service") + + extraTags := &logs.Tags{ + Tags: []string{"functionname:test-function"}, + } + demux := createDemultiplexer(t) + defer demux.Stop(false) + mockDetectLambdaLibrary := func() bool { return false } + + var tracePayload *api.Payload + mockProcessTrace := func(payload *api.Payload) { + tracePayload = payload + } + + testProcessor := LifecycleProcessor{ + ExtraTags: extraTags, + ProcessTrace: mockProcessTrace, + DetectLambdaLibrary: mockDetectLambdaLibrary, + Demux: demux, + InferredSpansEnabled: true, + } + eventPayload := `a5a{"resource":"/users/create","path":"/users/create","httpMethod":"GET","headers":{"Accept":"*/*","Accept-Encoding":"gzip","x-datadog-parent-id":"1480558859903409531","x-datadog-sampling-priority":"1","x-datadog-trace-id":"5736943178450432258"}}0` + startTime := time.Now() 
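	// Unlike TestTimeoutExecutionSpan above, the raw payload defined just above
	// embeds x-datadog-trace-id, x-datadog-parent-id and x-datadog-sampling-priority
	// headers, so on a timeout the execution span should inherit the incoming
	// trace ID, parent ID and sampling priority rather than minting a fresh
	// trace ID the way the previous test does. The assertions at the end of
	// this test check exactly that propagation.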
+ duration := 1 * time.Second + endTime := startTime.Add(duration) + startDetails := InvocationStartDetails{ + StartTime: startTime, + InvokeEventRawPayload: []byte(eventPayload), + InvokedFunctionARN: "arn:aws:lambda:us-east-1:123456789012:function:my-function", + } + testProcessor.OnInvokeStart(&startDetails) + timeoutCtx := &InvocationEndDetails{ + RequestID: "test-request-id", + Runtime: "java11", + ColdStart: false, + ProactiveInit: false, + EndTime: endTime, + IsError: true, + IsTimeout: true, + ResponseRawPayload: nil, + } + testProcessor.OnInvokeEnd(timeoutCtx) + + spans := tracePayload.TracerPayload.Chunks[0].Spans + assert.Equal(t, 1, len(spans)) + // Trace context received + assert.Equal(t, spans[0].GetTraceID(), testProcessor.GetExecutionInfo().TraceID) + assert.Equal(t, spans[0].GetParentID(), testProcessor.GetExecutionInfo().parentID) + assert.Equal(t, tracePayload.TracerPayload.Chunks[0].Priority, int32(testProcessor.GetExecutionInfo().SamplingPriority)) + assert.Equal(t, spans[0].Error, int32(1)) + assert.Equal(t, spans[0].GetMeta()["request_id"], "test-request-id") + assert.Equal(t, spans[0].GetMeta()["language"], "java") +} + func TestTriggerTypesLifecycleEventForAPIGatewayRest(t *testing.T) { startDetails := &InvocationStartDetails{ InvokeEventRawPayload: getEventFromFile("api-gateway.json"), diff --git a/pkg/serverless/invocationlifecycle/trace.go b/pkg/serverless/invocationlifecycle/trace.go index 6ed2344b1014f..cfd545ed144f4 100644 --- a/pkg/serverless/invocationlifecycle/trace.go +++ b/pkg/serverless/invocationlifecycle/trace.go @@ -18,6 +18,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/config" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" + "github.com/DataDog/datadog-agent/pkg/serverless/random" "github.com/DataDog/datadog-agent/pkg/serverless/trace/inferredspan" "github.com/DataDog/datadog-agent/pkg/trace/api" "github.com/DataDog/datadog-agent/pkg/trace/info" @@ -76,18 +77,29 @@ func (lp *LifecycleProcessor) startExecutionSpan(event interface{}, rawPayload [ // It should be called at the end of the invocation. 
func (lp *LifecycleProcessor) endExecutionSpan(endDetails *InvocationEndDetails) *pb.Span { executionContext := lp.GetExecutionInfo() - duration := endDetails.EndTime.UnixNano() - executionContext.startTime.UnixNano() + start := executionContext.startTime.UnixNano() + + traceID := executionContext.TraceID + spanID := executionContext.SpanID + // If we fail to receive the trace and span IDs from the tracer during a timeout we create it ourselves + if endDetails.IsTimeout && traceID == 0 { + traceID = random.Random.Uint64() + lp.requestHandler.executionInfo.TraceID = traceID + } + if endDetails.IsTimeout && spanID == 0 { + spanID = random.Random.Uint64() + } executionSpan := &pb.Span{ Service: "aws.lambda", // will be replaced by the span processor Name: "aws.lambda", Resource: os.Getenv(functionNameEnvVar), Type: "serverless", - TraceID: executionContext.TraceID, - SpanID: executionContext.SpanID, + TraceID: traceID, + SpanID: spanID, ParentID: executionContext.parentID, - Start: executionContext.startTime.UnixNano(), - Duration: duration, + Start: start, + Duration: endDetails.EndTime.UnixNano() - start, Meta: lp.requestHandler.triggerTags, Metrics: lp.requestHandler.triggerMetrics, } @@ -110,17 +122,19 @@ func (lp *LifecycleProcessor) endExecutionSpan(endDetails *InvocationEndDetails) } else { capturePayloadAsTags(requestPayloadJSON, executionSpan, "function.request", 0, capturePayloadMaxDepth) } - responsePayloadJSON := make(map[string]interface{}) - if err := json.Unmarshal(endDetails.ResponseRawPayload, &responsePayloadJSON); err != nil { - log.Debugf("[lifecycle] Failed to parse response payload: %v", err) - executionSpan.Meta["function.response"] = string(endDetails.ResponseRawPayload) - } else { - capturePayloadAsTags(responsePayloadJSON, executionSpan, "function.response", 0, capturePayloadMaxDepth) + if endDetails.ResponseRawPayload != nil { + responsePayloadJSON := make(map[string]interface{}) + if err := json.Unmarshal(endDetails.ResponseRawPayload, &responsePayloadJSON); err != nil { + log.Debugf("[lifecycle] Failed to parse response payload: %v", err) + executionSpan.Meta["function.response"] = string(endDetails.ResponseRawPayload) + } else { + capturePayloadAsTags(responsePayloadJSON, executionSpan, "function.response", 0, capturePayloadMaxDepth) + } } } - if endDetails.IsError { executionSpan.Error = 1 + if len(endDetails.ErrorMsg) > 0 { executionSpan.Meta["error.msg"] = endDetails.ErrorMsg } @@ -130,6 +144,11 @@ func (lp *LifecycleProcessor) endExecutionSpan(endDetails *InvocationEndDetails) if len(endDetails.ErrorStack) > 0 { executionSpan.Meta["error.stack"] = endDetails.ErrorStack } + + if endDetails.IsTimeout { + executionSpan.Meta["error.type"] = "Impending Timeout" + executionSpan.Meta["error.msg"] = "Datadog detected an Impending Timeout" + } } return executionSpan diff --git a/pkg/serverless/invocationlifecycle/trace_test.go b/pkg/serverless/invocationlifecycle/trace_test.go index 0b925f9a25be6..6b45d32755165 100644 --- a/pkg/serverless/invocationlifecycle/trace_test.go +++ b/pkg/serverless/invocationlifecycle/trace_test.go @@ -649,6 +649,54 @@ func TestEndExecutionSpanWithError(t *testing.T) { assert.Equal(t, executionSpan.Error, int32(1)) } +func TestEndExecutionSpanWithTimeout(t *testing.T) { + t.Setenv(functionNameEnvVar, "TestFunction") + currentExecutionInfo := &ExecutionStartInfo{} + lp := &LifecycleProcessor{ + requestHandler: &RequestHandler{ + executionInfo: currentExecutionInfo, + triggerTags: make(map[string]string), + }, + } + + startTime := time.Now() 
+ startDetails := &InvocationStartDetails{ + StartTime: startTime, + InvokeEventHeaders: http.Header{}, + } + lp.startExecutionSpan(nil, []byte("[]"), startDetails) + + assert.Zero(t, currentExecutionInfo.TraceID) + assert.Zero(t, currentExecutionInfo.SpanID) + + duration := 1 * time.Second + endTime := startTime.Add(duration) + + endDetails := &InvocationEndDetails{ + EndTime: endTime, + IsError: true, + IsTimeout: true, + RequestID: "test-request-id", + ResponseRawPayload: nil, + ColdStart: true, + ProactiveInit: false, + Runtime: "dotnet6", + } + executionSpan := lp.endExecutionSpan(endDetails) + assert.Equal(t, "aws.lambda", executionSpan.Name) + assert.Equal(t, "aws.lambda", executionSpan.Service) + assert.Equal(t, "TestFunction", executionSpan.Resource) + assert.Equal(t, "serverless", executionSpan.Type) + assert.Equal(t, "dotnet", executionSpan.Meta["language"]) + assert.Equal(t, lp.requestHandler.executionInfo.TraceID, executionSpan.TraceID) + assert.NotZero(t, executionSpan.TraceID) + assert.NotZero(t, executionSpan.SpanID) + assert.Equal(t, startTime.UnixNano(), executionSpan.Start) + assert.Equal(t, duration.Nanoseconds(), executionSpan.Duration) + assert.Equal(t, "Impending Timeout", executionSpan.Meta["error.type"]) + assert.Equal(t, "Datadog detected an Impending Timeout", executionSpan.Meta["error.msg"]) +} + func TestParseLambdaPayload(t *testing.T) { assert.Equal(t, []byte(""), ParseLambdaPayload([]byte(""))) assert.Equal(t, []byte("{}"), ParseLambdaPayload([]byte("{}"))) diff --git a/pkg/serverless/serverless.go b/pkg/serverless/serverless.go index 091494b15afce..24c04e22a08ad 100644 --- a/pkg/serverless/serverless.go +++ b/pkg/serverless/serverless.go @@ -18,6 +18,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/serverless/daemon" "github.com/DataDog/datadog-agent/pkg/serverless/flush" + "github.com/DataDog/datadog-agent/pkg/serverless/invocationlifecycle" "github.com/DataDog/datadog-agent/pkg/serverless/metrics" "github.com/DataDog/datadog-agent/pkg/serverless/registration" "github.com/DataDog/datadog-agent/pkg/serverless/tags" @@ -139,6 +140,10 @@ func WaitForNextInvocation(stopCh chan struct{}, daemon *daemon.Daemon, id regis metricTags = tags.AddInitTypeTag(metricTags) metrics.SendTimeoutEnhancedMetric(metricTags, daemon.MetricAgent.Demux) metrics.SendErrorsEnhancedMetric(metricTags, time.Now(), daemon.MetricAgent.Demux) + + if daemon.IsExecutionSpanIncomplete() { + finishTimeoutExecutionSpan(daemon, coldStartTags.IsColdStart, coldStartTags.IsProactiveInit) + } } err := daemon.ExecutionContext.SaveCurrentExecutionContext() if err != nil { @@ -214,3 +219,20 @@ func removeQualifierFromArn(functionArn string) string { } return functionArn } + +func finishTimeoutExecutionSpan(daemon *daemon.Daemon, isColdStart bool, isProactiveInit bool) { + ecs := daemon.ExecutionContext.GetCurrentState() + timeoutDetails := &invocationlifecycle.InvocationEndDetails{ + RequestID: ecs.LastRequestID, + Runtime: ecs.Runtime, + ColdStart: isColdStart, + ProactiveInit: isProactiveInit, + EndTime: time.Now(), + IsError: true, + IsTimeout: true, + ResponseRawPayload: nil, + } + log.Debug("Could not complete the execution span due to a timeout. 
Attempting to finish the span without details from the tracer.") + daemon.InvocationProcessor.OnInvokeEnd(timeoutDetails) + daemon.SetExecutionSpanIncomplete(false) +} diff --git a/pkg/serverless/serverless_test.go b/pkg/serverless/serverless_test.go index ccd144ea939bd..14bd868ab6548 100644 --- a/pkg/serverless/serverless_test.go +++ b/pkg/serverless/serverless_test.go @@ -15,6 +15,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/DataDog/datadog-agent/pkg/serverless/daemon" + "github.com/DataDog/datadog-agent/pkg/serverless/invocationlifecycle" + "github.com/DataDog/datadog-agent/pkg/serverless/trace" + "github.com/DataDog/datadog-agent/pkg/trace/testutil" ) func TestMain(m *testing.M) { @@ -69,3 +72,40 @@ func TestRemoveQualifierFromArnWithoutAlias(t *testing.T) { functionArn := removeQualifierFromArn(invokedFunctionArn) assert.Equal(t, functionArn, invokedFunctionArn) } + +type mockLifecycleProcessor struct { + isError bool + isTimeout bool + isColdStart bool + isProactiveInit bool +} + +func (m *mockLifecycleProcessor) GetExecutionInfo() *invocationlifecycle.ExecutionStartInfo { + return &invocationlifecycle.ExecutionStartInfo{} +} +func (m *mockLifecycleProcessor) OnInvokeStart(*invocationlifecycle.InvocationStartDetails) {} +func (m *mockLifecycleProcessor) OnInvokeEnd(endDetails *invocationlifecycle.InvocationEndDetails) { + m.isError = endDetails.IsError + m.isTimeout = endDetails.IsTimeout + m.isColdStart = endDetails.ColdStart + m.isProactiveInit = endDetails.ProactiveInit +} + +func TestFinishTimeoutExecutionSpan(t *testing.T) { + port := testutil.FreeTCPPort(t) + d := daemon.StartDaemon(fmt.Sprintf("127.0.0.1:%d", port)) + d.TraceAgent = &trace.ServerlessTraceAgent{} + mock := &mockLifecycleProcessor{} + d.InvocationProcessor = mock + defer d.Stop() + + assert.False(t, d.IsExecutionSpanIncomplete()) + d.SetExecutionSpanIncomplete(true) + assert.True(t, d.IsExecutionSpanIncomplete()) + finishTimeoutExecutionSpan(d, true, true) + assert.False(t, d.IsExecutionSpanIncomplete()) + assert.True(t, mock.isError) + assert.True(t, mock.isTimeout) + assert.True(t, mock.isColdStart) + assert.True(t, mock.isProactiveInit) +} From 42daab7db15271b720a9fa2ee2762f2a2bfe58c3 Mon Sep 17 00:00:00 2001 From: Simon Tsui <88simont@gmail.com> Date: Sat, 13 Apr 2024 16:25:53 -0400 Subject: [PATCH 32/99] Simont1/root tag e2e test (#24618) Co-authored-by: knusbaum --- test/new-e2e/tests/apm/docker_test.go | 12 ++++++++++++ test/new-e2e/tests/apm/tests.go | 19 +++++++++++++++++++ test/new-e2e/tests/apm/vm_test.go | 23 +++++++++++++++++++++++ 3 files changed, 54 insertions(+) diff --git a/test/new-e2e/tests/apm/docker_test.go b/test/new-e2e/tests/apm/docker_test.go index cf78c1842074a..fb1761f3b0077 100644 --- a/test/new-e2e/tests/apm/docker_test.go +++ b/test/new-e2e/tests/apm/docker_test.go @@ -113,6 +113,18 @@ func (s *DockerFakeintakeSuite) TestAutoVersionStats() { }, 2*time.Minute, 10*time.Second, "Failed finding version tags") } +func (s *DockerFakeintakeSuite) TestIsTraceRootTag() { + err := s.Env().FakeIntake.Client().FlushServerAndResetAggregators() + s.Require().NoError(err) + + service := fmt.Sprintf("tracegen-auto-is-trace-root-tag-%s", s.transport) + defer waitTracegenShutdown(&s.Suite, s.Env().FakeIntake) + defer runTracegenDocker(s.Env().RemoteHost, service, tracegenCfg{transport: s.transport})() + s.EventuallyWithTf(func(c *assert.CollectT) { + testIsTraceRootTag(s.T(), c, s.Env().FakeIntake) + }, 2*time.Minute, 10*time.Second, "Failed finding is_trace_root tag") +} + func (s 
*DockerFakeintakeSuite) TestStatsForService() { err := s.Env().FakeIntake.Client().FlushServerAndResetAggregators() s.Require().NoError(err) diff --git a/test/new-e2e/tests/apm/tests.go b/test/new-e2e/tests/apm/tests.go index a3214c469ab07..5a45b6c0e5318 100644 --- a/test/new-e2e/tests/apm/tests.go +++ b/test/new-e2e/tests/apm/tests.go @@ -110,6 +110,25 @@ func testAutoVersionStats(t *testing.T, c *assert.CollectT, intake *components.F } } +func testIsTraceRootTag(t *testing.T, c *assert.CollectT, intake *components.FakeIntake) { + t.Helper() + stats, err := intake.Client().GetAPMStats() + assert.NoError(c, err) + assert.NotEmpty(c, stats) + t.Log("Got apm stats:", spew.Sdump(stats)) + for _, p := range stats { + for _, s := range p.StatsPayload.Stats { + t.Log("Client Payload:", spew.Sdump(s)) + for _, b := range s.Stats { + for _, cs := range b.Stats { + t.Logf("Got IsTraceRoot: %v", cs.GetIsTraceRoot()) + assert.Equal(t, trace.TraceRootFlag_TRUE, cs.GetIsTraceRoot()) + } + } + } + } +} + func getContainerTags(t *testing.T, tp *trace.TracerPayload) (map[string]string, bool) { ctags, ok := tp.Tags["_dd.tags.container"] if !ok { diff --git a/test/new-e2e/tests/apm/vm_test.go b/test/new-e2e/tests/apm/vm_test.go index 145223e668381..1870590cb0b79 100644 --- a/test/new-e2e/tests/apm/vm_test.go +++ b/test/new-e2e/tests/apm/vm_test.go @@ -220,6 +220,29 @@ func (s *VMFakeintakeSuite) TestAutoVersionStats() { }, 3*time.Minute, 10*time.Second, "Failed finding stats") } +func (s *VMFakeintakeSuite) TestIsTraceRootTag() { + err := s.Env().FakeIntake.Client().FlushServerAndResetAggregators() + s.Require().NoError(err) + + service := fmt.Sprintf("tracegen-stats-%s", s.transport) + + // Wait for agent to be live + s.T().Log("Waiting for Trace Agent to be live.") + s.Require().NoError(waitRemotePort(s, 8126)) + + // Run Trace Generator + s.T().Log("Starting Trace Generator.") + defer waitTracegenShutdown(&s.Suite, s.Env().FakeIntake) + shutdown := runTracegenDocker(s.Env().RemoteHost, service, tracegenCfg{transport: s.transport}) + defer shutdown() + + s.EventuallyWithTf(func(c *assert.CollectT) { + s.logStatus() + testIsTraceRootTag(s.T(), c, s.Env().FakeIntake) + s.logJournal() + }, 3*time.Minute, 10*time.Second, "Failed finding stats") +} + func (s *VMFakeintakeSuite) TestBasicTrace() { err := s.Env().FakeIntake.Client().FlushServerAndResetAggregators() s.Require().NoError(err) From 2be60ca5581fddb9f8151663a96d34bebe9bf0e5 Mon Sep 17 00:00:00 2001 From: paullegranddc <82819397+paullegranddc@users.noreply.github.com> Date: Mon, 15 Apr 2024 02:21:48 +0200 Subject: [PATCH 33/99] [installer] Update the installer (#23902) * * Add experiment unit file * Setup and start the updater experiment * * Point unit files to the bootstraped version of the updater * Use the helper to setcap on the newly installed helper, with some validation * Activate datadog-updater unit file on bootstrap * Add file header * WIP * Cleanup installer systemd units * Fix the helper script * Fix installer helper setcap command * Rename updater to installer * Remove untrue commit * Add the installer bootstrap in rpm * Remove systemd startup from postinst * Remove /usr/lib/systemd/system package file * Fix linter + windows * Fix fucntion comment * Bootsrap datadog oci latest * add windows shims * Fix updater helper permissions on install * fix windows lint * * Fix updater helper permission check * Fix systemdpath used by updater helper * Add more error messages * check file mode correctly * fix error * Enable installer unit before 
starting it * fix rpmSystemdPath * Invert systemd path search * Print ls output in e2e tests * skip TestInstallerUnitLoaded on rpm --- cmd/updater/subcommands/bootstrap/command.go | 4 +- cmd/updater/subcommands/run/command.go | 22 ++++- omnibus/config/projects/installer.rb | 5 -- omnibus/config/software/installer.rb | 16 ++-- .../datadog-installer-exp.service.erb | 20 +++++ .../installer/datadog-installer.service.erb | 5 +- omnibus/package-scripts/updater-deb/postinst | 5 +- omnibus/package-scripts/updater-rpm/posttrans | 4 +- pkg/updater/install.go | 11 ++- pkg/updater/repository/repository.go | 12 ++- pkg/updater/service/cmd_executor.go | 5 ++ pkg/updater/service/cmd_executor_windows.go | 5 ++ pkg/updater/service/datadog_installer.go | 68 ++++++++++++++ .../service/datadog_installer_windows.go | 27 ++++++ pkg/updater/service/helper/main.go | 89 +++++++++++++++++-- pkg/updater/updater.go | 8 +- test/new-e2e/tests/updater/linux_test.go | 3 + 17 files changed, 267 insertions(+), 42 deletions(-) create mode 100644 omnibus/config/templates/installer/datadog-installer-exp.service.erb create mode 100644 pkg/updater/service/datadog_installer.go create mode 100644 pkg/updater/service/datadog_installer_windows.go diff --git a/cmd/updater/subcommands/bootstrap/command.go b/cmd/updater/subcommands/bootstrap/command.go index 3dcd1dfdb1f81..3a8bc79b1115e 100644 --- a/cmd/updater/subcommands/bootstrap/command.go +++ b/cmd/updater/subcommands/bootstrap/command.go @@ -47,7 +47,7 @@ func Commands(global *command.GlobalParams) []*cobra.Command { RunE: func(_ *cobra.Command, _ []string) error { ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() - return boostrapFxWrapper(ctx, &cliParams{ + return bootstrapFxWrapper(ctx, &cliParams{ GlobalParams: *global, url: url, pkg: pkg, @@ -62,7 +62,7 @@ func Commands(global *command.GlobalParams) []*cobra.Command { return []*cobra.Command{bootstrapCmd} } -func boostrapFxWrapper(ctx context.Context, params *cliParams) error { +func bootstrapFxWrapper(ctx context.Context, params *cliParams) error { return fxutil.OneShot(bootstrap, fx.Provide(func() context.Context { return ctx }), fx.Supply(params), diff --git a/cmd/updater/subcommands/run/command.go b/cmd/updater/subcommands/run/command.go index 3417fe4ac59e9..f4f3cdb5758aa 100644 --- a/cmd/updater/subcommands/run/command.go +++ b/cmd/updater/subcommands/run/command.go @@ -8,6 +8,9 @@ package run import ( "context" + "os" + "os/signal" + "syscall" "github.com/spf13/cobra" "go.uber.org/fx" @@ -25,6 +28,7 @@ import ( "github.com/DataDog/datadog-agent/comp/remote-config/rctelemetryreporter/rctelemetryreporterimpl" "github.com/DataDog/datadog-agent/pkg/config/remote/service" "github.com/DataDog/datadog-agent/pkg/util/fxutil" + "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/comp/updater/localapi" "github.com/DataDog/datadog-agent/comp/updater/localapi/localapiimpl" @@ -77,6 +81,20 @@ func runFxWrapper(params *cliParams) error { ) } -func run(_ pid.Component, _ localapi.Component) error { - select {} +func run(shutdowner fx.Shutdowner, _ pid.Component, _ localapi.Component) error { + handleSignals(shutdowner) + return nil +} + +func handleSignals(shutdowner fx.Shutdowner) { + sigChan := make(chan os.Signal, 1) + signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGPIPE) + for signo := range sigChan { + switch signo { + case syscall.SIGINT, syscall.SIGTERM: + log.Infof("Received signal %d (%v)", signo, signo) + _ = shutdowner.Shutdown() + return + } 
+ } } diff --git a/omnibus/config/projects/installer.rb b/omnibus/config/projects/installer.rb index 50536c9ec7885..29cb88c586a90 100644 --- a/omnibus/config/projects/installer.rb +++ b/omnibus/config/projects/installer.rb @@ -138,11 +138,6 @@ # ------------------------------------ if linux_target? - systemd_directory = "/usr/lib/systemd/system" - if debian_target? - systemd_directory = "/lib/systemd/system" - end - extra_package_file "#{systemd_directory}/datadog-installer.service" extra_package_file '/etc/datadog-agent/' extra_package_file '/var/log/datadog/' extra_package_file '/var/run/datadog-packages/' diff --git a/omnibus/config/software/installer.rb b/omnibus/config/software/installer.rb index 3cb4fedd4ed56..e4042183f3581 100644 --- a/omnibus/config/software/installer.rb +++ b/omnibus/config/software/installer.rb @@ -46,22 +46,19 @@ # Packages mkdir "/opt/datadog-packages" - copy 'bin/installer', "#{install_dir}/bin/" - # Add installer units - systemdPath = "/lib/systemd/system/" - if not debian_target? - mkdir "/usr/lib/systemd/system/" - systemdPath = "/usr/lib/systemd/system/" - end + systemdPath = "#{install_dir}/systemd/" erb source: "datadog-installer.service.erb", dest: systemdPath + "datadog-installer.service", mode: 0644, - vars: { install_dir: install_dir, etc_dir: etc_dir} + vars: { installer_dir: "/opt/datadog-packages/datadog-installer/stable", etc_dir: etc_dir} + erb source: "datadog-installer-exp.service.erb", + dest: systemdPath + "datadog-installer-exp.service", + mode: 0644, + vars: { installer_dir: "/opt/datadog-packages/datadog-installer/experiment", etc_dir: etc_dir} - systemdPath = "#{install_dir}/systemd/" # Add stable agent units templateToFile = { "datadog-agent.service.erb" => "datadog-agent.service", @@ -69,7 +66,6 @@ "datadog-agent-process.service.erb" => "datadog-agent-process.service", "datadog-agent-security.service.erb" => "datadog-agent-security.service", "datadog-agent-sysprobe.service.erb" => "datadog-agent-sysprobe.service", - "datadog-installer.service.erb" => "datadog-installer.service", } templateToFile.each do |template, file| agent_dir = "/opt/datadog-packages/datadog-agent/stable" diff --git a/omnibus/config/templates/installer/datadog-installer-exp.service.erb b/omnibus/config/templates/installer/datadog-installer-exp.service.erb new file mode 100644 index 0000000000000..10f35f1146ee9 --- /dev/null +++ b/omnibus/config/templates/installer/datadog-installer-exp.service.erb @@ -0,0 +1,20 @@ +[Unit] +Description=Datadog Installer Experiment +After=network.target +OnFailure=datadog-installer.service +Conflicts=datadog-installer.service +JobTimeoutSec=3000 #50 minutes + +[Service] +Type=oneshot +PIDFile=<%= installer_dir %>/run/installer.pid +User=dd-installer +EnvironmentFile=-<%= etc_dir %>/environment +ExecStart=<%= installer_dir %>/bin/installer/installer run -p <%= installer_dir %>/run/installer.pid +ExecStart=<%= installer_dir %>/bin/installer/installer run -p <%= installer_dir %>/run/installer.pid +ExecStart=<%= installer_dir %>/bin/installer/installer run -p <%= installer_dir %>/run/installer.pid +ExecStart=/bin/false +ExecStop=/bin/false + +[Install] +WantedBy=multi-user.target diff --git a/omnibus/config/templates/installer/datadog-installer.service.erb b/omnibus/config/templates/installer/datadog-installer.service.erb index de5074f2ad994..fdf06880736ee 100644 --- a/omnibus/config/templates/installer/datadog-installer.service.erb +++ b/omnibus/config/templates/installer/datadog-installer.service.erb @@ -1,14 +1,15 @@ [Unit] 
Description=Datadog Installer After=network.target +Conflicts=datadog-installer-exp.service [Service] Type=simple -PIDFile=<%= install_dir %>/run/installer.pid +PIDFile=<%= installer_dir %>/run/installer.pid User=dd-installer Restart=on-failure EnvironmentFile=-<%= etc_dir %>/environment -ExecStart=<%= install_dir %>/bin/installer/installer run -p <%= install_dir %>/run/installer.pid +ExecStart=<%= installer_dir %>/bin/installer/installer run -p <%= installer_dir %>/run/installer.pid # Since systemd 229, should be in [Unit] but in order to support systemd <229, # it is also supported to have it here. StartLimitInterval=10 diff --git a/omnibus/package-scripts/updater-deb/postinst b/omnibus/package-scripts/updater-deb/postinst index e661773649a1b..b0235efd31fd1 100644 --- a/omnibus/package-scripts/updater-deb/postinst +++ b/omnibus/package-scripts/updater-deb/postinst @@ -6,6 +6,7 @@ readonly PACKAGES_DIR=/opt/datadog-packages readonly INSTALL_DIR=/opt/datadog-installer +readonly BOOTSTRAP_INSTALLER=${INSTALL_DIR}/bin/installer/installer readonly HELPER=${INSTALL_DIR}/bin/installer/helper readonly LOG_DIR=/var/log/datadog readonly PACKAGES_LOCK_DIR=/var/run/datadog-packages @@ -81,8 +82,6 @@ fi chmod 750 ${HELPER} setcap cap_setuid+ep ${HELPER} -# start updater -SYSTEMCTL_SKIP_SYSV=true systemctl enable datadog-installer || true -SYSTEMCTL_SKIP_SYSV=true systemctl start datadog-installer || true +$BOOTSTRAP_INSTALLER bootstrap --url "oci://docker.io/datadog/installer-package-dev:latest" exit 0 diff --git a/omnibus/package-scripts/updater-rpm/posttrans b/omnibus/package-scripts/updater-rpm/posttrans index 273c589fb8072..1b0ab887e3838 100644 --- a/omnibus/package-scripts/updater-rpm/posttrans +++ b/omnibus/package-scripts/updater-rpm/posttrans @@ -65,9 +65,7 @@ fi chmod 750 ${HELPER} setcap cap_setuid+ep ${HELPER} -# start updater -SYSTEMCTL_SKIP_SYSV=true systemctl enable datadog-installer || true -SYSTEMCTL_SKIP_SYSV=true systemctl start datadog-installer || true +$BOOTSTRAP_INSTALLER bootstrap --url "oci://docker.io/datadog/installer-package-dev:latest" exit 0 diff --git a/pkg/updater/install.go b/pkg/updater/install.go index 092013a087f67..fe2d04aff91c1 100644 --- a/pkg/updater/install.go +++ b/pkg/updater/install.go @@ -29,8 +29,9 @@ const ( datadogPackageMaxSize = 3 << 30 // 3GiB defaultConfigsDir = "/etc" - packageDatadogAgent = "datadog-agent" - packageAPMInjector = "datadog-apm-inject" + packageDatadogAgent = "datadog-agent" + packageAPMInjector = "datadog-apm-inject" + packageDatadogInstaller = "datadog-installer" ) type installer struct { @@ -69,6 +70,8 @@ func (i *installer) installStable(pkg string, version string, image oci.Image) e return service.SetupAgentUnits() case packageAPMInjector: return service.SetupAPMInjector() + case packageDatadogInstaller: + return service.SetupInstallerUnit() default: return nil } @@ -117,6 +120,8 @@ func (i *installer) startExperiment(pkg string) error { switch pkg { case packageDatadogAgent: return service.StartAgentExperiment() + case packageDatadogInstaller: + return service.StartInstallerExperiment() default: return nil } @@ -128,6 +133,8 @@ func (i *installer) stopExperiment(pkg string) error { switch pkg { case packageDatadogAgent: return service.StopAgentExperiment() + case packageAPMInjector: + return service.StopInstallerExperiment() default: return nil } diff --git a/pkg/updater/repository/repository.go b/pkg/updater/repository/repository.go index abbf54dcf09bd..9c1b6d26eb209 100644 --- a/pkg/updater/repository/repository.go +++ 
b/pkg/updater/repository/repository.go @@ -13,7 +13,6 @@ import ( "os" "path/filepath" "strconv" - "strings" "github.com/DataDog/gopsutil/process" @@ -366,10 +365,19 @@ func movePackageFromSource(packageName string, rootPath string, lockedPackages m if err := os.Chmod(targetPath, 0755); err != nil { return "", fmt.Errorf("could not set permissions on package: %w", err) } - if strings.HasSuffix(rootPath, "datadog-agent") { + switch filepath.Base(rootPath) { + case "datadog-agent": if err := service.ChownDDAgent(targetPath); err != nil { return "", err } + case "datadog-installer": + helperPath := filepath.Join(rootPath, packageName, "bin/installer/helper") + if err := os.Chmod(helperPath, 0750); err != nil { + return "", fmt.Errorf("could not set permissions on installer-helper: %w", err) + } + if err := service.SetCapHelper(helperPath); err != nil { + return "", fmt.Errorf("could not set capabilities on installer-helper: %w", err) + } } return targetPath, nil diff --git a/pkg/updater/service/cmd_executor.go b/pkg/updater/service/cmd_executor.go index 072720d541437..a1abec9e4919e 100644 --- a/pkg/updater/service/cmd_executor.go +++ b/pkg/updater/service/cmd_executor.go @@ -44,6 +44,11 @@ func rmAgentSymlink() error { return executeCommand(`{"command":"rm-agent-symlink"}`) } +// SetCapHelper sets cap setuid on the newly installed helper +func SetCapHelper(path string) error { + return executeCommand(`{"command":"setcap cap_setuid+ep", "path":"` + path + `"}`) +} + func executeCommand(command string) error { cancelctx, cancelfunc := context.WithTimeout(context.Background(), execTimeout) defer cancelfunc() diff --git a/pkg/updater/service/cmd_executor_windows.go b/pkg/updater/service/cmd_executor_windows.go index 55f88aa7229fa..37b0b709ea038 100644 --- a/pkg/updater/service/cmd_executor_windows.go +++ b/pkg/updater/service/cmd_executor_windows.go @@ -22,3 +22,8 @@ func RemoveAll(path string) error { func BuildHelperForTests(_, _ string, _ bool) error { return nil } + +// SetCapHelper sets cap setuid on the newly installed helper +func SetCapHelper(_ string) error { + return nil +} diff --git a/pkg/updater/service/datadog_installer.go b/pkg/updater/service/datadog_installer.go new file mode 100644 index 0000000000000..a1c01a85a6024 --- /dev/null +++ b/pkg/updater/service/datadog_installer.go @@ -0,0 +1,68 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build !windows + +package service + +import "github.com/DataDog/datadog-agent/pkg/util/log" + +const ( + installerUnit = "datadog-installer.service" + installerUnitExp = "datadog-installer-exp.service" +) + +var installerUnits = []string{installerUnit, installerUnitExp} + +// SetupInstallerUnit installs and starts the installer systemd units +func SetupInstallerUnit() (err error) { + defer func() { + if err != nil { + log.Errorf("Failed to setup installer units: %s, reverting", err) + } + }() + + for _, unit := range installerUnits { + if err = loadUnit(unit); err != nil { + return err + } + } + + if err = systemdReload(); err != nil { + return err + } + + if err = enableUnit(installerUnit); err != nil { + return err + } + + if err = startUnit(installerUnit); err != nil { + return err + } + return nil +} + +// RemoveInstallerUnit removes the installer systemd units +func RemoveInstallerUnit() { + var err error + for _, unit := range installerUnits { + if err = disableUnit(unit); err != nil { + log.Warnf("Failed to disable %s: %s", unit, err) + } + if err = removeUnit(unit); err != nil { + log.Warnf("Failed to stop %s: %s", unit, err) + } + } +} + +// StartInstallerExperiment installs the experimental systemd units for the installer +func StartInstallerExperiment() error { + return startUnit(installerUnitExp) +} + +// StopInstallerExperiment installs the stable systemd units for the installer +func StopInstallerExperiment() error { + return startUnit(installerUnit) +} diff --git a/pkg/updater/service/datadog_installer_windows.go b/pkg/updater/service/datadog_installer_windows.go new file mode 100644 index 0000000000000..e93ef99b0e1a2 --- /dev/null +++ b/pkg/updater/service/datadog_installer_windows.go @@ -0,0 +1,27 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build windows + +package service + +// SetupInstallerUnit noop +func SetupInstallerUnit() (err error) { + return nil +} + +// RemoveInstallerUnit noop +func RemoveInstallerUnit() { +} + +// StartInstallerExperiment noop +func StartInstallerExperiment() error { + return nil +} + +// StopInstallerExperiment noop +func StopInstallerExperiment() error { + return nil +} diff --git a/pkg/updater/service/helper/main.go b/pkg/updater/service/helper/main.go index a20a1de7a99c1..3f61e1bdbf685 100644 --- a/pkg/updater/service/helper/main.go +++ b/pkg/updater/service/helper/main.go @@ -23,14 +23,29 @@ import ( ) var ( - installPath string - systemdPath = "/lib/systemd/system" // todo load it at build time from omnibus - pkgDir = "/opt/datadog-packages" - agentDir = "/etc/datadog-agent" - dockerDir = "/etc/docker" - testSkipUID = "" + installPath string + debSystemdPath = "/lib/systemd/system" // todo load it at build time from omnibus + rpmSystemdPath = "/usr/lib/systemd/system" + pkgDir = "/opt/datadog-packages/" + agentDir = "/etc/datadog-agent" + dockerDir = "/etc/docker" + testSkipUID = "" + installerUser = "dd-installer" ) +// findSystemdPath todo: this is a hacky way to detect on which os family we are currently +// running and finding the correct systemd path. 
+// We should probably provide the correct path when we build the package
+func findSystemdPath() (systemdPath string, err error) {
+	if _, err = os.Stat(rpmSystemdPath); err == nil {
+		return rpmSystemdPath, nil
+	}
+	if _, err = os.Stat(debSystemdPath); err == nil {
+		return debSystemdPath, nil
+	}
+	return "", fmt.Errorf("systemd unit path error: %w", err)
+}
+
 func enforceUID() bool {
 	return testSkipUID != "true"
 }
@@ -55,6 +70,52 @@ func isValidUnitString(s string) bool {
 	return true
 }
 
+func splitPathPrefix(path string) (first string, rest string) {
+	for i := 0; i < len(path); i++ {
+		if os.IsPathSeparator(path[i]) {
+			return path[:i], path[i+1:]
+		}
+	}
+	return first, rest
+}
+
+func checkHelperPath(path string) (err error) {
+	target, found := strings.CutPrefix(path, pkgDir)
+	if !found {
+		return fmt.Errorf("installer-helper should be in packages directory")
+	}
+	helperPackage, rest := splitPathPrefix(target)
+	if helperPackage != "datadog-installer" {
+		return fmt.Errorf("installer-helper should be in datadog-installer package")
+	}
+	version, helperPath := splitPathPrefix(rest)
+	if version == "stable" || version == "experiment" {
+		return fmt.Errorf("installer-helper should be a concrete version")
+	}
+	if helperPath != "bin/installer/helper" {
+		return fmt.Errorf("installer-helper not at the expected path")
+	}
+	info, err := os.Stat(path)
+	if err != nil {
+		return err
+	}
+	stat, ok := info.Sys().(*syscall.Stat_t)
+	if !ok {
+		return fmt.Errorf("couldn't get update helper stats: %w", err)
+	}
+	ddUpdaterUser, err := user.Lookup(installerUser)
+	if err != nil {
+		return fmt.Errorf("failed to lookup dd-installer user: %w", err)
+	}
+	if ddUpdaterUser.Uid != strconv.Itoa(int(stat.Uid)) {
+		return fmt.Errorf("installer-helper should be owned by dd-installer")
+	}
+	if info.Mode() != 0750 {
+		return fmt.Errorf("installer-helper should only be executable by the user. Expected permissions %O, got permissions %O", 0750, info.Mode())
+	}
+	return nil
+}
+
 func buildCommand(inputCommand privilegeCommand) (*exec.Cmd, error) {
 	if inputCommand.Unit != "" {
 		return buildUnitCommand(inputCommand)
@@ -98,8 +159,16 @@ func buildUnitCommand(inputCommand privilegeCommand) (*exec.Cmd, error) {
 		// --no-block is used to avoid waiting on oneshot executions
 		return exec.Command("systemctl", command, unit, "--no-block"), nil
 	case "load-unit":
+		systemdPath, err := findSystemdPath()
+		if err != nil {
+			return nil, err
+		}
 		return exec.Command("cp", filepath.Join(installPath, "systemd", unit), filepath.Join(systemdPath, unit)), nil
 	case "remove-unit":
+		systemdPath, err := findSystemdPath()
+		if err != nil {
+			return nil, err
+		}
 		return exec.Command("rm", filepath.Join(systemdPath, unit)), nil
 	default:
 		return nil, fmt.Errorf("invalid command")
@@ -121,6 +190,12 @@ func buildPathCommand(inputCommand privilegeCommand) (*exec.Cmd, error) {
 		return exec.Command("chown", "-R", "dd-agent:dd-agent", path), nil
 	case "rm":
 		return exec.Command("rm", "-rf", path), nil
+	case "setcap cap_setuid+ep":
+		err := checkHelperPath(path)
+		if err != nil {
+			return nil, err
+		}
+		return exec.Command("setcap", "cap_setuid+ep", path), nil
 	case "backup-file":
 		return exec.Command("cp", "-f", path, path+".bak"), nil
 	case "restore-file":
@@ -150,7 +225,7 @@ func executeCommand() error {
 
 	// only root or dd-installer can execute this command
 	if currentUser != 0 && enforceUID() {
-		ddUpdaterUser, err := user.Lookup("dd-installer")
+		ddUpdaterUser, err := user.Lookup(installerUser)
 		if err != nil {
 			return fmt.Errorf("failed to lookup dd-installer user: %s", err)
 		}
diff --git a/pkg/updater/updater.go b/pkg/updater/updater.go
index 766952a8ab57b..cd656fd25c9ca 100644
--- a/pkg/updater/updater.go
+++ b/pkg/updater/updater.go
@@ -225,7 +225,7 @@ func (u *updaterImpl) BootstrapDefault(ctx context.Context, pkg string) (err err
 	if !ok {
 		return fmt.Errorf("could not get default package '%s' for arch '%s' and platform '%s'", pkg, runtime.GOARCH, runtime.GOOS)
 	}
-	return u.boostrapPackage(ctx, stablePackage.URL, stablePackage.Name, stablePackage.Version)
+	return u.bootstrapPackage(ctx, stablePackage.URL, stablePackage.Name, stablePackage.Version)
 }
 
 // BootstrapVersion installs the stable version of the package.
@@ -241,7 +241,7 @@ func (u *updaterImpl) BootstrapVersion(ctx context.Context, pkg string, version
 	if !ok {
 		return fmt.Errorf("could not get package '%s' version '%s' for arch '%s' and platform '%s'", pkg, version, runtime.GOARCH, runtime.GOOS)
 	}
-	return u.boostrapPackage(ctx, stablePackage.URL, stablePackage.Name, stablePackage.Version)
+	return u.bootstrapPackage(ctx, stablePackage.URL, stablePackage.Name, stablePackage.Version)
 }
 
 // BootstrapURL installs the stable version of the package.
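
The checkHelperPath guard added above only allows cap_setuid to be granted to a helper that sits at a concrete version path under the datadog-installer package directory and that is owned by dd-installer with mode 0750. A minimal sketch of how the path-shape checks could be exercised, assuming a hypothetical test file placed next to helper/main.go (the test name and sample paths are illustrative and not part of this patch; the accepting case needs a real file on disk with the right owner and mode, so it is left out):

package main

import "testing"

func TestCheckHelperPathRejectsUnexpectedPaths(t *testing.T) {
	cases := []string{
		"/usr/local/bin/helper",                                                // outside /opt/datadog-packages/
		"/opt/datadog-packages/datadog-agent/7.52.0/bin/installer/helper",     // wrong package
		"/opt/datadog-packages/datadog-installer/stable/bin/installer/helper", // symlinked version, not a concrete one
		"/opt/datadog-packages/datadog-installer/7.52.0/bin/other/helper",     // wrong relative path inside the package
	}
	for _, path := range cases {
		if err := checkHelperPath(path); err == nil {
			t.Errorf("expected %q to be rejected", path)
		}
	}
}
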
@@ -253,10 +253,10 @@ func (u *updaterImpl) BootstrapURL(ctx context.Context, url string) (err error) u.refreshState(ctx) defer u.refreshState(ctx) - return u.boostrapPackage(ctx, url, "", "") + return u.bootstrapPackage(ctx, url, "", "") } -func (u *updaterImpl) boostrapPackage(ctx context.Context, url string, expectedPackage string, expectedVersion string) error { +func (u *updaterImpl) bootstrapPackage(ctx context.Context, url string, expectedPackage string, expectedVersion string) error { // both tmp and repository paths are checked for available disk space in case they are on different partitions err := checkAvailableDiskSpace(fsDisk, defaultRepositoriesPath, os.TempDir()) if err != nil { diff --git a/test/new-e2e/tests/updater/linux_test.go b/test/new-e2e/tests/updater/linux_test.go index 59280eaf1f0f7..d905d015b80bc 100644 --- a/test/new-e2e/tests/updater/linux_test.go +++ b/test/new-e2e/tests/updater/linux_test.go @@ -100,6 +100,9 @@ func (v *vmUpdaterSuite) TestUpdaterDirs() { } func (v *vmUpdaterSuite) TestInstallerUnitLoaded() { + if v.packageManager == "rpm" { + v.T().Skip("FIXME(Paul): installer unit files disappear after bootstrap") + } require.Equal(v.T(), "enabled\n", v.Env().RemoteHost.MustExecute(`systemctl is-enabled datadog-installer.service`)) } From e3c017001b780b6272ccd49fd52fe88841a65d55 Mon Sep 17 00:00:00 2001 From: Paul Cacheux Date: Mon, 15 Apr 2024 09:52:47 +0200 Subject: [PATCH 34/99] [CWS] ensure directory provider lock is not held while propagating profile to manager (#24661) * [CWS] ensure directory provider lock is not held while propagating profile to manager * fix same deadlock in `loadProfile` --- .../security_profile/profile/profile_dir.go | 33 ++++++++++++++----- 1 file changed, 24 insertions(+), 9 deletions(-) diff --git a/pkg/security/security_profile/profile/profile_dir.go b/pkg/security/security_profile/profile/profile_dir.go index 5299ac4f93abf..422c2a3ba9acb 100644 --- a/pkg/security/security_profile/profile/profile_dir.go +++ b/pkg/security/security_profile/profile/profile_dir.go @@ -12,6 +12,7 @@ import ( "context" "errors" "fmt" + "maps" "os" "path" "path/filepath" @@ -155,10 +156,20 @@ func (dp *DirectoryProvider) UpdateWorkloadSelectors(selectors []cgroupModel.Wor } func (dp *DirectoryProvider) onNewProfileDebouncerCallback() { + // we don't want to keep the lock for too long, especially not while calling the callback dp.Lock() - defer dp.Unlock() - for _, selector := range dp.selectors { - for profileSelector, profilePath := range dp.profileMapping { + selectors := make([]cgroupModel.WorkloadSelector, len(dp.selectors)) + copy(selectors, dp.selectors) + profileMapping := maps.Clone(dp.profileMapping) + propagateCb := dp.onNewProfileCallback + dp.Unlock() + + if propagateCb == nil { + return + } + + for _, selector := range selectors { + for profileSelector, profilePath := range profileMapping { if selector.Match(profileSelector) { // read and parse profile profile, err := LoadProtoFromFile(profilePath.path) @@ -168,7 +179,7 @@ func (dp *DirectoryProvider) onNewProfileDebouncerCallback() { } // propagate the new profile - dp.onNewProfileCallback(profileSelector, profile) + propagateCb(profileSelector, profile) } } } @@ -226,10 +237,14 @@ func (dp *DirectoryProvider) loadProfile(profilePath string) error { // lock selectors and profiles mapping dp.Lock() - defer dp.Unlock() + selectors := make([]cgroupModel.WorkloadSelector, len(dp.selectors)) + copy(selectors, dp.selectors) + profileMapping := maps.Clone(dp.profileMapping) + propagateCb := 
dp.onNewProfileCallback + dp.Unlock() // prioritize a persited profile over activity dumps - if existingProfile, ok := dp.profileMapping[profileManagerSelector]; ok { + if existingProfile, ok := profileMapping[profileManagerSelector]; ok { if existingProfile.selector.Tag == "*" && profile.Selector.GetImageTag() != "*" { seclog.Debugf("ignoring %s: a persisted profile already exists for workload %s", profilePath, profileManagerSelector.String()) return nil @@ -244,14 +259,14 @@ func (dp *DirectoryProvider) loadProfile(profilePath string) error { seclog.Debugf("security profile %s loaded from file system", workloadSelector) - if dp.onNewProfileCallback == nil { + if propagateCb == nil { return nil } // check if this profile matches a workload selector - for _, selector := range dp.selectors { + for _, selector := range selectors { if workloadSelector.Match(selector) { - dp.onNewProfileCallback(workloadSelector, profile) + propagateCb(workloadSelector, profile) } } return nil From 42997a45690e4f57d198ef2a0114f51c2391fe3c Mon Sep 17 00:00:00 2001 From: Raphael Gavache Date: Mon, 15 Apr 2024 10:15:21 +0200 Subject: [PATCH 35/99] Update linux_test.go (#24664) --- test/new-e2e/tests/updater/linux_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/new-e2e/tests/updater/linux_test.go b/test/new-e2e/tests/updater/linux_test.go index d905d015b80bc..6a5821abcaf99 100644 --- a/test/new-e2e/tests/updater/linux_test.go +++ b/test/new-e2e/tests/updater/linux_test.go @@ -70,7 +70,7 @@ func TestUbuntuARM(t *testing.T) { func TestDebianX86(t *testing.T) { t.Parallel() - runTest(t, "dpkg", os.AMD64Arch, os.UbuntuDefault) + runTest(t, "dpkg", os.AMD64Arch, os.DebianDefault) } func (v *vmUpdaterSuite) TestUserGroupsCreation() { From 03f70d3308d4597f9fd5c3c387c55b57eb92fb7a Mon Sep 17 00:00:00 2001 From: Florent Clarret Date: Mon, 15 Apr 2024 09:34:08 +0000 Subject: [PATCH 36/99] Bump the buildimages tags (#24665) * Bump the buildimages tags * Nudge CI * fix image --- .circleci/config.yml | 2 +- .gitlab-ci.yml | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 716b82715b5c5..075c48ede2885 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -15,7 +15,7 @@ experimental: templates: job_template: &job_template docker: - - image: gcr.io/datadoghq/agent-circleci-runner:v31802788-2dee8fe9 + - image: gcr.io/datadoghq/agent-circleci-runner:v31988376-bfbb3afb environment: USE_SYSTEM_LIBS: "1" working_directory: /go/src/github.com/DataDog/datadog-agent diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 748a75f3fa7f5..4231bd4995a1e 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -163,15 +163,15 @@ variables: # To use images from datadog-agent-buildimages dev branches, set the corresponding # SUFFIX variable to _test_only DATADOG_AGENT_BUILDIMAGES_SUFFIX: "" - DATADOG_AGENT_BUILDIMAGES: v31802788-2dee8fe9 + DATADOG_AGENT_BUILDIMAGES: v31988376-bfbb3afb DATADOG_AGENT_WINBUILDIMAGES_SUFFIX: "" - DATADOG_AGENT_WINBUILDIMAGES: v31802788-2dee8fe9 + DATADOG_AGENT_WINBUILDIMAGES: v31988376-bfbb3afb DATADOG_AGENT_ARMBUILDIMAGES_SUFFIX: "" - DATADOG_AGENT_ARMBUILDIMAGES: v31802788-2dee8fe9 + DATADOG_AGENT_ARMBUILDIMAGES: v31988376-bfbb3afb DATADOG_AGENT_SYSPROBE_BUILDIMAGES_SUFFIX: "" - DATADOG_AGENT_SYSPROBE_BUILDIMAGES: v31802788-2dee8fe9 + DATADOG_AGENT_SYSPROBE_BUILDIMAGES: v31988376-bfbb3afb DATADOG_AGENT_BTF_GEN_BUILDIMAGES_SUFFIX: "" - DATADOG_AGENT_BTF_GEN_BUILDIMAGES: v31802788-2dee8fe9 + 
DATADOG_AGENT_BTF_GEN_BUILDIMAGES: v31988376-bfbb3afb DATADOG_AGENT_BUILDERS: v28719426-b6a4fd9 DATADOG_AGENT_EMBEDDED_PATH: /opt/datadog-agent/embedded @@ -1049,7 +1049,7 @@ workflow: - .gitlab-ci.yml - .gitlab/**/* compare_to: main # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916 - + .on_invoke_tasks_changes: - <<: *if_main_branch - changes: From ee4e4e6e55e373ea537f311661d3e98c7d0fb365 Mon Sep 17 00:00:00 2001 From: AliDatadog <125997632+AliDatadog@users.noreply.github.com> Date: Mon, 15 Apr 2024 11:40:42 +0200 Subject: [PATCH 37/99] [CONTINT-4013] Add tests for the fakeintake options (#24195) * cover WithPort and WithAddress * add unit test for other options * remove WithClock and WithReadyChannel tests * test withretention and test the addr * Revert "test withretention and test the addr" This reverts commit 0e312b49e7acd756d8bc72f81a524ae6fd35ae2a. * make cleanup test more generic * reintroduce the test for WithPort and WithAddr * use localhost instead of url * hardcode 0.0.0.0 as the address --- test/fakeintake/server/server.go | 17 ++-- test/fakeintake/server/server_test.go | 139 ++++++++++++++++++-------- test/fakeintake/server/testhelper.go | 5 +- 3 files changed, 108 insertions(+), 53 deletions(-) diff --git a/test/fakeintake/server/server.go b/test/fakeintake/server/server.go index 0d99161cf1ab4..2ffbd4a9a8e66 100644 --- a/test/fakeintake/server/server.go +++ b/test/fakeintake/server/server.go @@ -54,6 +54,9 @@ func init() { } } +// Option is a function that modifies a Server +type Option func(*Server) + // Server is a struct implementing a fakeintake server type Server struct { server http.Server @@ -75,7 +78,7 @@ type Server struct { // options accept WithPort and WithReadyChan. // Call Server.Start() to start the server in a separate go-routine // If the port is 0, a port number is automatically chosen -func NewServer(options ...func(*Server)) *Server { +func NewServer(options ...Option) *Server { fi := &Server{ urlMutex: sync.RWMutex{}, clock: clock.New(), @@ -133,7 +136,7 @@ func NewServer(options ...func(*Server)) *Server { // WithAddress changes the server host:port. // If host is empty, it will bind to 0.0.0.0 // If the port is empty or 0, a port number is automatically chosen -func WithAddress(addr string) func(*Server) { +func WithAddress(addr string) Option { return func(fi *Server) { if fi.IsRunning() { log.Println("Fake intake is already running. Stop it and try again to change the port.") @@ -145,12 +148,12 @@ func WithAddress(addr string) func(*Server) { // WithPort changes the server port. // If the port is 0, a port number is automatically chosen -func WithPort(port int) func(*Server) { - return WithAddress(fmt.Sprintf(":%d", port)) +func WithPort(port int) Option { + return WithAddress(fmt.Sprintf("0.0.0.0:%d", port)) } // WithReadyChannel assign a boolean channel to get notified when the server is ready -func WithReadyChannel(ready chan bool) func(*Server) { +func WithReadyChannel(ready chan bool) Option { return func(fi *Server) { if fi.IsRunning() { log.Println("Fake intake is already running. Stop it and try again to change the ready channel.") @@ -161,7 +164,7 @@ func WithReadyChannel(ready chan bool) func(*Server) { } // WithClock changes the clock used by the server -func WithClock(clock clock.Clock) func(*Server) { +func WithClock(clock clock.Clock) Option { return func(fi *Server) { if fi.IsRunning() { log.Println("Fake intake is already running. 
Stop it and try again to change the clock.") @@ -172,7 +175,7 @@ func WithClock(clock clock.Clock) func(*Server) { } // WithRetention changes the retention time of payloads in the store -func WithRetention(retention time.Duration) func(*Server) { +func WithRetention(retention time.Duration) Option { return func(fi *Server) { if fi.IsRunning() { log.Println("Fake intake is already running. Stop it and try again to change the ready channel.") diff --git a/test/fakeintake/server/server_test.go b/test/fakeintake/server/server_test.go index 15d42693de8e8..6458f6c48678c 100644 --- a/test/fakeintake/server/server_test.go +++ b/test/fakeintake/server/server_test.go @@ -42,6 +42,41 @@ func TestServer(t *testing.T) { assert.Equal(t, "server not running", err.Error()) }) + for _, tt := range []struct { + name string + opt Option + expectedAddr string + }{ + { + name: "Make sure WithPort sets the port correctly", + opt: WithPort(1234), + expectedAddr: "0.0.0.0:1234", + }, + { + name: "Make sure WithAddress sets the port correctly", + opt: WithAddress("127.0.0.1:3456"), + expectedAddr: "127.0.0.1:3456", + }, + } { + t.Run(tt.name, func(t *testing.T) { + fi := NewServer(tt.opt) + assert.Equal(t, tt.expectedAddr, fi.server.Addr) + fi.Start() + assert.EventuallyWithT(t, func(collect *assert.CollectT) { + assert.True(collect, fi.IsRunning()) + resp, err := http.Get(fmt.Sprintf("http://%s/fakeintake/health", tt.expectedAddr)) + assert.NoError(collect, err) + if err != nil { + return + } + defer resp.Body.Close() + assert.Equal(collect, http.StatusOK, resp.StatusCode) + }, 500*time.Millisecond, 10*time.Millisecond) + err := fi.Stop() + assert.NoError(t, err) + }) + } + t.Run("should run after start", func(t *testing.T) { fi := NewServer(WithClock(clock.NewMock()), WithAddress("127.0.0.1:0")) fi.Start() @@ -303,56 +338,72 @@ func TestServer(t *testing.T) { assert.Empty(t, getResponse20Min.Payloads, "should be empty after cleanup") }) - t.Run("should clean payloads older than 15 minutes and keep recent payloads", func(t *testing.T) { - fi, clock := InitialiseForTests(t) - defer fi.Stop() - - PostSomeFakePayloads(t, fi.URL(), []TestTextPayload{ - { - Endpoint: "/totoro", - Data: "totoro|7|tag:valid,owner:pducolin", - }, - { - Endpoint: "/totoro", - Data: "totoro|5|tag:valid,owner:kiki", - }, - { - Endpoint: "/kiki", - Data: "I am just a poor raw log", - }, - }) + for _, tt := range []struct { + name string + opts []Option + expectedRetention time.Duration + }{ + { + name: "should clean payloads older than 5 minutes and keep recent payloads", + opts: []Option{WithRetention(5 * time.Minute)}, + expectedRetention: 5 * time.Minute, + }, + { + name: "default: should clean payloads older than 15 minutes and keep recent payloads", + expectedRetention: 15 * time.Minute, + }, + } { + t.Run(tt.name, func(t *testing.T) { + fi, clock := InitialiseForTests(t, tt.opts...) 
+ defer fi.Stop() + + PostSomeFakePayloads(t, fi.URL(), []TestTextPayload{ + { + Endpoint: "/totoro", + Data: "totoro|7|tag:valid,owner:pducolin", + }, + { + Endpoint: "/totoro", + Data: "totoro|5|tag:valid,owner:kiki", + }, + { + Endpoint: "/kiki", + Data: "I am just a poor raw log", + }, + }) - clock.Add(10 * time.Minute) + clock.Add(tt.expectedRetention - 1*time.Minute) - PostSomeFakePayloads(t, fi.URL(), []TestTextPayload{ - { - Endpoint: "/totoro", - Data: "totoro|7|tag:valid,owner:ponyo", - }, - { - Endpoint: "/totoro", - Data: "totoro|5|tag:valid,owner:mei", - }, - }) + PostSomeFakePayloads(t, fi.URL(), []TestTextPayload{ + { + Endpoint: "/totoro", + Data: "totoro|7|tag:valid,owner:ponyo", + }, + { + Endpoint: "/totoro", + Data: "totoro|5|tag:valid,owner:mei", + }, + }) - response10Min, err := http.Get(fi.URL() + "/fakeintake/payloads?endpoint=/totoro") - require.NoError(t, err, "Error on GET request") - defer response10Min.Body.Close() - var getResponse10Min api.APIFakeIntakePayloadsRawGETResponse - json.NewDecoder(response10Min.Body).Decode(&getResponse10Min) - assert.Len(t, getResponse10Min.Payloads, 4, "should contain 4 elements before cleanup") + completeResponse, err := http.Get(fi.URL() + "/fakeintake/payloads?endpoint=/totoro") + require.NoError(t, err, "Error on GET request") + defer completeResponse.Body.Close() + var getCompleteResponse api.APIFakeIntakePayloadsRawGETResponse + json.NewDecoder(completeResponse.Body).Decode(&getCompleteResponse) + assert.Len(t, getCompleteResponse.Payloads, 4, "should contain 4 elements before cleanup") - clock.Add(10 * time.Minute) + clock.Add(tt.expectedRetention) - response20Min, err := http.Get(fi.URL() + "/fakeintake/payloads?endpoint=/totoro") - require.NoError(t, err, "Error on GET request") - defer response20Min.Body.Close() - var getResponse20Min api.APIFakeIntakePayloadsRawGETResponse - json.NewDecoder(response20Min.Body).Decode(&getResponse20Min) - assert.Len(t, getResponse20Min.Payloads, 2, "should contain 2 elements after cleanup of only older elements") + cleanedResponse, err := http.Get(fi.URL() + "/fakeintake/payloads?endpoint=/totoro") + require.NoError(t, err, "Error on GET request") + defer cleanedResponse.Body.Close() + var getCleanedResponse api.APIFakeIntakePayloadsRawGETResponse + json.NewDecoder(cleanedResponse.Body).Decode(&getCleanedResponse) + assert.Len(t, getCleanedResponse.Payloads, 2, "should contain 2 elements after cleanup of only older elements") - fi.Stop() - }) + fi.Stop() + }) + } t.Run("should clean json parsed payloads", func(t *testing.T) { fi, clock := InitialiseForTests(t) diff --git a/test/fakeintake/server/testhelper.go b/test/fakeintake/server/testhelper.go index 74f4c9883104f..033de1030bbd0 100644 --- a/test/fakeintake/server/testhelper.go +++ b/test/fakeintake/server/testhelper.go @@ -17,11 +17,12 @@ import ( // InitialiseForTests starts a server with a mock clock and waits for it to be ready. // It returns the mock clock and the server. Use defer server.Stop() to stop the server // after calling this function. -func InitialiseForTests(t *testing.T) (*Server, *clock.Mock) { +func InitialiseForTests(t *testing.T, opts ...Option) (*Server, *clock.Mock) { t.Helper() ready := make(chan bool, 1) mockClock := clock.NewMock() - fi := NewServer(WithReadyChannel(ready), WithClock(mockClock), WithAddress("127.0.0.1:0")) + opts = append(opts, WithReadyChannel(ready), WithClock(mockClock), WithAddress("127.0.0.1:0")) + fi := NewServer(opts...) 
fi.Start() isReady := <-ready require.True(t, isReady) From 0b25a74d47c027352ff64755819e2e2051e35df7 Mon Sep 17 00:00:00 2001 From: Raphael Gavache Date: Mon, 15 Apr 2024 11:55:39 +0200 Subject: [PATCH 38/99] Fleet rename (#24667) * First rename * finish rename * fix helper path --- cmd/updater/subcommands/bootstrap/command.go | 6 +- cmd/updater/subcommands/purge/command.go | 8 +- comp/updater/localapi/component.go | 4 +- .../localapi/localapiimpl/local_api.go | 6 +- comp/updater/localapiclient/component.go | 4 +- .../localapiclientimpl/localapiclient.go | 6 +- comp/updater/updater/component.go | 4 +- comp/updater/updater/updaterimpl/updater.go | 8 +- pkg/{updater => installer}/download.go | 4 +- pkg/{updater => installer}/download_test.go | 4 +- pkg/{updater => installer}/errors/errors.go | 46 +-- .../errors/errors_test.go | 10 +- pkg/{updater => installer}/fixtures/README.md | 0 .../oci-layout-simple-v1-linux2-amd128.tar | Bin .../fixtures/oci-layout-simple-v1.tar | Bin .../fixtures/oci-layout-simple-v2.tar | Bin .../simple-v1-config/datadog.yaml.example | 0 .../fixtures/simple-v1/executable.sh | 0 .../fixtures/simple-v1/file.txt | 0 .../simple-v2-config/datadog.yaml.example | 0 .../fixtures/simple-v2/executable-new.sh | 0 pkg/{updater => installer}/install.go | 56 ++-- pkg/{updater => installer}/install_test.go | 26 +- .../updater.go => installer/installer.go} | 268 +++++++++--------- .../installer_test.go} | 76 ++--- pkg/{updater => installer}/local_api.go | 52 ++-- pkg/{updater => installer}/local_api_test.go | 12 +- pkg/{updater => installer}/remote_config.go | 4 +- .../remote_config_test.go | 2 +- pkg/{updater => installer}/repository/link.go | 0 .../repository/link_test.go | 0 .../repository/link_windows.go | 0 .../repository/repositories.go | 0 .../repository/repositories_test.go | 2 +- .../repository/repository.go | 2 +- .../repository/repository_test.go | 2 +- .../service/apm_inject.go | 0 .../service/apm_inject_test.go | 0 .../service/apm_inject_windows.go | 0 .../service/cmd_executor.go | 2 +- .../service/cmd_executor_windows.go | 0 .../service/datadog_agent.go | 0 .../service/datadog_agent_windows.go | 0 .../service/datadog_installer.go | 0 .../service/datadog_installer_windows.go | 0 pkg/{updater => installer}/service/docker.go | 4 +- .../service/docker_test.go | 0 pkg/installer/service/helper/go.mod | 3 + .../service/helper/main.go | 0 pkg/{updater => installer}/service/systemd.go | 1 - .../service/systemd_test.go | 5 +- pkg/{updater => installer}/telemetry.go | 2 +- pkg/updater/service/helper/go.mod | 3 - tasks/installer.py | 2 +- 54 files changed, 317 insertions(+), 317 deletions(-) rename pkg/{updater => installer}/download.go (97%) rename pkg/{updater => installer}/download_test.go (98%) rename pkg/{updater => installer}/errors/errors.go (51%) rename pkg/{updater => installer}/errors/errors_test.go (81%) rename pkg/{updater => installer}/fixtures/README.md (100%) rename pkg/{updater => installer}/fixtures/oci-layout-simple-v1-linux2-amd128.tar (100%) rename pkg/{updater => installer}/fixtures/oci-layout-simple-v1.tar (100%) rename pkg/{updater => installer}/fixtures/oci-layout-simple-v2.tar (100%) rename pkg/{updater => installer}/fixtures/simple-v1-config/datadog.yaml.example (100%) rename pkg/{updater => installer}/fixtures/simple-v1/executable.sh (100%) rename pkg/{updater => installer}/fixtures/simple-v1/file.txt (100%) rename pkg/{updater => installer}/fixtures/simple-v2-config/datadog.yaml.example (100%) rename pkg/{updater => 
installer}/fixtures/simple-v2/executable-new.sh (100%) rename pkg/{updater => installer}/install.go (83%) rename pkg/{updater => installer}/install_test.go (90%) rename pkg/{updater/updater.go => installer/installer.go} (63%) rename pkg/{updater/updater_test.go => installer/installer_test.go} (77%) rename pkg/{updater => installer}/local_api.go (85%) rename pkg/{updater => installer}/local_api_test.go (93%) rename pkg/{updater => installer}/remote_config.go (98%) rename pkg/{updater => installer}/remote_config_test.go (99%) rename pkg/{updater => installer}/repository/link.go (100%) rename pkg/{updater => installer}/repository/link_test.go (100%) rename pkg/{updater => installer}/repository/link_windows.go (100%) rename pkg/{updater => installer}/repository/repositories.go (100%) rename pkg/{updater => installer}/repository/repositories_test.go (97%) rename pkg/{updater => installer}/repository/repository.go (99%) rename pkg/{updater => installer}/repository/repository_test.go (99%) rename pkg/{updater => installer}/service/apm_inject.go (100%) rename pkg/{updater => installer}/service/apm_inject_test.go (100%) rename pkg/{updater => installer}/service/apm_inject_windows.go (100%) rename pkg/{updater => installer}/service/cmd_executor.go (96%) rename pkg/{updater => installer}/service/cmd_executor_windows.go (100%) rename pkg/{updater => installer}/service/datadog_agent.go (100%) rename pkg/{updater => installer}/service/datadog_agent_windows.go (100%) rename pkg/{updater => installer}/service/datadog_installer.go (100%) rename pkg/{updater => installer}/service/datadog_installer_windows.go (100%) rename pkg/{updater => installer}/service/docker.go (96%) rename pkg/{updater => installer}/service/docker_test.go (100%) create mode 100644 pkg/installer/service/helper/go.mod rename pkg/{updater => installer}/service/helper/main.go (100%) rename pkg/{updater => installer}/service/systemd.go (97%) rename pkg/{updater => installer}/service/systemd_test.go (90%) rename pkg/{updater => installer}/telemetry.go (99%) delete mode 100644 pkg/updater/service/helper/go.mod diff --git a/cmd/updater/subcommands/bootstrap/command.go b/cmd/updater/subcommands/bootstrap/command.go index 3a8bc79b1115e..c029932fa7028 100644 --- a/cmd/updater/subcommands/bootstrap/command.go +++ b/cmd/updater/subcommands/bootstrap/command.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -// Package bootstrap implements 'updater bootstrap'. +// Package bootstrap implements 'installer bootstrap'. 
package bootstrap import ( @@ -18,7 +18,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/log/logimpl" "github.com/DataDog/datadog-agent/comp/core/secrets" "github.com/DataDog/datadog-agent/comp/core/sysprobeconfig/sysprobeconfigimpl" - "github.com/DataDog/datadog-agent/pkg/updater" + "github.com/DataDog/datadog-agent/pkg/installer" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/spf13/cobra" @@ -81,7 +81,7 @@ func bootstrap(ctx context.Context, params *cliParams, config config.Component) if params.url != "" { url = params.url } - return updater.BootstrapURL(ctx, url, config) + return installer.BootstrapURL(ctx, url, config) } func packageURL(site string, pkg string, version string) string { diff --git a/cmd/updater/subcommands/purge/command.go b/cmd/updater/subcommands/purge/command.go index c63efe5407f84..406936605f610 100644 --- a/cmd/updater/subcommands/purge/command.go +++ b/cmd/updater/subcommands/purge/command.go @@ -3,14 +3,14 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -// Package purge implements 'updater purge'. +// Package purge implements 'installer purge'. package purge import ( "github.com/DataDog/datadog-agent/cmd/updater/command" "github.com/DataDog/datadog-agent/comp/core" "github.com/DataDog/datadog-agent/comp/core/log/logimpl" - "github.com/DataDog/datadog-agent/pkg/updater" + "github.com/DataDog/datadog-agent/pkg/installer" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/spf13/cobra" "go.uber.org/fx" @@ -20,7 +20,7 @@ import ( func Commands(_ *command.GlobalParams) []*cobra.Command { runCmd := &cobra.Command{ Use: "purge", - Short: "Purge updater packages", + Short: "Purge installer packages", Long: ``, RunE: func(cmd *cobra.Command, args []string) error { return purgeFxWrapper() @@ -39,6 +39,6 @@ func purgeFxWrapper() error { } func purge() error { - updater.Purge() + installer.Purge() return nil } diff --git a/comp/updater/localapi/component.go b/comp/updater/localapi/component.go index c063facfc52f2..756bc42845bb0 100644 --- a/comp/updater/localapi/component.go +++ b/comp/updater/localapi/component.go @@ -7,12 +7,12 @@ package localapi import ( - "github.com/DataDog/datadog-agent/pkg/updater" + "github.com/DataDog/datadog-agent/pkg/installer" ) // team: fleet // Component is the interface for the updater local api component. type Component interface { - updater.LocalAPI + installer.LocalAPI } diff --git a/comp/updater/localapi/localapiimpl/local_api.go b/comp/updater/localapi/localapiimpl/local_api.go index d37559ccb7b96..073d1c7bb0538 100644 --- a/comp/updater/localapi/localapiimpl/local_api.go +++ b/comp/updater/localapi/localapiimpl/local_api.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -// Package localapiimpl implements the updater local api component. +// Package localapiimpl implements the installer local api component. 
package localapiimpl import ( @@ -14,7 +14,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/log" "github.com/DataDog/datadog-agent/comp/updater/localapi" updatercomp "github.com/DataDog/datadog-agent/comp/updater/updater" - "github.com/DataDog/datadog-agent/pkg/updater" + "github.com/DataDog/datadog-agent/pkg/installer" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -34,7 +34,7 @@ type dependencies struct { } func newLocalAPIComponent(lc fx.Lifecycle, dependencies dependencies) (localapi.Component, error) { - localAPI, err := updater.NewLocalAPI(dependencies.Updater) + localAPI, err := installer.NewLocalAPI(dependencies.Updater) if err != nil { return nil, fmt.Errorf("could not create local API: %w", err) } diff --git a/comp/updater/localapiclient/component.go b/comp/updater/localapiclient/component.go index 2c45f9fb8aa1d..c3ca5ecd5ba87 100644 --- a/comp/updater/localapiclient/component.go +++ b/comp/updater/localapiclient/component.go @@ -6,11 +6,11 @@ // Package localapiclient provides the local API client component. package localapiclient -import "github.com/DataDog/datadog-agent/pkg/updater" +import "github.com/DataDog/datadog-agent/pkg/installer" // team: fleet // Component is the component type. type Component interface { - updater.LocalAPIClient + installer.LocalAPIClient } diff --git a/comp/updater/localapiclient/localapiclientimpl/localapiclient.go b/comp/updater/localapiclient/localapiclientimpl/localapiclient.go index 0f2d1ecb91d65..79b2eec164f75 100644 --- a/comp/updater/localapiclient/localapiclientimpl/localapiclient.go +++ b/comp/updater/localapiclient/localapiclientimpl/localapiclient.go @@ -10,11 +10,11 @@ import ( "go.uber.org/fx" "github.com/DataDog/datadog-agent/comp/updater/localapiclient" - "github.com/DataDog/datadog-agent/pkg/updater" + "github.com/DataDog/datadog-agent/pkg/installer" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) -// Module is the fx module for the updater local api client. +// Module is the fx module for the installer local api client. func Module() fxutil.Module { return fxutil.Component( fx.Provide(newLocalAPIClientComponent), @@ -22,5 +22,5 @@ func Module() fxutil.Module { } func newLocalAPIClientComponent() localapiclient.Component { - return updater.NewLocalAPIClient() + return installer.NewLocalAPIClient() } diff --git a/comp/updater/updater/component.go b/comp/updater/updater/component.go index efe0dad167b85..e09186c9d6c43 100644 --- a/comp/updater/updater/component.go +++ b/comp/updater/updater/component.go @@ -7,12 +7,12 @@ package updater import ( - "github.com/DataDog/datadog-agent/pkg/updater" + "github.com/DataDog/datadog-agent/pkg/installer" ) // team: fleet // Component is the interface for the updater component. 
type Component interface { - updater.Updater + installer.Installer } diff --git a/comp/updater/updater/updaterimpl/updater.go b/comp/updater/updater/updaterimpl/updater.go index 1e96c8607c601..3bbf6055b198c 100644 --- a/comp/updater/updater/updaterimpl/updater.go +++ b/comp/updater/updater/updaterimpl/updater.go @@ -16,7 +16,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/log" "github.com/DataDog/datadog-agent/comp/remote-config/rcservice" updatercomp "github.com/DataDog/datadog-agent/comp/updater/updater" - "github.com/DataDog/datadog-agent/pkg/updater" + "github.com/DataDog/datadog-agent/pkg/installer" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/optional" ) @@ -46,10 +46,10 @@ func newUpdaterComponent(lc fx.Lifecycle, dependencies dependencies) (updatercom if !ok { return nil, errRemoteConfigRequired } - updater, err := updater.NewUpdater(remoteConfig, dependencies.Config) + installer, err := installer.NewInstaller(remoteConfig, dependencies.Config) if err != nil { return nil, fmt.Errorf("could not create updater: %w", err) } - lc.Append(fx.Hook{OnStart: updater.Start, OnStop: updater.Stop}) - return updater, nil + lc.Append(fx.Hook{OnStart: installer.Start, OnStop: installer.Stop}) + return installer, nil } diff --git a/pkg/updater/download.go b/pkg/installer/download.go similarity index 97% rename from pkg/updater/download.go rename to pkg/installer/download.go index 5d3c65f6c28dd..14b40cdd2aa04 100644 --- a/pkg/updater/download.go +++ b/pkg/installer/download.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -package updater +package installer import ( "context" @@ -41,7 +41,7 @@ type downloadedPackage struct { Version string } -// downloader is the downloader used by the updater to download packages. +// downloader is the downloader used by the installer to download packages. type downloader struct { keychain authn.Keychain client *http.Client diff --git a/pkg/updater/download_test.go b/pkg/installer/download_test.go similarity index 98% rename from pkg/updater/download_test.go rename to pkg/installer/download_test.go index 8003e2de10f9b..dee8facb1bd0b 100644 --- a/pkg/updater/download_test.go +++ b/pkg/installer/download_test.go @@ -3,10 +3,10 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -// for now the updater is not supported on windows +// for now the installer is not supported on windows //go:build !windows -package updater +package installer import ( "context" diff --git a/pkg/updater/errors/errors.go b/pkg/installer/errors/errors.go similarity index 51% rename from pkg/updater/errors/errors.go rename to pkg/installer/errors/errors.go index ae143de78ede5..589d8179a2fa8 100644 --- a/pkg/updater/errors/errors.go +++ b/pkg/installer/errors/errors.go @@ -3,18 +3,18 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -// Package errors contains errors used by the updater. +// Package errors contains errors used by the installer. package errors import ( "errors" ) -// UpdaterErrorCode is an error code used by the updater. -type UpdaterErrorCode uint64 +// InstallerErrorCode is an error code used by the installer. 
+type InstallerErrorCode uint64 const ( - errUnknown UpdaterErrorCode = iota // This error code is purposefully not exported + errUnknown InstallerErrorCode = iota // This error code is purposefully not exported // ErrInstallFailed is the code for an install failure. ErrInstallFailed // ErrDownloadFailed is the code for a download failure. @@ -29,55 +29,55 @@ const ( ErrUpdateExperimentFailed ) -// UpdaterError is an error type used by the updater. -type UpdaterError struct { +// InstallerError is an error type used by the installer. +type InstallerError struct { err error - code UpdaterErrorCode + code InstallerErrorCode } // Error returns the error message. -func (e UpdaterError) Error() string { +func (e InstallerError) Error() string { return e.err.Error() } // Unwrap returns the wrapped error. -func (e UpdaterError) Unwrap() error { +func (e InstallerError) Unwrap() error { return e.err } // Is implements the Is method of the errors.Is interface. -func (e UpdaterError) Is(target error) bool { - _, ok := target.(*UpdaterError) +func (e InstallerError) Is(target error) bool { + _, ok := target.(*InstallerError) return ok } -// Code returns the error code of the updater error. -func (e UpdaterError) Code() UpdaterErrorCode { +// Code returns the error code of the installer error. +func (e InstallerError) Code() InstallerErrorCode { return e.code } -// Wrap wraps the given error with an updater error. -// If the given error is already an updater error, it is not wrapped and -// left as it is. Only the deepest UpdaterError remains. -func Wrap(errCode UpdaterErrorCode, err error) error { - if errors.Is(err, &UpdaterError{}) { +// Wrap wraps the given error with an installer error. +// If the given error is already an installer error, it is not wrapped and +// left as it is. Only the deepest InstallerError remains. +func Wrap(errCode InstallerErrorCode, err error) error { + if errors.Is(err, &InstallerError{}) { return err } - return &UpdaterError{ + return &InstallerError{ err: err, code: errCode, } } -// From returns a new UpdaterError from the given error. -func From(err error) *UpdaterError { +// From returns a new InstallerError from the given error. 
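Reviewer aside (not part of the patch): a minimal usage sketch of the renamed error helpers, relying only on the exported names visible in the hunks above (Wrap, From, Code, ErrDownloadFailed, ErrInstallFailed); the wrapping function and import alias are invented for illustration.

    package example

    import (
        "fmt"

        installererrors "github.com/DataDog/datadog-agent/pkg/installer/errors"
    )

    func download() error {
        // Tag a plain error with an installer error code.
        err := installererrors.Wrap(installererrors.ErrDownloadFailed, fmt.Errorf("registry unreachable"))
        // Re-wrapping keeps the deepest InstallerError, so the original code survives.
        err = installererrors.Wrap(installererrors.ErrInstallFailed, err)
        if installererrors.From(err).Code() == installererrors.ErrDownloadFailed {
            // callers can branch on the code, e.g. to retry the download
        }
        return err
    }
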
+func From(err error) *InstallerError { if err == nil { return nil } - e, ok := err.(*UpdaterError) + e, ok := err.(*InstallerError) if !ok { - return &UpdaterError{ + return &InstallerError{ err: err, code: errUnknown, } diff --git a/pkg/updater/errors/errors_test.go b/pkg/installer/errors/errors_test.go similarity index 81% rename from pkg/updater/errors/errors_test.go rename to pkg/installer/errors/errors_test.go index df38d5fc04922..44a5cdc584b95 100644 --- a/pkg/updater/errors/errors_test.go +++ b/pkg/installer/errors/errors_test.go @@ -13,12 +13,12 @@ import ( ) func TestFrom(t *testing.T) { - var err error = &UpdaterError{ + var err error = &InstallerError{ err: fmt.Errorf("test: test"), code: ErrDownloadFailed, } taskErr := From(err) - assert.Equal(t, taskErr, &UpdaterError{ + assert.Equal(t, taskErr, &InstallerError{ err: fmt.Errorf("test: test"), code: ErrDownloadFailed, }) @@ -29,15 +29,15 @@ func TestFrom(t *testing.T) { func TestWrap(t *testing.T) { err := fmt.Errorf("test: test") taskErr := Wrap(ErrDownloadFailed, err) - assert.Equal(t, taskErr, &UpdaterError{ + assert.Equal(t, taskErr, &InstallerError{ err: err, code: ErrDownloadFailed, }) // Check that Wrap doesn't change anything if the error - // is already an UpdaterError + // is already an InstallerError taskErr2 := Wrap(ErrInstallFailed, taskErr) - assert.Equal(t, taskErr2, &UpdaterError{ + assert.Equal(t, taskErr2, &InstallerError{ err: err, code: ErrDownloadFailed, }) diff --git a/pkg/updater/fixtures/README.md b/pkg/installer/fixtures/README.md similarity index 100% rename from pkg/updater/fixtures/README.md rename to pkg/installer/fixtures/README.md diff --git a/pkg/updater/fixtures/oci-layout-simple-v1-linux2-amd128.tar b/pkg/installer/fixtures/oci-layout-simple-v1-linux2-amd128.tar similarity index 100% rename from pkg/updater/fixtures/oci-layout-simple-v1-linux2-amd128.tar rename to pkg/installer/fixtures/oci-layout-simple-v1-linux2-amd128.tar diff --git a/pkg/updater/fixtures/oci-layout-simple-v1.tar b/pkg/installer/fixtures/oci-layout-simple-v1.tar similarity index 100% rename from pkg/updater/fixtures/oci-layout-simple-v1.tar rename to pkg/installer/fixtures/oci-layout-simple-v1.tar diff --git a/pkg/updater/fixtures/oci-layout-simple-v2.tar b/pkg/installer/fixtures/oci-layout-simple-v2.tar similarity index 100% rename from pkg/updater/fixtures/oci-layout-simple-v2.tar rename to pkg/installer/fixtures/oci-layout-simple-v2.tar diff --git a/pkg/updater/fixtures/simple-v1-config/datadog.yaml.example b/pkg/installer/fixtures/simple-v1-config/datadog.yaml.example similarity index 100% rename from pkg/updater/fixtures/simple-v1-config/datadog.yaml.example rename to pkg/installer/fixtures/simple-v1-config/datadog.yaml.example diff --git a/pkg/updater/fixtures/simple-v1/executable.sh b/pkg/installer/fixtures/simple-v1/executable.sh similarity index 100% rename from pkg/updater/fixtures/simple-v1/executable.sh rename to pkg/installer/fixtures/simple-v1/executable.sh diff --git a/pkg/updater/fixtures/simple-v1/file.txt b/pkg/installer/fixtures/simple-v1/file.txt similarity index 100% rename from pkg/updater/fixtures/simple-v1/file.txt rename to pkg/installer/fixtures/simple-v1/file.txt diff --git a/pkg/updater/fixtures/simple-v2-config/datadog.yaml.example b/pkg/installer/fixtures/simple-v2-config/datadog.yaml.example similarity index 100% rename from pkg/updater/fixtures/simple-v2-config/datadog.yaml.example rename to pkg/installer/fixtures/simple-v2-config/datadog.yaml.example diff --git 
a/pkg/updater/fixtures/simple-v2/executable-new.sh b/pkg/installer/fixtures/simple-v2/executable-new.sh similarity index 100% rename from pkg/updater/fixtures/simple-v2/executable-new.sh rename to pkg/installer/fixtures/simple-v2/executable-new.sh diff --git a/pkg/updater/install.go b/pkg/installer/install.go similarity index 83% rename from pkg/updater/install.go rename to pkg/installer/install.go index fe2d04aff91c1..db8a332a5afde 100644 --- a/pkg/updater/install.go +++ b/pkg/installer/install.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -package updater +package installer import ( "archive/tar" @@ -18,8 +18,8 @@ import ( oci "github.com/google/go-containerregistry/pkg/v1" "github.com/google/go-containerregistry/pkg/v1/types" - "github.com/DataDog/datadog-agent/pkg/updater/repository" - "github.com/DataDog/datadog-agent/pkg/updater/service" + "github.com/DataDog/datadog-agent/pkg/installer/repository" + "github.com/DataDog/datadog-agent/pkg/installer/service" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -34,37 +34,37 @@ const ( packageDatadogInstaller = "datadog-installer" ) -type installer struct { +type packageManager struct { repositories *repository.Repositories configsDir string installLock sync.Mutex } -func newInstaller(repositories *repository.Repositories) *installer { - return &installer{ +func newPackageManager(repositories *repository.Repositories) *packageManager { + return &packageManager{ repositories: repositories, configsDir: defaultConfigsDir, } } -func (i *installer) installStable(pkg string, version string, image oci.Image) error { +func (m *packageManager) installStable(pkg string, version string, image oci.Image) error { tmpDir, err := os.MkdirTemp("", "") if err != nil { return fmt.Errorf("could not create temporary directory: %w", err) } defer os.RemoveAll(tmpDir) - configDir := filepath.Join(i.configsDir, pkg) + configDir := filepath.Join(m.configsDir, pkg) err = extractPackageLayers(image, configDir, tmpDir) if err != nil { return fmt.Errorf("could not extract package layers: %w", err) } - err = i.repositories.Create(pkg, version, tmpDir) + err = m.repositories.Create(pkg, version, tmpDir) if err != nil { return fmt.Errorf("could not create repository: %w", err) } - i.installLock.Lock() - defer i.installLock.Unlock() + m.installLock.Lock() + defer m.installLock.Unlock() switch pkg { case packageDatadogAgent: return service.SetupAgentUnits() @@ -77,46 +77,46 @@ func (i *installer) installStable(pkg string, version string, image oci.Image) e } } -func (i *installer) installExperiment(pkg string, version string, image oci.Image) error { +func (m *packageManager) installExperiment(pkg string, version string, image oci.Image) error { tmpDir, err := os.MkdirTemp("", "") if err != nil { return fmt.Errorf("could not create temporary directory: %w", err) } defer os.RemoveAll(tmpDir) - configDir := filepath.Join(i.configsDir, pkg) + configDir := filepath.Join(m.configsDir, pkg) err = extractPackageLayers(image, configDir, tmpDir) if err != nil { return fmt.Errorf("could not extract package layers: %w", err) } - repository := i.repositories.Get(pkg) + repository := m.repositories.Get(pkg) err = repository.SetExperiment(version, tmpDir) if err != nil { return fmt.Errorf("could not set experiment: %w", err) } - return i.startExperiment(pkg) + return m.startExperiment(pkg) } -func (i *installer) promoteExperiment(pkg string) error { - repository := 
i.repositories.Get(pkg) +func (m *packageManager) promoteExperiment(pkg string) error { + repository := m.repositories.Get(pkg) err := repository.PromoteExperiment() if err != nil { return fmt.Errorf("could not promote experiment: %w", err) } - return i.stopExperiment(pkg) + return m.stopExperiment(pkg) } -func (i *installer) uninstallExperiment(pkg string) error { - repository := i.repositories.Get(pkg) +func (m *packageManager) uninstallExperiment(pkg string) error { + repository := m.repositories.Get(pkg) err := repository.DeleteExperiment() if err != nil { return fmt.Errorf("could not delete experiment: %w", err) } - return i.stopExperiment(pkg) + return m.stopExperiment(pkg) } -func (i *installer) startExperiment(pkg string) error { - i.installLock.Lock() - defer i.installLock.Unlock() +func (m *packageManager) startExperiment(pkg string) error { + m.installLock.Lock() + defer m.installLock.Unlock() switch pkg { case packageDatadogAgent: return service.StartAgentExperiment() @@ -127,9 +127,9 @@ func (i *installer) startExperiment(pkg string) error { } } -func (i *installer) stopExperiment(pkg string) error { - i.installLock.Lock() - defer i.installLock.Unlock() +func (m *packageManager) stopExperiment(pkg string) error { + m.installLock.Lock() + defer m.installLock.Unlock() switch pkg { case packageDatadogAgent: return service.StopAgentExperiment() @@ -222,7 +222,7 @@ func extractTarArchive(reader io.Reader, destinationPath string, maxSize int64) return fmt.Errorf("could not create symlink: %w", err) } case tar.TypeLink: - // we currently don't support hard links in the updater + // we currently don't support hard links in the installer default: log.Warnf("Unsupported tar entry type %d for %s", header.Typeflag, header.Name) } diff --git a/pkg/updater/install_test.go b/pkg/installer/install_test.go similarity index 90% rename from pkg/updater/install_test.go rename to pkg/installer/install_test.go index 49c8080608562..2ab477b3857b3 100644 --- a/pkg/updater/install_test.go +++ b/pkg/installer/install_test.go @@ -3,10 +3,10 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
-// for now the updater is not supported on windows +// for now the installer is not supported on windows //go:build !windows -package updater +package installer import ( "bytes" @@ -19,7 +19,7 @@ import ( "github.com/stretchr/testify/assert" - "github.com/DataDog/datadog-agent/pkg/updater/repository" + "github.com/DataDog/datadog-agent/pkg/installer/repository" ) func assertEqualFS(t *testing.T, expected fs.FS, actual fs.FS) { @@ -72,28 +72,28 @@ func fsContainsAll(a fs.FS, b fs.FS) error { }) } -type testInstaller struct { - installer +type testPackageManager struct { + packageManager } -func newTestInstaller(t *testing.T) *testInstaller { +func newTestPackageManager(t *testing.T) *testPackageManager { repositories := repository.NewRepositories(t.TempDir(), t.TempDir()) - return &testInstaller{ - installer{ + return &testPackageManager{ + packageManager{ repositories: repositories, configsDir: t.TempDir(), }, } } -func (i *testInstaller) ConfigFS(f fixture) fs.FS { +func (i *testPackageManager) ConfigFS(f fixture) fs.FS { return os.DirFS(filepath.Join(i.configsDir, f.pkg)) } func TestInstallStable(t *testing.T) { s := newTestFixturesServer(t) defer s.Close() - installer := newTestInstaller(t) + installer := newTestPackageManager(t) err := installer.installStable(fixtureSimpleV1.pkg, fixtureSimpleV1.version, s.Image(fixtureSimpleV1)) assert.NoError(t, err) @@ -109,7 +109,7 @@ func TestInstallStable(t *testing.T) { func TestInstallExperiment(t *testing.T) { s := newTestFixturesServer(t) defer s.Close() - installer := newTestInstaller(t) + installer := newTestPackageManager(t) err := installer.installStable(fixtureSimpleV1.pkg, fixtureSimpleV1.version, s.Image(fixtureSimpleV1)) assert.NoError(t, err) @@ -128,7 +128,7 @@ func TestInstallExperiment(t *testing.T) { func TestInstallPromoteExperiment(t *testing.T) { s := newTestFixturesServer(t) defer s.Close() - installer := newTestInstaller(t) + installer := newTestPackageManager(t) err := installer.installStable(fixtureSimpleV1.pkg, fixtureSimpleV1.version, s.Image(fixtureSimpleV1)) assert.NoError(t, err) @@ -148,7 +148,7 @@ func TestInstallPromoteExperiment(t *testing.T) { func TestUninstallExperiment(t *testing.T) { s := newTestFixturesServer(t) defer s.Close() - installer := newTestInstaller(t) + installer := newTestPackageManager(t) err := installer.installStable(fixtureSimpleV1.pkg, fixtureSimpleV1.version, s.Image(fixtureSimpleV1)) assert.NoError(t, err) diff --git a/pkg/updater/updater.go b/pkg/installer/installer.go similarity index 63% rename from pkg/updater/updater.go rename to pkg/installer/installer.go index cd656fd25c9ca..a502cd9cbb793 100644 --- a/pkg/updater/updater.go +++ b/pkg/installer/installer.go @@ -3,8 +3,8 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -// Package updater implements the updater. -package updater +// Package installer implements the installer. 
+package installer import ( "context" @@ -22,10 +22,10 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/pkg/config/remote/client" + installerErrors "github.com/DataDog/datadog-agent/pkg/installer/errors" + "github.com/DataDog/datadog-agent/pkg/installer/repository" + "github.com/DataDog/datadog-agent/pkg/installer/service" pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" - updaterErrors "github.com/DataDog/datadog-agent/pkg/updater/errors" - "github.com/DataDog/datadog-agent/pkg/updater/repository" - "github.com/DataDog/datadog-agent/pkg/updater/service" "github.com/DataDog/datadog-agent/pkg/util/filesystem" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -46,8 +46,8 @@ var ( fsDisk = filesystem.NewDisk() ) -// Updater is the updater used to update packages. -type Updater interface { +// Installer is the datadog packages installer. +type Installer interface { Start(ctx context.Context) error Stop(ctx context.Context) error @@ -61,14 +61,14 @@ type Updater interface { GetState() (map[string]repository.State, error) } -type updaterImpl struct { +type installerImpl struct { m sync.Mutex stopChan chan struct{} - repositories *repository.Repositories - downloader *downloader - installer *installer - telemetry *telemetry + repositories *repository.Repositories + downloader *downloader + packageManager *packageManager + telemetry *telemetry remoteUpdates bool rc *remoteConfig @@ -82,27 +82,27 @@ type disk interface { GetUsage(path string) (*filesystem.DiskUsage, error) } -// BootstrapURL bootstraps the updater with the given package. +// BootstrapURL installs the given package from an URL. func BootstrapURL(ctx context.Context, url string, config config.Reader) error { rc := newNoopRemoteConfig() - u, err := newUpdater(rc, defaultRepositoriesPath, defaultLocksPath, config) + i, err := newInstaller(rc, defaultRepositoriesPath, defaultLocksPath, config) if err != nil { - return fmt.Errorf("could not create updater: %w", err) + return fmt.Errorf("could not create installer: %w", err) } - err = u.Start(ctx) + err = i.Start(ctx) if err != nil { - return fmt.Errorf("could not start updater: %w", err) + return fmt.Errorf("could not start installer: %w", err) } defer func() { - err := u.Stop(ctx) + err := i.Stop(ctx) if err != nil { - log.Errorf("could not stop updater: %v", err) + log.Errorf("could not stop installer: %v", err) } }() - return u.BootstrapURL(ctx, url) + return i.BootstrapURL(ctx, url) } -// Purge removes files installed by the updater +// Purge removes files installed by the installer func Purge() { purge(defaultLocksPath, defaultRepositoriesPath) } @@ -110,7 +110,7 @@ func Purge() { func purge(locksPath, repositoryPath string) { service.RemoveAgentUnits() if err := service.RemoveAPMInjector(); err != nil { - log.Warnf("updater: could not remove APM injector: %v", err) + log.Warnf("installer: could not remove APM injector: %v", err) } cleanDir(locksPath, os.RemoveAll) cleanDir(repositoryPath, service.RemoveAll) @@ -119,28 +119,28 @@ func purge(locksPath, repositoryPath string) { func cleanDir(dir string, cleanFunc func(string) error) { entries, err := os.ReadDir(dir) if err != nil { - log.Warnf("updater: could not read directory %s: %v", dir, err) + log.Warnf("installer: could not read directory %s: %v", dir, err) } for _, entry := range entries { path := filepath.Join(dir, entry.Name()) err := cleanFunc(path) if err != nil { - log.Warnf("updater: could not remove %s: %v", path, err) + log.Warnf("installer: could not remove 
%s: %v", path, err) } } } -// NewUpdater returns a new Updater. -func NewUpdater(rcFetcher client.ConfigFetcher, config config.Reader) (Updater, error) { +// NewInstaller returns a new Installer. +func NewInstaller(rcFetcher client.ConfigFetcher, config config.Reader) (Installer, error) { rc, err := newRemoteConfig(rcFetcher) if err != nil { return nil, fmt.Errorf("could not create remote config client: %w", err) } - return newUpdater(rc, defaultRepositoriesPath, defaultLocksPath, config) + return newInstaller(rc, defaultRepositoriesPath, defaultLocksPath, config) } -func newUpdater(rc *remoteConfig, repositoriesPath string, locksPath string, config config.Reader) (*updaterImpl, error) { +func newInstaller(rc *remoteConfig, repositoriesPath string, locksPath string, config config.Reader) (*installerImpl, error) { repositories := repository.NewRepositories(repositoriesPath, locksPath) remoteRegistryOverride := config.GetString("updater.registry") rcClient := rc @@ -150,120 +150,120 @@ func newUpdater(rc *remoteConfig, repositoriesPath string, locksPath string, con return nil, fmt.Errorf("could not create telemetry: %w", err) } - u := &updaterImpl{ + i := &installerImpl{ remoteUpdates: config.GetBool("updater.remote_updates"), rc: rcClient, repositories: repositories, downloader: newDownloader(config, http.DefaultClient, remoteRegistryOverride), - installer: newInstaller(repositories), + packageManager: newPackageManager(repositories), telemetry: telemetry, requests: make(chan remoteAPIRequest, 32), catalog: catalog{}, bootstrapVersions: bootstrapVersions{}, stopChan: make(chan struct{}), } - u.refreshState(context.Background()) - return u, nil + i.refreshState(context.Background()) + return i, nil } // GetState returns the state. -func (u *updaterImpl) GetState() (map[string]repository.State, error) { - return u.repositories.GetState() +func (i *installerImpl) GetState() (map[string]repository.State, error) { + return i.repositories.GetState() } // Start starts remote config and the garbage collector. -func (u *updaterImpl) Start(ctx context.Context) error { - u.telemetry.Start(ctx) +func (i *installerImpl) Start(ctx context.Context) error { + i.telemetry.Start(ctx) go func() { for { select { case <-time.After(gcInterval): - u.m.Lock() - err := u.repositories.Cleanup() - u.m.Unlock() + i.m.Lock() + err := i.repositories.Cleanup() + i.m.Unlock() if err != nil { - log.Errorf("updater: could not run GC: %v", err) + log.Errorf("installer: could not run GC: %v", err) } - case <-u.stopChan: + case <-i.stopChan: return - case request := <-u.requests: - err := u.handleRemoteAPIRequest(request) + case request := <-i.requests: + err := i.handleRemoteAPIRequest(request) if err != nil { - log.Errorf("updater: could not handle remote request: %v", err) + log.Errorf("installer: could not handle remote request: %v", err) } } } }() - if !u.remoteUpdates { - log.Infof("updater: Remote updates are disabled") + if !i.remoteUpdates { + log.Infof("installer: Remote updates are disabled") return nil } - u.rc.Start(u.handleCatalogUpdate, u.scheduleRemoteAPIRequest) + i.rc.Start(i.handleCatalogUpdate, i.scheduleRemoteAPIRequest) return nil } // Stop stops the garbage collector. 
-func (u *updaterImpl) Stop(ctx context.Context) error { - u.rc.Close() - u.telemetry.Stop(ctx) - close(u.stopChan) - u.requestsWG.Wait() - close(u.requests) +func (i *installerImpl) Stop(ctx context.Context) error { + i.rc.Close() + i.telemetry.Stop(ctx) + close(i.stopChan) + i.requestsWG.Wait() + close(i.requests) return nil } // Bootstrap installs the stable version of the package. -func (u *updaterImpl) BootstrapDefault(ctx context.Context, pkg string) (err error) { +func (i *installerImpl) BootstrapDefault(ctx context.Context, pkg string) (err error) { span, ctx := tracer.StartSpanFromContext(ctx, "bootrap_default") defer func() { span.Finish(tracer.WithError(err)) }() - u.m.Lock() - defer u.m.Unlock() - u.refreshState(ctx) - defer u.refreshState(ctx) + i.m.Lock() + defer i.m.Unlock() + i.refreshState(ctx) + defer i.refreshState(ctx) - stablePackage, ok := u.catalog.getDefaultPackage(u.bootstrapVersions, pkg, runtime.GOARCH, runtime.GOOS) + stablePackage, ok := i.catalog.getDefaultPackage(i.bootstrapVersions, pkg, runtime.GOARCH, runtime.GOOS) if !ok { return fmt.Errorf("could not get default package '%s' for arch '%s' and platform '%s'", pkg, runtime.GOARCH, runtime.GOOS) } - return u.bootstrapPackage(ctx, stablePackage.URL, stablePackage.Name, stablePackage.Version) + return i.bootstrapPackage(ctx, stablePackage.URL, stablePackage.Name, stablePackage.Version) } // BootstrapVersion installs the stable version of the package. -func (u *updaterImpl) BootstrapVersion(ctx context.Context, pkg string, version string) (err error) { +func (i *installerImpl) BootstrapVersion(ctx context.Context, pkg string, version string) (err error) { span, ctx := tracer.StartSpanFromContext(ctx, "bootstrap_version") defer func() { span.Finish(tracer.WithError(err)) }() - u.m.Lock() - defer u.m.Unlock() - u.refreshState(ctx) - defer u.refreshState(ctx) + i.m.Lock() + defer i.m.Unlock() + i.refreshState(ctx) + defer i.refreshState(ctx) - stablePackage, ok := u.catalog.getPackage(pkg, version, runtime.GOARCH, runtime.GOOS) + stablePackage, ok := i.catalog.getPackage(pkg, version, runtime.GOARCH, runtime.GOOS) if !ok { return fmt.Errorf("could not get package '%s' version '%s' for arch '%s' and platform '%s'", pkg, version, runtime.GOARCH, runtime.GOOS) } - return u.bootstrapPackage(ctx, stablePackage.URL, stablePackage.Name, stablePackage.Version) + return i.bootstrapPackage(ctx, stablePackage.URL, stablePackage.Name, stablePackage.Version) } // BootstrapURL installs the stable version of the package. 
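Reviewer aside (not part of the patch): a hedged sketch of driving the renamed package-level entry point; the BootstrapURL signature comes from the hunk above, while the wrapper function and how the URL and config.Reader are obtained are assumptions.

    package example

    import (
        "context"

        "github.com/DataDog/datadog-agent/comp/core/config"
        "github.com/DataDog/datadog-agent/pkg/installer"
    )

    // bootstrapAgent downloads the package at url and installs it as the stable version.
    func bootstrapAgent(ctx context.Context, cfg config.Reader, url string) error {
        return installer.BootstrapURL(ctx, url, cfg)
    }
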
-func (u *updaterImpl) BootstrapURL(ctx context.Context, url string) (err error) { +func (i *installerImpl) BootstrapURL(ctx context.Context, url string) (err error) { span, ctx := tracer.StartSpanFromContext(ctx, "bootstrap_url") defer func() { span.Finish(tracer.WithError(err)) }() - u.m.Lock() - defer u.m.Unlock() - u.refreshState(ctx) - defer u.refreshState(ctx) + i.m.Lock() + defer i.m.Unlock() + i.refreshState(ctx) + defer i.refreshState(ctx) - return u.bootstrapPackage(ctx, url, "", "") + return i.bootstrapPackage(ctx, url, "", "") } -func (u *updaterImpl) bootstrapPackage(ctx context.Context, url string, expectedPackage string, expectedVersion string) error { +func (i *installerImpl) bootstrapPackage(ctx context.Context, url string, expectedPackage string, expectedVersion string) error { // both tmp and repository paths are checked for available disk space in case they are on different partitions err := checkAvailableDiskSpace(fsDisk, defaultRepositoriesPath, os.TempDir()) if err != nil { return fmt.Errorf("not enough disk space to install package: %w", err) } - log.Infof("Updater: Bootstrapping stable package from %s", url) - downloadedPackage, err := u.downloader.Download(ctx, url) + log.Infof("Installer: Bootstrapping stable package from %s", url) + downloadedPackage, err := i.downloader.Download(ctx, url) if err != nil { return fmt.Errorf("could not download: %w", err) } @@ -271,35 +271,35 @@ func (u *updaterImpl) bootstrapPackage(ctx context.Context, url string, expected if (expectedPackage != "" && downloadedPackage.Name != expectedPackage) || (expectedVersion != "" && downloadedPackage.Version != expectedVersion) { return fmt.Errorf("downloaded package does not match expected package: %s, %s != %s, %s", downloadedPackage.Name, downloadedPackage.Version, expectedPackage, expectedVersion) } - err = u.installer.installStable(downloadedPackage.Name, downloadedPackage.Version, downloadedPackage.Image) + err = i.packageManager.installStable(downloadedPackage.Name, downloadedPackage.Version, downloadedPackage.Image) if err != nil { return fmt.Errorf("could not install: %w", err) } - log.Infof("Updater: Successfully installed default version %s of package %s from %s", downloadedPackage.Version, downloadedPackage.Name, url) + log.Infof("Installer: Successfully installed default version %s of package %s from %s", downloadedPackage.Version, downloadedPackage.Name, url) return nil } // StartExperiment starts an experiment with the given package. 
-func (u *updaterImpl) StartExperiment(ctx context.Context, pkg string, version string) (err error) { +func (i *installerImpl) StartExperiment(ctx context.Context, pkg string, version string) (err error) { span, ctx := tracer.StartSpanFromContext(ctx, "bootstrap_version") defer func() { span.Finish(tracer.WithError(err)) }() - u.m.Lock() - defer u.m.Unlock() - u.refreshState(ctx) - defer u.refreshState(ctx) + i.m.Lock() + defer i.m.Unlock() + i.refreshState(ctx) + defer i.refreshState(ctx) - log.Infof("Updater: Starting experiment for package %s version %s", pkg, version) + log.Infof("Installer: Starting experiment for package %s version %s", pkg, version) // both tmp and repository paths are checked for available disk space in case they are on different partitions err = checkAvailableDiskSpace(fsDisk, defaultRepositoriesPath, os.TempDir()) if err != nil { return fmt.Errorf("not enough disk space to install package: %w", err) } - experimentPackage, ok := u.catalog.getPackage(pkg, version, runtime.GOARCH, runtime.GOOS) + experimentPackage, ok := i.catalog.getPackage(pkg, version, runtime.GOARCH, runtime.GOOS) if !ok { return fmt.Errorf("could not get package %s, %s for %s, %s", pkg, version, runtime.GOARCH, runtime.GOOS) } - downloadedPackage, err := u.downloader.Download(ctx, experimentPackage.URL) + downloadedPackage, err := i.downloader.Download(ctx, experimentPackage.URL) if err != nil { return fmt.Errorf("could not download experiment: %w", err) } @@ -307,108 +307,108 @@ func (u *updaterImpl) StartExperiment(ctx context.Context, pkg string, version s if downloadedPackage.Name != experimentPackage.Name || downloadedPackage.Version != experimentPackage.Version { return fmt.Errorf("downloaded package does not match requested package: %s, %s != %s, %s", downloadedPackage.Name, downloadedPackage.Version, experimentPackage.Name, experimentPackage.Version) } - err = u.installer.installExperiment(pkg, version, downloadedPackage.Image) + err = i.packageManager.installExperiment(pkg, version, downloadedPackage.Image) if err != nil { return fmt.Errorf("could not install experiment: %w", err) } - log.Infof("Updater: Successfully started experiment for package %s version %s", pkg, version) + log.Infof("Installer: Successfully started experiment for package %s version %s", pkg, version) return nil } // PromoteExperiment promotes the experiment to stable. -func (u *updaterImpl) PromoteExperiment(ctx context.Context, pkg string) (err error) { +func (i *installerImpl) PromoteExperiment(ctx context.Context, pkg string) (err error) { span, ctx := tracer.StartSpanFromContext(ctx, "promote_experiment") defer func() { span.Finish(tracer.WithError(err)) }() - u.m.Lock() - defer u.m.Unlock() - u.refreshState(ctx) - defer u.refreshState(ctx) + i.m.Lock() + defer i.m.Unlock() + i.refreshState(ctx) + defer i.refreshState(ctx) - log.Infof("Updater: Promoting experiment for package %s", pkg) - err = u.installer.promoteExperiment(pkg) + log.Infof("Installer: Promoting experiment for package %s", pkg) + err = i.packageManager.promoteExperiment(pkg) if err != nil { return fmt.Errorf("could not promote experiment: %w", err) } - log.Infof("Updater: Successfully promoted experiment for package %s", pkg) + log.Infof("Installer: Successfully promoted experiment for package %s", pkg) return nil } // StopExperiment stops the experiment. 
-func (u *updaterImpl) StopExperiment(ctx context.Context, pkg string) (err error) { +func (i *installerImpl) StopExperiment(ctx context.Context, pkg string) (err error) { span, ctx := tracer.StartSpanFromContext(ctx, "stop_experiment") defer func() { span.Finish(tracer.WithError(err)) }() - u.m.Lock() - defer u.m.Unlock() - u.refreshState(ctx) - defer u.refreshState(ctx) + i.m.Lock() + defer i.m.Unlock() + i.refreshState(ctx) + defer i.refreshState(ctx) - defer log.Infof("Updater: Stopping experiment for package %s", pkg) - err = u.installer.uninstallExperiment(pkg) + defer log.Infof("Installer: Stopping experiment for package %s", pkg) + err = i.packageManager.uninstallExperiment(pkg) if err != nil { return fmt.Errorf("could not stop experiment: %w", err) } - log.Infof("Updater: Successfully stopped experiment for package %s", pkg) + log.Infof("Installer: Successfully stopped experiment for package %s", pkg) return nil } -func (u *updaterImpl) handleCatalogUpdate(c catalog) error { - u.m.Lock() - defer u.m.Unlock() - log.Infof("Updater: Received catalog update") - u.catalog = c +func (i *installerImpl) handleCatalogUpdate(c catalog) error { + i.m.Lock() + defer i.m.Unlock() + log.Infof("Installer: Received catalog update") + i.catalog = c return nil } -func (u *updaterImpl) scheduleRemoteAPIRequest(request remoteAPIRequest) error { - u.requestsWG.Add(1) - u.requests <- request +func (i *installerImpl) scheduleRemoteAPIRequest(request remoteAPIRequest) error { + i.requestsWG.Add(1) + i.requests <- request return nil } -func (u *updaterImpl) handleRemoteAPIRequest(request remoteAPIRequest) (err error) { - defer u.requestsWG.Done() +func (i *installerImpl) handleRemoteAPIRequest(request remoteAPIRequest) (err error) { + defer i.requestsWG.Done() ctx := newRequestContext(request) - u.refreshState(ctx) - defer u.refreshState(ctx) + i.refreshState(ctx) + defer i.refreshState(ctx) - s, err := u.repositories.GetPackageState(request.Package) + s, err := i.repositories.GetPackageState(request.Package) if err != nil { - return fmt.Errorf("could not get updater state: %w", err) + return fmt.Errorf("could not get installer state: %w", err) } if s.Stable != request.ExpectedState.Stable || s.Experiment != request.ExpectedState.Experiment { log.Infof("remote request %s not executed as state does not match: expected %v, got %v", request.ID, request.ExpectedState, s) setRequestInvalid(ctx) - u.refreshState(ctx) + i.refreshState(ctx) return nil } defer func() { setRequestDone(ctx, err) }() switch request.Method { case methodStartExperiment: - log.Infof("Updater: Received remote request %s to start experiment for package %s version %s", request.ID, request.Package, request.Params) + log.Infof("Installer: Received remote request %s to start experiment for package %s version %s", request.ID, request.Package, request.Params) var params taskWithVersionParams err := json.Unmarshal(request.Params, ¶ms) if err != nil { return fmt.Errorf("could not unmarshal start experiment params: %w", err) } - return u.StartExperiment(context.Background(), request.Package, params.Version) + return i.StartExperiment(context.Background(), request.Package, params.Version) case methodStopExperiment: - log.Infof("Updater: Received remote request %s to stop experiment for package %s", request.ID, request.Package) - return u.StopExperiment(ctx, request.Package) + log.Infof("Installer: Received remote request %s to stop experiment for package %s", request.ID, request.Package) + return i.StopExperiment(ctx, request.Package) case 
methodPromoteExperiment: - log.Infof("Updater: Received remote request %s to promote experiment for package %s", request.ID, request.Package) - return u.PromoteExperiment(ctx, request.Package) + log.Infof("Installer: Received remote request %s to promote experiment for package %s", request.ID, request.Package) + return i.PromoteExperiment(ctx, request.Package) case methodBootstrap: var params taskWithVersionParams err := json.Unmarshal(request.Params, ¶ms) if err != nil { return fmt.Errorf("could not unmarshal start experiment params: %w", err) } - log.Infof("Updater: Received remote request %s to bootstrap package %s version %s", request.ID, request.Package, params.Version) + log.Infof("Installer: Received remote request %s to bootstrap package %s version %s", request.ID, request.Package, params.Version) if params.Version == "" { - return u.BootstrapDefault(context.Background(), request.Package) + return i.BootstrapDefault(context.Background(), request.Package) } - return u.BootstrapVersion(context.Background(), request.Package, params.Version) + return i.BootstrapVersion(context.Background(), request.Package, params.Version) default: return fmt.Errorf("unknown method: %s", request.Method) } @@ -450,7 +450,7 @@ type requestState struct { Package string ID string State pbgo.TaskState - Err *updaterErrors.UpdaterError + Err *installerErrors.InstallerError } func newRequestContext(request remoteAPIRequest) context.Context { @@ -471,15 +471,15 @@ func setRequestDone(ctx context.Context, err error) { state.State = pbgo.TaskState_DONE if err != nil { state.State = pbgo.TaskState_ERROR - state.Err = updaterErrors.From(err) + state.Err = installerErrors.From(err) } } -func (u *updaterImpl) refreshState(ctx context.Context) { - state, err := u.GetState() +func (i *installerImpl) refreshState(ctx context.Context) { + state, err := i.GetState() if err != nil { // TODO: we should report this error through RC in some way - log.Errorf("could not get updater state: %v", err) + log.Errorf("could not get installer state: %v", err) return } requestState, ok := ctx.Value(requestStateKey).(*requestState) @@ -506,5 +506,5 @@ func (u *updaterImpl) refreshState(ctx context.Context) { } packages = append(packages, p) } - u.rc.SetState(packages) + i.rc.SetState(packages) } diff --git a/pkg/updater/updater_test.go b/pkg/installer/installer_test.go similarity index 77% rename from pkg/updater/updater_test.go rename to pkg/installer/installer_test.go index 8f663f6ce31db..59430097d6d6a 100644 --- a/pkg/updater/updater_test.go +++ b/pkg/installer/installer_test.go @@ -3,10 +3,10 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
-// for now the updater is not supported on windows +// for now the installer is not supported on windows //go:build !windows -package updater +package installer import ( "context" @@ -21,9 +21,9 @@ import ( "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/config/remote/client" + "github.com/DataDog/datadog-agent/pkg/installer/service" pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/remoteconfig/state" - "github.com/DataDog/datadog-agent/pkg/updater/service" ) type testRemoteConfigClient struct { @@ -77,21 +77,21 @@ func (c *testRemoteConfigClient) SubmitRequest(request remoteAPIRequest) { } } -func newTestUpdater(t *testing.T, s *testFixturesServer, rcc *testRemoteConfigClient, defaultFixture fixture) *updaterImpl { - u, _, _ := newTestUpdaterWithPaths(t, s, rcc, defaultFixture) +func newTestInstaller(t *testing.T, s *testFixturesServer, rcc *testRemoteConfigClient, defaultFixture fixture) *installerImpl { + u, _, _ := newTestInstallerWithPaths(t, s, rcc, defaultFixture) return u } -func newTestUpdaterWithPaths(t *testing.T, s *testFixturesServer, rcc *testRemoteConfigClient, defaultFixture fixture) (*updaterImpl, string, string) { +func newTestInstallerWithPaths(t *testing.T, s *testFixturesServer, rcc *testRemoteConfigClient, defaultFixture fixture) (*installerImpl, string, string) { cfg := model.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) var b = true cfg.Set("updater.remote_updates", &b, model.SourceDefault) rc := &remoteConfig{client: rcc} rootPath := t.TempDir() locksPath := t.TempDir() - u, err := newUpdater(rc, rootPath, locksPath, cfg) + u, err := newInstaller(rc, rootPath, locksPath, cfg) assert.NoError(t, err) - u.installer.configsDir = t.TempDir() + u.packageManager.configsDir = t.TempDir() assert.Nil(t, service.BuildHelperForTests(rootPath, t.TempDir(), true)) u.catalog = s.Catalog() u.bootstrapVersions[defaultFixture.pkg] = defaultFixture.version @@ -103,12 +103,12 @@ func TestBootstrapDefault(t *testing.T) { s := newTestFixturesServer(t) defer s.Close() rc := newTestRemoteConfigClient() - updater := newTestUpdater(t, s, rc, fixtureSimpleV1) + installer := newTestInstaller(t, s, rc, fixtureSimpleV1) - err := updater.BootstrapDefault(context.Background(), fixtureSimpleV1.pkg) + err := installer.BootstrapDefault(context.Background(), fixtureSimpleV1.pkg) assert.NoError(t, err) - r := updater.repositories.Get(fixtureSimpleV1.pkg) + r := installer.repositories.Get(fixtureSimpleV1.pkg) state, err := r.GetState() assert.NoError(t, err) assert.Equal(t, fixtureSimpleV1.version, state.Stable) @@ -120,12 +120,12 @@ func TestBootstrapURL(t *testing.T) { s := newTestFixturesServer(t) defer s.Close() rc := newTestRemoteConfigClient() - updater := newTestUpdater(t, s, rc, fixtureSimpleV1) + installer := newTestInstaller(t, s, rc, fixtureSimpleV1) - err := updater.BootstrapURL(context.Background(), s.Package(fixtureSimpleV1).URL) + err := installer.BootstrapURL(context.Background(), s.Package(fixtureSimpleV1).URL) assert.NoError(t, err) - r := updater.repositories.Get(fixtureSimpleV1.pkg) + r := installer.repositories.Get(fixtureSimpleV1.pkg) state, err := r.GetState() assert.NoError(t, err) assert.Equal(t, fixtureSimpleV1.version, state.Stable) @@ -137,13 +137,13 @@ func TestPurge(t *testing.T) { s := newTestFixturesServer(t) defer s.Close() rc := newTestRemoteConfigClient() - updater, rootPath, locksPath := newTestUpdaterWithPaths(t, s, rc, fixtureSimpleV1) + installer, rootPath, 
locksPath := newTestInstallerWithPaths(t, s, rc, fixtureSimpleV1) bootstrapAndAssert := func() { - err := updater.BootstrapDefault(context.Background(), fixtureSimpleV1.pkg) + err := installer.BootstrapDefault(context.Background(), fixtureSimpleV1.pkg) assert.NoError(t, err) - r := updater.repositories.Get(fixtureSimpleV1.pkg) + r := installer.repositories.Get(fixtureSimpleV1.pkg) state, err := r.GetState() assert.NoError(t, err) assert.Equal(t, fixtureSimpleV1.version, state.Stable) @@ -181,7 +181,7 @@ func TestBootstrapWithRC(t *testing.T) { s := newTestFixturesServer(t) defer s.Close() rc := newTestRemoteConfigClient() - updater := newTestUpdater(t, s, rc, fixtureSimpleV1) + installer := newTestInstaller(t, s, rc, fixtureSimpleV1) rc.SubmitRequest(remoteAPIRequest{ ID: uuid.NewString(), @@ -189,9 +189,9 @@ func TestBootstrapWithRC(t *testing.T) { Method: methodBootstrap, Params: json.RawMessage(`{"version":"` + fixtureSimpleV2.version + `"}`), }) - updater.requestsWG.Wait() + installer.requestsWG.Wait() - r := updater.repositories.Get(fixtureSimpleV2.pkg) + r := installer.repositories.Get(fixtureSimpleV2.pkg) state, err := r.GetState() assert.NoError(t, err) assert.Equal(t, fixtureSimpleV2.version, state.Stable) @@ -204,13 +204,13 @@ func TestBootUpd(t *testing.T) { s := newTestFixturesServer(t) defer s.Close() rc := newTestRemoteConfigClient() - updater := newTestUpdater(t, s, rc, fixtureSimpleV1) - updater.catalog = catalog{} + installer := newTestInstaller(t, s, rc, fixtureSimpleV1) + installer.catalog = catalog{} - err := updater.BootstrapDefault(context.Background(), fixtureSimpleV1.pkg) + err := installer.BootstrapDefault(context.Background(), fixtureSimpleV1.pkg) assert.Error(t, err) rc.SubmitCatalog(s.Catalog()) - err = updater.BootstrapDefault(context.Background(), fixtureSimpleV1.pkg) + err = installer.BootstrapDefault(context.Background(), fixtureSimpleV1.pkg) assert.NoError(t, err) } @@ -218,9 +218,9 @@ func TestStartExperiment(t *testing.T) { s := newTestFixturesServer(t) defer s.Close() rc := newTestRemoteConfigClient() - updater := newTestUpdater(t, s, rc, fixtureSimpleV1) + installer := newTestInstaller(t, s, rc, fixtureSimpleV1) - err := updater.BootstrapDefault(context.Background(), fixtureSimpleV1.pkg) + err := installer.BootstrapDefault(context.Background(), fixtureSimpleV1.pkg) assert.NoError(t, err) rc.SubmitRequest(remoteAPIRequest{ ID: uuid.NewString(), @@ -231,9 +231,9 @@ func TestStartExperiment(t *testing.T) { Method: methodStartExperiment, Params: json.RawMessage(`{"version":"` + fixtureSimpleV2.version + `"}`), }) - updater.requestsWG.Wait() + installer.requestsWG.Wait() - r := updater.repositories.Get(fixtureSimpleV1.pkg) + r := installer.repositories.Get(fixtureSimpleV1.pkg) state, err := r.GetState() assert.NoError(t, err) assert.Equal(t, fixtureSimpleV1.version, state.Stable) @@ -246,9 +246,9 @@ func TestPromoteExperiment(t *testing.T) { s := newTestFixturesServer(t) defer s.Close() rc := newTestRemoteConfigClient() - updater := newTestUpdater(t, s, rc, fixtureSimpleV1) + installer := newTestInstaller(t, s, rc, fixtureSimpleV1) - err := updater.BootstrapDefault(context.Background(), fixtureSimpleV1.pkg) + err := installer.BootstrapDefault(context.Background(), fixtureSimpleV1.pkg) assert.NoError(t, err) rc.SubmitRequest(remoteAPIRequest{ ID: uuid.NewString(), @@ -259,7 +259,7 @@ func TestPromoteExperiment(t *testing.T) { Method: methodStartExperiment, Params: json.RawMessage(`{"version":"` + fixtureSimpleV2.version + `"}`), }) - updater.requestsWG.Wait() 
+ installer.requestsWG.Wait() rc.SubmitRequest(remoteAPIRequest{ ID: uuid.NewString(), Package: fixtureSimpleV1.pkg, @@ -269,9 +269,9 @@ func TestPromoteExperiment(t *testing.T) { }, Method: methodPromoteExperiment, }) - updater.requestsWG.Wait() + installer.requestsWG.Wait() - r := updater.repositories.Get(fixtureSimpleV1.pkg) + r := installer.repositories.Get(fixtureSimpleV1.pkg) state, err := r.GetState() assert.NoError(t, err) assert.Equal(t, fixtureSimpleV2.version, state.Stable) @@ -283,9 +283,9 @@ func TestStopExperiment(t *testing.T) { s := newTestFixturesServer(t) defer s.Close() rc := newTestRemoteConfigClient() - updater := newTestUpdater(t, s, rc, fixtureSimpleV1) + installer := newTestInstaller(t, s, rc, fixtureSimpleV1) - err := updater.BootstrapDefault(context.Background(), fixtureSimpleV1.pkg) + err := installer.BootstrapDefault(context.Background(), fixtureSimpleV1.pkg) assert.NoError(t, err) rc.SubmitRequest(remoteAPIRequest{ ID: uuid.NewString(), @@ -296,8 +296,8 @@ func TestStopExperiment(t *testing.T) { Method: methodStartExperiment, Params: json.RawMessage(`{"version":"` + fixtureSimpleV2.version + `"}`), }) - updater.requestsWG.Wait() - r := updater.repositories.Get(fixtureSimpleV1.pkg) + installer.requestsWG.Wait() + r := installer.repositories.Get(fixtureSimpleV1.pkg) state, err := r.GetState() assert.NoError(t, err) assert.True(t, state.HasExperiment()) @@ -310,7 +310,7 @@ func TestStopExperiment(t *testing.T) { }, Method: methodStopExperiment, }) - updater.requestsWG.Wait() + installer.requestsWG.Wait() state, err = r.GetState() assert.NoError(t, err) diff --git a/pkg/updater/local_api.go b/pkg/installer/local_api.go similarity index 85% rename from pkg/updater/local_api.go rename to pkg/installer/local_api.go index eee4acc564706..d586e6f7092d4 100644 --- a/pkg/updater/local_api.go +++ b/pkg/installer/local_api.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -package updater +package installer import ( "bytes" @@ -16,13 +16,13 @@ import ( "github.com/gorilla/mux" - "github.com/DataDog/datadog-agent/pkg/updater/repository" + "github.com/DataDog/datadog-agent/pkg/installer/repository" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/version" ) const ( - defaultSocketPath = defaultRepositoriesPath + "/updater.sock" + defaultSocketPath = defaultRepositoriesPath + "/installer.sock" ) // StatusResponse is the response to the status endpoint. @@ -42,21 +42,21 @@ type APIError struct { Message string `json:"message"` } -// LocalAPI is the interface for the locally exposed API to interact with the updater. +// LocalAPI is the interface for the locally exposed API to interact with the installer. type LocalAPI interface { Start(context.Context) error Stop(context.Context) error } -// localAPIImpl is a locally exposed API to interact with the updater. +// localAPIImpl is a locally exposed API to interact with the installer. type localAPIImpl struct { - updater Updater - listener net.Listener - server *http.Server + installer Installer + listener net.Listener + server *http.Server } // NewLocalAPI returns a new LocalAPI. 
-func NewLocalAPI(updater Updater) (LocalAPI, error) { +func NewLocalAPI(installer Installer) (LocalAPI, error) { socketPath := defaultSocketPath err := os.RemoveAll(socketPath) if err != nil { @@ -70,9 +70,9 @@ func NewLocalAPI(updater Updater) (LocalAPI, error) { return nil, fmt.Errorf("error setting socket permissions: %v", err) } return &localAPIImpl{ - server: &http.Server{}, - listener: listener, - updater: updater, + server: &http.Server{}, + listener: listener, + installer: installer, }, nil } @@ -109,7 +109,7 @@ func (l *localAPIImpl) status(w http.ResponseWriter, _ *http.Request) { defer func() { _ = json.NewEncoder(w).Encode(response) }() - pacakges, err := l.updater.GetState() + pacakges, err := l.installer.GetState() if err != nil { w.WriteHeader(http.StatusInternalServerError) response.Error = &APIError{Message: err.Error()} @@ -121,7 +121,7 @@ func (l *localAPIImpl) status(w http.ResponseWriter, _ *http.Request) { } } -// example: curl -X POST --unix-socket /opt/datadog-packages/updater.sock -H 'Content-Type: application/json' http://updater/datadog-agent/experiment/start -d '{"version":"1.21.5"}' +// example: curl -X POST --unix-socket /opt/datadog-packages/installer.sock -H 'Content-Type: application/json' http://installer/datadog-agent/experiment/start -d '{"version":"1.21.5"}' func (l *localAPIImpl) startExperiment(w http.ResponseWriter, r *http.Request) { pkg := mux.Vars(r)["package"] w.Header().Set("Content-Type", "application/json") @@ -137,7 +137,7 @@ func (l *localAPIImpl) startExperiment(w http.ResponseWriter, r *http.Request) { return } log.Infof("Received local request to start experiment for package %s version %s", pkg, request.Version) - err = l.updater.StartExperiment(r.Context(), pkg, request.Version) + err = l.installer.StartExperiment(r.Context(), pkg, request.Version) if err != nil { w.WriteHeader(http.StatusInternalServerError) response.Error = &APIError{Message: err.Error()} @@ -145,7 +145,7 @@ func (l *localAPIImpl) startExperiment(w http.ResponseWriter, r *http.Request) { } } -// example: curl -X POST --unix-socket /opt/datadog-packages/updater.sock -H 'Content-Type: application/json' http://updater/datadog-agent/experiment/stop -d '{}' +// example: curl -X POST --unix-socket /opt/datadog-packages/installer.sock -H 'Content-Type: application/json' http://installer/datadog-agent/experiment/stop -d '{}' func (l *localAPIImpl) stopExperiment(w http.ResponseWriter, r *http.Request) { pkg := mux.Vars(r)["package"] w.Header().Set("Content-Type", "application/json") @@ -154,7 +154,7 @@ func (l *localAPIImpl) stopExperiment(w http.ResponseWriter, r *http.Request) { _ = json.NewEncoder(w).Encode(response) }() log.Infof("Received local request to stop experiment for package %s", pkg) - err := l.updater.StopExperiment(r.Context(), pkg) + err := l.installer.StopExperiment(r.Context(), pkg) if err != nil { w.WriteHeader(http.StatusInternalServerError) response.Error = &APIError{Message: err.Error()} @@ -162,7 +162,7 @@ func (l *localAPIImpl) stopExperiment(w http.ResponseWriter, r *http.Request) { } } -// example: curl -X POST --unix-socket /opt/datadog-packages/updater.sock -H 'Content-Type: application/json' http://updater/datadog-agent/experiment/promote -d '{}' +// example: curl -X POST --unix-socket /opt/datadog-packages/installer.sock -H 'Content-Type: application/json' http://installer/datadog-agent/experiment/promote -d '{}' func (l *localAPIImpl) promoteExperiment(w http.ResponseWriter, r *http.Request) { pkg := mux.Vars(r)["package"] 
w.Header().Set("Content-Type", "application/json") @@ -171,7 +171,7 @@ func (l *localAPIImpl) promoteExperiment(w http.ResponseWriter, r *http.Request) _ = json.NewEncoder(w).Encode(response) }() log.Infof("Received local request to promote experiment for package %s", pkg) - err := l.updater.PromoteExperiment(r.Context(), pkg) + err := l.installer.PromoteExperiment(r.Context(), pkg) if err != nil { w.WriteHeader(http.StatusInternalServerError) response.Error = &APIError{Message: err.Error()} @@ -179,7 +179,7 @@ func (l *localAPIImpl) promoteExperiment(w http.ResponseWriter, r *http.Request) } } -// example: curl -X POST --unix-socket /opt/datadog-packages/updater.sock -H 'Content-Type: application/json' http://updater/datadog-agent/bootstrap -d '{"version":"1.21.5"}' +// example: curl -X POST --unix-socket /opt/datadog-packages/installer.sock -H 'Content-Type: application/json' http://installer/datadog-agent/bootstrap -d '{"version":"1.21.5"}' func (l *localAPIImpl) bootstrap(w http.ResponseWriter, r *http.Request) { pkg := mux.Vars(r)["package"] w.Header().Set("Content-Type", "application/json") @@ -199,10 +199,10 @@ func (l *localAPIImpl) bootstrap(w http.ResponseWriter, r *http.Request) { } if request.Version != "" { log.Infof("Received local request to bootstrap package %s version %s", pkg, request.Version) - err = l.updater.BootstrapVersion(r.Context(), pkg, request.Version) + err = l.installer.BootstrapVersion(r.Context(), pkg, request.Version) } else { log.Infof("Received local request to bootstrap package %s", pkg) - err = l.updater.BootstrapDefault(r.Context(), pkg) + err = l.installer.BootstrapDefault(r.Context(), pkg) } if err != nil { @@ -212,7 +212,7 @@ func (l *localAPIImpl) bootstrap(w http.ResponseWriter, r *http.Request) { } } -// LocalAPIClient is a client to interact with the locally exposed updater API. +// LocalAPIClient is a client to interact with the locally exposed installer API. type LocalAPIClient interface { Status() (StatusResponse, error) @@ -222,7 +222,7 @@ type LocalAPIClient interface { BootstrapVersion(pkg, version string) error } -// LocalAPIClient is a client to interact with the locally exposed updater API. +// LocalAPIClient is a client to interact with the locally exposed installer API. type localAPIClientImpl struct { client *http.Client addr string @@ -231,7 +231,7 @@ type localAPIClientImpl struct { // NewLocalAPIClient returns a new LocalAPIClient. func NewLocalAPIClient() LocalAPIClient { return &localAPIClientImpl{ - addr: "updater", // this has no meaning when using a unix socket + addr: "installer", // this has no meaning when using a unix socket client: &http.Client{ Transport: &http.Transport{ Dial: func(_, _ string) (net.Conn, error) { @@ -242,7 +242,7 @@ func NewLocalAPIClient() LocalAPIClient { } } -// Status returns the status of the updater. +// Status returns the status of the installer. func (c *localAPIClientImpl) Status() (StatusResponse, error) { var response StatusResponse req, err := http.NewRequest(http.MethodGet, fmt.Sprintf("http://%s/status", c.addr), nil) diff --git a/pkg/updater/local_api_test.go b/pkg/installer/local_api_test.go similarity index 93% rename from pkg/updater/local_api_test.go rename to pkg/installer/local_api_test.go index b77f46b942518..330626f92a13d 100644 --- a/pkg/updater/local_api_test.go +++ b/pkg/installer/local_api_test.go @@ -3,10 +3,10 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
-// for now the updater is not supported on windows +// for now the installer is not supported on windows //go:build !windows -package updater +package installer import ( "context" @@ -25,14 +25,14 @@ type testLocalAPI struct { func newTestLocalAPI(t *testing.T, s *testFixturesServer) *testLocalAPI { rc := newTestRemoteConfigClient() - updater := newTestUpdater(t, s, rc, fixtureSimpleV1) + installer := newTestInstaller(t, s, rc, fixtureSimpleV1) rc.SubmitCatalog(s.Catalog()) l, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) apiServer := &localAPIImpl{ - server: &http.Server{}, - listener: l, - updater: updater, + server: &http.Server{}, + listener: l, + installer: installer, } apiServer.Start(context.Background()) apiClient := &localAPIClientImpl{ diff --git a/pkg/updater/remote_config.go b/pkg/installer/remote_config.go similarity index 98% rename from pkg/updater/remote_config.go rename to pkg/installer/remote_config.go index e8f3d890d5bb0..55be969321f56 100644 --- a/pkg/updater/remote_config.go +++ b/pkg/installer/remote_config.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -package updater +package installer import ( "encoding/json" @@ -116,7 +116,7 @@ func handleUpdaterCatalogDDUpdate(h handleCatalogUpdate) client.Handler { var catalog catalog err := json.Unmarshal(config.Config, &catalog) if err != nil { - log.Errorf("could not unmarshal updater catalog: %s", err) + log.Errorf("could not unmarshal installer catalog: %s", err) applyStateCallback(configPath, state.ApplyStatus{State: state.ApplyStateError, Error: err.Error()}) return } diff --git a/pkg/updater/remote_config_test.go b/pkg/installer/remote_config_test.go similarity index 99% rename from pkg/updater/remote_config_test.go rename to pkg/installer/remote_config_test.go index 9cd859537decc..2346e64e77134 100644 --- a/pkg/updater/remote_config_test.go +++ b/pkg/installer/remote_config_test.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
-package updater +package installer import ( "encoding/json" diff --git a/pkg/updater/repository/link.go b/pkg/installer/repository/link.go similarity index 100% rename from pkg/updater/repository/link.go rename to pkg/installer/repository/link.go diff --git a/pkg/updater/repository/link_test.go b/pkg/installer/repository/link_test.go similarity index 100% rename from pkg/updater/repository/link_test.go rename to pkg/installer/repository/link_test.go diff --git a/pkg/updater/repository/link_windows.go b/pkg/installer/repository/link_windows.go similarity index 100% rename from pkg/updater/repository/link_windows.go rename to pkg/installer/repository/link_windows.go diff --git a/pkg/updater/repository/repositories.go b/pkg/installer/repository/repositories.go similarity index 100% rename from pkg/updater/repository/repositories.go rename to pkg/installer/repository/repositories.go diff --git a/pkg/updater/repository/repositories_test.go b/pkg/installer/repository/repositories_test.go similarity index 97% rename from pkg/updater/repository/repositories_test.go rename to pkg/installer/repository/repositories_test.go index c4d2a3c36fa1f..e404ede6f934c 100644 --- a/pkg/updater/repository/repositories_test.go +++ b/pkg/installer/repository/repositories_test.go @@ -12,7 +12,7 @@ import ( "github.com/stretchr/testify/assert" - "github.com/DataDog/datadog-agent/pkg/updater/service" + "github.com/DataDog/datadog-agent/pkg/installer/service" ) func newTestRepositories(t *testing.T) *Repositories { diff --git a/pkg/updater/repository/repository.go b/pkg/installer/repository/repository.go similarity index 99% rename from pkg/updater/repository/repository.go rename to pkg/installer/repository/repository.go index 9c1b6d26eb209..a8ad6516f772f 100644 --- a/pkg/updater/repository/repository.go +++ b/pkg/installer/repository/repository.go @@ -16,7 +16,7 @@ import ( "github.com/DataDog/gopsutil/process" - "github.com/DataDog/datadog-agent/pkg/updater/service" + "github.com/DataDog/datadog-agent/pkg/installer/service" "github.com/DataDog/datadog-agent/pkg/util/log" ) diff --git a/pkg/updater/repository/repository_test.go b/pkg/installer/repository/repository_test.go similarity index 99% rename from pkg/updater/repository/repository_test.go rename to pkg/installer/repository/repository_test.go index 0996fc8b0fd61..b5f69388972ed 100644 --- a/pkg/updater/repository/repository_test.go +++ b/pkg/installer/repository/repository_test.go @@ -13,7 +13,7 @@ import ( "path" "testing" - "github.com/DataDog/datadog-agent/pkg/updater/service" + "github.com/DataDog/datadog-agent/pkg/installer/service" "github.com/stretchr/testify/assert" ) diff --git a/pkg/updater/service/apm_inject.go b/pkg/installer/service/apm_inject.go similarity index 100% rename from pkg/updater/service/apm_inject.go rename to pkg/installer/service/apm_inject.go diff --git a/pkg/updater/service/apm_inject_test.go b/pkg/installer/service/apm_inject_test.go similarity index 100% rename from pkg/updater/service/apm_inject_test.go rename to pkg/installer/service/apm_inject_test.go diff --git a/pkg/updater/service/apm_inject_windows.go b/pkg/installer/service/apm_inject_windows.go similarity index 100% rename from pkg/updater/service/apm_inject_windows.go rename to pkg/installer/service/apm_inject_windows.go diff --git a/pkg/updater/service/cmd_executor.go b/pkg/installer/service/cmd_executor.go similarity index 96% rename from pkg/updater/service/cmd_executor.go rename to pkg/installer/service/cmd_executor.go index a1abec9e4919e..4c93406dbbd3e 100644 --- 
a/pkg/updater/service/cmd_executor.go +++ b/pkg/installer/service/cmd_executor.go @@ -79,7 +79,7 @@ func BuildHelperForTests(pkgDir, binPath string, skipUIDCheck bool) error { targetDir := "datadog-agent/pkg" index := strings.Index(localPath, targetDir) pkgPath := localPath[:index+len(targetDir)] - helperPath := filepath.Join(pkgPath, "updater", "service", "helper", "main.go") + helperPath := filepath.Join(pkgPath, "installer", "service", "helper", "main.go") cmd := exec.Command("go", "build", fmt.Sprintf(`-ldflags=-X main.pkgDir=%s -X main.testSkipUID=%v`, pkgDir, skipUIDCheck), "-o", updaterHelper, helperPath) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr diff --git a/pkg/updater/service/cmd_executor_windows.go b/pkg/installer/service/cmd_executor_windows.go similarity index 100% rename from pkg/updater/service/cmd_executor_windows.go rename to pkg/installer/service/cmd_executor_windows.go diff --git a/pkg/updater/service/datadog_agent.go b/pkg/installer/service/datadog_agent.go similarity index 100% rename from pkg/updater/service/datadog_agent.go rename to pkg/installer/service/datadog_agent.go diff --git a/pkg/updater/service/datadog_agent_windows.go b/pkg/installer/service/datadog_agent_windows.go similarity index 100% rename from pkg/updater/service/datadog_agent_windows.go rename to pkg/installer/service/datadog_agent_windows.go diff --git a/pkg/updater/service/datadog_installer.go b/pkg/installer/service/datadog_installer.go similarity index 100% rename from pkg/updater/service/datadog_installer.go rename to pkg/installer/service/datadog_installer.go diff --git a/pkg/updater/service/datadog_installer_windows.go b/pkg/installer/service/datadog_installer_windows.go similarity index 100% rename from pkg/updater/service/datadog_installer_windows.go rename to pkg/installer/service/datadog_installer_windows.go diff --git a/pkg/updater/service/docker.go b/pkg/installer/service/docker.go similarity index 96% rename from pkg/updater/service/docker.go rename to pkg/installer/service/docker.go index c4cdb3fc0de20..b7ca21bd035e3 100644 --- a/pkg/updater/service/docker.go +++ b/pkg/installer/service/docker.go @@ -176,7 +176,7 @@ func (a *apmInjectorInstaller) deleteDockerConfigContent(previousContent []byte) // restartDocker reloads the docker daemon if it exists func restartDocker() error { if !isDockerInstalled() { - log.Info("updater: docker is not installed, skipping reload") + log.Info("installer: docker is not installed, skipping reload") return nil } return executeCommand(restartDockerCommand) @@ -189,7 +189,7 @@ func isDockerInstalled() bool { cmd.Stdout = &outb err := cmd.Run() if err != nil { - log.Warn("updater: failed to check if docker is installed, assuming it isn't: ", err) + log.Warn("installer: failed to check if docker is installed, assuming it isn't: ", err) return false } return len(outb.String()) != 0 diff --git a/pkg/updater/service/docker_test.go b/pkg/installer/service/docker_test.go similarity index 100% rename from pkg/updater/service/docker_test.go rename to pkg/installer/service/docker_test.go diff --git a/pkg/installer/service/helper/go.mod b/pkg/installer/service/helper/go.mod new file mode 100644 index 0000000000000..f583731ed6624 --- /dev/null +++ b/pkg/installer/service/helper/go.mod @@ -0,0 +1,3 @@ +module github.com/DataDog/datadog-agent/pkg/installer/service/helper + +go 1.21.7 diff --git a/pkg/updater/service/helper/main.go b/pkg/installer/service/helper/main.go similarity index 100% rename from pkg/updater/service/helper/main.go rename to 
pkg/installer/service/helper/main.go diff --git a/pkg/updater/service/systemd.go b/pkg/installer/service/systemd.go similarity index 97% rename from pkg/updater/service/systemd.go rename to pkg/installer/service/systemd.go index 2f384b010ad2e..26685c18a7578 100644 --- a/pkg/updater/service/systemd.go +++ b/pkg/installer/service/systemd.go @@ -37,7 +37,6 @@ const ( createDockerDirCommand = `{"command":"create-docker-dir"}` replaceLDPreloadCommand = `{"command":"replace-ld-preload"}` systemdReloadCommand = `{"command":"systemd-reload"}` - adminExecutor = "datadog-updater-admin.service" ) type privilegeCommand struct { diff --git a/pkg/updater/service/systemd_test.go b/pkg/installer/service/systemd_test.go similarity index 90% rename from pkg/updater/service/systemd_test.go rename to pkg/installer/service/systemd_test.go index 51212f6caa015..cdb071be1fcfa 100644 --- a/pkg/updater/service/systemd_test.go +++ b/pkg/installer/service/systemd_test.go @@ -46,13 +46,14 @@ func TestAssertWorkingCommands(t *testing.T) { // missing permissions on test setup, e2e tests verify the successful commands successErr := "error: failed to lookup dd-installer user: user: unknown user dd-installer\n" + successSystemd := "error: systemd unit path error: stat /lib/systemd/system: no such file or directory\n" require.Equal(t, successErr, startUnit("datadog-agent").Error()) assert.Equal(t, successErr, stopUnit("datadog-agent").Error()) assert.Equal(t, successErr, enableUnit("datadog-agent").Error()) assert.Equal(t, successErr, disableUnit("datadog-agent").Error()) - assert.Equal(t, successErr, loadUnit("datadog-agent").Error()) - assert.Equal(t, successErr, removeUnit("datadog-agent").Error()) + assert.Equal(t, successSystemd, loadUnit("datadog-agent").Error()) + assert.Equal(t, successSystemd, removeUnit("datadog-agent").Error()) assert.Equal(t, successErr, createAgentSymlink().Error()) assert.Equal(t, successErr, rmAgentSymlink().Error()) assert.Equal(t, successErr, backupAgentConfig().Error()) diff --git a/pkg/updater/telemetry.go b/pkg/installer/telemetry.go similarity index 99% rename from pkg/updater/telemetry.go rename to pkg/installer/telemetry.go index 2879c641e5b2a..7760e47c98fda 100644 --- a/pkg/updater/telemetry.go +++ b/pkg/installer/telemetry.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
-package updater +package installer import ( "context" diff --git a/pkg/updater/service/helper/go.mod b/pkg/updater/service/helper/go.mod deleted file mode 100644 index 281f85f1ebfa3..0000000000000 --- a/pkg/updater/service/helper/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/DataDog/datadog-agent/pkg/updater/service/helper - -go 1.21.7 diff --git a/tasks/installer.py b/tasks/installer.py index ad62d7c5b2103..b99b24d1cac2e 100644 --- a/tasks/installer.py +++ b/tasks/installer.py @@ -52,7 +52,7 @@ def build( helper_bin = os.path.join(BIN_PATH, bin_name("helper")) helper_ldflags = f"-X main.installPath={install_path} -w -s" - helper_path = os.path.join("pkg", "updater", "service", "helper") + helper_path = os.path.join("pkg", "installer", "service", "helper") cmd = f"CGO_ENABLED=0 go build {build_type} -tags \"{go_build_tags}\" " cmd += f"-o {helper_bin} -gcflags=\"{gcflags}\" -ldflags=\"{helper_ldflags}\" {helper_path}/main.go" From be98dcc308b291659da87e6e06da0dc6da0c1a4a Mon Sep 17 00:00:00 2001 From: jedupau <72938258+jedupau@users.noreply.github.com> Date: Mon, 15 Apr 2024 12:31:22 +0200 Subject: [PATCH 39/99] add a test to check metadata tags (#24577) --- .../internal/devicecheck/devicecheck_test.go | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck_test.go b/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck_test.go index 331f234a34d0f..040fabf818c12 100644 --- a/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck_test.go +++ b/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck_test.go @@ -6,6 +6,7 @@ package devicecheck import ( + "encoding/json" "errors" "fmt" "path/filepath" @@ -33,6 +34,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/corechecks/snmp/internal/profile" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/snmp/internal/report" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/snmp/internal/session" + "github.com/DataDog/datadog-agent/pkg/networkdevice/metadata" ) func TestProfileWithSysObjectIdDetection(t *testing.T) { @@ -561,11 +563,21 @@ profiles: deviceCk, err := NewDeviceCheck(config, "1.2.3.4", sessionFactory) assert.Nil(t, err) + snmpTags := []string{"snmp_device:1.2.3.4", "snmp_profile:f5-big-ip", "device_vendor:f5", "snmp_host:foo_sys_name", + "static_tag:from_profile_root", "static_tag:from_base_profile", "some_tag:some_tag_value", "prefix:f", "suffix:oo_sys_name"} + sender := mocksender.NewMockSender("123") // required to initiate aggregator sender.On("Gauge", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return() sender.On("MonotonicCount", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return() sender.On("ServiceCheck", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return() - sender.On("EventPlatformEvent", mock.Anything, mock.Anything).Return() + sender.On("EventPlatformEvent", mock.Anything, mock.Anything).Return().Run(func(args mock.Arguments) { + arg := args.Get(0).([]uint8) + var data metadata.NetworkDevicesMetadata + json.Unmarshal(arg, &data) + + tags := data.Devices[0].Tags + assert.Subset(t, tags, snmpTags) + }) sender.On("Commit").Return() deviceCk.SetSender(report.NewMetricSender(sender, "", nil, report.MakeInterfaceBandwidthState())) @@ -811,9 +823,6 @@ profiles: err = deviceCk.Run(time.Now()) assert.Nil(t, err) - snmpTags := []string{"snmp_device:1.2.3.4", "snmp_profile:f5-big-ip", "device_vendor:f5", 
"snmp_host:foo_sys_name", - "static_tag:from_profile_root", "some_tag:some_tag_value", "prefix:f", "suffix:oo_sys_name"} - sender.AssertServiceCheck(t, "snmp.can_check", servicecheck.ServiceCheckOK, "", snmpTags, "") sender.AssertMetric(t, "Gauge", deviceReachableMetric, 1., "", snmpTags) sender.AssertMetric(t, "Gauge", deviceUnreachableMetric, 0., "", snmpTags) From 5bdc5e5d2a74e74c53961f8a5917186f47b41605 Mon Sep 17 00:00:00 2001 From: Pierre Gimalac Date: Mon, 15 Apr 2024 13:16:22 +0200 Subject: [PATCH 40/99] [ASCII-1482] Fix APM status type error (#24636) * fix: accept interface and cast to float64 to fix type error * chore: add releasenote * test: add some tests around mkhuman * Update releasenotes/notes/status-trace-section-typing-ccd984242ee6ce99.yaml Co-authored-by: Srdjan Grubor --------- Co-authored-by: Srdjan Grubor --- comp/core/status/go.mod | 1 + comp/core/status/go.sum | 14 ++++++++++++-- comp/core/status/render_helpers.go | 8 +++++--- comp/core/status/render_helpers_test.go | 9 ++++++--- comp/core/status/statusimpl/go.mod | 2 +- comp/core/status/statusimpl/go.sum | 8 ++++---- ...atus-trace-section-typing-ccd984242ee6ce99.yaml | 11 +++++++++++ 7 files changed, 40 insertions(+), 13 deletions(-) create mode 100644 releasenotes/notes/status-trace-section-typing-ccd984242ee6ce99.yaml diff --git a/comp/core/status/go.mod b/comp/core/status/go.mod index 656370890b00f..f3ad03ba40b60 100644 --- a/comp/core/status/go.mod +++ b/comp/core/status/go.mod @@ -5,6 +5,7 @@ go 1.21.9 require ( github.com/dustin/go-humanize v1.0.1 github.com/fatih/color v1.16.0 + github.com/spf13/cast v1.6.0 github.com/stretchr/testify v1.9.0 go.uber.org/fx v1.18.2 golang.org/x/text v0.14.0 diff --git a/comp/core/status/go.sum b/comp/core/status/go.sum index f4e75aa6168e5..0ccd5d3babfb7 100644 --- a/comp/core/status/go.sum +++ b/comp/core/status/go.sum @@ -9,13 +9,19 @@ github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkp github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= 
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= @@ -26,6 +32,10 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= diff --git a/comp/core/status/render_helpers.go b/comp/core/status/render_helpers.go index 30b07e26556e6..4a1449c8d927b 100644 --- a/comp/core/status/render_helpers.go +++ b/comp/core/status/render_helpers.go @@ -21,6 +21,7 @@ import ( "github.com/dustin/go-humanize" "github.com/fatih/color" + "github.com/spf13/cast" "golang.org/x/text/cases" "golang.org/x/text/language" @@ -194,12 +195,13 @@ func toUnsortedList(s map[string]interface{}) string { } // mkHuman adds commas to large numbers to assist readability in status outputs -func mkHuman(f float64) string { - return humanize.Commaf(f) +func mkHuman(f interface{}) string { + return humanize.Commaf(cast.ToFloat64(f)) } // mkHumanDuration makes time values more readable -func mkHumanDuration(f float64, unit string) string { +func mkHumanDuration(i interface{}, unit string) string { + f := cast.ToFloat64(i) var duration time.Duration if unit != "" { duration, _ = time.ParseDuration(fmt.Sprintf("%f%s", f, unit)) diff --git a/comp/core/status/render_helpers_test.go b/comp/core/status/render_helpers_test.go index 166f87d3f069c..8e711e3736661 100644 --- a/comp/core/status/render_helpers_test.go +++ b/comp/core/status/render_helpers_test.go @@ -8,6 +8,7 @@ package status import ( "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -21,7 +22,9 @@ func TestNtpWarning(t *testing.T) { func TestMkHuman(t *testing.T) { f := 1695783.0 fStr := mkHuman(f) - if fStr != "1,695,783" { - t.Errorf("Large number formatting is incorrectly adding commas in agent statuses") - } + assert.Equal(t, "1,695,783", fStr, "Large number formatting is incorrectly adding commas in agent statuses") + + assert.Equal(t, "1", mkHuman(1)) + assert.Equal(t, "1", mkHuman("1")) + assert.Equal(t, "1.5", mkHuman(float32(1.5))) } diff --git a/comp/core/status/statusimpl/go.mod b/comp/core/status/statusimpl/go.mod index dbee1499559c6..5042265453400 100644 --- a/comp/core/status/statusimpl/go.mod +++ b/comp/core/status/statusimpl/go.mod @@ -81,7 +81,7 @@ require ( github.com/shirou/gopsutil/v3 v3.23.12 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect github.com/spf13/afero v1.1.2 // indirect - github.com/spf13/cast v1.5.1 // indirect + github.com/spf13/cast v1.6.0 // indirect github.com/spf13/cobra v1.7.0 // indirect github.com/spf13/jwalterweatherman v1.0.0 // indirect github.com/spf13/pflag 
v1.0.5 // indirect diff --git a/comp/core/status/statusimpl/go.sum b/comp/core/status/statusimpl/go.sum index a65d0e854b7be..2bc69fe447cdc 100644 --- a/comp/core/status/statusimpl/go.sum +++ b/comp/core/status/statusimpl/go.sum @@ -47,8 +47,8 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= -github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= -github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= @@ -195,8 +195,8 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= -github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= diff --git a/releasenotes/notes/status-trace-section-typing-ccd984242ee6ce99.yaml b/releasenotes/notes/status-trace-section-typing-ccd984242ee6ce99.yaml new file mode 100644 index 0000000000000..6895a3876af4d --- /dev/null +++ b/releasenotes/notes/status-trace-section-typing-ccd984242ee6ce99.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +fixes: + - | + Fix type conversion error while generating the trace-agent status. 
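For reference, the mkHuman/mkHumanDuration change in this patch swaps the template helpers from a float64 parameter to interface{} plus spf13/cast, so status values that arrive as ints or strings no longer trigger a template type-conversion error. Below is a minimal standalone sketch of that conversion pattern; it is not part of the patch, assumes only the public spf13/cast and go-humanize APIs, and the main() wrapper exists purely for illustration.

package main

import (
	"fmt"

	"github.com/dustin/go-humanize"
	"github.com/spf13/cast"
)

// mkHuman mirrors the patched helper: accept any scalar value, coerce it to
// float64 with cast, then add thousands separators for readability.
func mkHuman(v interface{}) string {
	return humanize.Commaf(cast.ToFloat64(v))
}

func main() {
	// All three inputs render identically; before the patch, passing an int or
	// a string to the float64-typed helper caused a template execution error.
	fmt.Println(mkHuman(1695783.0)) // 1,695,783
	fmt.Println(mkHuman(1695783))   // 1,695,783
	fmt.Println(mkHuman("1695783")) // 1,695,783
}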
From ddd33817334bb3af1ede657eae0066cb0f477224 Mon Sep 17 00:00:00 2001 From: Olivier G <52180542+ogaca-dd@users.noreply.github.com> Date: Mon, 15 Apr 2024 13:39:58 +0200 Subject: [PATCH 41/99] Create adscheduler component (#24454) * Create adscheduler * Fix lint-components * Improvement adscheduler description * Rename SchedulerProvider to Scheduler --- cmd/agent/subcommands/run/command.go | 8 ++--- comp/README.md | 4 +++ .../adschedulerimpl/adscheduler.go | 35 +++++++++++++++++++ comp/logs/adscheduler/component.go | 12 +++++++ comp/logs/agent/agent.go | 29 +++++++++------ comp/logs/agent/scheduler_provider.go | 25 +++++++++++++ 6 files changed, 96 insertions(+), 17 deletions(-) create mode 100644 comp/logs/adscheduler/adschedulerimpl/adscheduler.go create mode 100644 comp/logs/adscheduler/component.go create mode 100644 comp/logs/agent/scheduler_provider.go diff --git a/cmd/agent/subcommands/run/command.go b/cmd/agent/subcommands/run/command.go index 412d88a604dfb..b5938a2d42531 100644 --- a/cmd/agent/subcommands/run/command.go +++ b/cmd/agent/subcommands/run/command.go @@ -88,6 +88,7 @@ import ( langDetectionCl "github.com/DataDog/datadog-agent/comp/languagedetection/client" langDetectionClimpl "github.com/DataDog/datadog-agent/comp/languagedetection/client/clientimpl" "github.com/DataDog/datadog-agent/comp/logs" + "github.com/DataDog/datadog-agent/comp/logs/adscheduler/adschedulerimpl" logsAgent "github.com/DataDog/datadog-agent/comp/logs/agent" "github.com/DataDog/datadog-agent/comp/metadata" "github.com/DataDog/datadog-agent/comp/metadata/host" @@ -121,7 +122,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/config/remote/data" commonsettings "github.com/DataDog/datadog-agent/pkg/config/settings" "github.com/DataDog/datadog-agent/pkg/jmxfetch" - adScheduler "github.com/DataDog/datadog-agent/pkg/logs/schedulers/ad" "github.com/DataDog/datadog-agent/pkg/serializer" clusteragentStatus "github.com/DataDog/datadog-agent/pkg/status/clusteragent" endpointsStatus "github.com/DataDog/datadog-agent/pkg/status/endpoints" @@ -428,6 +428,7 @@ func getSharedFxOption() fx.Option { } }), healthprobeimpl.Module(), + adschedulerimpl.Module(), fx.Provide(func(serverDebug dogstatsddebug.Component) settings.Settings { return settings.Settings{ "log_level": commonsettings.NewLogLevelRuntimeSetting(), @@ -522,11 +523,6 @@ func startAgent( } } - if logsAgent, ok := logsAgent.Get(); ok { - // TODO: (components) - once adScheduler is a component, inject it into the logs agent. - logsAgent.AddScheduler(adScheduler.New(ac)) - } - // start the cloudfoundry container tagger if pkgconfig.IsFeaturePresent(pkgconfig.CloudFoundry) && !pkgconfig.Datadog.GetBool("cloud_foundry_buildpack") { containerTagger, err := containertagger.NewContainerTagger(wmeta) diff --git a/comp/README.md b/comp/README.md index a20c6fe22ca41..95a593d9a2a80 100644 --- a/comp/README.md +++ b/comp/README.md @@ -252,6 +252,10 @@ Package client implements a component to send process metadata to the Cluster-Ag +### [comp/logs/adscheduler](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/logs/adscheduler) + +Package adscheduler is glue code to connect autodiscovery to the logs agent. It receives and filters events and converts them into log sources. + ### [comp/logs/agent](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/logs/agent) Package agent contains logs agent component. 
diff --git a/comp/logs/adscheduler/adschedulerimpl/adscheduler.go b/comp/logs/adscheduler/adschedulerimpl/adscheduler.go new file mode 100644 index 0000000000000..7507f092f7aa7 --- /dev/null +++ b/comp/logs/adscheduler/adschedulerimpl/adscheduler.go @@ -0,0 +1,35 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +// Package adschedulerimpl contains the AD scheduler implementation. +package adschedulerimpl + +import ( + "go.uber.org/fx" + + "github.com/DataDog/datadog-agent/comp/core/autodiscovery" + logsadscheduler "github.com/DataDog/datadog-agent/pkg/logs/schedulers/ad" + + "github.com/DataDog/datadog-agent/comp/logs/agent" + + "github.com/DataDog/datadog-agent/pkg/util/fxutil" +) + +// Module defines the fx options for this component. +func Module() fxutil.Module { + return fxutil.Component( + fx.Provide(newADScheduler), + ) +} + +type dependencies struct { + fx.In + Autodiscovery autodiscovery.Component +} + +func newADScheduler(deps dependencies) agent.SchedulerProvider { + scheduler := logsadscheduler.New(deps.Autodiscovery) + return agent.NewSchedulerProvider(scheduler) +} diff --git a/comp/logs/adscheduler/component.go b/comp/logs/adscheduler/component.go new file mode 100644 index 0000000000000..c04779b5a11d8 --- /dev/null +++ b/comp/logs/adscheduler/component.go @@ -0,0 +1,12 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +// Package adscheduler is glue code to connect autodiscovery to the logs agent. It receives and filters events and converts them into log sources. +package adscheduler + +// team: agent-metrics-logs + +// Component is the component type. 
+type Component interface{} diff --git a/comp/logs/agent/agent.go b/comp/logs/agent/agent.go index e3daf84a50cd0..f4efc8de3e79b 100644 --- a/comp/logs/agent/agent.go +++ b/comp/logs/agent/agent.go @@ -61,12 +61,13 @@ const ( type dependencies struct { fx.In - Lc fx.Lifecycle - Log logComponent.Component - Config configComponent.Component - InventoryAgent inventoryagent.Component - Hostname hostname.Component - WMeta optional.Option[workloadmeta.Component] + Lc fx.Lifecycle + Log logComponent.Component + Config configComponent.Component + InventoryAgent inventoryagent.Component + Hostname hostname.Component + WMeta optional.Option[workloadmeta.Component] + SchedulerProviders []schedulers.Scheduler `group:"log-agent-scheduler"` } type provides struct { @@ -100,6 +101,7 @@ type agent struct { diagnosticMessageReceiver *diagnostic.BufferedMessageReceiver flarecontroller *flareController.FlareController wmeta optional.Option[workloadmeta.Component] + schedulerProviders []schedulers.Scheduler // started is true if the logs agent is running started *atomic.Bool @@ -118,11 +120,12 @@ func newLogsAgent(deps dependencies) provides { hostname: deps.Hostname, started: atomic.NewBool(false), - sources: sources.NewLogSources(), - services: service.NewServices(), - tracker: tailers.NewTailerTracker(), - flarecontroller: flareController.NewFlareController(), - wmeta: deps.WMeta, + sources: sources.NewLogSources(), + services: service.NewServices(), + tracker: tailers.NewTailerTracker(), + flarecontroller: flareController.NewFlareController(), + wmeta: deps.WMeta, + schedulerProviders: deps.SchedulerProviders, } deps.Lc.Append(fx.Hook{ OnStart: logsAgent.start, @@ -176,6 +179,10 @@ func (a *agent) start(context.Context) error { a.startPipeline() a.log.Info("logs-agent started") + for _, scheduler := range a.schedulerProviders { + a.AddScheduler(scheduler) + } + return nil } diff --git a/comp/logs/agent/scheduler_provider.go b/comp/logs/agent/scheduler_provider.go new file mode 100644 index 0000000000000..102276249c493 --- /dev/null +++ b/comp/logs/agent/scheduler_provider.go @@ -0,0 +1,25 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +package agent + +import ( + "github.com/DataDog/datadog-agent/pkg/logs/schedulers" + "go.uber.org/fx" +) + +// SchedulerProvider provides a scheduler for the log Agent. +type SchedulerProvider struct { + fx.Out + + Scheduler schedulers.Scheduler `group:"log-agent-scheduler"` +} + +// NewSchedulerProvider returns a new SchedulerProvider. 
+func NewSchedulerProvider(scheduler schedulers.Scheduler) SchedulerProvider { + return SchedulerProvider{ + Scheduler: scheduler, + } +} From 296ba3ab4c836bebdce4677d05b1225218535913 Mon Sep 17 00:00:00 2001 From: Kevin Fairise <132568982+KevinFairise2@users.noreply.github.com> Date: Mon, 15 Apr 2024 14:20:16 +0200 Subject: [PATCH 42/99] Fast tests: Add a list of files that should trigger all the test always (#24633) * Fast tests: Add a list of files that should trigger all the test always * Add some test cases * Add test cases --- tasks/go_test.py | 24 +++++++++++++++++++++++- tasks/unit-tests/go_tests.py | 26 +++++++++++++++++++++++++- 2 files changed, 48 insertions(+), 2 deletions(-) diff --git a/tasks/go_test.py b/tasks/go_test.py index d09d696d7ebd7..03756014c5391 100644 --- a/tasks/go_test.py +++ b/tasks/go_test.py @@ -5,6 +5,7 @@ # Recent versions of Python should be able to use dict and list directly in type hints, # so we only need to check that we don't run this code with old Python versions. +import fnmatch import glob import json import operator @@ -39,6 +40,7 @@ GO_COV_TEST_PATH = "test_with_coverage" GO_TEST_RESULT_TMP_JSON = 'module_test_output.json' WINDOWS_MAX_PACKAGES_NUMBER = 150 +TRIGGER_ALL_TESTS_PATHS = ["tasks/go_test.py", "tasks/build_tags.py", ".gitlab/source_test/*"] class TestProfiler: @@ -756,6 +758,18 @@ def get_impacted_packages(ctx, build_tags=None): dependencies = create_dependencies(ctx, build_tags) files = get_modified_files(ctx) + # Safeguard to be sure that the files that should trigger all test are not renamed without being updated + for file in TRIGGER_ALL_TESTS_PATHS: + if len(glob.glob(file)) == 0: + raise Exit( + code=1, + message=f"No file matched {file} make sure you modified TRIGGER_ALL_TEST_FILES if you renamed one of them", + ) + + # Some files like tasks/go_test.py should trigger all tests + if should_run_all_tests(files, TRIGGER_ALL_TESTS_PATHS): + return DEFAULT_MODULES.values() + modified_packages = { f"github.com/DataDog/datadog-agent/{os.path.dirname(file)}" for file in files @@ -843,7 +857,7 @@ def format_packages(ctx, impacted_packages): module_path = get_go_module(package).replace("./", "") # Check if the module is in the target list of the modules we want to test - if module_path not in DEFAULT_MODULES: + if module_path not in DEFAULT_MODULES or not DEFAULT_MODULES[module_path].condition(): continue # Check if the package is in the target list of the module we want to test @@ -918,6 +932,14 @@ def get_go_module(path): raise Exception(f"No go.mod file found for package at {path}") +def should_run_all_tests(files, trigger_files): + for trigger_file in trigger_files: + if len(fnmatch.filter(files, trigger_file)): + print(f"Triggering all tests because a file matching {trigger_file} was modified") + return True + return False + + @task(iterable=['flavors']) def lint_go( ctx, diff --git a/tasks/unit-tests/go_tests.py b/tasks/unit-tests/go_tests.py index 7ea0b507a89b5..8dce2e597755b 100644 --- a/tasks/unit-tests/go_tests.py +++ b/tasks/unit-tests/go_tests.py @@ -1,6 +1,6 @@ import unittest -from tasks.go_test import find_impacted_packages +from tasks.go_test import find_impacted_packages, should_run_all_tests class TestUtils(unittest.TestCase): @@ -45,3 +45,27 @@ def test_impacted_packages_4(self): changed_files = {"pkg3"} expected_impacted_packages = {"pkg3"} self.assertEqual(find_impacted_packages(dependencies, changed_files), expected_impacted_packages) + + def test_should_run_all_tests_1(self): + modified_files = ["pkg/foo.go", 
"pkg/bar.go"] + trigger_files = ["pkg/foo.go"] + + self.assertTrue(should_run_all_tests(modified_files, trigger_files)) + + def test_should_run_all_tests_2(self): + modified_files = ["pkg/toto/bar.go"] + trigger_files = ["pkg/*"] + + self.assertTrue(should_run_all_tests(modified_files, trigger_files)) + + def test_should_run_all_tests_3(self): + modified_files = ["pkg/foo.go", "pkg/bar.go"] + trigger_files = ["pkg/toto/bar.go"] + + self.assertFalse(should_run_all_tests(modified_files, trigger_files)) + + def test_should_run_all_tests_4(self): + modified_files = ["pkg/foo.go", "pkg/bar.go"] + trigger_files = ["pkgs/*"] + + self.assertFalse(should_run_all_tests(modified_files, trigger_files)) From da2bcc04946e1829c31c8296b97c3fb5b6ca2524 Mon Sep 17 00:00:00 2001 From: AliDatadog <125997632+AliDatadog@users.noreply.github.com> Date: Mon, 15 Apr 2024 14:22:11 +0200 Subject: [PATCH 43/99] [CONTINT-3853][Dogstatsd] Add support of origin detection with container name + pod uid (#24482) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * parse the container-name with the pod uid * make sure to register the collector in the generic metrics provider * Apply suggestions from code review Co-authored-by: Lénaïc Huard * remove the interface * add e2e tests (requires to bump test-infra-definitions) --------- Co-authored-by: Lénaïc Huard --- comp/core/tagger/tagger.go | 38 ++++++++++- comp/core/tagger/tagger_test.go | 68 +++++++++++++++++++ .../containers/metrics/kubelet/collector.go | 5 +- .../metrics/provider/metacollector.go | 3 + .../metrics/provider/metacollector_test.go | 55 +++++++++++++++ pkg/util/containers/metrics/provider/mock.go | 2 +- test/new-e2e/tests/containers/k8s_test.go | 19 ++++-- 7 files changed, 179 insertions(+), 11 deletions(-) diff --git a/comp/core/tagger/tagger.go b/comp/core/tagger/tagger.go index 21d0559189c9c..46cecff87b77b 100644 --- a/comp/core/tagger/tagger.go +++ b/comp/core/tagger/tagger.go @@ -8,7 +8,9 @@ package tagger import ( "context" "reflect" + "regexp" "sync" + "time" configComponent "github.com/DataDog/datadog-agent/comp/core/config" logComp "github.com/DataDog/datadog-agent/comp/core/log" @@ -27,6 +29,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/common" "github.com/DataDog/datadog-agent/pkg/util/containers" "github.com/DataDog/datadog-agent/pkg/util/containers/metrics" + "github.com/DataDog/datadog-agent/pkg/util/containers/metrics/provider" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/kubelet" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -35,6 +38,8 @@ import ( "go.uber.org/fx" ) +var entityIDRegex = regexp.MustCompile(`^en-(init\.)?([a-fA-F0-9-]+)/([a-zA-Z0-9-_]+)$`) + type dependencies struct { fx.In @@ -408,7 +413,7 @@ func (t *TaggerClient) EnrichTags(tb tagset.TagsAccumulator, originInfo taggerty if originInfo.FromTag != "" && originInfo.FromTag != "none" { // Check if the value is not "none" in order to avoid calling the tagger for entity that doesn't exist. // Currently only supported for pods - originFromClient = kubelet.KubePodTaggerEntityPrefix + originInfo.FromTag + originFromClient = t.parseEntityID(originInfo.FromTag, metrics.GetProvider(optional.NewOption(t.wmeta)).GetMetaCollector()) } else if originInfo.FromTag == "" && len(originInfo.FromMsg) > 0 { // originInfo.FromMsg is the container ID sent by the newer clients. 
originFromClient = containers.BuildTaggerEntityName(originInfo.FromMsg) @@ -442,6 +447,37 @@ func (t *TaggerClient) EnrichTags(tb tagset.TagsAccumulator, originInfo taggerty } +// parseEntityID parses the entity ID and returns the correct tagger entity +// It can be either just a pod uid or `en-(init.)$(POD_UID)/$(CONTAINER_NAME)` +func (t *TaggerClient) parseEntityID(entityID string, metricsProvider provider.ContainerIDForPodUIDAndContNameRetriever) string { + // Parse the (init.)$(POD_UID)/$(CONTAINER_NAME) entity ID with a regex + parts := entityIDRegex.FindStringSubmatch(entityID) + var cname, podUID string + initCont := false + switch len(parts) { + case 0: + return kubelet.KubePodTaggerEntityPrefix + entityID + case 3: + podUID = parts[1] + cname = parts[2] + case 4: + podUID = parts[2] + cname = parts[3] + initCont = parts[1] == "init." + } + cid, err := metricsProvider.ContainerIDForPodUIDAndContName( + podUID, + cname, + initCont, + time.Second, + ) + if err != nil { + log.Debugf("Error getting container ID for pod UID and container name: %s", err) + return entityID + } + return containers.BuildTaggerEntityName(cid) +} + // taggerCardinality converts tagger cardinality string to collectors.TagCardinality // It defaults to DogstatsdCardinality if the string is empty or unknown func taggerCardinality(cardinality string) collectors.TagCardinality { diff --git a/comp/core/tagger/tagger_test.go b/comp/core/tagger/tagger_test.go index b21defe231c86..5266c369dabce 100644 --- a/comp/core/tagger/tagger_test.go +++ b/comp/core/tagger/tagger_test.go @@ -7,6 +7,7 @@ package tagger import ( "testing" + "time" "github.com/stretchr/testify/assert" @@ -14,7 +15,9 @@ import ( "github.com/DataDog/datadog-agent/pkg/config" taggertypes "github.com/DataDog/datadog-agent/pkg/tagger/types" "github.com/DataDog/datadog-agent/pkg/tagset" + "github.com/DataDog/datadog-agent/pkg/util/containers" "github.com/DataDog/datadog-agent/pkg/util/fxutil" + "github.com/DataDog/datadog-agent/pkg/util/kubernetes/kubelet" ) // TODO Improve test coverage with dogstatsd/enrich tests once Origin Detection is refactored. 
@@ -84,3 +87,68 @@ func TestEnrichTagsOptOut(t *testing.T) { EnrichTags(tb, taggertypes.OriginInfo{FromUDS: "originID", FromMsg: "container-id", Cardinality: "none", ProductOrigin: taggertypes.ProductOriginDogStatsD}) assert.Equal(t, []string{}, tb.Get()) } + +type fakeCIDProvider struct { + entries map[string]string + initEntries map[string]string +} + +func (f *fakeCIDProvider) ContainerIDForPodUIDAndContName(podUID, contName string, initCont bool, _ time.Duration) (string, error) { + id := podUID + "/" + contName + if initCont { + return f.initEntries[id], nil + } + return f.entries[id], nil +} + +func TestParseEntityID(t *testing.T) { + for _, tt := range []struct { + name string + entityID string + expected string + cidProvider *fakeCIDProvider + }{ + { + name: "empty", + entityID: "", + expected: kubelet.KubePodTaggerEntityPrefix, + cidProvider: &fakeCIDProvider{}, + }, + { + name: "pod uid", + entityID: "my-pod_uid", + expected: kubelet.KubePodTaggerEntityPrefix + "my-pod_uid", + cidProvider: &fakeCIDProvider{}, + }, + { + name: "container + pod uid", + entityID: "en-62381f4f-a19f-4f37-9413-90b738f92f83/appp", + expected: containers.BuildTaggerEntityName("cid"), + cidProvider: &fakeCIDProvider{ + entries: map[string]string{ + "62381f4f-a19f-4f37-9413-90b738f92f83/appp": "cid", + }, + }, + }, + { + name: "init container + pod uid", + entityID: "en-init.62381f4f-a19f-4f37-9413-90b738f92f83/appp", + expected: containers.BuildTaggerEntityName("init-cid"), + cidProvider: &fakeCIDProvider{ + initEntries: map[string]string{ + "62381f4f-a19f-4f37-9413-90b738f92f83/appp": "init-cid", + }, + }, + }, + { + name: "not found", + entityID: "en-init.62381f4f-a19f-4f37-9413-90b738f92f83/init-my-cont_name", + cidProvider: &fakeCIDProvider{}, + }, + } { + t.Run(tt.name, func(t *testing.T) { + fakeCl := TaggerClient{} + assert.Equal(t, tt.expected, fakeCl.parseEntityID(tt.entityID, tt.cidProvider)) + }) + } +} diff --git a/pkg/util/containers/metrics/kubelet/collector.go b/pkg/util/containers/metrics/kubelet/collector.go index dab4e843723ad..c540456d3b7df 100644 --- a/pkg/util/containers/metrics/kubelet/collector.go +++ b/pkg/util/containers/metrics/kubelet/collector.go @@ -77,8 +77,9 @@ func newKubeletCollector(_ *provider.Cache, wmeta workloadmeta.Component) (provi } collectors := &provider.Collectors{ - Stats: provider.MakeRef[provider.ContainerStatsGetter](collector, collectorPriority), - Network: provider.MakeRef[provider.ContainerNetworkStatsGetter](collector, collectorPriority), + Stats: provider.MakeRef[provider.ContainerStatsGetter](collector, collectorPriority), + Network: provider.MakeRef[provider.ContainerNetworkStatsGetter](collector, collectorPriority), + ContainerIDForPodUIDAndContName: provider.MakeRef[provider.ContainerIDForPodUIDAndContNameRetriever](collector, collectorPriority), } return provider.CollectorMetadata{ diff --git a/pkg/util/containers/metrics/provider/metacollector.go b/pkg/util/containers/metrics/provider/metacollector.go index 6a8f50d379b02..45d23f5807600 100644 --- a/pkg/util/containers/metrics/provider/metacollector.go +++ b/pkg/util/containers/metrics/provider/metacollector.go @@ -31,6 +31,9 @@ func (mc *metaCollector) collectorsUpdatedCallback(collectorsCatalog CollectorCa mc.selfContainerIDcollectors = buildUniqueCollectors(collectorsCatalog, func(c *Collectors) CollectorRef[SelfContainerIDRetriever] { return c.SelfContainerID }) mc.containerIDFromPIDcollectors = buildUniqueCollectors(collectorsCatalog, func(c *Collectors) 
CollectorRef[ContainerIDForPIDRetriever] { return c.ContainerIDForPID }) mc.containerIDFromInodeCollectors = buildUniqueCollectors(collectorsCatalog, func(c *Collectors) CollectorRef[ContainerIDForInodeRetriever] { return c.ContainerIDForInode }) + mc.ContainerIDForPodUIDAndContNameCollectors = buildUniqueCollectors(collectorsCatalog, func(c *Collectors) CollectorRef[ContainerIDForPodUIDAndContNameRetriever] { + return c.ContainerIDForPodUIDAndContName + }) } // GetSelfContainerID returns the container ID for current container. diff --git a/pkg/util/containers/metrics/provider/metacollector_test.go b/pkg/util/containers/metrics/provider/metacollector_test.go index 6cab889e1538e..88eae68a538e7 100644 --- a/pkg/util/containers/metrics/provider/metacollector_test.go +++ b/pkg/util/containers/metrics/provider/metacollector_test.go @@ -19,6 +19,10 @@ func TestMetaCollector(t *testing.T) { 1: "foo1", }, selfContainerID: "agent1", + cIDForPodCont: map[string]string{ + "pc-pod1/foo": "cID1", + "pc-pod1/i-foo": "cID2", + }, } actualCollector2 := &dummyCollector{ id: "foo2", @@ -26,6 +30,10 @@ func TestMetaCollector(t *testing.T) { 2: "foo2", }, selfContainerID: "agent2", + cIDForPodCont: map[string]string{ + "pc-pod1/foo": "cID3", + "pc-pod1/i-foo": "cID4", + }, } actualCollector3 := &dummyCollector{ id: "foo3", @@ -46,6 +54,9 @@ func TestMetaCollector(t *testing.T) { Collector: actualCollector1, Priority: 0, }, + ContainerIDForPodUIDAndContName: CollectorRef[ContainerIDForPodUIDAndContNameRetriever]{ + Collector: actualCollector1, + }, }, RuntimeMetadata{runtime: RuntimeNameDocker}: &Collectors{ ContainerIDForPID: CollectorRef[ContainerIDForPIDRetriever]{ @@ -56,6 +67,10 @@ func TestMetaCollector(t *testing.T) { Collector: actualCollector2, Priority: 1, }, + ContainerIDForPodUIDAndContName: CollectorRef[ContainerIDForPodUIDAndContNameRetriever]{ + Collector: actualCollector2, + Priority: 1, + }, }, }) @@ -71,6 +86,30 @@ func TestMetaCollector(t *testing.T) { assert.NoError(t, err) assert.Equal(t, "", cID3) + cIDPodUIDAndContName, err := metaCollector.ContainerIDForPodUIDAndContName("pod1", "foo", false, 0) + assert.NoError(t, err) + assert.Equal(t, "cID1", cIDPodUIDAndContName) + + cIDPodUIDAndContNameInit, err := metaCollector.ContainerIDForPodUIDAndContName("pod1", "foo", true, 0) + assert.NoError(t, err) + assert.Equal(t, "cID2", cIDPodUIDAndContNameInit) + + cIDPodUID, err := metaCollector.ContainerIDForPodUIDAndContName("pod1", "", false, 0) + assert.NoError(t, err) + assert.Equal(t, "", cIDPodUID) + + cIDContName, err := metaCollector.ContainerIDForPodUIDAndContName("", "foo", false, 0) + assert.NoError(t, err) + assert.Equal(t, "", cIDContName) + + cIDEmpty, err := metaCollector.ContainerIDForPodUIDAndContName("", "", false, 0) + assert.NoError(t, err) + assert.Equal(t, "", cIDEmpty) + + cIDEmptyInit, err := metaCollector.ContainerIDForPodUIDAndContName("", "", true, 0) + assert.NoError(t, err) + assert.Equal(t, "", cIDEmptyInit) + // Add the failing collector metaCollector.collectorsUpdatedCallback( CollectorCatalog{ @@ -83,6 +122,10 @@ func TestMetaCollector(t *testing.T) { Collector: actualCollector1, Priority: 0, }, + ContainerIDForPodUIDAndContName: CollectorRef[ContainerIDForPodUIDAndContNameRetriever]{ + Collector: actualCollector1, + Priority: 0, + }, }, RuntimeMetadata{runtime: RuntimeNameDocker}: &Collectors{ ContainerIDForPID: CollectorRef[ContainerIDForPIDRetriever]{ @@ -93,6 +136,10 @@ func TestMetaCollector(t *testing.T) { Collector: actualCollector2, Priority: 1, }, + 
ContainerIDForPodUIDAndContName: CollectorRef[ContainerIDForPodUIDAndContNameRetriever]{ + Collector: actualCollector2, + Priority: 1, + }, }, RuntimeMetadata{runtime: RuntimeNameCRIO}: &Collectors{ ContainerIDForPID: CollectorRef[ContainerIDForPIDRetriever]{ @@ -103,6 +150,10 @@ func TestMetaCollector(t *testing.T) { Collector: actualCollector3, Priority: 2, }, + ContainerIDForPodUIDAndContName: CollectorRef[ContainerIDForPodUIDAndContNameRetriever]{ + Collector: actualCollector3, + Priority: 2, + }, }, }, ) @@ -111,6 +162,10 @@ func TestMetaCollector(t *testing.T) { assert.Equal(t, err, actualCollector3.err) assert.Equal(t, "", cID4) + cIDPodUIDAndContName, err = metaCollector.ContainerIDForPodUIDAndContName("pod3", "foo", false, 0) + assert.Equal(t, err, actualCollector3.err) + assert.Equal(t, "", cIDPodUIDAndContName) + selfCID, err := metaCollector.GetSelfContainerID() assert.NoError(t, err) assert.Equal(t, "agent1", selfCID) diff --git a/pkg/util/containers/metrics/provider/mock.go b/pkg/util/containers/metrics/provider/mock.go index f70ea573338dd..7f879f98d3321 100644 --- a/pkg/util/containers/metrics/provider/mock.go +++ b/pkg/util/containers/metrics/provider/mock.go @@ -74,7 +74,7 @@ func (d *dummyCollector) ContainerIDForPodUIDAndContName(podUID, contName string initPrefix = "i-" } cacheKey := contPodUIDContNameToCidCachePrefix + podUID + "/" + initPrefix + contName - return d.cIDForPodCont[cacheKey], nil + return d.cIDForPodCont[cacheKey], d.err } // Helpers not part of Collector interface diff --git a/test/new-e2e/tests/containers/k8s_test.go b/test/new-e2e/tests/containers/k8s_test.go index 88df065ff3551..a6f8815e9ee66 100644 --- a/test/new-e2e/tests/containers/k8s_test.go +++ b/test/new-e2e/tests/containers/k8s_test.go @@ -34,13 +34,14 @@ import ( ) const ( - kubeNamespaceDogstatsWorkload = "workload-dogstatsd" - kubeNamespaceDogstatsStandaloneWorkload = "workload-dogstatsd-standalone" - kubeNamespaceTracegenWorkload = "workload-tracegen" - kubeDeploymentDogstatsdUDPOrigin = "dogstatsd-udp-origin-detection" - kubeDeploymentDogstatsdUDS = "dogstatsd-uds" - kubeDeploymentTracegenTCPWorkload = "tracegen-tcp" - kubeDeploymentTracegenUDSWorkload = "tracegen-uds" + kubeNamespaceDogstatsWorkload = "workload-dogstatsd" + kubeNamespaceDogstatsStandaloneWorkload = "workload-dogstatsd-standalone" + kubeNamespaceTracegenWorkload = "workload-tracegen" + kubeDeploymentDogstatsdUDPOrigin = "dogstatsd-udp-origin-detection" + kubeDeploymentDogstatsdUDS = "dogstatsd-uds" + kubeDeploymentDogstatsdUDPOriginContNameInjected = "dogstatsd-udp-contname-injected" + kubeDeploymentTracegenTCPWorkload = "tracegen-tcp" + kubeDeploymentTracegenUDSWorkload = "tracegen-uds" ) var GitCommit string @@ -599,6 +600,8 @@ func (suite *k8sSuite) TestDogstatsdInAgent() { suite.testDogstatsdContainerID(kubeNamespaceDogstatsWorkload, kubeDeploymentDogstatsdUDS) // Test with UDP + Origin detection suite.testDogstatsdContainerID(kubeNamespaceDogstatsWorkload, kubeDeploymentDogstatsdUDPOrigin) + // Test with UDP + DD_ENTITY_ID with container name injected + suite.testDogstatsdContainerID(kubeNamespaceDogstatsWorkload, kubeDeploymentDogstatsdUDPOriginContNameInjected) // Test with UDP + DD_ENTITY_ID suite.testDogstatsdPodUID(kubeNamespaceDogstatsWorkload) } @@ -609,6 +612,8 @@ func (suite *k8sSuite) TestDogstatsdStandalone() { // Dogstatsd standalone does not support origin detection // Test with UDP + DD_ENTITY_ID suite.testDogstatsdPodUID(kubeNamespaceDogstatsWorkload) + // Test with UDP + DD_ENTITY_ID with container 
name injected + suite.testDogstatsdContainerID(kubeNamespaceDogstatsWorkload, kubeDeploymentDogstatsdUDPOriginContNameInjected) } func (suite *k8sSuite) testDogstatsdPodUID(kubeNamespace string) { From 6dd4e033cc46cd295b66632b6e0cd59df731ee06 Mon Sep 17 00:00:00 2001 From: Yoann Ghigoff Date: Mon, 15 Apr 2024 14:30:07 +0200 Subject: [PATCH 44/99] [CWS] prevent selftests channel from blocking messages goroutine (#24580) * ebpfless: fix selftests chan blocking messages goroutine * ebpfless: make selftest rule more restrictive --- pkg/security/probe/selftests/ebpfless.go | 2 +- pkg/security/probe/selftests/tester.go | 7 ++++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/pkg/security/probe/selftests/ebpfless.go b/pkg/security/probe/selftests/ebpfless.go index 224037594ea1d..578143231e490 100644 --- a/pkg/security/probe/selftests/ebpfless.go +++ b/pkg/security/probe/selftests/ebpfless.go @@ -29,7 +29,7 @@ func (o *EBPFLessSelfTest) GetRuleDefinition() *rules.RuleDefinition { return &rules.RuleDefinition{ ID: o.ruleID, - Expression: `exec.file.path != "" && process.parent.pid == 0`, + Expression: `exec.file.path != "" && process.parent.pid == 0 && process.ppid == 0`, Every: time.Duration(math.MaxInt64), } } diff --git a/pkg/security/probe/selftests/tester.go b/pkg/security/probe/selftests/tester.go index 5e41f240ca3f0..29713d631c0d8 100644 --- a/pkg/security/probe/selftests/tester.go +++ b/pkg/security/probe/selftests/tester.go @@ -214,7 +214,12 @@ func (t *SelfTester) IsExpectedEvent(rule *rules.Rule, event eval.Event, _ *prob Event: s, } - t.eventChan <- selfTestEvent + select { + case t.eventChan <- selfTestEvent: + default: + log.Errorf("self test channel is full, discarding event.\n") + } + return true } return false From 48db7c4558cdeb3d50d8a434df0631506758167c Mon Sep 17 00:00:00 2001 From: Yoann Ghigoff Date: Mon, 15 Apr 2024 14:30:59 +0200 Subject: [PATCH 45/99] ebpfless: fix container context of procfs-based exec events (#24581) --- pkg/security/probe/probe_ebpfless.go | 71 ++++++++++++++++------------ pkg/security/proto/ebpfless/msg.go | 3 ++ pkg/security/ptracer/cws.go | 1 + pkg/security/ptracer/proc.go | 12 +++-- pkg/security/ptracer/utils.go | 12 ++++- 5 files changed, 64 insertions(+), 35 deletions(-) diff --git a/pkg/security/probe/probe_ebpfless.go b/pkg/security/probe/probe_ebpfless.go index 423f25da8922e..1c21d1e9a92fa 100644 --- a/pkg/security/probe/probe_ebpfless.go +++ b/pkg/security/probe/probe_ebpfless.go @@ -42,12 +42,12 @@ const ( ) type client struct { - conn net.Conn - probe *EBPFLessProbe - seqNum uint64 - nsID uint64 - containerContext *ebpfless.ContainerContext - entrypointArgs []string + conn net.Conn + probe *EBPFLessProbe + seqNum uint64 + nsID uint64 + containerID string + containerName string } type clientMsg struct { @@ -59,7 +59,8 @@ type clientMsg struct { type EBPFLessProbe struct { sync.Mutex - Resolvers *resolvers.EBPFLessResolvers + Resolvers *resolvers.EBPFLessResolvers + containerContexts map[string]*ebpfless.ContainerContext // Constants and configuration opts Opts @@ -92,10 +93,11 @@ func (p *EBPFLessProbe) handleClientMsg(cl *client, msg *ebpfless.Message) { ) cl.nsID = msg.Hello.NSID - cl.containerContext = msg.Hello.ContainerContext - cl.entrypointArgs = msg.Hello.EntrypointArgs - if cl.containerContext != nil { - seclog.Infof("tracing started for container ID [%s] (Name: [%s]) with entrypoint %q", cl.containerContext.ID, cl.containerContext.Name, cl.entrypointArgs) + if msg.Hello.ContainerContext != nil { + cl.containerID = 
msg.Hello.ContainerContext.ID + cl.containerName = msg.Hello.ContainerContext.Name + p.containerContexts[msg.Hello.ContainerContext.ID] = msg.Hello.ContainerContext + seclog.Infof("tracing started for container ID [%s] (Name: [%s]) with entrypoint %q", msg.Hello.ContainerContext.ID, msg.Hello.ContainerContext.Name, msg.Hello.EntrypointArgs) } } case ebpfless.MessageTypeSyscall: @@ -132,7 +134,7 @@ func (p *EBPFLessProbe) handleSyscallMsg(cl *client, syscallMsg *ebpfless.Syscal entry := p.Resolvers.ProcessResolver.AddExecEntry( process.CacheResolverKey{Pid: syscallMsg.PID, NSID: cl.nsID}, syscallMsg.Exec.PPID, syscallMsg.Exec.File.Filename, syscallMsg.Exec.Args, syscallMsg.Exec.ArgsTruncated, syscallMsg.Exec.Envs, syscallMsg.Exec.EnvsTruncated, - cl.containerContext.ID, syscallMsg.Timestamp, syscallMsg.Exec.TTY) + syscallMsg.ContainerID, syscallMsg.Timestamp, syscallMsg.Exec.TTY) if syscallMsg.Exec.Credentials != nil { entry.Credentials.UID = syscallMsg.Exec.Credentials.UID entry.Credentials.EUID = syscallMsg.Exec.Credentials.EUID @@ -256,11 +258,13 @@ func (p *EBPFLessProbe) handleSyscallMsg(cl *client, syscallMsg *ebpfless.Syscal } // container context - event.ContainerContext.ID = cl.containerContext.ID - event.ContainerContext.CreatedAt = cl.containerContext.CreatedAt - event.ContainerContext.Tags = []string{ - "image_name:" + cl.containerContext.ImageShortName, - "image_tag:" + cl.containerContext.ImageTag, + event.ContainerContext.ID = syscallMsg.ContainerID + if containerContext, exists := p.containerContexts[syscallMsg.ContainerID]; exists { + event.ContainerContext.CreatedAt = containerContext.CreatedAt + event.ContainerContext.Tags = []string{ + "image_name:" + containerContext.ImageShortName, + "image_tag:" + containerContext.ImageTag, + } } // use ProcessCacheEntry process context as process context @@ -400,9 +404,8 @@ func (p *EBPFLessProbe) handleNewClient(conn net.Conn, ch chan clientMsg) { delete(p.clients, conn) p.Unlock() - if client.containerContext != nil { - seclog.Infof("tracing stopped for container ID [%s] (Name: [%s])", client.containerContext.ID, client.containerContext.Name) - } + msg.Type = ebpfless.MessageTypeGoodbye + ch <- msg return } @@ -445,6 +448,13 @@ func (p *EBPFLessProbe) Start() error { go func() { for msg := range ch { + if msg.Type == ebpfless.MessageTypeGoodbye { + if msg.client.containerID != "" { + delete(p.containerContexts, msg.client.containerID) + seclog.Infof("tracing stopped for container ID [%s] (Name: [%s])", msg.client.containerID, msg.client.containerName) + } + continue + } p.handleClientMsg(msg.client, &msg.Message) } }() @@ -549,16 +559,17 @@ func NewEBPFLessProbe(probe *Probe, config *config.Config, opts Opts) (*EBPFLess var grpcOpts []grpc.ServerOption p := &EBPFLessProbe{ - probe: probe, - config: config, - opts: opts, - statsdClient: opts.StatsdClient, - server: grpc.NewServer(grpcOpts...), - ctx: ctx, - cancelFnc: cancelFnc, - buf: make([]byte, 4096), - clients: make(map[net.Conn]*client), - processKiller: NewProcessKiller(), + probe: probe, + config: config, + opts: opts, + statsdClient: opts.StatsdClient, + server: grpc.NewServer(grpcOpts...), + ctx: ctx, + cancelFnc: cancelFnc, + buf: make([]byte, 4096), + clients: make(map[net.Conn]*client), + processKiller: NewProcessKiller(), + containerContexts: make(map[string]*ebpfless.ContainerContext), } resolversOpts := resolvers.Opts{ diff --git a/pkg/security/proto/ebpfless/msg.go b/pkg/security/proto/ebpfless/msg.go index 7da9dbe2b7e27..f68309a5e762c 100644 --- 
a/pkg/security/proto/ebpfless/msg.go +++ b/pkg/security/proto/ebpfless/msg.go @@ -22,6 +22,8 @@ const ( MessageTypeHello // MessageTypeSyscall syscall type MessageTypeSyscall + // MessageTypeGoodbye event type + MessageTypeGoodbye ) // SyscallType defines the type of a syscall message @@ -259,6 +261,7 @@ type SyscallMsg struct { PID uint32 Timestamp uint64 Retval int64 + ContainerID string Exec *ExecSyscallMsg `json:",omitempty"` Open *OpenSyscallMsg `json:",omitempty"` Fork *ForkSyscallMsg `json:",omitempty"` diff --git a/pkg/security/ptracer/cws.go b/pkg/security/ptracer/cws.go index 018f0203483ce..f3bb3ddfffbe6 100644 --- a/pkg/security/ptracer/cws.go +++ b/pkg/security/ptracer/cws.go @@ -319,6 +319,7 @@ func StartCWSPtracer(args []string, envs []string, probeAddr string, opts Opts) } msg.PID = uint32(process.Tgid) msg.Timestamp = uint64(time.Now().UnixNano()) + msg.ContainerID = containerID send(&ebpfless.Message{ Type: ebpfless.MessageTypeSyscall, Syscall: msg, diff --git a/pkg/security/ptracer/proc.go b/pkg/security/ptracer/proc.go index 49f6718e20b3a..f50fdcea2b6d1 100644 --- a/pkg/security/ptracer/proc.go +++ b/pkg/security/ptracer/proc.go @@ -225,12 +225,18 @@ func procToMsg(proc *ProcProcess) (*ebpfless.Message, error) { envs, truncated, _ := collectPIDEnvVars(proc.Pid) + containerID, err := getProcContainerID(int(proc.Pid)) + if err != nil { + return nil, fmt.Errorf("snapshot failed for %d: couldn't get container ID: %w", proc.Pid, err) + } + return &ebpfless.Message{ Type: ebpfless.MessageTypeSyscall, Syscall: &ebpfless.SyscallMsg{ - Type: ebpfless.SyscallTypeExec, - PID: uint32(proc.Pid), - Timestamp: uint64(time.Unix(0, proc.CreateTime*int64(time.Millisecond)).UnixNano()), + Type: ebpfless.SyscallTypeExec, + PID: uint32(proc.Pid), + Timestamp: uint64(time.Unix(0, proc.CreateTime*int64(time.Millisecond)).UnixNano()), + ContainerID: containerID, Exec: &ebpfless.ExecSyscallMsg{ File: ebpfless.FileSyscallMsg{ Filename: filename, diff --git a/pkg/security/ptracer/utils.go b/pkg/security/ptracer/utils.go index 5d1756af19eb1..0aef9b09706f8 100644 --- a/pkg/security/ptracer/utils.go +++ b/pkg/security/ptracer/utils.go @@ -74,8 +74,8 @@ func getProcControlGroupsFromFile(path string) ([]controlGroup, error) { } -func getCurrentProcContainerID() (string, error) { - cgroups, err := getProcControlGroupsFromFile("/proc/self/cgroup") +func getContainerIDFromProcFS(cgroupPath string) (string, error) { + cgroups, err := getProcControlGroupsFromFile(cgroupPath) if err != nil { return "", err } @@ -89,6 +89,14 @@ func getCurrentProcContainerID() (string, error) { return "", nil } +func getCurrentProcContainerID() (string, error) { + return getContainerIDFromProcFS("/proc/self/cgroup") +} + +func getProcContainerID(pid int) (string, error) { + return getContainerIDFromProcFS(fmt.Sprintf("/proc/%d/cgroup", pid)) +} + func getNSID() uint64 { var stat syscall.Stat_t if err := syscall.Stat("/proc/self/ns/pid", &stat); err != nil { From 4f5d6a5c0c2b09b1882dc3105e743f7d7a66bca1 Mon Sep 17 00:00:00 2001 From: Raphael Gavache Date: Mon, 15 Apr 2024 14:45:26 +0200 Subject: [PATCH 46/99] Rename cmd to installer (#24677) --- cmd/{updater => installer}/command/command.go | 14 +++++----- cmd/{updater => installer}/main.go | 8 +++--- .../subcommands/bootstrap/command.go | 6 ++-- .../subcommands/bootstrap/command_test.go | 2 +- .../subcommands/experiment/command.go | 4 +-- .../subcommands/experiment/command_test.go | 2 +- .../subcommands/purge/command.go | 4 +-- .../subcommands/purge/command_test.go | 2 +- 
.../subcommands/run/command.go | 8 +++--- .../subcommands/run/command_test.go | 2 +- .../subcommands/status/command.go | 4 +-- .../subcommands/status/command_test.go | 2 +- .../subcommands/status/status.tmpl | 2 +- cmd/installer/subcommands/subcommands.go | 28 +++++++++++++++++++ cmd/{updater => installer}/user_all.go | 2 +- cmd/{updater => installer}/user_windows.go | 2 +- cmd/updater/subcommands/subcommands.go | 28 ------------------- tasks/installer.py | 2 +- 18 files changed, 61 insertions(+), 61 deletions(-) rename cmd/{updater => installer}/command/command.go (85%) rename cmd/{updater => installer}/main.go (64%) rename cmd/{updater => installer}/subcommands/bootstrap/command.go (95%) rename cmd/{updater => installer}/subcommands/bootstrap/command_test.go (95%) rename cmd/{updater => installer}/subcommands/experiment/command.go (95%) rename cmd/{updater => installer}/subcommands/experiment/command_test.go (93%) rename cmd/{updater => installer}/subcommands/purge/command.go (89%) rename cmd/{updater => installer}/subcommands/purge/command_test.go (89%) rename cmd/{updater => installer}/subcommands/run/command.go (92%) rename cmd/{updater => installer}/subcommands/run/command_test.go (89%) rename cmd/{updater => installer}/subcommands/status/command.go (95%) rename cmd/{updater => installer}/subcommands/status/command_test.go (89%) rename cmd/{updater => installer}/subcommands/status/status.tmpl (93%) create mode 100644 cmd/installer/subcommands/subcommands.go rename cmd/{updater => installer}/user_all.go (96%) rename cmd/{updater => installer}/user_windows.go (87%) delete mode 100644 cmd/updater/subcommands/subcommands.go diff --git a/cmd/updater/command/command.go b/cmd/installer/command/command.go similarity index 85% rename from cmd/updater/command/command.go rename to cmd/installer/command/command.go index 17bd984021f87..18fe8af01fb50 100644 --- a/cmd/updater/command/command.go +++ b/cmd/installer/command/command.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -// Package command implements the top-level `updater` binary, including its subcommands. +// Package command implements the top-level `installer` binary, including its subcommands. package command import ( @@ -17,12 +17,12 @@ import ( // common constants for all the updater subcommands. const ( - ConfigName = "updater" - LoggerName = "UPDATER" + ConfigName = "installer" + LoggerName = "INSTALLER" DefaultLogLevel = "off" ) -// GlobalParams contains the values of updater-global Cobra flags. +// GlobalParams contains the values of installer-global Cobra flags. // // A pointer to this type is passed to SubcommandFactory's, but its contents // are not valid until Cobra calls the subcommand's Run or RunE function. 
@@ -53,13 +53,13 @@ func MakeCommand(subcommandFactories []SubcommandFactory) *cobra.Command { // AgentCmd is the root command agentCmd := &cobra.Command{ Use: fmt.Sprintf("%s [command]", os.Args[0]), - Short: "Datadog Updater at your service.", + Short: "Datadog Installer at your service.", Long: ` -Datadog Updater updates your agents based on requests received from the Datadog UI.`, +Datadog Installer installs datadog-packages based on your commands.`, SilenceUsage: true, } - agentCmd.PersistentFlags().StringVarP(&globalParams.ConfFilePath, "cfgpath", "c", "", "path to directory containing updater.yaml") + agentCmd.PersistentFlags().StringVarP(&globalParams.ConfFilePath, "cfgpath", "c", "", "path to directory containing installer.yaml") agentCmd.PersistentFlags().StringVarP(&globalParams.RepositoriesDir, "repositories", "d", "/opt/datadog-packages", "path to directory containing repositories") agentCmd.PersistentFlags().StringVarP(&globalParams.PIDFilePath, "pidfile", "p", "", "path to the pidfile") _ = agentCmd.MarkFlagRequired("package") diff --git a/cmd/updater/main.go b/cmd/installer/main.go similarity index 64% rename from cmd/updater/main.go rename to cmd/installer/main.go index 490255c807a51..9c5f13c6da4a0 100644 --- a/cmd/updater/main.go +++ b/cmd/installer/main.go @@ -3,19 +3,19 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2023-present Datadog, Inc. -// Package main implements 'updater'. +// Package main implements 'installer'. package main import ( "os" + "github.com/DataDog/datadog-agent/cmd/installer/command" + "github.com/DataDog/datadog-agent/cmd/installer/subcommands" "github.com/DataDog/datadog-agent/cmd/internal/runcmd" - "github.com/DataDog/datadog-agent/cmd/updater/command" - "github.com/DataDog/datadog-agent/cmd/updater/subcommands" ) func main() { // root user is changed to dd-installer to avoid permission issues rootToDDInstaller() - os.Exit(runcmd.Run(command.MakeCommand(subcommands.UpdaterSubcommands()))) + os.Exit(runcmd.Run(command.MakeCommand(subcommands.InstallerSubcommands()))) } diff --git a/cmd/updater/subcommands/bootstrap/command.go b/cmd/installer/subcommands/bootstrap/command.go similarity index 95% rename from cmd/updater/subcommands/bootstrap/command.go rename to cmd/installer/subcommands/bootstrap/command.go index c029932fa7028..32b81a01dcabe 100644 --- a/cmd/updater/subcommands/bootstrap/command.go +++ b/cmd/installer/subcommands/bootstrap/command.go @@ -12,7 +12,7 @@ import ( "strings" "time" - "github.com/DataDog/datadog-agent/cmd/updater/command" + "github.com/DataDog/datadog-agent/cmd/installer/command" "github.com/DataDog/datadog-agent/comp/core" "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/comp/core/log/logimpl" @@ -41,7 +41,7 @@ func Commands(global *command.GlobalParams) []*cobra.Command { bootstrapCmd := &cobra.Command{ Use: "bootstrap", Short: "Bootstraps the package with the first version.", - Long: `Installs the first version of the package managed by this updater. + Long: `Installs the first version of the package managed by the installer. This first version is sent remotely to the agent and can be configured from the UI. 
This command will exit after the first version is installed.`, RunE: func(_ *cobra.Command, _ []string) error { @@ -70,7 +70,7 @@ func bootstrapFxWrapper(ctx context.Context, params *cliParams) error { ConfigParams: config.NewAgentParams(params.GlobalParams.ConfFilePath), SecretParams: secrets.NewEnabledParams(), SysprobeConfigParams: sysprobeconfigimpl.NewParams(), - LogParams: logimpl.ForOneShot("UPDATER", "info", true), + LogParams: logimpl.ForOneShot("INSTALLER", "info", true), }), core.Bundle(), ) diff --git a/cmd/updater/subcommands/bootstrap/command_test.go b/cmd/installer/subcommands/bootstrap/command_test.go similarity index 95% rename from cmd/updater/subcommands/bootstrap/command_test.go rename to cmd/installer/subcommands/bootstrap/command_test.go index 10783ce50fb69..ba889dbc59b6e 100644 --- a/cmd/updater/subcommands/bootstrap/command_test.go +++ b/cmd/installer/subcommands/bootstrap/command_test.go @@ -8,7 +8,7 @@ package bootstrap import ( "testing" - "github.com/DataDog/datadog-agent/cmd/updater/command" + "github.com/DataDog/datadog-agent/cmd/installer/command" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) diff --git a/cmd/updater/subcommands/experiment/command.go b/cmd/installer/subcommands/experiment/command.go similarity index 95% rename from cmd/updater/subcommands/experiment/command.go rename to cmd/installer/subcommands/experiment/command.go index 0218f93508e85..43d03acb8065c 100644 --- a/cmd/updater/subcommands/experiment/command.go +++ b/cmd/installer/subcommands/experiment/command.go @@ -3,13 +3,13 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -// Package experiment implements 'updater {start, stop, promote}-experiment' subcommands. +// Package experiment implements 'installer {start, stop, promote}-experiment' subcommands. 
package experiment import ( "fmt" - "github.com/DataDog/datadog-agent/cmd/updater/command" + "github.com/DataDog/datadog-agent/cmd/installer/command" "github.com/DataDog/datadog-agent/comp/updater/localapiclient" "github.com/DataDog/datadog-agent/comp/updater/localapiclient/localapiclientimpl" "github.com/DataDog/datadog-agent/pkg/util/fxutil" diff --git a/cmd/updater/subcommands/experiment/command_test.go b/cmd/installer/subcommands/experiment/command_test.go similarity index 93% rename from cmd/updater/subcommands/experiment/command_test.go rename to cmd/installer/subcommands/experiment/command_test.go index a745c33315a9a..bdcc9f9037e7d 100644 --- a/cmd/updater/subcommands/experiment/command_test.go +++ b/cmd/installer/subcommands/experiment/command_test.go @@ -8,7 +8,7 @@ package experiment import ( "testing" - "github.com/DataDog/datadog-agent/cmd/updater/command" + "github.com/DataDog/datadog-agent/cmd/installer/command" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) diff --git a/cmd/updater/subcommands/purge/command.go b/cmd/installer/subcommands/purge/command.go similarity index 89% rename from cmd/updater/subcommands/purge/command.go rename to cmd/installer/subcommands/purge/command.go index 406936605f610..245aba27dc2e1 100644 --- a/cmd/updater/subcommands/purge/command.go +++ b/cmd/installer/subcommands/purge/command.go @@ -7,7 +7,7 @@ package purge import ( - "github.com/DataDog/datadog-agent/cmd/updater/command" + "github.com/DataDog/datadog-agent/cmd/installer/command" "github.com/DataDog/datadog-agent/comp/core" "github.com/DataDog/datadog-agent/comp/core/log/logimpl" "github.com/DataDog/datadog-agent/pkg/installer" @@ -32,7 +32,7 @@ func Commands(_ *command.GlobalParams) []*cobra.Command { func purgeFxWrapper() error { return fxutil.OneShot(purge, fx.Supply(core.BundleParams{ - LogParams: logimpl.ForOneShot("UPDATER", "info", true), + LogParams: logimpl.ForOneShot("INSTALLER", "info", true), }), core.Bundle(), ) diff --git a/cmd/updater/subcommands/purge/command_test.go b/cmd/installer/subcommands/purge/command_test.go similarity index 89% rename from cmd/updater/subcommands/purge/command_test.go rename to cmd/installer/subcommands/purge/command_test.go index d19e58b8949cf..a48b3b22ed4bf 100644 --- a/cmd/updater/subcommands/purge/command_test.go +++ b/cmd/installer/subcommands/purge/command_test.go @@ -8,7 +8,7 @@ package purge import ( "testing" - "github.com/DataDog/datadog-agent/cmd/updater/command" + "github.com/DataDog/datadog-agent/cmd/installer/command" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) diff --git a/cmd/updater/subcommands/run/command.go b/cmd/installer/subcommands/run/command.go similarity index 92% rename from cmd/updater/subcommands/run/command.go rename to cmd/installer/subcommands/run/command.go index f4f3cdb5758aa..335d5d2ee506a 100644 --- a/cmd/updater/subcommands/run/command.go +++ b/cmd/installer/subcommands/run/command.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -// Package run implements 'updater run'. +// Package run implements 'installer run'. 
package run import ( @@ -15,7 +15,7 @@ import ( "github.com/spf13/cobra" "go.uber.org/fx" - "github.com/DataDog/datadog-agent/cmd/updater/command" + "github.com/DataDog/datadog-agent/cmd/installer/command" "github.com/DataDog/datadog-agent/comp/core" "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/comp/core/log/logimpl" @@ -44,7 +44,7 @@ type cliParams struct { func Commands(global *command.GlobalParams) []*cobra.Command { runCmd := &cobra.Command{ Use: "run", - Short: "Runs the updater", + Short: "Runs the installer", Long: ``, RunE: func(cmd *cobra.Command, args []string) error { return runFxWrapper(&cliParams{ @@ -65,7 +65,7 @@ func runFxWrapper(params *cliParams) error { ConfigParams: config.NewAgentParams(params.GlobalParams.ConfFilePath), SecretParams: secrets.NewEnabledParams(), SysprobeConfigParams: sysprobeconfigimpl.NewParams(), - LogParams: logimpl.ForDaemon("UPDATER", "updater.log_file", pkgconfig.DefaultUpdaterLogFile), + LogParams: logimpl.ForDaemon("INSTALLER", "installer.log_file", pkgconfig.DefaultUpdaterLogFile), }), core.Bundle(), fx.Supply(&rcservice.Params{ diff --git a/cmd/updater/subcommands/run/command_test.go b/cmd/installer/subcommands/run/command_test.go similarity index 89% rename from cmd/updater/subcommands/run/command_test.go rename to cmd/installer/subcommands/run/command_test.go index 67ad3198cca2a..32eb946c7d326 100644 --- a/cmd/updater/subcommands/run/command_test.go +++ b/cmd/installer/subcommands/run/command_test.go @@ -8,7 +8,7 @@ package run import ( "testing" - "github.com/DataDog/datadog-agent/cmd/updater/command" + "github.com/DataDog/datadog-agent/cmd/installer/command" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) diff --git a/cmd/updater/subcommands/status/command.go b/cmd/installer/subcommands/status/command.go similarity index 95% rename from cmd/updater/subcommands/status/command.go rename to cmd/installer/subcommands/status/command.go index 6a1826c43730a..ce33dfe4cb2ac 100644 --- a/cmd/updater/subcommands/status/command.go +++ b/cmd/installer/subcommands/status/command.go @@ -12,7 +12,7 @@ import ( "os" "text/template" - "github.com/DataDog/datadog-agent/cmd/updater/command" + "github.com/DataDog/datadog-agent/cmd/installer/command" "github.com/DataDog/datadog-agent/comp/updater/localapiclient" "github.com/DataDog/datadog-agent/comp/updater/localapiclient/localapiclientimpl" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -29,7 +29,7 @@ type cliParams struct { func Commands(global *command.GlobalParams) []*cobra.Command { statusCmd := &cobra.Command{ Use: "status", - Short: "Print the updater status", + Short: "Print the installer status", Long: ``, RunE: func(cmd *cobra.Command, args []string) error { return statusFxWrapper(&cliParams{ diff --git a/cmd/updater/subcommands/status/command_test.go b/cmd/installer/subcommands/status/command_test.go similarity index 89% rename from cmd/updater/subcommands/status/command_test.go rename to cmd/installer/subcommands/status/command_test.go index 5bef567aa73c0..dbe4db01c78f2 100644 --- a/cmd/updater/subcommands/status/command_test.go +++ b/cmd/installer/subcommands/status/command_test.go @@ -8,7 +8,7 @@ package status import ( "testing" - "github.com/DataDog/datadog-agent/cmd/updater/command" + "github.com/DataDog/datadog-agent/cmd/installer/command" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) diff --git a/cmd/updater/subcommands/status/status.tmpl b/cmd/installer/subcommands/status/status.tmpl similarity index 93% rename from 
cmd/updater/subcommands/status/status.tmpl rename to cmd/installer/subcommands/status/status.tmpl index 9ce1106c496c0..2ec3dd179deac 100644 --- a/cmd/updater/subcommands/status/status.tmpl +++ b/cmd/installer/subcommands/status/status.tmpl @@ -1,4 +1,4 @@ -Datadog Updater v{{ .Version }} +Datadog Installer v{{ .Version }} {{ range $name, $package := .Packages }} {{ boldText $name }} State: {{ if $package.Stable }}{{ yellowText "unknown (unimplemented)" }}{{ else }} {{ redText "no stable version" }}{{ end }} diff --git a/cmd/installer/subcommands/subcommands.go b/cmd/installer/subcommands/subcommands.go new file mode 100644 index 0000000000000..76e5e917029c0 --- /dev/null +++ b/cmd/installer/subcommands/subcommands.go @@ -0,0 +1,28 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package subcommands contains the installer subcommands +package subcommands + +import ( + "github.com/DataDog/datadog-agent/cmd/installer/command" + "github.com/DataDog/datadog-agent/cmd/installer/subcommands/bootstrap" + "github.com/DataDog/datadog-agent/cmd/installer/subcommands/experiment" + "github.com/DataDog/datadog-agent/cmd/installer/subcommands/purge" + "github.com/DataDog/datadog-agent/cmd/installer/subcommands/run" + "github.com/DataDog/datadog-agent/cmd/installer/subcommands/status" +) + +// InstallerSubcommands returns SubcommandFactories for the subcommands +// supported with the current build flags. +func InstallerSubcommands() []command.SubcommandFactory { + return []command.SubcommandFactory{ + run.Commands, + bootstrap.Commands, + status.Commands, + experiment.Commands, + purge.Commands, + } +} diff --git a/cmd/updater/user_all.go b/cmd/installer/user_all.go similarity index 96% rename from cmd/updater/user_all.go rename to cmd/installer/user_all.go index a8b0defe196d8..cb9d03678a269 100644 --- a/cmd/updater/user_all.go +++ b/cmd/installer/user_all.go @@ -5,7 +5,7 @@ //go:build !windows -// Package main implements 'updater'. +// Package main implements 'installer'. package main import ( diff --git a/cmd/updater/user_windows.go b/cmd/installer/user_windows.go similarity index 87% rename from cmd/updater/user_windows.go rename to cmd/installer/user_windows.go index 7a30415620292..368d0c3b614ea 100644 --- a/cmd/updater/user_windows.go +++ b/cmd/installer/user_windows.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2023-present Datadog, Inc. -// Package main implements 'updater'. +// Package main implements 'installer'. package main func rootToDDInstaller() {} diff --git a/cmd/updater/subcommands/subcommands.go b/cmd/updater/subcommands/subcommands.go deleted file mode 100644 index 7d310736c5ff0..0000000000000 --- a/cmd/updater/subcommands/subcommands.go +++ /dev/null @@ -1,28 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. 
- -// Package subcommands contains the updater subcommands -package subcommands - -import ( - "github.com/DataDog/datadog-agent/cmd/updater/command" - "github.com/DataDog/datadog-agent/cmd/updater/subcommands/bootstrap" - "github.com/DataDog/datadog-agent/cmd/updater/subcommands/experiment" - "github.com/DataDog/datadog-agent/cmd/updater/subcommands/purge" - "github.com/DataDog/datadog-agent/cmd/updater/subcommands/run" - "github.com/DataDog/datadog-agent/cmd/updater/subcommands/status" -) - -// UpdaterSubcommands returns SubcommandFactories for the subcommands -// supported with the current build flags. -func UpdaterSubcommands() []command.SubcommandFactory { - return []command.SubcommandFactory{ - run.Commands, - bootstrap.Commands, - status.Commands, - experiment.Commands, - purge.Commands, - } -} diff --git a/tasks/installer.py b/tasks/installer.py index b99b24d1cac2e..bfc22f312aa9c 100644 --- a/tasks/installer.py +++ b/tasks/installer.py @@ -46,7 +46,7 @@ def build( go_build_tags = " ".join(build_tags) updater_bin = os.path.join(BIN_PATH, bin_name("installer")) cmd = f"go build -mod={go_mod} {race_opt} {build_type} -tags \"{go_build_tags}\" " - cmd += f"-o {updater_bin} -gcflags=\"{gcflags}\" -ldflags=\"{ldflags}\" {REPO_PATH}/cmd/updater" + cmd += f"-o {updater_bin} -gcflags=\"{gcflags}\" -ldflags=\"{ldflags}\" {REPO_PATH}/cmd/installer" ctx.run(cmd, env=env) From 69148dbac696218e4cba69eeacc29c408e3e673e Mon Sep 17 00:00:00 2001 From: Nicolas Schweitzer Date: Mon, 15 Apr 2024 15:58:40 +0200 Subject: [PATCH 47/99] force tag (#24685) --- .github/workflows/codeql-analysis.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 4092e755a2d2b..c5569f04e53bd 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -43,7 +43,7 @@ jobs: go-version-file: ".go-version" - name: Initialize CodeQL - uses: github/codeql-action/init@v3 + uses: github/codeql-action/init@v3.24.10 with: languages: ${{ matrix.language }} setup-python-dependencies: false @@ -62,4 +62,4 @@ jobs: invoke agent.build --build-exclude=systemd - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v3 + uses: github/codeql-action/analyze@v3.24.10 From cb16bd36e51e55eeb1986e421d6e81512c66d5fb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9my=20Mathieu?= Date: Mon, 15 Apr 2024 16:35:52 +0200 Subject: [PATCH 48/99] tasks+omnibus: use a versioned SDS shared library. (#24668) * tasks+omnibus: use a versioned SDS shared library. 
* tasks+omnibus: rely on default_version for omnibus + python linting --- omnibus/config/software/sds.rb | 2 +- tasks/sds.py | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/omnibus/config/software/sds.rb b/omnibus/config/software/sds.rb index b8b6f21ff47f4..0baad07e167be 100644 --- a/omnibus/config/software/sds.rb +++ b/omnibus/config/software/sds.rb @@ -1,6 +1,6 @@ name "sds" -default_version "042de62f5a24fbceb4f4849256c3ee5c005b7057" +default_version "v0.1.0" source git: 'https://github.com/DataDog/dd-sensitive-data-scanner' build do diff --git a/tasks/sds.py b/tasks/sds.py index 1c8989dc7238b..2c6a35de976ee 100644 --- a/tasks/sds.py +++ b/tasks/sds.py @@ -9,6 +9,8 @@ is_windows = sys.platform == "win32" is_darwin = sys.platform == "darwin" +sds_version = "v0.1.0" + @task def build_library(ctx): @@ -22,6 +24,7 @@ def build_library(ctx): with ctx.cd(temp_dir): ctx.run("git clone https://github.com/DataDog/dd-sensitive-data-scanner") with ctx.cd("dd-sensitive-data-scanner/sds-go/rust"): + ctx.run(f"git checkout {sds_version}") ctx.run("cargo build --release") # write the lib besides rtloader libs dev_path = get_dev_path() From 9d36fd11783b3d93a3df035d8f023b9d7111903c Mon Sep 17 00:00:00 2001 From: Paul Cacheux Date: Mon, 15 Apr 2024 17:49:55 +0200 Subject: [PATCH 49/99] fix ctime/mtime name changes (#24689) --- pkg/security/ebpf/c/include/helpers/filesystem.h | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/pkg/security/ebpf/c/include/helpers/filesystem.h b/pkg/security/ebpf/c/include/helpers/filesystem.h index 75189cd55ac38..1964fd153081f 100644 --- a/pkg/security/ebpf/c/include/helpers/filesystem.h +++ b/pkg/security/ebpf/c/include/helpers/filesystem.h @@ -109,8 +109,18 @@ void __attribute__((always_inline)) fill_file(struct dentry* dentry, struct file bpf_probe_read(&file->metadata.uid, sizeof(file->metadata.uid), &d_inode->i_uid); bpf_probe_read(&file->metadata.gid, sizeof(file->metadata.gid), &d_inode->i_gid); +#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) bpf_probe_read(&file->metadata.ctime, sizeof(file->metadata.ctime), &d_inode->i_ctime); +#else + bpf_probe_read(&file->metadata.ctime, sizeof(file->metadata.ctime), &d_inode->__i_ctime); +#endif + + +#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 7, 0) bpf_probe_read(&file->metadata.mtime, sizeof(file->metadata.mtime), &d_inode->i_mtime); +#else + bpf_probe_read(&file->metadata.mtime, sizeof(file->metadata.mtime), &d_inode->__i_mtime); +#endif } #define get_dentry_key_path(dentry, path) (struct path_key_t) { .ino = get_dentry_ino(dentry), .mount_id = get_path_mount_id(path) } From d5fff84cccdd964522097d245cc026c3f3c681f1 Mon Sep 17 00:00:00 2001 From: Stanley Liu Date: Mon, 15 Apr 2024 11:58:05 -0400 Subject: [PATCH 50/99] Configure log source name and otel source in logsagentexporter (#24622) * Add config options for logsagentexporter * Fix tests --- .../exporter/logsagentexporter/factory.go | 35 ++++++++++++++++--- .../logsagentexporter/logs_exporter.go | 9 ++--- .../logsagentexporter/logs_exporter_test.go | 26 ++++++++++---- 3 files changed, 53 insertions(+), 17 deletions(-) diff --git a/comp/otelcol/otlp/components/exporter/logsagentexporter/factory.go b/comp/otelcol/otlp/components/exporter/logsagentexporter/factory.go index c6abdbe1b5897..808cbf5de7c08 100644 --- a/comp/otelcol/otlp/components/exporter/logsagentexporter/factory.go +++ b/comp/otelcol/otlp/components/exporter/logsagentexporter/factory.go @@ -21,11 +21,20 @@ import ( const ( // TypeStr defines the logsagent exporter type string. 
- TypeStr = "logsagent" - stability = component.StabilityLevelStable + TypeStr = "logsagent" + stability = component.StabilityLevelStable + // logSourceName specifies the Datadog source tag value to be added to logs sent by the logs agent exporter. logSourceName = "OTLP log ingestion" + // otelSource specifies a source to be added to all logs sent by the logs agent exporter. The tag has key `otel_source` and the value specified on this constant. + otelSource = "datadog_agent" ) +// Config defines configuration for the logs agent exporter. +type Config struct { + otelSource string + logSourceName string +} + type factory struct { logsAgentChannel chan *message.Message } @@ -37,7 +46,12 @@ func NewFactory(logsAgentChannel chan *message.Message) exp.Factory { return exp.NewFactory( cfgType, - func() component.Config { return &struct{}{} }, + func() component.Config { + return &Config{ + otelSource: otelSource, + logSourceName: logSourceName, + } + }, exp.WithLogs(f.createLogsExporter, stability), ) } @@ -47,7 +61,8 @@ func (f *factory) createLogsExporter( set exp.CreateSettings, c component.Config, ) (exp.Logs, error) { - logSource := sources.NewLogSource(logSourceName, &config.LogsConfig{}) + cfg := checkAndCastConfig(c) + logSource := sources.NewLogSource(cfg.logSourceName, &config.LogsConfig{}) // TODO: Ideally the attributes translator would be created once and reused // across all signals. This would need unifying the logsagent and serializer @@ -57,7 +72,7 @@ func (f *factory) createLogsExporter( return nil, err } - exporter, err := newExporter(set.TelemetrySettings, logSource, f.logsAgentChannel, attributesTranslator) + exporter, err := newExporter(set.TelemetrySettings, cfg, logSource, f.logsAgentChannel, attributesTranslator) if err != nil { return nil, err } @@ -75,3 +90,13 @@ func (f *factory) createLogsExporter( }), ) } + +// checkAndCastConfig checks the configuration type and its warnings, and casts it to +// the logs agent exporter Config struct. +func checkAndCastConfig(c component.Config) *Config { + cfg, ok := c.(*Config) + if !ok { + panic("programming error: config structure is not of type *logsagentexporter.Config") + } + return cfg +} diff --git a/comp/otelcol/otlp/components/exporter/logsagentexporter/logs_exporter.go b/comp/otelcol/otlp/components/exporter/logsagentexporter/logs_exporter.go index 7e62bfedc5763..4ec06af643768 100644 --- a/comp/otelcol/otlp/components/exporter/logsagentexporter/logs_exporter.go +++ b/comp/otelcol/otlp/components/exporter/logsagentexporter/logs_exporter.go @@ -22,10 +22,6 @@ import ( "go.opentelemetry.io/collector/pdata/plog" ) -// otelSource specifies a source to be added to all logs sent from the Datadog Agent. -// The tag has key `otel_source` and the value specified on this constant. 
-const otelSource = "datadog_agent" - type exporter struct { set component.TelemetrySettings logsAgentChannel chan *message.Message @@ -35,11 +31,12 @@ type exporter struct { func newExporter( set component.TelemetrySettings, + cfg *Config, logSource *sources.LogSource, logsAgentChannel chan *message.Message, attributesTranslator *attributes.Translator, ) (*exporter, error) { - translator, err := logsmapping.NewTranslator(set, attributesTranslator, otelSource) + translator, err := logsmapping.NewTranslator(set, attributesTranslator, cfg.otelSource) if err != nil { return nil, err } @@ -80,7 +77,7 @@ func (e *exporter) ConsumeLogs(ctx context.Context, ld plog.Logs) (err error) { origin := message.NewOrigin(e.logSource) origin.SetTags(tags) origin.SetService(service) - origin.SetSource(logSourceName) + origin.SetSource(e.logSource.Name) content, err := ddLog.MarshalJSON() if err != nil { diff --git a/comp/otelcol/otlp/components/exporter/logsagentexporter/logs_exporter_test.go b/comp/otelcol/otlp/components/exporter/logsagentexporter/logs_exporter_test.go index 970d265c930b5..15ed81e54545f 100644 --- a/comp/otelcol/otlp/components/exporter/logsagentexporter/logs_exporter_test.go +++ b/comp/otelcol/otlp/components/exporter/logsagentexporter/logs_exporter_test.go @@ -28,7 +28,9 @@ func TestLogsExporter(t *testing.T) { ld := lr.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0) type args struct { - ld plog.Logs + ld plog.Logs + otelSource string + logSourceName string } tests := []struct { name string @@ -39,7 +41,9 @@ func TestLogsExporter(t *testing.T) { { name: "message", args: args{ - ld: lr, + ld: lr, + otelSource: otelSource, + logSourceName: logSourceName, }, want: testutil.JSONLogs{ @@ -70,6 +74,8 @@ func TestLogsExporter(t *testing.T) { ldd.Attributes().PutStr("message", "hello") return lrr }(), + otelSource: otelSource, + logSourceName: logSourceName, }, want: testutil.JSONLogs{ @@ -100,6 +106,8 @@ func TestLogsExporter(t *testing.T) { ldd.Attributes().PutStr("ddtags", "tag1:true") return lrr }(), + otelSource: otelSource, + logSourceName: logSourceName, }, want: testutil.JSONLogs{ @@ -132,6 +140,8 @@ func TestLogsExporter(t *testing.T) { ldd2.Attributes().PutStr("ddtags", "tag1:true") return lrr }(), + otelSource: otelSource, + logSourceName: logSourceName, }, want: testutil.JSONLogs{ @@ -175,6 +185,8 @@ func TestLogsExporter(t *testing.T) { ldd2.Attributes().PutStr("ddtags", "tag2:true") return lrr }(), + otelSource: "datadog_exporter", + logSourceName: "custom_source", }, want: testutil.JSONLogs{ @@ -205,17 +217,19 @@ func TestLogsExporter(t *testing.T) { "resource-attr": "resource-attr-val-1", }, }, - expectedTags: [][]string{{"tag1:true", "otel_source:datadog_agent"}, {"tag2:true", "otel_source:datadog_agent"}}, + expectedTags: [][]string{{"tag1:true", "otel_source:datadog_exporter"}, {"tag2:true", "otel_source:datadog_exporter"}}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - cfg := &struct{}{} - testChannel := make(chan *message.Message, 10) params := exportertest.NewNopCreateSettings() f := NewFactory(testChannel) + cfg := &Config{ + otelSource: tt.args.otelSource, + logSourceName: tt.args.logSourceName, + } ctx := context.Background() exp, err := f.CreateLogsExporter(ctx, params, cfg) @@ -227,7 +241,7 @@ func TestLogsExporter(t *testing.T) { output := <-testChannel outputJSON := make(map[string]interface{}) json.Unmarshal(output.GetContent(), &outputJSON) - assert.Equal(t, logSourceName, output.Origin.Source()) + assert.Equal(t, 
tt.args.logSourceName, output.Origin.Source()) assert.Equal(t, tt.expectedTags[i], output.Origin.Tags(nil)) ans = append(ans, outputJSON) } From 07d7697347bf391d1e8166bd4cb935177d1fee78 Mon Sep 17 00:00:00 2001 From: Nenad Noveljic <18366081+nenadnoveljic@users.noreply.github.com> Date: Mon, 15 Apr 2024 18:21:57 +0200 Subject: [PATCH 51/99] Adding oracle.user_sessions metric (#24648) --- .../corechecks/oracle/config/config.go | 6 ++++++ pkg/collector/corechecks/oracle/processes.go | 17 +++++++++++++++ .../oracle/processes_integration_test.go | 21 +++++++++++++++++++ ...oracle-user-sessions-1f5b04e49a7e350c.yaml | 11 ++++++++++ 4 files changed, 55 insertions(+) create mode 100644 pkg/collector/corechecks/oracle/processes_integration_test.go create mode 100644 releasenotes/notes/oracle-user-sessions-1f5b04e49a7e350c.yaml diff --git a/pkg/collector/corechecks/oracle/config/config.go b/pkg/collector/corechecks/oracle/config/config.go index bf1afc45676b3..ce1be8b857281 100644 --- a/pkg/collector/corechecks/oracle/config/config.go +++ b/pkg/collector/corechecks/oracle/config/config.go @@ -70,6 +70,10 @@ type inactiveSessionsConfig struct { Enabled bool `yaml:"enabled"` } +type userSessionsCount struct { + Enabled bool `yaml:"enabled"` +} + //nolint:revive // TODO(DBM) Fix revive linter type SharedMemoryConfig struct { Enabled bool `yaml:"enabled"` @@ -147,6 +151,7 @@ type InstanceConfig struct { Tablespaces TablespacesConfig `yaml:"tablespaces"` ProcessMemory ProcessMemoryConfig `yaml:"process_memory"` InactiveSessions inactiveSessionsConfig `yaml:"inactive_sessions"` + UserSessionsCount userSessionsCount `yaml:"user_sessions_count"` SharedMemory SharedMemoryConfig `yaml:"shared_memory"` ExecutionPlans ExecutionPlansConfig `yaml:"execution_plans"` AgentSQLTrace AgentSQLTrace `yaml:"agent_sql_trace"` @@ -217,6 +222,7 @@ func NewCheckConfig(rawInstance integration.Data, rawInitConfig integration.Data instance.ProcessMemory.Enabled = true instance.SharedMemory.Enabled = true instance.InactiveSessions.Enabled = true + instance.UserSessionsCount.Enabled = true instance.Asm.Enabled = true instance.ResourceManager.Enabled = true instance.Locks.Enabled = true diff --git a/pkg/collector/corechecks/oracle/processes.go b/pkg/collector/corechecks/oracle/processes.go index 8f24081d4b400..94c03cc148cca 100644 --- a/pkg/collector/corechecks/oracle/processes.go +++ b/pkg/collector/corechecks/oracle/processes.go @@ -15,6 +15,8 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/corechecks/oracle/common" ) +const userSessionsMetricName = common.IntegrationName + ".user_sessions" + const pgaQuery12 = `SELECT c.name as pdb_name, p.pid as pid, p.program as server_process, @@ -49,6 +51,11 @@ const pgaQueryOldIntegration = `SELECT nvl(pga_max_mem,0) pga_max_mem FROM gv$process p` +const sessionCountQuery = `SELECT + count(*) as count +FROM v$session +WHERE type != 'BACKGROUND'` + type sessionTagColumns struct { Sid sql.NullInt64 `db:"SID"` Username sql.NullString `db:"USERNAME"` @@ -136,6 +143,16 @@ func (c *Check) ProcessMemory() error { sendMetric(c, gauge, fmt.Sprintf("%s.session.inactive_seconds", common.IntegrationName), float64(r.LastCallEt.Int64), tags) } } + + if c.config.UserSessionsCount.Enabled && !c.legacyIntegrationCompatibilityMode { + var sessionCount int + err = getWrapper(c, &sessionCount, sessionCountQuery) + if err != nil { + return fmt.Errorf("failed to collect session count: %w", err) + } + sendMetric(c, gauge, userSessionsMetricName, float64(sessionCount), c.tags) + } + sender.Commit() 
return nil } diff --git a/pkg/collector/corechecks/oracle/processes_integration_test.go b/pkg/collector/corechecks/oracle/processes_integration_test.go new file mode 100644 index 0000000000000..5f54313d045f4 --- /dev/null +++ b/pkg/collector/corechecks/oracle/processes_integration_test.go @@ -0,0 +1,21 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build oracle_test + +package oracle + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestUserSessions(t *testing.T) { + c, s := newDefaultCheck(t, "", "") + err := c.Run() + require.NoError(t, err) + s.AssertMetricTaggedWith(t, "Gauge", userSessionsMetricName, []string{}) +} diff --git a/releasenotes/notes/oracle-user-sessions-1f5b04e49a7e350c.yaml b/releasenotes/notes/oracle-user-sessions-1f5b04e49a7e350c.yaml new file mode 100644 index 0000000000000..14e19deadeeef --- /dev/null +++ b/releasenotes/notes/oracle-user-sessions-1f5b04e49a7e350c.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +enhancements: + - | + [oracle] Add ``oracle.user_sessions`` metric. From 4fbf3b89b5ab4345dfa5ec493b060450ddb1f5f1 Mon Sep 17 00:00:00 2001 From: tbavelier <97530782+tbavelier@users.noreply.github.com> Date: Mon, 15 Apr 2024 18:22:04 +0200 Subject: [PATCH 52/99] [Logs collection] Adds `podman` as source for container file-based log collection (#24678) * Adds podman as source for container file-based log collection * Adds podman as source for container file-based log collection * podman file logs unit test * Skip Windows unit test due to WSL layer --- .../launchers/container/tailerfactory/file.go | 2 + .../container/tailerfactory/file_test.go | 45 +++++++++++++++++++ ...file-log-tailing-fix-79df247d84c35305.yaml | 4 ++ 3 files changed, 51 insertions(+) create mode 100644 releasenotes/notes/podman-file-log-tailing-fix-79df247d84c35305.yaml diff --git a/pkg/logs/launchers/container/tailerfactory/file.go b/pkg/logs/launchers/container/tailerfactory/file.go index 06ee43a8aa15e..3bd2c4c02d146 100644 --- a/pkg/logs/launchers/container/tailerfactory/file.go +++ b/pkg/logs/launchers/container/tailerfactory/file.go @@ -61,6 +61,8 @@ func (tf *factory) makeFileSource(source *sources.LogSource) (*sources.LogSource switch source.Config.Type { case "docker": return tf.makeDockerFileSource(source) + case "podman": + return tf.makeDockerFileSource(source) default: return nil, fmt.Errorf("file tailing is not supported for source type %s", source.Config.Type) } diff --git a/pkg/logs/launchers/container/tailerfactory/file_test.go b/pkg/logs/launchers/container/tailerfactory/file_test.go index be7713e82740d..975d43751eb64 100644 --- a/pkg/logs/launchers/container/tailerfactory/file_test.go +++ b/pkg/logs/launchers/container/tailerfactory/file_test.go @@ -130,6 +130,51 @@ func TestMakeFileSource_docker_success(t *testing.T) { require.Equal(t, source.Config.AutoMultiLineMatchThreshold, 0.123) } +func TestMakeFileSource_podman_success(t *testing.T) { + fileTestSetup(t) + mockConfig := 
coreConfig.Mock(t) + mockConfig.SetWithoutSource("logs_config.use_podman_logs", true) + + // On Windows, podman runs within a Linux virtual machine, so the Agent would believe it runs in a Linux environment with all the paths being nix-like. + // The real path on the system is abstracted by the Windows Subsystem for Linux layer, so this unit test is skipped. + // Ref: https://github.com/containers/podman/blob/main/docs/tutorials/podman-for-windows.md + if runtime.GOOS == "windows" { + t.Skip("Skip on Windows due to WSL file path abstraction") + } + + p := filepath.Join(podmanLogsBasePath, filepath.FromSlash("storage/overlay-containers/abc/userdata/ctr.log")) + require.NoError(t, os.MkdirAll(filepath.Dir(p), 0o777)) + require.NoError(t, os.WriteFile(p, []byte("{}"), 0o666)) + + tf := &factory{ + pipelineProvider: pipeline.NewMockProvider(), + cop: containersorpods.NewDecidedChooser(containersorpods.LogContainers), + } + source := sources.NewLogSource("test", &config.LogsConfig{ + Type: "podman", + Identifier: "abc", + Source: "src", + Service: "svc", + Tags: []string{"tag!"}, + AutoMultiLine: pointer.Ptr(true), + AutoMultiLineSampleSize: 321, + AutoMultiLineMatchThreshold: 0.321, + }) + child, err := tf.makeFileSource(source) + require.NoError(t, err) + require.Equal(t, source.Name, child.Name) + require.Equal(t, "file", child.Config.Type) + require.Equal(t, source.Config.Identifier, child.Config.Identifier) + require.Equal(t, p, child.Config.Path) + require.Equal(t, source.Config.Source, child.Config.Source) + require.Equal(t, source.Config.Service, child.Config.Service) + require.Equal(t, source.Config.Tags, child.Config.Tags) + require.Equal(t, sources.DockerSourceType, child.GetSourceType()) + require.Equal(t, *source.Config.AutoMultiLine, true) + require.Equal(t, source.Config.AutoMultiLineSampleSize, 321) + require.Equal(t, source.Config.AutoMultiLineMatchThreshold, 0.321) +} + func TestMakeFileSource_docker_no_file(t *testing.T) { fileTestSetup(t) diff --git a/releasenotes/notes/podman-file-log-tailing-fix-79df247d84c35305.yaml b/releasenotes/notes/podman-file-log-tailing-fix-79df247d84c35305.yaml new file mode 100644 index 0000000000000..6c3c5bd657600 --- /dev/null +++ b/releasenotes/notes/podman-file-log-tailing-fix-79df247d84c35305.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + Fixes containers file-based log collection when using the ``k8s-file`` podman log driver and the ``logs_config.use_podman_logs`` parameter. 
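Note on the podman change above: `makeFileSource` now routes `podman` sources through the same file-tailing path as Docker, and the new unit test encodes where the `k8s-file` log driver writes container logs. The sketch below is only an illustration of that path construction, assuming `podmanLogsBasePath` resolves to the rootful default `/var/lib/containers` (as the test fixture implies) and using a hypothetical helper name `podmanCtrLogPath` that is not part of the patch:

    package main

    import (
    	"fmt"
    	"path/filepath"
    )

    // Assumed rootful storage root; rootless podman uses a per-user path instead.
    const podmanLogsBasePath = "/var/lib/containers"

    // podmanCtrLogPath is a hypothetical helper that builds the k8s-file log path
    // for a container ID the same way the test fixture above does.
    func podmanCtrLogPath(containerID string) string {
    	return filepath.Join(
    		podmanLogsBasePath,
    		"storage", "overlay-containers", containerID, "userdata", "ctr.log",
    	)
    }

    func main() {
    	// For the container ID "abc" used in TestMakeFileSource_podman_success this
    	// prints /var/lib/containers/storage/overlay-containers/abc/userdata/ctr.log.
    	fmt.Println(podmanCtrLogPath("abc"))
    }

On Windows the real location is hidden behind the WSL layer, which is why the unit test above skips that platform rather than asserting on a concrete path.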
From 49706a25639a3ff86391913cab2b0e9c256086bf Mon Sep 17 00:00:00 2001 From: Kevin Fairise <132568982+KevinFairise2@users.noreply.github.com> Date: Mon, 15 Apr 2024 18:46:16 +0200 Subject: [PATCH 53/99] Update deb 10 AMI with fix for buster backports (#24673) * Update deb 10 AMI with fix for buster backports * Update kitchen and new-e2e deb 10 ami * Update arm armi as well * Fix debian x86_64 ami --- test/kitchen/platforms.json | 6 +++--- test/new-e2e/tests/agent-platform/platforms/platforms.json | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/test/kitchen/platforms.json b/test/kitchen/platforms.json index 18f0d2a5dfb8c..614918e3870d2 100644 --- a/test/kitchen/platforms.json +++ b/test/kitchen/platforms.json @@ -40,12 +40,12 @@ }, "ec2": { "x86_64": { - "debian-10": "ami-041540a5c191757a0", + "debian-10": "ami-0b94008ae0e4512b8", "debian-11": "ami-0607e701db389efe7", "debian-12": "ami-07edaec601cf2b6d3" }, "arm64": { - "debian-10": "ami-0108ade0d057c8eba", + "debian-10": "ami-0458baca856015b8d", "debian-11": "ami-00988b9ead6afb0b1", "debian-12": "ami-02aab8d5301cb8d68" } @@ -172,4 +172,4 @@ } } } -} \ No newline at end of file +} diff --git a/test/new-e2e/tests/agent-platform/platforms/platforms.json b/test/new-e2e/tests/agent-platform/platforms/platforms.json index 230e80a89fded..6fc8f6114cb6f 100644 --- a/test/new-e2e/tests/agent-platform/platforms/platforms.json +++ b/test/new-e2e/tests/agent-platform/platforms/platforms.json @@ -2,12 +2,12 @@ "debian": { "x86_64": { "debian-9": "ami-0182559468c1975fe", - "debian-10": "ami-041540a5c191757a0", + "debian-10": "ami-0b94008ae0e4512b8", "debian-11": "ami-0607e701db389efe7", "debian-12": "ami-07edaec601cf2b6d3" }, "arm64": { - "debian-10": "ami-0108ade0d057c8eba", + "debian-10": "ami-0458baca856015b8d", "debian-11": "ami-00988b9ead6afb0b1", "debian-12": "ami-02aab8d5301cb8d68" } @@ -68,4 +68,4 @@ "sles-15": "ami-0d446ba26bbe19573" } } -} \ No newline at end of file +} From d39f2f1b0f2cee9797be560d0f64ec21e4d97b49 Mon Sep 17 00:00:00 2001 From: Baptiste Foy Date: Mon, 15 Apr 2024 18:46:23 +0200 Subject: [PATCH 54/99] fix(installer): Fix dockerhub rate limiting in E2E tests (#24676) Co-authored-by: raphaelgavache --- omnibus/package-scripts/updater-deb/postinst | 2 +- omnibus/package-scripts/updater-rpm/posttrans | 2 +- pkg/installer/service/apm_inject.go | 2 +- test/new-e2e/tests/updater/docker.go | 4 +- test/new-e2e/tests/updater/linux_test.go | 46 +++++++++---------- 5 files changed, 27 insertions(+), 29 deletions(-) diff --git a/omnibus/package-scripts/updater-deb/postinst b/omnibus/package-scripts/updater-deb/postinst index b0235efd31fd1..95cb33fa154ca 100644 --- a/omnibus/package-scripts/updater-deb/postinst +++ b/omnibus/package-scripts/updater-deb/postinst @@ -82,6 +82,6 @@ fi chmod 750 ${HELPER} setcap cap_setuid+ep ${HELPER} -$BOOTSTRAP_INSTALLER bootstrap --url "oci://docker.io/datadog/installer-package-dev:latest" +$BOOTSTRAP_INSTALLER bootstrap --url "oci://public.ecr.aws/datadog/installer-package:latest" exit 0 diff --git a/omnibus/package-scripts/updater-rpm/posttrans b/omnibus/package-scripts/updater-rpm/posttrans index 1b0ab887e3838..54e71f22cc124 100644 --- a/omnibus/package-scripts/updater-rpm/posttrans +++ b/omnibus/package-scripts/updater-rpm/posttrans @@ -65,7 +65,7 @@ fi chmod 750 ${HELPER} setcap cap_setuid+ep ${HELPER} -$BOOTSTRAP_INSTALLER bootstrap --url "oci://docker.io/datadog/installer-package-dev:latest" +$BOOTSTRAP_INSTALLER bootstrap --url 
"oci://public.ecr.aws/datadog/installer-package:latest" exit 0 diff --git a/pkg/installer/service/apm_inject.go b/pkg/installer/service/apm_inject.go index 4982b61c2a826..d1d29ff1931f9 100644 --- a/pkg/installer/service/apm_inject.go +++ b/pkg/installer/service/apm_inject.go @@ -304,7 +304,7 @@ func (a *apmInjectorInstaller) deleteAgentConfig() (err error) { return nil } - err = os.WriteFile(datadogConfigPath, content, 0644) + err = os.WriteFile(datadogConfigPath, newContent, 0644) if err != nil { return err } diff --git a/test/new-e2e/tests/updater/docker.go b/test/new-e2e/tests/updater/docker.go index 3762f53f589d4..51e45f07febb6 100644 --- a/test/new-e2e/tests/updater/docker.go +++ b/test/new-e2e/tests/updater/docker.go @@ -80,12 +80,10 @@ sudo systemctl start docker // and make a call to it func launchJavaDockerContainer(t *testing.T, host *components.RemoteHost) { host.MustExecute(`sudo docker run -d -p8887:8888 baptistefoy702/message-server:latest`) - // for i := 0; i < 10; i++ { assert.Eventually(t, func() bool { _, err := host.Execute(`curl -m 1 localhost:8887/messages`) return err == nil - }, 10*time.Second, 100*time.Millisecond, + }, 30*time.Second, 100*time.Millisecond, ) - // } } diff --git a/test/new-e2e/tests/updater/linux_test.go b/test/new-e2e/tests/updater/linux_test.go index 6a5821abcaf99..10b349f9759c7 100644 --- a/test/new-e2e/tests/updater/linux_test.go +++ b/test/new-e2e/tests/updater/linux_test.go @@ -54,22 +54,23 @@ func runTest(t *testing.T, pkgManager string, arch os.Architecture, distro os.De } func TestCentOSARM(t *testing.T) { - t.Parallel() + // t.Parallel() runTest(t, "rpm", os.AMD64Arch, os.CentOSDefault) } func TestRedHatARM(t *testing.T) { - t.Parallel() + t.Skip("Support for SELinux has not been added yet") + // t.Parallel() runTest(t, "rpm", os.ARM64Arch, os.RedHatDefault) } func TestUbuntuARM(t *testing.T) { - t.Parallel() + // t.Parallel() runTest(t, "dpkg", os.ARM64Arch, os.UbuntuDefault) } func TestDebianX86(t *testing.T) { - t.Parallel() + // t.Parallel() runTest(t, "dpkg", os.AMD64Arch, os.DebianDefault) } @@ -108,7 +109,6 @@ func (v *vmUpdaterSuite) TestInstallerUnitLoaded() { func (v *vmUpdaterSuite) TestAgentUnitsLoaded() { t := v.T() - t.Skip("FIXME(Arthur): dockerhub rate limits make this test flaky") stableUnits := []string{ "datadog-agent.service", "datadog-agent-trace.service", @@ -116,7 +116,8 @@ func (v *vmUpdaterSuite) TestAgentUnitsLoaded() { "datadog-agent-sysprobe.service", "datadog-agent-security.service", } - v.Env().RemoteHost.MustExecute(fmt.Sprintf(`sudo %v/bin/installer/installer bootstrap --url "oci://docker.io/datadog/agent-package-dev@sha256:d86138d88b407cf5ef75bccb3e0bc492ce6e3e3dfa9d3a64d2387d3b350fe5c4"`, bootUpdaterDir)) + addEcrConfig(v.Env().RemoteHost) + v.Env().RemoteHost.MustExecute(fmt.Sprintf(`sudo %v/bin/installer/installer bootstrap --url "oci://669783387624.dkr.ecr.us-east-1.amazonaws.com/dockerhub/datadog/agent-package-dev@sha256:d86138d88b407cf5ef75bccb3e0bc492ce6e3e3dfa9d3a64d2387d3b350fe5c4"`, bootUpdaterDir)) for _, unit := range stableUnits { require.Equal(t, "enabled\n", v.Env().RemoteHost.MustExecute(fmt.Sprintf(`systemctl is-enabled %s`, unit))) } @@ -124,10 +125,10 @@ func (v *vmUpdaterSuite) TestAgentUnitsLoaded() { func (v *vmUpdaterSuite) TestExperimentCrash() { t := v.T() - t.Skip("FIXME(Arthur): dockerhub rate limits make this test flaky") host := v.Env().RemoteHost + addEcrConfig(host) startTime := getMonotonicTimestamp(t, host) - host.MustExecute(fmt.Sprintf(`sudo %v/bin/installer/installer 
bootstrap --url "oci://docker.io/datadog/agent-package-dev@sha256:d86138d88b407cf5ef75bccb3e0bc492ce6e3e3dfa9d3a64d2387d3b350fe5c4"`, bootUpdaterDir)) + host.MustExecute(fmt.Sprintf(`sudo %v/bin/installer/installer bootstrap --url "oci://669783387624.dkr.ecr.us-east-1.amazonaws.com/dockerhub/datadog/agent-package-dev@sha256:d86138d88b407cf5ef75bccb3e0bc492ce6e3e3dfa9d3a64d2387d3b350fe5c4"`, bootUpdaterDir)) v.Env().RemoteHost.MustExecute(`sudo systemctl start datadog-agent-exp --no-block`) res := getJournalDOnCondition(t, host, startTime, stopCondition([]JournaldLog{ {Unit: "datadog-agent.service", Message: "Started"}, @@ -142,9 +143,8 @@ func (v *vmUpdaterSuite) TestExperimentCrash() { } func (v *vmUpdaterSuite) TestPurgeAndInstallAgent() { - t := v.T() - t.Skip("FIXME(Arthur): dockerhub rate limits make this test flaky") host := v.Env().RemoteHost + addEcrConfig(host) host.MustExecute(fmt.Sprintf("sudo %v/bin/installer/installer purge", bootUpdaterDir)) stableUnits := []string{ "datadog-agent.service", @@ -179,7 +179,7 @@ func (v *vmUpdaterSuite) TestPurgeAndInstallAgent() { } // bootstrap - host.MustExecute(fmt.Sprintf(`sudo %v/bin/installer/installer bootstrap --url "oci://docker.io/datadog/agent-package-dev@sha256:d86138d88b407cf5ef75bccb3e0bc492ce6e3e3dfa9d3a64d2387d3b350fe5c4"`, bootUpdaterDir)) + host.MustExecute(fmt.Sprintf(`sudo %v/bin/installer/installer bootstrap --url "oci://669783387624.dkr.ecr.us-east-1.amazonaws.com/dockerhub/datadog/agent-package-dev@sha256:d86138d88b407cf5ef75bccb3e0bc492ce6e3e3dfa9d3a64d2387d3b350fe5c4"`, bootUpdaterDir)) // assert agent symlink _ = host.MustExecute(`test -L /usr/bin/datadog-agent`) @@ -210,12 +210,8 @@ func (v *vmUpdaterSuite) TestPurgeAndInstallAgent() { func (v *vmUpdaterSuite) TestPurgeAndInstallAPMInjector() { // Temporarily disable CentOS & Redhat, as there is a bug in the APM injector - if v.distro == os.CentOSDefault || v.distro == os.RedHatDefault { - v.T().Skip("APM injector not available for CentOS or RedHat yet") - } - if v.distro == os.DebianDefault || v.distro == os.UbuntuDefault && v.arch == os.AMD64Arch { - // TODO (baptiste): Fix test - v.T().Skip("Test has been temporarily disabled") + if v.distro == os.CentOSDefault { + v.T().Skip("APM injector not available for CentOS yet") } host := v.Env().RemoteHost @@ -223,7 +219,7 @@ func (v *vmUpdaterSuite) TestPurgeAndInstallAPMInjector() { /////////////////// // Setup machine // /////////////////// - + addEcrConfig(host) host.MustExecute(fmt.Sprintf("sudo %v/bin/installer/installer purge", bootUpdaterDir)) // Install docker installDocker(v.distro, v.T(), host) @@ -257,9 +253,9 @@ func (v *vmUpdaterSuite) TestPurgeAndInstallAPMInjector() { // Bootstrap packages // //////////////////////// - host.MustExecute(fmt.Sprintf(`sudo %v/bin/installer/installer bootstrap --url "oci://docker.io/datadog/agent-package-dev:7.54.0-devel.git.247.f92fbc1.pipeline.31778392-1"`, bootUpdaterDir)) - host.MustExecute(fmt.Sprintf(`sudo %v/bin/installer/installer bootstrap --url "oci://docker.io/datadog/apm-library-java-package-dev:1.32.0-SNAPSHOT-8708864e8e-pipeline.30373268.beta.8708864e-1"`, bootUpdaterDir)) - host.MustExecute(fmt.Sprintf(`sudo %v/bin/installer/installer bootstrap --url "oci://docker.io/datadog/apm-inject-package-dev:0.12.3-dev.bddec85.glci481808135.g8acdc698-1"`, bootUpdaterDir)) + host.MustExecute(fmt.Sprintf(`sudo %v/bin/installer/installer bootstrap --url 
"oci://669783387624.dkr.ecr.us-east-1.amazonaws.com/dockerhub/datadog/agent-package-dev:7.54.0-devel.git.247.f92fbc1.pipeline.31778392-1"`, bootUpdaterDir)) + host.MustExecute(fmt.Sprintf(`sudo %v/bin/installer/installer bootstrap --url "oci://669783387624.dkr.ecr.us-east-1.amazonaws.com/dockerhub/datadog/apm-library-java-package-dev:1.32.0-SNAPSHOT-8708864e8e-pipeline.30373268.beta.8708864e-1"`, bootUpdaterDir)) + host.MustExecute(fmt.Sprintf(`sudo %v/bin/installer/installer bootstrap --url "oci://669783387624.dkr.ecr.us-east-1.amazonaws.com/dockerhub/datadog/apm-inject-package-dev:0.12.3-dev.bddec85.glci481808135.g8acdc698-1"`, bootUpdaterDir)) //////////////////////////////// // Check post-bootstrap state // @@ -340,8 +336,8 @@ func (v *vmUpdaterSuite) TestPurgeAndInstallAPMInjector() { require.NotNil(v.T(), err) _, err = host.Execute(`grep "/opt/datadog-packages/datadog-apm-inject" /etc/docker/daemon.json`) require.NotNil(v.T(), err) - _, err = host.Execute(`test -f /etc/docker/daemon.json.bak`) - require.NotNil(v.T(), err) + res, err = host.Execute("grep \"LD PRELOAD CONFIG\" /etc/datadog-agent/datadog.yaml") + require.NotNil(v.T(), err, "expected no LD PRELOAD CONFIG in agent config, got:\n%s", res) } func assertInstallMethod(v *vmUpdaterSuite, t *testing.T, host *components.RemoteHost) { @@ -355,6 +351,10 @@ func assertInstallMethod(v *vmUpdaterSuite, t *testing.T, host *components.Remot assert.True(t, "" != config.InstallMethod["tool_version"]) } +func addEcrConfig(host *components.RemoteHost) { + host.MustExecute(fmt.Sprintf("cat %s/datadog.yaml | grep registry_auth || echo \"\nupdater:\n registry_auth: ecr\" | sudo tee -a %s/datadog.yaml", confDir, confDir)) +} + // Config yaml struct type Config struct { InstallMethod map[string]string `yaml:"install_method"` From 7293d285e88ac7ee355aef17c5822d72f6af0697 Mon Sep 17 00:00:00 2001 From: Olivier G <52180542+ogaca-dd@users.noreply.github.com> Date: Mon, 15 Apr 2024 18:47:21 +0200 Subject: [PATCH 55/99] [ASCII-1338] Create container tagger (#24168) * Create agent bundle * Fix lint component * Add agent.Bundle() to getSharedFxOption to avoid code conflict * Create containertagger component * Rename containertagger to cloudfoundrycontainer * Update cmd/agent/subcommands/run/command_windows.go Co-authored-by: Gustavo Caso * Fix component README.md * Fix code conflicts * Fix Windows build * Add lifecycle to cloundfoundrycontainer component --------- Co-authored-by: Gustavo Caso --- .github/CODEOWNERS | 1 + cmd/agent/subcommands/run/command.go | 16 ++--- cmd/agent/subcommands/run/command_windows.go | 3 + comp/README.md | 6 ++ comp/agent/bundle.go | 2 + comp/agent/bundle_test.go | 3 + .../cloudfoundrycontainer.go | 59 +++++++++++++++++++ comp/agent/cloudfoundrycontainer/component.go | 12 ++++ 8 files changed, 90 insertions(+), 12 deletions(-) create mode 100644 comp/agent/cloudfoundrycontainer/cloudfoundrycontainerimpl/cloudfoundrycontainer.go create mode 100644 comp/agent/cloudfoundrycontainer/component.go diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 1e2d295eca54c..31925e1e39eeb 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -257,6 +257,7 @@ /comp/systray @DataDog/windows-agent /comp/trace @DataDog/agent-apm /comp/updater @DataDog/fleet +/comp/agent/cloudfoundrycontainer @DataDog/platform-integrations /comp/agent/jmxlogger @DataDog/agent-metrics-logs /comp/aggregator/diagnosesendermanager @DataDog/agent-shared-components /comp/checks/agentcrashdetect @DataDog/windows-kernel-integrations diff --git 
a/cmd/agent/subcommands/run/command.go b/cmd/agent/subcommands/run/command.go index b5938a2d42531..33a95cc203d9a 100644 --- a/cmd/agent/subcommands/run/command.go +++ b/cmd/agent/subcommands/run/command.go @@ -34,7 +34,6 @@ import ( // core components "github.com/DataDog/datadog-agent/comp/agent/autoexit" - "github.com/DataDog/datadog-agent/comp/agent/expvarserver" "github.com/DataDog/datadog-agent/comp/agent/jmxlogger" "github.com/DataDog/datadog-agent/comp/agent/jmxlogger/jmxloggerimpl" @@ -60,6 +59,7 @@ import ( "github.com/DataDog/datadog-agent/comp/serializer/compression/compressionimpl" "github.com/DataDog/datadog-agent/comp/agent" + "github.com/DataDog/datadog-agent/comp/agent/cloudfoundrycontainer" "github.com/DataDog/datadog-agent/comp/agent/metadatascheduler" "github.com/DataDog/datadog-agent/comp/core/healthprobe" "github.com/DataDog/datadog-agent/comp/core/healthprobe/healthprobeimpl" @@ -112,7 +112,6 @@ import ( "github.com/DataDog/datadog-agent/comp/snmptraps" snmptrapsServer "github.com/DataDog/datadog-agent/comp/snmptraps/server" traceagentStatusImpl "github.com/DataDog/datadog-agent/comp/trace/status/statusimpl" - "github.com/DataDog/datadog-agent/pkg/cloudfoundry/containertagger" pkgcollector "github.com/DataDog/datadog-agent/pkg/collector" "github.com/DataDog/datadog-agent/pkg/collector/check" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/net" @@ -227,6 +226,7 @@ func run(log log.Component, _ packagesigning.Component, statusComponent status.Component, collector collector.Component, + cloudfoundrycontainer cloudfoundrycontainer.Component, _ expvarserver.Component, _ pid.Component, metadatascheduler metadatascheduler.Component, @@ -296,6 +296,7 @@ func run(log log.Component, invChecks, statusComponent, collector, + cloudfoundrycontainer, metadatascheduler, jmxlogger, settings, @@ -468,6 +469,7 @@ func startAgent( invChecks inventorychecks.Component, _ status.Component, collector collector.Component, + _ cloudfoundrycontainer.Component, _ metadatascheduler.Component, jmxLogger jmxlogger.Component, settings settings.Component, @@ -523,16 +525,6 @@ func startAgent( } } - // start the cloudfoundry container tagger - if pkgconfig.IsFeaturePresent(pkgconfig.CloudFoundry) && !pkgconfig.Datadog.GetBool("cloud_foundry_buildpack") { - containerTagger, err := containertagger.NewContainerTagger(wmeta) - if err != nil { - log.Errorf("Failed to create Cloud Foundry container tagger: %v", err) - } else { - containerTagger.Start(ctx) - } - } - // start the cmd HTTP server if err = agentAPI.StartServer( wmeta, diff --git a/cmd/agent/subcommands/run/command_windows.go b/cmd/agent/subcommands/run/command_windows.go index 9b61ecc2a8e4c..538eb62865194 100644 --- a/cmd/agent/subcommands/run/command_windows.go +++ b/cmd/agent/subcommands/run/command_windows.go @@ -14,6 +14,7 @@ import ( _ "net/http/pprof" // Blank import used because this isn't directly used in this file "github.com/DataDog/datadog-agent/comp/agent/autoexit" + "github.com/DataDog/datadog-agent/comp/agent/cloudfoundrycontainer" "github.com/DataDog/datadog-agent/comp/agent/expvarserver" "github.com/DataDog/datadog-agent/comp/agent/jmxlogger" "github.com/DataDog/datadog-agent/comp/agent/metadatascheduler" @@ -119,6 +120,7 @@ func StartAgentWithDefaults(ctxChan <-chan context.Context) (<-chan error, error pkgSigning packagesigning.Component, statusComponent status.Component, collector collector.Component, + cloudfoundrycontainer cloudfoundrycontainer.Component, _ autoexit.Component, _ expvarserver.Component, 
metadatascheduler metadatascheduler.Component, @@ -148,6 +150,7 @@ func StartAgentWithDefaults(ctxChan <-chan context.Context) (<-chan error, error invChecks, statusComponent, collector, + cloudfoundrycontainer, metadatascheduler, jmxlogger, settings, diff --git a/comp/README.md b/comp/README.md index 95a593d9a2a80..3df8c7f5ced7b 100644 --- a/comp/README.md +++ b/comp/README.md @@ -14,6 +14,12 @@ Package agent implements the "agent" bundle, Package autoexit lets setup automatic shutdown mechanism if necessary +### [comp/agent/cloudfoundrycontainer](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/agent/cloudfoundrycontainer) + +*Datadog Team*: platform-integrations + +Package cloudfoundrycontainer provides the cloud foundry container component. + ### [comp/agent/expvarserver](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/agent/expvarserver) Package expvarserver contains the component type for the expVar server. diff --git a/comp/agent/bundle.go b/comp/agent/bundle.go index 05b0aa6485a29..1692114ca6efc 100644 --- a/comp/agent/bundle.go +++ b/comp/agent/bundle.go @@ -8,6 +8,7 @@ package agent import ( "github.com/DataDog/datadog-agent/comp/agent/autoexit/autoexitimpl" + "github.com/DataDog/datadog-agent/comp/agent/cloudfoundrycontainer/cloudfoundrycontainerimpl" "github.com/DataDog/datadog-agent/comp/agent/expvarserver/expvarserverimpl" "github.com/DataDog/datadog-agent/comp/agent/jmxlogger/jmxloggerimpl" "github.com/DataDog/datadog-agent/comp/agent/metadatascheduler/metadataschedulerimpl" @@ -23,5 +24,6 @@ func Bundle() fxutil.BundleOptions { metadataschedulerimpl.Module(), jmxloggerimpl.Module(), expvarserverimpl.Module(), + cloudfoundrycontainerimpl.Module(), ) } diff --git a/comp/agent/bundle_test.go b/comp/agent/bundle_test.go index 6a20c7ec6a9d8..372e4e99444d6 100644 --- a/comp/agent/bundle_test.go +++ b/comp/agent/bundle_test.go @@ -13,6 +13,7 @@ import ( "github.com/DataDog/datadog-agent/comp/agent/jmxlogger/jmxloggerimpl" "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer/demultiplexerimpl" "github.com/DataDog/datadog-agent/comp/core" + "github.com/DataDog/datadog-agent/comp/core/workloadmeta" "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder" "github.com/DataDog/datadog-agent/comp/forwarder/eventplatform/eventplatformimpl" "github.com/DataDog/datadog-agent/comp/forwarder/orchestrator/orchestratorimpl" @@ -31,5 +32,7 @@ func TestBundleDependencies(t *testing.T) { demultiplexerimpl.Module(), fx.Supply(demultiplexerimpl.NewDefaultParams()), fx.Supply(jmxloggerimpl.NewDefaultParams()), + workloadmeta.MockModule(), + fx.Supply(workloadmeta.NewParams()), ) } diff --git a/comp/agent/cloudfoundrycontainer/cloudfoundrycontainerimpl/cloudfoundrycontainer.go b/comp/agent/cloudfoundrycontainer/cloudfoundrycontainerimpl/cloudfoundrycontainer.go new file mode 100644 index 0000000000000..9585067458c39 --- /dev/null +++ b/comp/agent/cloudfoundrycontainer/cloudfoundrycontainerimpl/cloudfoundrycontainer.go @@ -0,0 +1,59 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +// Package cloudfoundrycontainerimpl provides the implementation of the cloud foundry container component. 
+package cloudfoundrycontainerimpl + +import ( + "context" + + "go.uber.org/fx" + + "github.com/DataDog/datadog-agent/comp/agent/cloudfoundrycontainer" + "github.com/DataDog/datadog-agent/comp/core/config" + "github.com/DataDog/datadog-agent/comp/core/workloadmeta" + cloudfoundrycontainertagger "github.com/DataDog/datadog-agent/pkg/cloudfoundry/containertagger" + pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgcommon "github.com/DataDog/datadog-agent/pkg/util/common" + "github.com/DataDog/datadog-agent/pkg/util/fxutil" + "github.com/DataDog/datadog-agent/pkg/util/log" +) + +// Module defines the fx options for this component. +func Module() fxutil.Module { + return fxutil.Component( + fx.Provide(newCloudfoundryContainer), + ) +} + +type dependencies struct { + fx.In + Config config.Component // Don't remove Config as it must be loaded before using IsFeaturePresent + WMeta workloadmeta.Component + LC fx.Lifecycle +} + +func newCloudfoundryContainer(deps dependencies) cloudfoundrycontainer.Component { + // start the cloudfoundry container tagger + if pkgconfig.IsFeaturePresent(pkgconfig.CloudFoundry) && !deps.Config.GetBool("cloud_foundry_buildpack") { + containerTagger, err := cloudfoundrycontainertagger.NewContainerTagger(deps.WMeta) + if err != nil { + log.Errorf("Failed to create Cloud Foundry container tagger: %v", err) + } else { + ctx, cancel := pkgcommon.GetMainCtxCancel() + deps.LC.Append(fx.Hook{ + OnStart: func(_ context.Context) error { + containerTagger.Start(ctx) + return nil + }, + OnStop: func(_ context.Context) error { + cancel() + return nil + }, + }) + } + } + return struct{}{} +} diff --git a/comp/agent/cloudfoundrycontainer/component.go b/comp/agent/cloudfoundrycontainer/component.go new file mode 100644 index 0000000000000..36e070dad333c --- /dev/null +++ b/comp/agent/cloudfoundrycontainer/component.go @@ -0,0 +1,12 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +// Package cloudfoundrycontainer provides the cloud foundry container component. +package cloudfoundrycontainer + +// team: platform-integrations + +// Component is the component type. 
+type Component interface{} From be4cc6fe0b84c2c07d5ab815627ca59134c54d78 Mon Sep 17 00:00:00 2001 From: Hasan Mahmood <6599778+hmahmood@users.noreply.github.com> Date: Mon, 15 Apr 2024 11:56:48 -0500 Subject: [PATCH 56/99] [NPM-3228] Add back trace_pipe logging to debug test failures (#24654) * Log trace_pipe during conntrack tests * Use size of conntrack tuple struct to not offset guess reply tuple offset * Remove trace_pipe logs * Add back trace_pipe logging to debug test failures --- pkg/network/tracer/offsetguess/conntrack.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/network/tracer/offsetguess/conntrack.go b/pkg/network/tracer/offsetguess/conntrack.go index 1d8a5b04c60b8..6a9b6be598944 100644 --- a/pkg/network/tracer/offsetguess/conntrack.go +++ b/pkg/network/tracer/offsetguess/conntrack.go @@ -166,9 +166,9 @@ func (c *conntrackOffsetGuesser) checkAndUpdateCurrentOffset(mp *maps.GenericMap log.Debugf("Successfully guessed %v with offset of %d bytes", "ino", c.status.Offset_ino) return c.setReadyState(mp) } + log.Tracef("%v %d does not match expected %d, incrementing offset netns: %d, ino: %d", + whatString[GuessWhat(c.status.What)], c.status.Netns, expected.netns, c.status.Offset_netns, c.status.Offset_ino) c.status.Offset_ino++ - log.Tracef("%v %d does not match expected %d, incrementing offset %d", - whatString[GuessWhat(c.status.What)], c.status.Netns, expected.netns, c.status.Offset_netns) if c.status.Err != 0 || c.status.Offset_ino >= threshold { c.status.Offset_ino = 0 c.status.Offset_netns++ From 1777c66161b35bbb9043b9d7821af9ed04c0d774 Mon Sep 17 00:00:00 2001 From: Paul Cacheux Date: Mon, 15 Apr 2024 20:03:37 +0200 Subject: [PATCH 57/99] [CWS] allow providing a base branch to target for the btfhub sync PR (#24690) * allow providing a base branch to target for the btfhub sync PR * support force refresh --- .github/workflows/cws-btfhub-sync.yml | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/.github/workflows/cws-btfhub-sync.yml b/.github/workflows/cws-btfhub-sync.yml index 141379f07d3c8..5c4d279db7d36 100644 --- a/.github/workflows/cws-btfhub-sync.yml +++ b/.github/workflows/cws-btfhub-sync.yml @@ -2,6 +2,17 @@ name: "CWS BTFHub constants sync" on: workflow_dispatch: + inputs: + base_branch: + description: 'Base branch to target' + required: false + default: 'main' + type: string + force_refresh: + description: 'Force refresh of the constants' + required: false + default: 'false' + type: boolean schedule: - cron: '30 4 * * 5' # at 4:30 UTC on Friday @@ -16,6 +27,8 @@ jobs: - name: Checkout datadog-agent repository uses: actions/checkout@v4 + with: + ref: ${{ inputs.base_branch || 'main' }} - name: Checkout btfhub-archive repository uses: actions/checkout@v4 @@ -47,7 +60,7 @@ jobs: - name: Sync constants run: | - inv -e security-agent.generate-btfhub-constants --archive-path=./dev/dist/archive + inv -e security-agent.generate-btfhub-constants --archive-path=./dev/dist/archive ${{ inputs.force_refresh && '--force-refresh' || '' }} - uses: stefanzweifel/git-auto-commit-action@v5 id: commit-creator @@ -69,7 +82,7 @@ jobs: owner, repo, head: '${{ steps.branch-name.outputs.BRANCH_NAME }}', - base: 'main', + base: '${{ inputs.base_branch || 'main' }}', body: [ '### What does this PR do?', 'This PR syncs the BTFHub constants used by CWS', From 112c636533b7ad352bcec605b86d80a9a65c8e19 Mon Sep 17 00:00:00 2001 From: Paul Cacheux Date: Mon, 15 Apr 2024 20:39:43 +0200 Subject: [PATCH 58/99] [CWS] split credentials and 
capabilities fields when reading from inode (#24691) * split creds and caps * generate constants `creds_cap_inheritable_offset` * fix fallback values * fix fallback computation --- .../ebpf/c/include/hooks/commit_creds.h | 13 +- .../probe/constantfetch/btfhub/constants.json | 202 ++++++++++++++++++ .../probe/constantfetch/constant_names.go | 1 + pkg/security/probe/constantfetch/fallback.go | 6 + pkg/security/probe/probe_ebpf.go | 1 + 5 files changed, 220 insertions(+), 3 deletions(-) diff --git a/pkg/security/ebpf/c/include/hooks/commit_creds.h b/pkg/security/ebpf/c/include/hooks/commit_creds.h index 4a8afe3b2f90d..16506e912c275 100644 --- a/pkg/security/ebpf/c/include/hooks/commit_creds.h +++ b/pkg/security/ebpf/c/include/hooks/commit_creds.h @@ -238,7 +238,9 @@ struct __attribute__((__packed__)) cred_ids { kgid_t egid; kuid_t fsuid; kgid_t fsgid; - unsigned securebits; +}; + +struct __attribute__((__packed__)) cred_caps { kernel_cap_t cap_inheritable; kernel_cap_t cap_permitted; kernel_cap_t cap_effective; @@ -251,6 +253,11 @@ int hook_commit_creds(ctx_t *ctx) { u64 creds_uid_offset; LOAD_CONSTANT("creds_uid_offset", creds_uid_offset); struct cred_ids *credentials = (struct cred_ids *)(CTX_PARM1(ctx) + creds_uid_offset); + + u64 creds_cap_inheritable_offset; + LOAD_CONSTANT("creds_cap_inheritable_offset", creds_cap_inheritable_offset); + struct cred_caps *capabilities = (struct cred_caps *)(CTX_PARM1(ctx) + creds_cap_inheritable_offset); + struct pid_cache_t new_pid_entry = {}; // update pid_cache entry for the current process @@ -270,8 +277,8 @@ int hook_commit_creds(ctx_t *ctx) { bpf_probe_read(&pid_entry->credentials.egid, sizeof(pid_entry->credentials.egid), &credentials->egid); bpf_probe_read(&pid_entry->credentials.fsuid, sizeof(pid_entry->credentials.fsuid), &credentials->fsuid); bpf_probe_read(&pid_entry->credentials.fsgid, sizeof(pid_entry->credentials.fsgid), &credentials->fsgid); - bpf_probe_read(&pid_entry->credentials.cap_effective, sizeof(pid_entry->credentials.cap_effective), &credentials->cap_effective); - bpf_probe_read(&pid_entry->credentials.cap_permitted, sizeof(pid_entry->credentials.cap_permitted), &credentials->cap_permitted); + bpf_probe_read(&pid_entry->credentials.cap_effective, sizeof(pid_entry->credentials.cap_effective), &capabilities->cap_effective); + bpf_probe_read(&pid_entry->credentials.cap_permitted, sizeof(pid_entry->credentials.cap_permitted), &capabilities->cap_permitted); if (new_entry) { bpf_map_update_elem(&pid_cache, &pid, &new_pid_entry, BPF_ANY); diff --git a/pkg/security/probe/constantfetch/btfhub/constants.json b/pkg/security/probe/constantfetch/btfhub/constants.json index 4da99a3c0375a..4bdf6a904c52c 100644 --- a/pkg/security/probe/constantfetch/btfhub/constants.json +++ b/pkg/security/probe/constantfetch/btfhub/constants.json @@ -9,6 +9,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -49,6 +50,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -89,6 +91,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -129,6 +132,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, 
"bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -170,6 +174,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 44, "creds_uid_offset": 8, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -211,6 +216,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -251,6 +257,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -291,6 +298,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -331,6 +339,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -372,6 +381,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -412,6 +422,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 44, "creds_uid_offset": 8, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -453,6 +464,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -490,6 +502,7 @@ "bpf_map_type_offset": 4, "bpf_prog_aux_offset": 16, "bpf_prog_type_offset": 8, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -527,6 +540,7 @@ "bpf_map_type_offset": 8, "bpf_prog_aux_offset": 16, "bpf_prog_type_offset": 8, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -567,6 +581,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -607,6 +622,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -647,6 +663,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -688,6 +705,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -728,6 +746,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 44, "creds_uid_offset": 8, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -769,6 +788,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, 
"file_f_inode_offset": 32, @@ -805,6 +825,7 @@ "binprm_file_offset": 168, "bpf_map_type_offset": 4, "bpf_prog_aux_offset": 16, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -841,6 +862,7 @@ "binprm_file_offset": 168, "bpf_map_type_offset": 4, "bpf_prog_aux_offset": 16, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -876,6 +898,7 @@ "binprm_file_offset": 168, "bpf_map_type_offset": 4, "bpf_prog_aux_offset": 16, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -913,6 +936,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 12, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -952,6 +976,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -991,6 +1016,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -1033,6 +1059,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -1073,6 +1100,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -1113,6 +1141,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -1153,6 +1182,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -1193,6 +1223,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -1235,6 +1266,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -1276,6 +1308,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -1311,6 +1344,7 @@ "bpf_map_type_offset": 4, "bpf_prog_aux_offset": 16, "bpf_prog_type_offset": 8, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -1347,6 +1381,7 @@ "bpf_map_type_offset": 4, "bpf_prog_aux_offset": 16, "bpf_prog_type_offset": 8, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -1389,6 +1424,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -1431,6 +1467,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, 
"bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -1473,6 +1510,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -1515,6 +1553,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -1558,6 +1597,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -1599,6 +1639,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -1640,6 +1681,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -1683,6 +1725,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -1724,6 +1767,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -1767,6 +1811,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -1810,6 +1855,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -1853,6 +1899,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 152, "file_f_inode_offset": 32, @@ -1896,6 +1943,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -1939,6 +1987,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 152, "file_f_inode_offset": 32, @@ -1982,6 +2031,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 44, "creds_uid_offset": 8, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -2025,6 +2075,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 44, "creds_uid_offset": 8, "dentry_sb_offset": 152, "file_f_inode_offset": 32, @@ -2068,6 +2119,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -2112,6 +2164,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, 
"dentry_sb_offset": 168, "file_f_inode_offset": 32, @@ -2156,6 +2209,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 44, "creds_uid_offset": 8, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -2200,6 +2254,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 44, "creds_uid_offset": 8, "dentry_sb_offset": 168, "file_f_inode_offset": 32, @@ -2244,6 +2299,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -2287,6 +2343,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 152, "file_f_inode_offset": 32, @@ -2330,6 +2387,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -2373,6 +2431,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 152, "file_f_inode_offset": 32, @@ -2416,6 +2475,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 44, "creds_uid_offset": 8, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -2459,6 +2519,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 44, "creds_uid_offset": 8, "dentry_sb_offset": 152, "file_f_inode_offset": 32, @@ -2502,6 +2563,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -2546,6 +2608,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 168, "file_f_inode_offset": 32, @@ -2590,6 +2653,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 44, "creds_uid_offset": 8, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -2634,6 +2698,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 44, "creds_uid_offset": 8, "dentry_sb_offset": 168, "file_f_inode_offset": 32, @@ -2672,6 +2737,7 @@ "bpf_map_type_offset": 8, "bpf_prog_aux_offset": 16, "bpf_prog_type_offset": 8, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -2709,6 +2775,7 @@ "bpf_map_type_offset": 8, "bpf_prog_aux_offset": 16, "bpf_prog_type_offset": 8, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -2747,6 +2814,7 @@ "bpf_map_type_offset": 8, "bpf_prog_aux_offset": 16, "bpf_prog_type_offset": 8, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -2784,6 +2852,7 @@ "bpf_map_type_offset": 8, "bpf_prog_aux_offset": 16, "bpf_prog_type_offset": 8, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 152, "file_f_inode_offset": 32, @@ -2821,6 +2890,7 @@ "bpf_map_type_offset": 8, 
"bpf_prog_aux_offset": 16, "bpf_prog_type_offset": 8, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -2859,6 +2929,7 @@ "bpf_map_type_offset": 8, "bpf_prog_aux_offset": 16, "bpf_prog_type_offset": 8, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 152, "file_f_inode_offset": 32, @@ -2898,6 +2969,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 12, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -2935,6 +3007,7 @@ "bpf_map_type_offset": 4, "bpf_prog_aux_offset": 16, "bpf_prog_type_offset": 8, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -2975,6 +3048,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -3012,6 +3086,7 @@ "bpf_map_type_offset": 4, "bpf_prog_aux_offset": 16, "bpf_prog_type_offset": 8, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -3054,6 +3129,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -3097,6 +3173,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -3139,6 +3216,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -3182,6 +3260,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -3223,6 +3302,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -3266,6 +3346,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -3308,6 +3389,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -3351,6 +3433,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -3396,6 +3479,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -3441,6 +3525,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -3485,6 +3570,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, 
"creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -3529,6 +3615,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -3574,6 +3661,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -3616,6 +3704,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -3656,6 +3745,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -3700,6 +3790,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -3745,6 +3836,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -3790,6 +3882,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -3832,6 +3925,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -3868,6 +3962,7 @@ "bpf_map_type_offset": 4, "bpf_prog_aux_offset": 16, "bpf_prog_type_offset": 8, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -3908,6 +4003,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -3948,6 +4044,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -3988,6 +4085,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -4032,6 +4130,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -4077,6 +4176,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -4122,6 +4222,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -4167,6 +4268,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -4209,6 +4311,7 @@ 
"bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -4249,6 +4352,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -4290,6 +4394,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -4331,6 +4436,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -4374,6 +4480,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -4419,6 +4526,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -4464,6 +4572,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -4509,6 +4618,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -4548,6 +4658,7 @@ "bpf_map_type_offset": 4, "bpf_prog_aux_offset": 16, "bpf_prog_type_offset": 8, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -4589,6 +4700,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -4632,6 +4744,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -4672,6 +4785,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 12, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -4710,6 +4824,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 12, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -4750,6 +4865,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -4792,6 +4908,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -4834,6 +4951,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -4871,6 +4989,7 @@ "bpf_map_type_offset": 4, "bpf_prog_aux_offset": 16, "bpf_prog_type_offset": 8, + 
"creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -4908,6 +5027,7 @@ "bpf_map_type_offset": 4, "bpf_prog_aux_offset": 16, "bpf_prog_type_offset": 8, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -4945,6 +5065,7 @@ "bpf_map_type_offset": 8, "bpf_prog_aux_offset": 16, "bpf_prog_type_offset": 8, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -4982,6 +5103,7 @@ "bpf_map_type_offset": 8, "bpf_prog_aux_offset": 16, "bpf_prog_type_offset": 8, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -5019,6 +5141,7 @@ "bpf_map_type_offset": 8, "bpf_prog_aux_offset": 16, "bpf_prog_type_offset": 8, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -5056,6 +5179,7 @@ "bpf_map_type_offset": 4, "bpf_prog_aux_offset": 16, "bpf_prog_type_offset": 8, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -5094,6 +5218,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 12, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -5133,6 +5258,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 12, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -5171,6 +5297,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 12, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -5209,6 +5336,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 12, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -5249,6 +5377,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -5290,6 +5419,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -5332,6 +5462,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -5374,6 +5505,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -5416,6 +5548,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -5458,6 +5591,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -5501,6 +5635,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, 
"file_f_inode_offset": 32, @@ -5543,6 +5678,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -5586,6 +5722,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -5623,6 +5760,7 @@ "bpf_map_type_offset": 4, "bpf_prog_aux_offset": 16, "bpf_prog_type_offset": 8, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -5660,6 +5798,7 @@ "bpf_map_type_offset": 4, "bpf_prog_aux_offset": 16, "bpf_prog_type_offset": 8, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -5698,6 +5837,7 @@ "bpf_map_type_offset": 4, "bpf_prog_aux_offset": 16, "bpf_prog_type_offset": 8, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -5735,6 +5875,7 @@ "bpf_map_type_offset": 4, "bpf_prog_aux_offset": 16, "bpf_prog_type_offset": 8, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -5772,6 +5913,7 @@ "bpf_map_type_offset": 8, "bpf_prog_aux_offset": 16, "bpf_prog_type_offset": 8, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -5809,6 +5951,7 @@ "bpf_map_type_offset": 8, "bpf_prog_aux_offset": 16, "bpf_prog_type_offset": 8, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -5846,6 +5989,7 @@ "bpf_map_type_offset": 8, "bpf_prog_aux_offset": 16, "bpf_prog_type_offset": 8, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -5883,6 +6027,7 @@ "bpf_map_type_offset": 4, "bpf_prog_aux_offset": 16, "bpf_prog_type_offset": 8, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -5920,6 +6065,7 @@ "bpf_map_type_offset": 8, "bpf_prog_aux_offset": 16, "bpf_prog_type_offset": 8, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -5957,6 +6103,7 @@ "bpf_map_type_offset": 8, "bpf_prog_aux_offset": 16, "bpf_prog_type_offset": 8, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -5994,6 +6141,7 @@ "bpf_map_type_offset": 8, "bpf_prog_aux_offset": 16, "bpf_prog_type_offset": 8, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -6031,6 +6179,7 @@ "bpf_map_type_offset": 4, "bpf_prog_aux_offset": 16, "bpf_prog_type_offset": 8, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -6073,6 +6222,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -6117,6 +6267,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -6160,6 +6311,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, 
"bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -6202,6 +6354,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -6244,6 +6397,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -6286,6 +6440,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -6331,6 +6486,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -6376,6 +6532,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -6421,6 +6578,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -6466,6 +6624,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -6510,6 +6669,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -6553,6 +6713,7 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -6597,6 +6758,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -6641,6 +6803,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -6684,6 +6847,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -6727,6 +6891,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -6769,6 +6934,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -6812,6 +6978,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -6854,6 +7021,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, 
"dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -6896,6 +7064,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -6939,6 +7108,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -6981,6 +7151,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -7024,6 +7195,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -7066,6 +7238,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -7111,6 +7284,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -7156,6 +7330,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -7201,6 +7376,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -7246,6 +7422,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -7291,6 +7468,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -7336,6 +7514,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -7381,6 +7560,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -7426,6 +7606,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -7471,6 +7652,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -7516,6 +7698,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -7561,6 +7744,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -7606,6 +7790,7 @@ "bpf_prog_aux_offset": 
32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -7651,6 +7836,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -7695,6 +7881,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -7739,6 +7926,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -7784,6 +7972,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -7828,6 +8017,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -7872,6 +8062,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -7916,6 +8107,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -7960,6 +8152,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -8004,6 +8197,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -8049,6 +8243,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -8094,6 +8289,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -8139,6 +8335,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -8183,6 +8380,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -8227,6 +8425,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -8271,6 +8470,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -8315,6 +8515,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, 
"creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, @@ -8359,6 +8560,7 @@ "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, "dentry_sb_offset": 104, "file_f_inode_offset": 32, diff --git a/pkg/security/probe/constantfetch/constant_names.go b/pkg/security/probe/constantfetch/constant_names.go index 4522e8c3c630c..f88f879a1dbdd 100644 --- a/pkg/security/probe/constantfetch/constant_names.go +++ b/pkg/security/probe/constantfetch/constant_names.go @@ -20,6 +20,7 @@ const ( OffsetNameSignalStructStructTTY = "tty_offset" OffsetNameTTYStructStructName = "tty_name_offset" OffsetNameCredStructUID = "creds_uid_offset" + OffsetNameCredStructCapInheritable = "creds_cap_inheritable_offset" OffsetNameLinuxBinprmP = "linux_binprm_p_offset" OffsetNameLinuxBinprmArgc = "linux_binprm_argc_offset" OffsetNameLinuxBinprmEnvc = "linux_binprm_envc_offset" diff --git a/pkg/security/probe/constantfetch/fallback.go b/pkg/security/probe/constantfetch/fallback.go index 1e14ba24f32ce..6efca313ff1bb 100644 --- a/pkg/security/probe/constantfetch/fallback.go +++ b/pkg/security/probe/constantfetch/fallback.go @@ -51,6 +51,8 @@ func (f *FallbackConstantFetcher) appendRequest(id string) { value = getTTYNameOffset(f.kernelVersion) case OffsetNameCredStructUID: value = getCredsUIDOffset(f.kernelVersion) + case OffsetNameCredStructCapInheritable: + value = getCredCapInheritableOffset(f.kernelVersion) case OffsetNameBPFMapStructID: value = getBpfMapIDOffset(f.kernelVersion) case OffsetNameBPFMapStructName: @@ -324,6 +326,10 @@ func getCredsUIDOffset(kv *kernel.Version) uint64 { } } +func getCredCapInheritableOffset(kv *kernel.Version) uint64 { + return getCredsUIDOffset(kv) + 36 +} + func getBpfMapIDOffset(kv *kernel.Version) uint64 { switch { case kv.IsInRangeCloseOpen(kernel.Kernel5_15, kernel.Kernel5_16): diff --git a/pkg/security/probe/probe_ebpf.go b/pkg/security/probe/probe_ebpf.go index 5260f8ae1832d..45ec691959f64 100644 --- a/pkg/security/probe/probe_ebpf.go +++ b/pkg/security/probe/probe_ebpf.go @@ -1851,6 +1851,7 @@ func AppendProbeRequestsToFetcher(constantFetcher constantfetch.ConstantFetcher, constantFetcher.AppendOffsetofRequest(constantfetch.OffsetNameSignalStructStructTTY, "struct signal_struct", "tty", "linux/sched/signal.h") constantFetcher.AppendOffsetofRequest(constantfetch.OffsetNameTTYStructStructName, "struct tty_struct", "name", "linux/tty.h") constantFetcher.AppendOffsetofRequest(constantfetch.OffsetNameCredStructUID, "struct cred", "uid", "linux/cred.h") + constantFetcher.AppendOffsetofRequest(constantfetch.OffsetNameCredStructCapInheritable, "struct cred", "cap_inheritable", "linux/cred.h") constantFetcher.AppendOffsetofRequest(constantfetch.OffsetNameLinuxBinprmP, "struct linux_binprm", "p", "linux/binfmts.h") constantFetcher.AppendOffsetofRequest(constantfetch.OffsetNameLinuxBinprmArgc, "struct linux_binprm", "argc", "linux/binfmts.h") constantFetcher.AppendOffsetofRequest(constantfetch.OffsetNameLinuxBinprmEnvc, "struct linux_binprm", "envc", "linux/binfmts.h") From 9c6a4e6e86276855fd6eab66cbee2711e9e75f04 Mon Sep 17 00:00:00 2001 From: Raphael Gavache Date: Tue, 16 Apr 2024 00:28:55 +0200 Subject: [PATCH 59/99] pass ctx everywhere (#24645) * tmp * pass ctx after rename * avoid fake ctx * lint windows --- pkg/installer/install.go | 41 +++++----- pkg/installer/install_test.go | 21 ++--- pkg/installer/installer.go | 19 +++-- pkg/installer/repository/repositories.go | 9 ++- 
pkg/installer/repository/repositories_test.go | 13 +-- pkg/installer/repository/repository.go | 47 +++++------ pkg/installer/repository/repository_test.go | 26 +++--- pkg/installer/service/apm_inject.go | 80 ++++++++++--------- pkg/installer/service/apm_inject_windows.go | 6 +- pkg/installer/service/cmd_executor.go | 30 ++++--- pkg/installer/service/cmd_executor_windows.go | 11 ++- pkg/installer/service/datadog_agent.go | 52 ++++++------ .../service/datadog_agent_windows.go | 10 ++- pkg/installer/service/datadog_installer.go | 30 ++++--- .../service/datadog_installer_windows.go | 10 ++- pkg/installer/service/docker.go | 19 ++--- pkg/installer/service/systemd.go | 39 ++++----- pkg/installer/service/systemd_test.go | 31 +++---- 18 files changed, 271 insertions(+), 223 deletions(-) diff --git a/pkg/installer/install.go b/pkg/installer/install.go index db8a332a5afde..95ab2990aa81b 100644 --- a/pkg/installer/install.go +++ b/pkg/installer/install.go @@ -7,6 +7,7 @@ package installer import ( "archive/tar" + "context" "fmt" "io" "io/fs" @@ -47,7 +48,7 @@ func newPackageManager(repositories *repository.Repositories) *packageManager { } } -func (m *packageManager) installStable(pkg string, version string, image oci.Image) error { +func (m *packageManager) installStable(ctx context.Context, pkg string, version string, image oci.Image) error { tmpDir, err := os.MkdirTemp("", "") if err != nil { return fmt.Errorf("could not create temporary directory: %w", err) @@ -58,7 +59,7 @@ func (m *packageManager) installStable(pkg string, version string, image oci.Ima if err != nil { return fmt.Errorf("could not extract package layers: %w", err) } - err = m.repositories.Create(pkg, version, tmpDir) + err = m.repositories.Create(ctx, pkg, version, tmpDir) if err != nil { return fmt.Errorf("could not create repository: %w", err) } @@ -67,17 +68,17 @@ func (m *packageManager) installStable(pkg string, version string, image oci.Ima defer m.installLock.Unlock() switch pkg { case packageDatadogAgent: - return service.SetupAgentUnits() + return service.SetupAgentUnits(ctx) case packageAPMInjector: - return service.SetupAPMInjector() + return service.SetupAPMInjector(ctx) case packageDatadogInstaller: - return service.SetupInstallerUnit() + return service.SetupInstallerUnit(ctx) default: return nil } } -func (m *packageManager) installExperiment(pkg string, version string, image oci.Image) error { +func (m *packageManager) installExperiment(ctx context.Context, pkg string, version string, image oci.Image) error { tmpDir, err := os.MkdirTemp("", "") if err != nil { return fmt.Errorf("could not create temporary directory: %w", err) @@ -89,52 +90,52 @@ func (m *packageManager) installExperiment(pkg string, version string, image oci return fmt.Errorf("could not extract package layers: %w", err) } repository := m.repositories.Get(pkg) - err = repository.SetExperiment(version, tmpDir) + err = repository.SetExperiment(ctx, version, tmpDir) if err != nil { return fmt.Errorf("could not set experiment: %w", err) } - return m.startExperiment(pkg) + return m.startExperiment(ctx, pkg) } -func (m *packageManager) promoteExperiment(pkg string) error { +func (m *packageManager) promoteExperiment(ctx context.Context, pkg string) error { repository := m.repositories.Get(pkg) - err := repository.PromoteExperiment() + err := repository.PromoteExperiment(ctx) if err != nil { return fmt.Errorf("could not promote experiment: %w", err) } - return m.stopExperiment(pkg) + return m.stopExperiment(ctx, pkg) } -func (m *packageManager) 
uninstallExperiment(pkg string) error { +func (m *packageManager) uninstallExperiment(ctx context.Context, pkg string) error { repository := m.repositories.Get(pkg) - err := repository.DeleteExperiment() + err := repository.DeleteExperiment(ctx) if err != nil { return fmt.Errorf("could not delete experiment: %w", err) } - return m.stopExperiment(pkg) + return m.stopExperiment(ctx, pkg) } -func (m *packageManager) startExperiment(pkg string) error { +func (m *packageManager) startExperiment(ctx context.Context, pkg string) error { m.installLock.Lock() defer m.installLock.Unlock() switch pkg { case packageDatadogAgent: - return service.StartAgentExperiment() + return service.StartAgentExperiment(ctx) case packageDatadogInstaller: - return service.StartInstallerExperiment() + return service.StartInstallerExperiment(ctx) default: return nil } } -func (m *packageManager) stopExperiment(pkg string) error { +func (m *packageManager) stopExperiment(ctx context.Context, pkg string) error { m.installLock.Lock() defer m.installLock.Unlock() switch pkg { case packageDatadogAgent: - return service.StopAgentExperiment() + return service.StopAgentExperiment(ctx) case packageAPMInjector: - return service.StopInstallerExperiment() + return service.StopInstallerExperiment(ctx) default: return nil } diff --git a/pkg/installer/install_test.go b/pkg/installer/install_test.go index 2ab477b3857b3..9b1f67f7a153d 100644 --- a/pkg/installer/install_test.go +++ b/pkg/installer/install_test.go @@ -10,6 +10,7 @@ package installer import ( "bytes" + "context" "fmt" "io" "io/fs" @@ -22,6 +23,8 @@ import ( "github.com/DataDog/datadog-agent/pkg/installer/repository" ) +var testCtx = context.TODO() + func assertEqualFS(t *testing.T, expected fs.FS, actual fs.FS) { t.Helper() err := fsContainsAll(expected, actual) @@ -95,7 +98,7 @@ func TestInstallStable(t *testing.T) { defer s.Close() installer := newTestPackageManager(t) - err := installer.installStable(fixtureSimpleV1.pkg, fixtureSimpleV1.version, s.Image(fixtureSimpleV1)) + err := installer.installStable(testCtx, fixtureSimpleV1.pkg, fixtureSimpleV1.version, s.Image(fixtureSimpleV1)) assert.NoError(t, err) r := installer.repositories.Get(fixtureSimpleV1.pkg) state, err := r.GetState() @@ -111,9 +114,9 @@ func TestInstallExperiment(t *testing.T) { defer s.Close() installer := newTestPackageManager(t) - err := installer.installStable(fixtureSimpleV1.pkg, fixtureSimpleV1.version, s.Image(fixtureSimpleV1)) + err := installer.installStable(testCtx, fixtureSimpleV1.pkg, fixtureSimpleV1.version, s.Image(fixtureSimpleV1)) assert.NoError(t, err) - err = installer.installExperiment(fixtureSimpleV1.pkg, fixtureSimpleV2.version, s.Image(fixtureSimpleV2)) + err = installer.installExperiment(testCtx, fixtureSimpleV1.pkg, fixtureSimpleV2.version, s.Image(fixtureSimpleV2)) assert.NoError(t, err) r := installer.repositories.Get(fixtureSimpleV1.pkg) state, err := r.GetState() @@ -130,11 +133,11 @@ func TestInstallPromoteExperiment(t *testing.T) { defer s.Close() installer := newTestPackageManager(t) - err := installer.installStable(fixtureSimpleV1.pkg, fixtureSimpleV1.version, s.Image(fixtureSimpleV1)) + err := installer.installStable(testCtx, fixtureSimpleV1.pkg, fixtureSimpleV1.version, s.Image(fixtureSimpleV1)) assert.NoError(t, err) - err = installer.installExperiment(fixtureSimpleV1.pkg, fixtureSimpleV2.version, s.Image(fixtureSimpleV2)) + err = installer.installExperiment(testCtx, fixtureSimpleV1.pkg, fixtureSimpleV2.version, s.Image(fixtureSimpleV2)) assert.NoError(t, err) - err = 
installer.promoteExperiment(fixtureSimpleV1.pkg) + err = installer.promoteExperiment(testCtx, fixtureSimpleV1.pkg) assert.NoError(t, err) r := installer.repositories.Get(fixtureSimpleV1.pkg) state, err := r.GetState() @@ -150,11 +153,11 @@ func TestUninstallExperiment(t *testing.T) { defer s.Close() installer := newTestPackageManager(t) - err := installer.installStable(fixtureSimpleV1.pkg, fixtureSimpleV1.version, s.Image(fixtureSimpleV1)) + err := installer.installStable(testCtx, fixtureSimpleV1.pkg, fixtureSimpleV1.version, s.Image(fixtureSimpleV1)) assert.NoError(t, err) - err = installer.installExperiment(fixtureSimpleV1.pkg, fixtureSimpleV2.version, s.Image(fixtureSimpleV2)) + err = installer.installExperiment(testCtx, fixtureSimpleV1.pkg, fixtureSimpleV2.version, s.Image(fixtureSimpleV2)) assert.NoError(t, err) - err = installer.uninstallExperiment(fixtureSimpleV1.pkg) + err = installer.uninstallExperiment(testCtx, fixtureSimpleV1.pkg) assert.NoError(t, err) r := installer.repositories.Get(fixtureSimpleV1.pkg) state, err := r.GetState() diff --git a/pkg/installer/installer.go b/pkg/installer/installer.go index a502cd9cbb793..58971fd9e1fdf 100644 --- a/pkg/installer/installer.go +++ b/pkg/installer/installer.go @@ -108,12 +108,15 @@ func Purge() { } func purge(locksPath, repositoryPath string) { - service.RemoveAgentUnits() - if err := service.RemoveAPMInjector(); err != nil { + var err error + span, ctx := tracer.StartSpanFromContext(context.Background(), "purge") + defer span.Finish(tracer.WithError(err)) + service.RemoveAgentUnits(ctx) + if err = service.RemoveAPMInjector(ctx); err != nil { log.Warnf("installer: could not remove APM injector: %v", err) } cleanDir(locksPath, os.RemoveAll) - cleanDir(repositoryPath, service.RemoveAll) + cleanDir(repositoryPath, func(path string) error { return service.RemoveAll(ctx, path) }) } func cleanDir(dir string, cleanFunc func(string) error) { @@ -179,7 +182,7 @@ func (i *installerImpl) Start(ctx context.Context) error { select { case <-time.After(gcInterval): i.m.Lock() - err := i.repositories.Cleanup() + err := i.repositories.Cleanup(context.Background()) i.m.Unlock() if err != nil { log.Errorf("installer: could not run GC: %v", err) @@ -271,7 +274,7 @@ func (i *installerImpl) bootstrapPackage(ctx context.Context, url string, expect if (expectedPackage != "" && downloadedPackage.Name != expectedPackage) || (expectedVersion != "" && downloadedPackage.Version != expectedVersion) { return fmt.Errorf("downloaded package does not match expected package: %s, %s != %s, %s", downloadedPackage.Name, downloadedPackage.Version, expectedPackage, expectedVersion) } - err = i.packageManager.installStable(downloadedPackage.Name, downloadedPackage.Version, downloadedPackage.Image) + err = i.packageManager.installStable(ctx, downloadedPackage.Name, downloadedPackage.Version, downloadedPackage.Image) if err != nil { return fmt.Errorf("could not install: %w", err) } @@ -307,7 +310,7 @@ func (i *installerImpl) StartExperiment(ctx context.Context, pkg string, version if downloadedPackage.Name != experimentPackage.Name || downloadedPackage.Version != experimentPackage.Version { return fmt.Errorf("downloaded package does not match requested package: %s, %s != %s, %s", downloadedPackage.Name, downloadedPackage.Version, experimentPackage.Name, experimentPackage.Version) } - err = i.packageManager.installExperiment(pkg, version, downloadedPackage.Image) + err = i.packageManager.installExperiment(ctx, pkg, version, downloadedPackage.Image) if err != nil { return 
fmt.Errorf("could not install experiment: %w", err) } @@ -325,7 +328,7 @@ func (i *installerImpl) PromoteExperiment(ctx context.Context, pkg string) (err defer i.refreshState(ctx) log.Infof("Installer: Promoting experiment for package %s", pkg) - err = i.packageManager.promoteExperiment(pkg) + err = i.packageManager.promoteExperiment(ctx, pkg) if err != nil { return fmt.Errorf("could not promote experiment: %w", err) } @@ -343,7 +346,7 @@ func (i *installerImpl) StopExperiment(ctx context.Context, pkg string) (err err defer i.refreshState(ctx) defer log.Infof("Installer: Stopping experiment for package %s", pkg) - err = i.packageManager.uninstallExperiment(pkg) + err = i.packageManager.uninstallExperiment(ctx, pkg) if err != nil { return fmt.Errorf("could not stop experiment: %w", err) } diff --git a/pkg/installer/repository/repositories.go b/pkg/installer/repository/repositories.go index a695625ae1713..aab338c3f0d5f 100644 --- a/pkg/installer/repository/repositories.go +++ b/pkg/installer/repository/repositories.go @@ -6,6 +6,7 @@ package repository import ( + "context" "fmt" "os" "path/filepath" @@ -54,9 +55,9 @@ func (r *Repositories) Get(pkg string) *Repository { } // Create creates a new repository for the given package name. -func (r *Repositories) Create(pkg string, version string, stableSourcePath string) error { +func (r *Repositories) Create(ctx context.Context, pkg string, version string, stableSourcePath string) error { repository := r.newRepository(pkg) - err := repository.Create(version, stableSourcePath) + err := repository.Create(ctx, version, stableSourcePath) if err != nil { return fmt.Errorf("could not create repository for package %s: %w", pkg, err) } @@ -86,13 +87,13 @@ func (r *Repositories) GetPackageState(pkg string) (State, error) { } // Cleanup cleans up the repositories. 
-func (r *Repositories) Cleanup() error { +func (r *Repositories) Cleanup(ctx context.Context) error { repositories, err := r.loadRepositories() if err != nil { return fmt.Errorf("could not load repositories: %w", err) } for _, repo := range repositories { - err := repo.Cleanup() + err := repo.Cleanup(ctx) if err != nil { return fmt.Errorf("could not clean up repository: %w", err) } diff --git a/pkg/installer/repository/repositories_test.go b/pkg/installer/repository/repositories_test.go index e404ede6f934c..1fa8e3add541f 100644 --- a/pkg/installer/repository/repositories_test.go +++ b/pkg/installer/repository/repositories_test.go @@ -8,6 +8,7 @@ package repository import ( + "context" "testing" "github.com/stretchr/testify/assert" @@ -15,6 +16,8 @@ import ( "github.com/DataDog/datadog-agent/pkg/installer/service" ) +var testCtx = context.TODO() + func newTestRepositories(t *testing.T) *Repositories { rootPath := t.TempDir() locksRootPath := t.TempDir() @@ -34,12 +37,12 @@ func TestRepositoriesEmpty(t *testing.T) { func TestRepositories(t *testing.T) { repositories := newTestRepositories(t) - err := repositories.Create("repo1", "v1", t.TempDir()) + err := repositories.Create(testCtx, "repo1", "v1", t.TempDir()) assert.NoError(t, err) repository := repositories.Get("repo1") - err = repository.SetExperiment("v2", t.TempDir()) + err = repository.SetExperiment(testCtx, "v2", t.TempDir()) assert.NoError(t, err) - err = repositories.Create("repo2", "v1.0", t.TempDir()) + err = repositories.Create(testCtx, "repo2", "v1.0", t.TempDir()) assert.NoError(t, err) state, err := repositories.GetState() @@ -51,9 +54,9 @@ func TestRepositories(t *testing.T) { func TestRepositoriesReopen(t *testing.T) { repositories := newTestRepositories(t) - err := repositories.Create("repo1", "v1", t.TempDir()) + err := repositories.Create(testCtx, "repo1", "v1", t.TempDir()) assert.NoError(t, err) - err = repositories.Create("repo2", "v1", t.TempDir()) + err = repositories.Create(testCtx, "repo2", "v1", t.TempDir()) assert.NoError(t, err) repositories = NewRepositories(repositories.rootPath, repositories.locksPath) diff --git a/pkg/installer/repository/repository.go b/pkg/installer/repository/repository.go index a8ad6516f772f..b4d44d0e40b08 100644 --- a/pkg/installer/repository/repository.go +++ b/pkg/installer/repository/repository.go @@ -7,6 +7,7 @@ package repository import ( + "context" "errors" "fmt" "io/fs" @@ -109,7 +110,7 @@ func (r *Repository) GetState() (State, error) { // 2. Create the root directory. // 3. Move the stable source to the repository. // 4. Create the stable link. -func (r *Repository) Create(name string, stableSourcePath string) error { +func (r *Repository) Create(ctx context.Context, name string, stableSourcePath string) error { err := os.MkdirAll(r.rootPath, 0755) if err != nil { return fmt.Errorf("could not create packages root directory: %w", err) @@ -150,12 +151,12 @@ func (r *Repository) Create(name string, stableSourcePath string) error { } } - err = repository.cleanup() + err = repository.cleanup(ctx) if err != nil { return fmt.Errorf("could not cleanup repository: %w", err) } - err = repository.setStable(name, stableSourcePath) + err = repository.setStable(ctx, name, stableSourcePath) if err != nil { return fmt.Errorf("could not set first stable: %w", err) } @@ -167,19 +168,19 @@ func (r *Repository) Create(name string, stableSourcePath string) error { // 1. Cleanup the repository. // 2. Move the experiment source to the repository. // 3. 
Set the experiment link to the experiment package. -func (r *Repository) SetExperiment(name string, sourcePath string) error { +func (r *Repository) SetExperiment(ctx context.Context, name string, sourcePath string) error { repository, err := readRepository(r.rootPath, r.locksPath) if err != nil { return err } - err = repository.cleanup() + err = repository.cleanup(ctx) if err != nil { return fmt.Errorf("could not cleanup repository: %w", err) } if !repository.stable.Exists() { return fmt.Errorf("stable package does not exist, invalid state") } - err = repository.setExperiment(name, sourcePath) + err = repository.setExperiment(ctx, name, sourcePath) if err != nil { return fmt.Errorf("could not set experiment: %w", err) } @@ -192,12 +193,12 @@ func (r *Repository) SetExperiment(name string, sourcePath string) error { // 2. Set the stable link to the experiment package. // 3. Delete the experiment link. // 4. Cleanup the repository to remove the previous stable package. -func (r *Repository) PromoteExperiment() error { +func (r *Repository) PromoteExperiment(ctx context.Context) error { repository, err := readRepository(r.rootPath, r.locksPath) if err != nil { return err } - err = repository.cleanup() + err = repository.cleanup(ctx) if err != nil { return fmt.Errorf("could not cleanup repository: %w", err) } @@ -221,7 +222,7 @@ func (r *Repository) PromoteExperiment() error { if err != nil { return err } - err = repository.cleanup() + err = repository.cleanup(ctx) if err != nil { return fmt.Errorf("could not cleanup repository: %w", err) } @@ -233,12 +234,12 @@ func (r *Repository) PromoteExperiment() error { // 1. Cleanup the repository. // 2. Delete the experiment link. // 3. Cleanup the repository to remove the previous experiment package. -func (r *Repository) DeleteExperiment() error { +func (r *Repository) DeleteExperiment(ctx context.Context) error { repository, err := readRepository(r.rootPath, r.locksPath) if err != nil { return err } - err = repository.cleanup() + err = repository.cleanup(ctx) if err != nil { return fmt.Errorf("could not cleanup repository: %w", err) } @@ -258,7 +259,7 @@ func (r *Repository) DeleteExperiment() error { if err != nil { return err } - err = repository.cleanup() + err = repository.cleanup(ctx) if err != nil { return fmt.Errorf("could not cleanup repository: %w", err) } @@ -266,12 +267,12 @@ func (r *Repository) DeleteExperiment() error { } // Cleanup calls the cleanup function of the repository -func (r *Repository) Cleanup() error { +func (r *Repository) Cleanup(ctx context.Context) error { repository, err := readRepository(r.rootPath, r.locksPath) if err != nil { return err } - return repository.cleanup() + return repository.cleanup(ctx) } type repositoryFiles struct { @@ -321,8 +322,8 @@ func readRepository(rootPath string, locksPath string) (*repositoryFiles, error) }, nil } -func (r *repositoryFiles) setExperiment(name string, sourcePath string) error { - path, err := movePackageFromSource(name, r.rootPath, r.lockedPackages, sourcePath) +func (r *repositoryFiles) setExperiment(ctx context.Context, name string, sourcePath string) error { + path, err := movePackageFromSource(ctx, name, r.rootPath, r.lockedPackages, sourcePath) if err != nil { return fmt.Errorf("could not move experiment source: %w", err) } @@ -330,8 +331,8 @@ func (r *repositoryFiles) setExperiment(name string, sourcePath string) error { return r.experiment.Set(path) } -func (r *repositoryFiles) setStable(name string, sourcePath string) error { - path, err := 
movePackageFromSource(name, r.rootPath, r.lockedPackages, sourcePath) +func (r *repositoryFiles) setStable(ctx context.Context, name string, sourcePath string) error { + path, err := movePackageFromSource(ctx, name, r.rootPath, r.lockedPackages, sourcePath) if err != nil { return fmt.Errorf("could not move stable source: %w", err) } @@ -339,7 +340,7 @@ func (r *repositoryFiles) setStable(name string, sourcePath string) error { return r.stable.Set(path) } -func movePackageFromSource(packageName string, rootPath string, lockedPackages map[string]bool, sourcePath string) (string, error) { +func movePackageFromSource(ctx context.Context, packageName string, rootPath string, lockedPackages map[string]bool, sourcePath string) (string, error) { if packageName == "" || packageName == stableVersionLink || packageName == experimentVersionLink { return "", fmt.Errorf("invalid package name") } @@ -367,7 +368,7 @@ func movePackageFromSource(packageName string, rootPath string, lockedPackages m } switch filepath.Base(rootPath) { case "datadog-agent": - if err := service.ChownDDAgent(targetPath); err != nil { + if err := service.ChownDDAgent(ctx, targetPath); err != nil { return "", err } case "datadog-installer": @@ -375,7 +376,7 @@ func movePackageFromSource(packageName string, rootPath string, lockedPackages m if err := os.Chmod(helperPath, 0750); err != nil { return "", fmt.Errorf("could not set permissions on installer-helper: %w", err) } - if err := service.SetCapHelper(helperPath); err != nil { + if err := service.SetCapHelper(ctx, helperPath); err != nil { return "", fmt.Errorf("could not set capabilities on installer-helper: %w", err) } } @@ -383,7 +384,7 @@ func movePackageFromSource(packageName string, rootPath string, lockedPackages m return targetPath, nil } -func (r *repositoryFiles) cleanup() error { +func (r *repositoryFiles) cleanup(ctx context.Context) error { files, err := os.ReadDir(r.rootPath) if err != nil { return fmt.Errorf("could not read root directory: %w", err) @@ -405,7 +406,7 @@ func (r *repositoryFiles) cleanup() error { pkgRepositoryPath := filepath.Join(r.rootPath, file.Name()) pkgLocksPath := filepath.Join(r.locksPath, file.Name()) log.Debugf("package %s isn't locked, removing it", pkgRepositoryPath) - if err := service.RemoveAll(pkgRepositoryPath); err != nil { + if err := service.RemoveAll(ctx, pkgRepositoryPath); err != nil { log.Errorf("could not remove package %s directory, will retry: %v", pkgRepositoryPath, err) } if err := os.RemoveAll(pkgLocksPath); err != nil { diff --git a/pkg/installer/repository/repository_test.go b/pkg/installer/repository/repository_test.go index b5f69388972ed..d8e817315fb57 100644 --- a/pkg/installer/repository/repository_test.go +++ b/pkg/installer/repository/repository_test.go @@ -28,7 +28,7 @@ func createTestRepository(t *testing.T, dir string, stablePackageName string) *R rootPath: repositoryPath, locksPath: locksPath, } - err := r.Create(stablePackageName, stablePackagePath) + err := r.Create(testCtx, stablePackageName, stablePackagePath) assert.NoError(t, err) return &r } @@ -90,7 +90,7 @@ func TestSetExperiment(t *testing.T) { repository := createTestRepository(t, dir, "v1") experimentDownloadPackagePath := createTestDownloadedPackage(t, dir, "v2") - err := repository.SetExperiment("v2", experimentDownloadPackagePath) + err := repository.SetExperiment(testCtx, "v2", experimentDownloadPackagePath) assert.NoError(t, err) assert.DirExists(t, path.Join(repository.rootPath, "v2")) } @@ -101,9 +101,9 @@ func TestSetExperimentTwice(t 
*testing.T) { experiment1DownloadPackagePath := createTestDownloadedPackage(t, dir, "v2") experiment2DownloadPackagePath := createTestDownloadedPackage(t, dir, "v3") - err := repository.SetExperiment("v2", experiment1DownloadPackagePath) + err := repository.SetExperiment(testCtx, "v2", experiment1DownloadPackagePath) assert.NoError(t, err) - err = repository.SetExperiment("v3", experiment2DownloadPackagePath) + err = repository.SetExperiment(testCtx, "v3", experiment2DownloadPackagePath) assert.NoError(t, err) assert.DirExists(t, path.Join(repository.rootPath, "v2")) } @@ -115,7 +115,7 @@ func TestSetExperimentBeforeStable(t *testing.T) { } experimentDownloadPackagePath := createTestDownloadedPackage(t, dir, "v2") - err := repository.SetExperiment("v2", experimentDownloadPackagePath) + err := repository.SetExperiment(testCtx, "v2", experimentDownloadPackagePath) assert.Error(t, err) } @@ -124,9 +124,9 @@ func TestPromoteExperiment(t *testing.T) { repository := createTestRepository(t, dir, "v1") experimentDownloadPackagePath := createTestDownloadedPackage(t, dir, "v2") - err := repository.SetExperiment("v2", experimentDownloadPackagePath) + err := repository.SetExperiment(testCtx, "v2", experimentDownloadPackagePath) assert.NoError(t, err) - err = repository.PromoteExperiment() + err = repository.PromoteExperiment(testCtx) assert.NoError(t, err) assert.NoDirExists(t, path.Join(repository.rootPath, "v1")) assert.DirExists(t, path.Join(repository.rootPath, "v2")) @@ -136,7 +136,7 @@ func TestPromoteExperimentWithoutExperiment(t *testing.T) { dir := t.TempDir() repository := createTestRepository(t, dir, "v1") - err := repository.PromoteExperiment() + err := repository.PromoteExperiment(testCtx) assert.Error(t, err) } @@ -145,9 +145,9 @@ func TestDeleteExperiment(t *testing.T) { repository := createTestRepository(t, dir, "v1") experimentDownloadPackagePath := createTestDownloadedPackage(t, dir, "v2") - err := repository.SetExperiment("v2", experimentDownloadPackagePath) + err := repository.SetExperiment(testCtx, "v2", experimentDownloadPackagePath) assert.NoError(t, err) - err = repository.DeleteExperiment() + err = repository.DeleteExperiment(testCtx) assert.NoError(t, err) assert.NoDirExists(t, path.Join(repository.rootPath, "v2")) } @@ -156,7 +156,7 @@ func TestDeleteExperimentWithoutExperiment(t *testing.T) { dir := t.TempDir() repository := createTestRepository(t, dir, "v1") - err := repository.DeleteExperiment() + err := repository.DeleteExperiment(testCtx) assert.NoError(t, err) } @@ -165,7 +165,7 @@ func TestDeleteExperimentWithLockedPackage(t *testing.T) { repository := createTestRepository(t, dir, "v1") experimentDownloadPackagePath := createTestDownloadedPackage(t, dir, "v2") - err := repository.SetExperiment("v2", experimentDownloadPackagePath) + err := repository.SetExperiment(testCtx, "v2", experimentDownloadPackagePath) assert.NoError(t, err) // Add a running process... our own! So we're sure it's running. 
@@ -188,7 +188,7 @@ func TestDeleteExperimentWithLockedPackage(t *testing.T) { ) assert.NoError(t, err) - err = repository.DeleteExperiment() + err = repository.DeleteExperiment(testCtx) assert.NoError(t, err) assert.DirExists(t, path.Join(repository.rootPath, "v2")) assert.DirExists(t, path.Join(repository.locksPath, "v2")) diff --git a/pkg/installer/service/apm_inject.go b/pkg/installer/service/apm_inject.go index d1d29ff1931f9..6333e0d33cf6e 100644 --- a/pkg/installer/service/apm_inject.go +++ b/pkg/installer/service/apm_inject.go @@ -10,12 +10,14 @@ package service import ( "bytes" + "context" "fmt" "os" "path" "strings" "github.com/DataDog/datadog-agent/pkg/util/log" + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" ) var ( @@ -35,24 +37,31 @@ dogstatsd_socket: %s ) // SetupAPMInjector sets up the injector at bootstrap -func SetupAPMInjector() error { +func SetupAPMInjector(ctx context.Context) error { + var err error + span, ctx := tracer.StartSpanFromContext(ctx, "setup_injector") + defer span.Finish(tracer.WithError(err)) // Enforce dd-installer is in the dd-agent group - if err := setInstallerAgentGroup(); err != nil { + if err = setInstallerAgentGroup(ctx); err != nil { return err } installer := &apmInjectorInstaller{ installPath: "/opt/datadog-packages/datadog-apm-inject/stable", } - return installer.Setup() + return installer.Setup(ctx) } // RemoveAPMInjector removes the APM injector -func RemoveAPMInjector() error { +func RemoveAPMInjector(ctx context.Context) error { + span, ctx := tracer.StartSpanFromContext(ctx, "remove_injector") + var err error + defer span.Finish(tracer.WithError(err)) installer := &apmInjectorInstaller{ installPath: "/opt/datadog-packages/datadog-apm-inject/stable", } - return installer.Remove() + err = installer.Remove(ctx) + return err } type apmInjectorInstaller struct { @@ -60,39 +69,39 @@ type apmInjectorInstaller struct { } // Setup sets up the APM injector -func (a *apmInjectorInstaller) Setup() error { +func (a *apmInjectorInstaller) Setup(ctx context.Context) error { var err error defer func() { if err != nil { - removeErr := a.Remove() + removeErr := a.Remove(ctx) if removeErr != nil { log.Warnf("Failed to remove APM injector: %v", removeErr) } } }() - if err := a.setAgentConfig(); err != nil { + if err := a.setAgentConfig(ctx); err != nil { return err } if err := a.setRunPermissions(); err != nil { return err } - if err := a.setLDPreloadConfig(); err != nil { + if err := a.setLDPreloadConfig(ctx); err != nil { return err } - if err := a.setDockerConfig(); err != nil { + if err := a.setDockerConfig(ctx); err != nil { return err } return nil } -func (a *apmInjectorInstaller) Remove() error { - if err := a.deleteAgentConfig(); err != nil { +func (a *apmInjectorInstaller) Remove(ctx context.Context) error { + if err := a.deleteAgentConfig(ctx); err != nil { return err } - if err := a.deleteLDPreloadConfig(); err != nil { + if err := a.deleteLDPreloadConfig(ctx); err != nil { return err } - if err := a.deleteDockerConfig(); err != nil { + if err := a.deleteDockerConfig(ctx); err != nil { return err } return nil @@ -103,7 +112,7 @@ func (a *apmInjectorInstaller) setRunPermissions() error { } // setLDPreloadConfig adds preload options on /etc/ld.so.preload, overriding existing ones -func (a *apmInjectorInstaller) setLDPreloadConfig() error { +func (a *apmInjectorInstaller) setLDPreloadConfig(ctx context.Context) error { var ldSoPreload []byte stat, err := os.Stat(ldSoPreloadPath) if err == nil { @@ -133,7 +142,7 @@ func (a *apmInjectorInstaller) 
setLDPreloadConfig() error { return err } - return executeCommand(string(replaceLDPreloadCommand)) + return executeCommand(ctx, string(replaceLDPreloadCommand)) } // setLDPreloadConfigContent sets the content of the LD preload configuration @@ -154,7 +163,7 @@ func (a *apmInjectorInstaller) setLDPreloadConfigContent(ldSoPreload []byte) ([] } // deleteLDPreloadConfig removes the preload options from /etc/ld.so.preload -func (a *apmInjectorInstaller) deleteLDPreloadConfig() error { +func (a *apmInjectorInstaller) deleteLDPreloadConfig(ctx context.Context) error { var ldSoPreload []byte stat, err := os.Stat(ldSoPreloadPath) if err == nil { @@ -186,7 +195,7 @@ func (a *apmInjectorInstaller) deleteLDPreloadConfig() error { return err } - return executeCommand(string(replaceLDPreloadCommand)) + return executeCommand(ctx, string(replaceLDPreloadCommand)) } // deleteLDPreloadConfigContent deletes the content of the LD preload configuration @@ -227,14 +236,14 @@ func (a *apmInjectorInstaller) deleteLDPreloadConfigContent(ldSoPreload []byte) // installer system and this will be replaced by a proper experiment when available. This is a temporary // solution to allow the APM injector to be installed, and if the agent crashes, we try to detect it and // restore the previous configuration -func (a *apmInjectorInstaller) setAgentConfig() (err error) { - err = backupAgentConfig() +func (a *apmInjectorInstaller) setAgentConfig(ctx context.Context) (err error) { + err = backupAgentConfig(ctx) if err != nil { return err } defer func() { if err != nil { - restoreErr := restoreAgentConfig() + restoreErr := restoreAgentConfig(ctx) if restoreErr != nil { log.Warnf("Failed to restore agent config: %v", restoreErr) } @@ -257,7 +266,7 @@ func (a *apmInjectorInstaller) setAgentConfig() (err error) { return err } - err = restartTraceAgent() + err = restartTraceAgent(ctx) return } @@ -279,14 +288,14 @@ func (a *apmInjectorInstaller) setAgentConfigContent(content []byte) []byte { } // deleteAgentConfig removes the agent configuration for the APM injector -func (a *apmInjectorInstaller) deleteAgentConfig() (err error) { - err = backupAgentConfig() +func (a *apmInjectorInstaller) deleteAgentConfig(ctx context.Context) (err error) { + err = backupAgentConfig(ctx) if err != nil { return err } defer func() { if err != nil { - restoreErr := restoreAgentConfig() + restoreErr := restoreAgentConfig(ctx) if restoreErr != nil { log.Warnf("Failed to restore agent config: %v", restoreErr) } @@ -309,7 +318,7 @@ func (a *apmInjectorInstaller) deleteAgentConfig() (err error) { return err } - return restartTraceAgent() + return restartTraceAgent(ctx) } // deleteAgentConfigContent deletes the agent configuration for the APM injector @@ -325,31 +334,28 @@ func (a *apmInjectorInstaller) deleteAgentConfigContent(content []byte) []byte { } // backupAgentConfig backs up the agent configuration -func backupAgentConfig() error { - return executeCommandStruct(privilegeCommand{ +func backupAgentConfig(ctx context.Context) error { + return executeCommandStruct(ctx, privilegeCommand{ Command: string(backupCommand), Path: datadogConfigPath, }) } // restoreAgentConfig restores the agent configuration & restarts the agent -func restoreAgentConfig() error { - err := executeCommandStruct(privilegeCommand{ +func restoreAgentConfig(ctx context.Context) error { + err := executeCommandStruct(ctx, privilegeCommand{ Command: string(restoreCommand), Path: datadogConfigPath, }) if err != nil { return err } - return restartTraceAgent() + return 
restartTraceAgent(ctx) } -// restartTraceAgent restarts the trace agent, both stable and experimental -func restartTraceAgent() error { - if err := restartUnit("datadog-agent-trace.service"); err != nil { - return err - } - if err := restartUnit("datadog-agent-trace-exp.service"); err != nil { +// restartTraceAgent restarts the stable trace agent +func restartTraceAgent(ctx context.Context) error { + if err := restartUnit(ctx, "datadog-agent-trace.service"); err != nil { return err } return nil diff --git a/pkg/installer/service/apm_inject_windows.go b/pkg/installer/service/apm_inject_windows.go index 8bbb49c5c7095..c288cc2679305 100644 --- a/pkg/installer/service/apm_inject_windows.go +++ b/pkg/installer/service/apm_inject_windows.go @@ -8,12 +8,14 @@ // Package service provides a way to interact with os services package service +import "context" + // SetupAPMInjector noop -func SetupAPMInjector() error { +func SetupAPMInjector(_ context.Context) error { return nil } // RemoveAPMInjector noop -func RemoveAPMInjector() error { +func RemoveAPMInjector(_ context.Context) error { return nil } diff --git a/pkg/installer/service/cmd_executor.go b/pkg/installer/service/cmd_executor.go index 4c93406dbbd3e..999628ebd9aed 100644 --- a/pkg/installer/service/cmd_executor.go +++ b/pkg/installer/service/cmd_executor.go @@ -20,6 +20,7 @@ import ( "time" "github.com/DataDog/datadog-agent/pkg/config/setup" + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" ) var updaterHelper = filepath.Join(setup.InstallPath, "bin", "installer", "helper") @@ -27,34 +28,39 @@ var updaterHelper = filepath.Join(setup.InstallPath, "bin", "installer", "helper const execTimeout = 30 * time.Second // ChownDDAgent changes the owner of the given path to the dd-agent user. -func ChownDDAgent(path string) error { - return executeCommand(`{"command":"chown dd-agent","path":"` + path + `"}`) +func ChownDDAgent(ctx context.Context, path string) error { + return executeCommand(ctx, `{"command":"chown dd-agent","path":"`+path+`"}`) } // RemoveAll removes all files under a given path under /opt/datadog-packages regardless of their owner. 
-func RemoveAll(path string) error { - return executeCommand(`{"command":"rm","path":"` + path + `"}`) +func RemoveAll(ctx context.Context, path string) error { + return executeCommand(ctx, `{"command":"rm","path":"`+path+`"}`) } -func createAgentSymlink() error { - return executeCommand(`{"command":"agent-symlink"}`) +func createAgentSymlink(ctx context.Context) error { + return executeCommand(ctx, `{"command":"agent-symlink"}`) } -func rmAgentSymlink() error { - return executeCommand(`{"command":"rm-agent-symlink"}`) +func rmAgentSymlink(ctx context.Context) error { + return executeCommand(ctx, `{"command":"rm-agent-symlink"}`) } // SetCapHelper sets cap setuid on the newly installed helper -func SetCapHelper(path string) error { - return executeCommand(`{"command":"setcap cap_setuid+ep", "path":"` + path + `"}`) +func SetCapHelper(ctx context.Context, path string) error { + return executeCommand(ctx, `{"command":"setcap cap_setuid+ep", "path":"`+path+`"}`) } -func executeCommand(command string) error { +func executeCommand(ctx context.Context, command string) error { + var err error + var stderr io.ReadCloser + span, _ := tracer.StartSpanFromContext(ctx, "execute_command") + span.SetTag("command", command) + defer span.Finish(tracer.WithError(err)) cancelctx, cancelfunc := context.WithTimeout(context.Background(), execTimeout) defer cancelfunc() cmd := exec.CommandContext(cancelctx, updaterHelper, command) cmd.Stdout = os.Stdout - stderr, err := cmd.StderrPipe() + stderr, err = cmd.StderrPipe() if err != nil { return err } diff --git a/pkg/installer/service/cmd_executor_windows.go b/pkg/installer/service/cmd_executor_windows.go index 37b0b709ea038..2fe8f312c0db8 100644 --- a/pkg/installer/service/cmd_executor_windows.go +++ b/pkg/installer/service/cmd_executor_windows.go @@ -6,15 +6,18 @@ // Package service provides a way to interact with os services package service -import "os" +import ( + "context" + "os" +) // ChownDDAgent changes the owner of the given path to the dd-agent user. -func ChownDDAgent(_ string) error { +func ChownDDAgent(_ context.Context, _ string) error { return nil } // RemoveAll removes the versioned files at a given path. 
-func RemoveAll(path string) error { +func RemoveAll(_ context.Context, path string) error { return os.RemoveAll(path) } @@ -24,6 +27,6 @@ func BuildHelperForTests(_, _ string, _ bool) error { } // SetCapHelper sets cap setuid on the newly installed helper -func SetCapHelper(_ string) error { +func SetCapHelper(_ context.Context, _ string) error { return nil } diff --git a/pkg/installer/service/datadog_agent.go b/pkg/installer/service/datadog_agent.go index 8767e7a20d864..a3cccb5facd22 100644 --- a/pkg/installer/service/datadog_agent.go +++ b/pkg/installer/service/datadog_agent.go @@ -9,11 +9,13 @@ package service import ( + "context" "os/exec" "strings" "github.com/DataDog/datadog-agent/pkg/util/installinfo" "github.com/DataDog/datadog-agent/pkg/util/log" + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" ) const ( @@ -47,35 +49,37 @@ var ( ) // SetupAgentUnits installs and starts the agent units -func SetupAgentUnits() (err error) { +func SetupAgentUnits(ctx context.Context) (err error) { + span, ctx := tracer.StartSpanFromContext(ctx, "setup_agent") defer func() { if err != nil { log.Errorf("Failed to setup agent units: %s, reverting", err) - RemoveAgentUnits() + RemoveAgentUnits(ctx) } + span.Finish(tracer.WithError(err)) }() - if err = setInstallerAgentGroup(); err != nil { + if err = setInstallerAgentGroup(ctx); err != nil { return } for _, unit := range stableUnits { - if err = loadUnit(unit); err != nil { + if err = loadUnit(ctx, unit); err != nil { return } } for _, unit := range experimentalUnits { - if err = loadUnit(unit); err != nil { + if err = loadUnit(ctx, unit); err != nil { return } } - if err = systemdReload(); err != nil { + if err = systemdReload(ctx); err != nil { return } for _, unit := range stableUnits { - if err = enableUnit(unit); err != nil { + if err = enableUnit(ctx, unit); err != nil { return } } @@ -84,64 +88,66 @@ func SetupAgentUnits() (err error) { return } for _, unit := range stableUnits { - if err = startUnit(unit); err != nil { + if err = startUnit(ctx, unit); err != nil { return } } - err = createAgentSymlink() + err = createAgentSymlink(ctx) return } // RemoveAgentUnits stops and removes the agent units -func RemoveAgentUnits() { +func RemoveAgentUnits(ctx context.Context) { + span, ctx := tracer.StartSpanFromContext(ctx, "remove_agent_units") + defer span.Finish() // stop experiments, they can restart stable agent for _, unit := range experimentalUnits { - if err := stopUnit(unit); err != nil { + if err := stopUnit(ctx, unit); err != nil { log.Warnf("Failed to stop %s: %s", unit, err) } } // stop stable agents for _, unit := range stableUnits { - if err := stopUnit(unit); err != nil { + if err := stopUnit(ctx, unit); err != nil { log.Warnf("Failed to stop %s: %s", unit, err) } } // purge experimental units for _, unit := range experimentalUnits { - if err := disableUnit(unit); err != nil { + if err := disableUnit(ctx, unit); err != nil { log.Warnf("Failed to disable %s: %s", unit, err) } - if err := removeUnit(unit); err != nil { + if err := removeUnit(ctx, unit); err != nil { log.Warnf("Failed to remove %s: %s", unit, err) } } // purge stable units for _, unit := range stableUnits { - if err := disableUnit(unit); err != nil { + if err := disableUnit(ctx, unit); err != nil { log.Warnf("Failed to disable %s: %s", unit, err) } - if err := removeUnit(unit); err != nil { + if err := removeUnit(ctx, unit); err != nil { log.Warnf("Failed to remove %s: %s", unit, err) } } - if err := rmAgentSymlink(); err != nil { + if err := rmAgentSymlink(ctx); err != 
nil { log.Warnf("Failed to remove agent symlink: %s", err) } installinfo.RmInstallInfo() } // StartAgentExperiment starts the agent experiment -func StartAgentExperiment() error { - return startUnit(agentExp) +func StartAgentExperiment(ctx context.Context) error { + return startUnit(ctx, agentExp) } // StopAgentExperiment stops the agent experiment -func StopAgentExperiment() error { - return startUnit(agentUnit) +func StopAgentExperiment(ctx context.Context) error { + return startUnit(ctx, agentUnit) } // setInstallerAgentGroup adds the dd-installer to the dd-agent group if it's not already in it -func setInstallerAgentGroup() error { +func setInstallerAgentGroup(ctx context.Context) error { // Get groups of dd-installer out, err := exec.Command("id", "-Gn", "dd-installer").Output() if err != nil { @@ -150,5 +156,5 @@ func setInstallerAgentGroup() error { if strings.Contains(string(out), "dd-agent") { return nil } - return executeCommand(string(addInstallerToAgentGroup)) + return executeCommand(ctx, string(addInstallerToAgentGroup)) } diff --git a/pkg/installer/service/datadog_agent_windows.go b/pkg/installer/service/datadog_agent_windows.go index 933096cad0edb..106a7cc83c61c 100644 --- a/pkg/installer/service/datadog_agent_windows.go +++ b/pkg/installer/service/datadog_agent_windows.go @@ -8,20 +8,22 @@ // Package service provides a way to interact with os services package service +import "context" + // SetupAgentUnits noop -func SetupAgentUnits() error { +func SetupAgentUnits(_ context.Context) error { return nil } // StartAgentExperiment noop -func StartAgentExperiment() error { +func StartAgentExperiment(_ context.Context) error { return nil } // StopAgentExperiment noop -func StopAgentExperiment() error { +func StopAgentExperiment(_ context.Context) error { return nil } // RemoveAgentUnits noop -func RemoveAgentUnits() {} +func RemoveAgentUnits(_ context.Context) {} diff --git a/pkg/installer/service/datadog_installer.go b/pkg/installer/service/datadog_installer.go index a1c01a85a6024..0d744fac7df9b 100644 --- a/pkg/installer/service/datadog_installer.go +++ b/pkg/installer/service/datadog_installer.go @@ -7,7 +7,11 @@ package service -import "github.com/DataDog/datadog-agent/pkg/util/log" +import ( + "context" + + "github.com/DataDog/datadog-agent/pkg/util/log" +) const ( installerUnit = "datadog-installer.service" @@ -17,7 +21,7 @@ const ( var installerUnits = []string{installerUnit, installerUnitExp} // SetupInstallerUnit installs and starts the installer systemd units -func SetupInstallerUnit() (err error) { +func SetupInstallerUnit(ctx context.Context) (err error) { defer func() { if err != nil { log.Errorf("Failed to setup installer units: %s, reverting", err) @@ -25,44 +29,44 @@ func SetupInstallerUnit() (err error) { }() for _, unit := range installerUnits { - if err = loadUnit(unit); err != nil { + if err = loadUnit(ctx, unit); err != nil { return err } } - if err = systemdReload(); err != nil { + if err = systemdReload(ctx); err != nil { return err } - if err = enableUnit(installerUnit); err != nil { + if err = enableUnit(ctx, installerUnit); err != nil { return err } - if err = startUnit(installerUnit); err != nil { + if err = startUnit(ctx, installerUnit); err != nil { return err } return nil } // RemoveInstallerUnit removes the installer systemd units -func RemoveInstallerUnit() { +func RemoveInstallerUnit(ctx context.Context) { var err error for _, unit := range installerUnits { - if err = disableUnit(unit); err != nil { + if err = disableUnit(ctx, unit); err != nil { 
log.Warnf("Failed to disable %s: %s", unit, err) } - if err = removeUnit(unit); err != nil { + if err = removeUnit(ctx, unit); err != nil { log.Warnf("Failed to stop %s: %s", unit, err) } } } // StartInstallerExperiment installs the experimental systemd units for the installer -func StartInstallerExperiment() error { - return startUnit(installerUnitExp) +func StartInstallerExperiment(ctx context.Context) error { + return startUnit(ctx, installerUnitExp) } // StopInstallerExperiment installs the stable systemd units for the installer -func StopInstallerExperiment() error { - return startUnit(installerUnit) +func StopInstallerExperiment(ctx context.Context) error { + return startUnit(ctx, installerUnit) } diff --git a/pkg/installer/service/datadog_installer_windows.go b/pkg/installer/service/datadog_installer_windows.go index e93ef99b0e1a2..793a2b5dec97e 100644 --- a/pkg/installer/service/datadog_installer_windows.go +++ b/pkg/installer/service/datadog_installer_windows.go @@ -7,21 +7,23 @@ package service +import "context" + // SetupInstallerUnit noop -func SetupInstallerUnit() (err error) { +func SetupInstallerUnit(_ context.Context) (err error) { return nil } // RemoveInstallerUnit noop -func RemoveInstallerUnit() { +func RemoveInstallerUnit(_ context.Context) { } // StartInstallerExperiment noop -func StartInstallerExperiment() error { +func StartInstallerExperiment(_ context.Context) error { return nil } // StopInstallerExperiment noop -func StopInstallerExperiment() error { +func StopInstallerExperiment(_ context.Context) error { return nil } diff --git a/pkg/installer/service/docker.go b/pkg/installer/service/docker.go index b7ca21bd035e3..3024cf3bed063 100644 --- a/pkg/installer/service/docker.go +++ b/pkg/installer/service/docker.go @@ -10,6 +10,7 @@ package service import ( "bytes" + "context" "encoding/json" "os" "os/exec" @@ -28,9 +29,9 @@ const ( // setDockerConfig sets up the docker daemon to use the APM injector // even if docker isn't installed, to prepare for if it is installed // later -func (a *apmInjectorInstaller) setDockerConfig() error { +func (a *apmInjectorInstaller) setDockerConfig(ctx context.Context) error { // Create docker dir if it doesn't exist - err := executeCommand(createDockerDirCommand) + err := executeCommand(ctx, createDockerDirCommand) if err != nil { return err } @@ -63,12 +64,12 @@ func (a *apmInjectorInstaller) setDockerConfig() error { } // Move the temporary file to the final location - err = executeCommand(string(replaceDockerCommand)) + err = executeCommand(ctx, string(replaceDockerCommand)) if err != nil { return err } - return restartDocker() + return restartDocker(ctx) } // setDockerConfigContent sets the content of the docker daemon configuration @@ -104,7 +105,7 @@ func (a *apmInjectorInstaller) setDockerConfigContent(previousContent []byte) ([ } // deleteDockerConfig restores the docker daemon configuration -func (a *apmInjectorInstaller) deleteDockerConfig() error { +func (a *apmInjectorInstaller) deleteDockerConfig(ctx context.Context) error { var file []byte stat, err := os.Stat(dockerDaemonPath) if err == nil { @@ -134,11 +135,11 @@ func (a *apmInjectorInstaller) deleteDockerConfig() error { } // Move the temporary file to the final location - err = executeCommand(string(replaceDockerCommand)) + err = executeCommand(ctx, string(replaceDockerCommand)) if err != nil { return err } - return restartDocker() + return restartDocker(ctx) } // deleteDockerConfigContent restores the content of the docker daemon configuration @@ -174,12 +175,12 
@@ func (a *apmInjectorInstaller) deleteDockerConfigContent(previousContent []byte) } // restartDocker reloads the docker daemon if it exists -func restartDocker() error { +func restartDocker(ctx context.Context) error { if !isDockerInstalled() { log.Info("installer: docker is not installed, skipping reload") return nil } - return executeCommand(restartDockerCommand) + return executeCommand(ctx, restartDockerCommand) } // isDockerInstalled checks if docker is installed on the system diff --git a/pkg/installer/service/systemd.go b/pkg/installer/service/systemd.go index 26685c18a7578..bdb3bf0a55d2d 100644 --- a/pkg/installer/service/systemd.go +++ b/pkg/installer/service/systemd.go @@ -9,6 +9,7 @@ package service import ( + "context" "encoding/json" "os" "path" @@ -47,48 +48,48 @@ type privilegeCommand struct { } // restartUnit restarts a systemd unit -func restartUnit(unit string) error { +func restartUnit(ctx context.Context, unit string) error { // check that the unit exists first if _, err := os.Stat(path.Join(systemdPath, unit)); os.IsNotExist(err) { log.Infof("Unit %s does not exist, skipping restart", unit) return nil } - if err := stopUnit(unit); err != nil { + if err := stopUnit(ctx, unit); err != nil { return err } - if err := startUnit(unit); err != nil { + if err := startUnit(ctx, unit); err != nil { return err } return nil } -func stopUnit(unit string) error { - return executeCommand(wrapUnitCommand(stopCommand, unit)) +func stopUnit(ctx context.Context, unit string) error { + return executeCommand(ctx, wrapUnitCommand(stopCommand, unit)) } -func startUnit(unit string) error { - return executeCommand(wrapUnitCommand(startCommand, unit)) +func startUnit(ctx context.Context, unit string) error { + return executeCommand(ctx, wrapUnitCommand(startCommand, unit)) } -func enableUnit(unit string) error { - return executeCommand(wrapUnitCommand(enableCommand, unit)) +func enableUnit(ctx context.Context, unit string) error { + return executeCommand(ctx, wrapUnitCommand(enableCommand, unit)) } -func disableUnit(unit string) error { - return executeCommand(wrapUnitCommand(disableCommand, unit)) +func disableUnit(ctx context.Context, unit string) error { + return executeCommand(ctx, wrapUnitCommand(disableCommand, unit)) } -func loadUnit(unit string) error { - return executeCommand(wrapUnitCommand(loadCommand, unit)) +func loadUnit(ctx context.Context, unit string) error { + return executeCommand(ctx, wrapUnitCommand(loadCommand, unit)) } -func removeUnit(unit string) error { - return executeCommand(wrapUnitCommand(removeCommand, unit)) +func removeUnit(ctx context.Context, unit string) error { + return executeCommand(ctx, wrapUnitCommand(removeCommand, unit)) } -func systemdReload() error { - return executeCommand(systemdReloadCommand) +func systemdReload(ctx context.Context) error { + return executeCommand(ctx, systemdReloadCommand) } func wrapUnitCommand(command unitCommand, unit string) string { @@ -101,11 +102,11 @@ func wrapUnitCommand(command unitCommand, unit string) string { return string(rawJSON) } -func executeCommandStruct(command privilegeCommand) error { +func executeCommandStruct(ctx context.Context, command privilegeCommand) error { rawJSON, err := json.Marshal(command) if err != nil { return err } privilegeCommandJSON := string(rawJSON) - return executeCommand(privilegeCommandJSON) + return executeCommand(ctx, privilegeCommandJSON) } diff --git a/pkg/installer/service/systemd_test.go b/pkg/installer/service/systemd_test.go index cdb071be1fcfa..9a5200031ca86 100644 --- 
a/pkg/installer/service/systemd_test.go +++ b/pkg/installer/service/systemd_test.go @@ -8,6 +8,7 @@ package service import ( + "context" _ "embed" "os" "runtime" @@ -17,6 +18,8 @@ import ( "github.com/stretchr/testify/require" ) +var testCtx = context.TODO() + func testSetup(t *testing.T) { assert.Nil(t, BuildHelperForTests(os.TempDir(), os.TempDir(), false)) } @@ -34,7 +37,7 @@ func TestInvalidCommands(t *testing.T) { `{"command":"chown dd-agent", "path":"/"}`: "error: invalid path\n", `{"command":"chown dd-agent", "path":"/opt/datadog-packages/../.."}`: "error: invalid path\n", } { - assert.Equal(t, expected, executeCommand(input).Error()) + assert.Equal(t, expected, executeCommand(testCtx, input).Error()) } } @@ -48,21 +51,21 @@ func TestAssertWorkingCommands(t *testing.T) { successErr := "error: failed to lookup dd-installer user: user: unknown user dd-installer\n" successSystemd := "error: systemd unit path error: stat /lib/systemd/system: no such file or directory\n" - require.Equal(t, successErr, startUnit("datadog-agent").Error()) - assert.Equal(t, successErr, stopUnit("datadog-agent").Error()) - assert.Equal(t, successErr, enableUnit("datadog-agent").Error()) - assert.Equal(t, successErr, disableUnit("datadog-agent").Error()) - assert.Equal(t, successSystemd, loadUnit("datadog-agent").Error()) - assert.Equal(t, successSystemd, removeUnit("datadog-agent").Error()) - assert.Equal(t, successErr, createAgentSymlink().Error()) - assert.Equal(t, successErr, rmAgentSymlink().Error()) - assert.Equal(t, successErr, backupAgentConfig().Error()) - assert.Equal(t, successErr, restoreAgentConfig().Error()) + require.Equal(t, successErr, startUnit(testCtx, "datadog-agent").Error()) + assert.Equal(t, successErr, stopUnit(testCtx, "datadog-agent").Error()) + assert.Equal(t, successErr, enableUnit(testCtx, "datadog-agent").Error()) + assert.Equal(t, successErr, disableUnit(testCtx, "datadog-agent").Error()) + assert.Equal(t, successSystemd, loadUnit(testCtx, "datadog-agent").Error()) + assert.Equal(t, successSystemd, removeUnit(testCtx, "datadog-agent").Error()) + assert.Equal(t, successErr, createAgentSymlink(testCtx).Error()) + assert.Equal(t, successErr, rmAgentSymlink(testCtx).Error()) + assert.Equal(t, successErr, backupAgentConfig(testCtx).Error()) + assert.Equal(t, successErr, restoreAgentConfig(testCtx).Error()) a := &apmInjectorInstaller{ installPath: "/tmp/stable", } - assert.Equal(t, successErr, a.setLDPreloadConfig().Error()) - assert.Equal(t, successErr, a.setAgentConfig().Error()) - assert.Equal(t, successErr, a.setDockerConfig().Error()) + assert.Equal(t, successErr, a.setLDPreloadConfig(testCtx).Error()) + assert.Equal(t, successErr, a.setAgentConfig(testCtx).Error()) + assert.Equal(t, successErr, a.setDockerConfig(testCtx).Error()) } From 71e0920ea673deb56d31f88fc07e4902f8e43022 Mon Sep 17 00:00:00 2001 From: Derek Brown Date: Tue, 16 Apr 2024 00:15:22 -0700 Subject: [PATCH 60/99] [windows][cws] update windows etw logging helpers. (#24711) Many ETW notifications share the same underlying structure; we use a typecast to handle this. However, this would result in the log outputting the type as the first one implemented, rather than the actual type of the event. This change updates the String() method to accurately reflect which actual type is being printed. 
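For reference, the shape of that refactor can be sketched as a small standalone Go example (hypothetical type and field names, not the probe's actual structs): several event types share one underlying layout, so a shared lowercase string(t) helper takes the event label as a parameter, and each aliased type's String() supplies its own label, so the log prints the real event type instead of whichever type implemented the formatter first.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// createHandleArgs stands in for the shared event payload.
type createHandleArgs struct {
	processID uint32
	fileName  string
}

// createNewFileArgs has the same layout, so it can be converted to
// createHandleArgs for formatting.
type createNewFileArgs createHandleArgs

// string builds the log line using the caller-supplied event label.
func (ca *createHandleArgs) string(t string) string {
	var out strings.Builder
	out.WriteString(t + " PID: " + strconv.Itoa(int(ca.processID)) + "\n")
	out.WriteString("  Name: " + ca.fileName + "\n")
	return out.String()
}

// Each concrete event type passes its own label.
func (ca *createHandleArgs) String() string { return ca.string("CREATE") }

func (ca *createNewFileArgs) String() string {
	return (*createHandleArgs)(ca).string("CREATE_NEW_FILE")
}

func main() {
	e := &createNewFileArgs{processID: 1234, fileName: `C:\temp\a.txt`}
	fmt.Print(e) // prints "CREATE_NEW_FILE PID: 1234 ..." rather than "CREATE ..."
}
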
--- .../probe/probe_kernel_file_windows.go | 38 ++++++++++++++----- 1 file changed, 29 insertions(+), 9 deletions(-) diff --git a/pkg/security/probe/probe_kernel_file_windows.go b/pkg/security/probe/probe_kernel_file_windows.go index 0b06d315a1095..13ce87b0bb3d8 100644 --- a/pkg/security/probe/probe_kernel_file_windows.go +++ b/pkg/security/probe/probe_kernel_file_windows.go @@ -186,19 +186,25 @@ func (wp *WindowsProbe) parseCreateNewFileArgs(e *etw.DDEventRecord) (*createNew } // nolint: unused -func (ca *createHandleArgs) String() string { +func (ca *createHandleArgs) string(t string) string { var output strings.Builder - output.WriteString(" Create PID: " + strconv.Itoa(int(ca.ProcessID)) + "\n") + output.WriteString(t + " PID: " + strconv.Itoa(int(ca.ProcessID)) + "\n") output.WriteString(" Name: " + ca.fileName + "\n") output.WriteString(" Opts: " + strconv.FormatUint(uint64(ca.createOptions), 16) + " Share: " + strconv.FormatUint(uint64(ca.shareAccess), 16) + "\n") output.WriteString(" OBJ: " + strconv.FormatUint(uint64(ca.fileObject), 16) + "\n") + return output.String() } +// nolint: unused +func (ca *createHandleArgs) String() string { + return ca.string("CREATE") +} + // nolint: unused func (ca *createNewFileArgs) String() string { - return (*createHandleArgs)(ca).String() + return (*createHandleArgs)(ca).string("CREATE_NEW_FILE") } /* @@ -267,16 +273,24 @@ func (wp *WindowsProbe) parseInformationArgs(e *etw.DDEventRecord) (*setInformat } // nolint: unused -func (sia *setInformationArgs) String() string { +func (sia *setInformationArgs) string(t string) string { var output strings.Builder - output.WriteString(" SIA TID: " + strconv.Itoa(int(sia.threadID)) + "\n") + output.WriteString(t + " TID: " + strconv.Itoa(int(sia.threadID)) + "\n") output.WriteString(" Name: " + sia.fileName + "\n") output.WriteString(" InfoClass: " + strconv.FormatUint(uint64(sia.infoClass), 16) + "\n") + output.WriteString(" OBJ: " + strconv.FormatUint(uint64(sia.fileObject), 16) + "\n") + output.WriteString(" KEY: " + strconv.FormatUint(uint64(sia.fileKey), 16) + "\n") + return output.String() } +// nolint: unused +func (sia *setInformationArgs) String() string { + return sia.string("SET_INFORMATION") +} + /*