diff --git a/.gitlab/e2e/e2e.yml b/.gitlab/e2e/e2e.yml index 8a6869cae5baa..6a6f1914cac0e 100644 --- a/.gitlab/e2e/e2e.yml +++ b/.gitlab/e2e/e2e.yml @@ -277,6 +277,7 @@ new-e2e-aml: - deploy_deb_testing-a7_x64 - deploy_windows_testing-a7 - qa_agent + - qa_agent_jmx - qa_dca rules: - !reference [.on_aml_or_e2e_changes] diff --git a/.run/README.md b/.run/README.md new file mode 100644 index 0000000000000..4293536650335 --- /dev/null +++ b/.run/README.md @@ -0,0 +1,3 @@ +# IntelliJ Goland out of the box configuration + +This folder contains scripts and tasks for IntelliJ Goland to build and run the agent and its sub-processes \ No newline at end of file diff --git a/comp/core/tagger/impl-dual/dual.go b/comp/core/tagger/impl-dual/dual.go index 261eeb6a2d942..4f99d713ab4f8 100644 --- a/comp/core/tagger/impl-dual/dual.go +++ b/comp/core/tagger/impl-dual/dual.go @@ -54,7 +54,8 @@ func NewComponent(req Requires) (Provides, error) { return Provides{ local.Provides{ - Comp: provide.Comp, + Comp: provide.Comp, + Endpoint: provide.Endpoint, }, }, nil } diff --git a/comp/core/tagger/impl-remote/remote.go b/comp/core/tagger/impl-remote/remote.go index 111b14fc52895..8bd21920748e9 100644 --- a/comp/core/tagger/impl-remote/remote.go +++ b/comp/core/tagger/impl-remote/remote.go @@ -9,8 +9,10 @@ package remotetaggerimpl import ( "context" "crypto/tls" + "encoding/json" "fmt" "net" + "net/http" "time" "github.com/cenkalti/backoff" @@ -21,6 +23,7 @@ import ( "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" + api "github.com/DataDog/datadog-agent/comp/api/api/def" "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" taggercommon "github.com/DataDog/datadog-agent/comp/core/tagger/common" @@ -35,6 +38,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/tagset" "github.com/DataDog/datadog-agent/pkg/util/common" grpcutil "github.com/DataDog/datadog-agent/pkg/util/grpc" + httputils 
"github.com/DataDog/datadog-agent/pkg/util/http" ) const ( @@ -59,7 +63,8 @@ type Requires struct { type Provides struct { compdef.Out - Comp tagger.Component + Comp tagger.Component + Endpoint api.AgentEndpointProvider } type remoteTagger struct { @@ -112,11 +117,12 @@ func NewComponent(req Requires) (Provides, error) { }}) return Provides{ - Comp: remoteTagger, + Comp: remoteTagger, + Endpoint: api.NewAgentEndpointProvider(remoteTagger.writeList, "/tagger-list", "GET"), }, nil } -func newRemoteTagger(params tagger.RemoteParams, cfg config.Component, log log.Component, telemetryComp coretelemetry.Component) (tagger.Component, error) { +func newRemoteTagger(params tagger.RemoteParams, cfg config.Component, log log.Component, telemetryComp coretelemetry.Component) (*remoteTagger, error) { telemetryStore := telemetry.NewStore(telemetryComp) target, err := params.RemoteTarget(cfg) @@ -494,6 +500,17 @@ func (t *remoteTagger) startTaggerStream(maxElapsed time.Duration) error { }, expBackoff) } +func (t *remoteTagger) writeList(w http.ResponseWriter, _ *http.Request) { + response := t.List() + + jsonTags, err := json.Marshal(response) + if err != nil { + httputils.SetJSONError(w, t.log.Errorf("Unable to marshal tagger list response: %s", err), 500) + return + } + w.Write(jsonTags) +} + func convertEventType(t pb.EventType) (types.EventType, error) { switch t { case pb.EventType_ADDED: diff --git a/comp/core/tagger/impl-remote/remote_test.go b/comp/core/tagger/impl-remote/remote_test.go index 8d29a965b8a23..48c00d8006bf7 100644 --- a/comp/core/tagger/impl-remote/remote_test.go +++ b/comp/core/tagger/impl-remote/remote_test.go @@ -7,10 +7,14 @@ package remotetaggerimpl import ( "context" + "encoding/json" + "net/http" + "net/http/httptest" "os" "runtime" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/DataDog/datadog-agent/comp/core/config" @@ -18,6 +22,7 @@ import ( tagger 
"github.com/DataDog/datadog-agent/comp/core/tagger/def" "github.com/DataDog/datadog-agent/comp/core/tagger/types" nooptelemetry "github.com/DataDog/datadog-agent/comp/core/telemetry/noopsimpl" + compdef "github.com/DataDog/datadog-agent/comp/def" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/util/grpc" ) @@ -72,3 +77,42 @@ func TestStartDoNotBlockIfServerIsNotAvailable(t *testing.T) { require.NoError(t, err) remoteTagger.Stop() } + +func TestNewComponentSetsTaggerListEndpoint(t *testing.T) { + req := Requires{ + Lc: compdef.NewTestLifecycle(t), + Config: configmock.New(t), + Log: logmock.New(t), + Params: tagger.RemoteParams{ + RemoteTarget: func(config.Component) (string, error) { return ":5001", nil }, + RemoteTokenFetcher: func(config.Component) func() (string, error) { + return func() (string, error) { + return "something", nil + } + }, + }, + Telemetry: nooptelemetry.GetCompatComponent(), + } + provides, err := NewComponent(req) + require.NoError(t, err) + + endpointProvider := provides.Endpoint.Provider + + assert.Equal(t, []string{"GET"}, endpointProvider.Methods()) + assert.Equal(t, "/tagger-list", endpointProvider.Route()) + + // Create a test server with the endpoint handler + server := httptest.NewServer(endpointProvider.HandlerFunc()) + defer server.Close() + + // Make a request to the endpoint + resp, err := http.Get(server.URL + "/tagger-list") + require.NoError(t, err) + defer resp.Body.Close() + assert.Equal(t, http.StatusOK, resp.StatusCode) + + var response types.TaggerListResponse + err = json.NewDecoder(resp.Body).Decode(&response) + require.NoError(t, err) + assert.NotNil(t, response.Entities) +} diff --git a/comp/otelcol/converter/impl/autoconfigure.go b/comp/otelcol/converter/impl/autoconfigure.go index af0eb118c008c..b2c5a287c51bc 100644 --- a/comp/otelcol/converter/impl/autoconfigure.go +++ b/comp/otelcol/converter/impl/autoconfigure.go @@ -120,6 +120,9 @@ func 
addComponentToPipeline(conf *confmap.Conf, comp component, pipelineName str // For example, if api key is not found in otel config, it can be retrieved from core // agent config instead. func addCoreAgentConfig(conf *confmap.Conf, coreCfg config.Component) { + if coreCfg == nil { + return + } stringMapConf := conf.ToStringMap() exporters, ok := stringMapConf["exporters"] if !ok { @@ -129,43 +132,61 @@ func addCoreAgentConfig(conf *confmap.Conf, coreCfg config.Component) { if !ok { return } - datadog, ok := exporterMap["datadog"] - if !ok { + reg, err := regexp.Compile(secretRegex) + if err != nil { return } - datadogMap, ok := datadog.(map[string]any) - if !ok { - return - } - api, ok := datadogMap["api"] - if !ok { - return - } - apiMap, ok := api.(map[string]any) - if !ok { - return - } - - apiKey, ok := apiMap["key"] - if ok { - key, ok := apiKey.(string) - if ok && key != "" { - match, _ := regexp.MatchString(secretRegex, apiKey.(string)) - if !match { + for exporter := range exporterMap { + if componentName(exporter) == "datadog" { + datadog, ok := exporterMap[exporter] + if !ok { return } + datadogMap, ok := datadog.(map[string]any) + if !ok { + // datadog section is there, but there is nothing in it. We + // need to add it so we can add to it. + exporterMap[exporter] = make(map[string]any) + datadogMap = exporterMap[exporter].(map[string]any) + } + api, ok := datadogMap["api"] + // ok can be true if api section is there but contains nothing (api == nil). + // In which case, we need to add it so we can add to it. 
+ if !ok || api == nil { + datadogMap["api"] = make(map[string]any, 2) + api = datadogMap["api"] + } + apiMap, ok := api.(map[string]any) + if !ok { + return + } + + // api::site + apiSite := apiMap["site"] + if (apiSite == nil || apiSite == "") && coreCfg.Get("site") != nil { + apiMap["site"] = coreCfg.Get("site") + } + + // api::key + var match bool + apiKey, ok := apiMap["key"] + if ok { + var key string + if keyString, okString := apiKey.(string); okString { + key = keyString + } + if ok && key != "" { + match = reg.Match([]byte(key)) + if !match { + continue + } + } + } + // TODO: add logic to either fail or log message if api key not found + if (apiKey == nil || apiKey == "" || match) && coreCfg.Get("api_key") != nil { + apiMap["key"] = coreCfg.Get("api_key") + } } } - // this is the only reference to Requires.Conf - // TODO: add logic to either fail or log message if api key not found - if coreCfg != nil { - apiMap["key"] = coreCfg.Get("api_key") - - apiSite, ok := apiMap["site"] - if ok && apiSite == "" { - apiMap["site"] = coreCfg.Get("site") - } - } - *conf = *confmap.NewFromStringMap(stringMapConf) } diff --git a/comp/otelcol/converter/impl/converter_test.go b/comp/otelcol/converter/impl/converter_test.go index b742e066059fe..8ef0926120ae6 100644 --- a/comp/otelcol/converter/impl/converter_test.go +++ b/comp/otelcol/converter/impl/converter_test.go @@ -7,10 +7,13 @@ package converterimpl import ( "context" + "os" "path/filepath" "testing" + "github.com/DataDog/datadog-agent/comp/core/config" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/confmap" "go.opentelemetry.io/collector/confmap/provider/envprovider" "go.opentelemetry.io/collector/confmap/provider/fileprovider" @@ -48,6 +51,7 @@ func TestConvert(t *testing.T) { name string provided string expectedResult string + agentConfig string }{ { name: "connectors/no-dd-connector", @@ -109,6 +113,16 @@ func TestConvert(t *testing.T) { provided: 
"receivers/no-changes/config.yaml", expectedResult: "receivers/no-changes/config.yaml", }, + { + name: "receivers/no-changes-multiple-dd", + provided: "receivers/no-changes-multiple-dd/config.yaml", + expectedResult: "receivers/no-changes-multiple-dd/config.yaml", + }, + { + name: "receivers/no-changes-multiple-dd-same-pipeline", + provided: "receivers/no-changes-multiple-dd-same-pipeline/config.yaml", + expectedResult: "receivers/no-changes-multiple-dd-same-pipeline/config.yaml", + }, { name: "receivers/no-prometheus-receiver", provided: "receivers/no-prometheus-receiver/config.yaml", @@ -144,11 +158,114 @@ func TestConvert(t *testing.T) { provided: "processors/dd-connector-multi-pipelines/config.yaml", expectedResult: "processors/dd-connector-multi-pipelines/config-result.yaml", }, + { + name: "dd-core-cfg/apikey/empty-string", + provided: "dd-core-cfg/apikey/empty-string/config.yaml", + expectedResult: "dd-core-cfg/apikey/empty-string/config-result.yaml", + agentConfig: "dd-core-cfg/apikey/empty-string/acfg.yaml", + }, + { + name: "dd-core-cfg/apikey/unset", + provided: "dd-core-cfg/apikey/unset/config.yaml", + expectedResult: "dd-core-cfg/apikey/unset/config-result.yaml", + agentConfig: "dd-core-cfg/apikey/unset/acfg.yaml", + }, + { + name: "dd-core-cfg/apikey/secret", + provided: "dd-core-cfg/apikey/secret/config.yaml", + expectedResult: "dd-core-cfg/apikey/secret/config-result.yaml", + agentConfig: "dd-core-cfg/apikey/secret/acfg.yaml", + }, + { + name: "dd-core-cfg/apikey/api-set-no-key", + provided: "dd-core-cfg/apikey/api-set-no-key/config.yaml", + expectedResult: "dd-core-cfg/apikey/api-set-no-key/config-result.yaml", + agentConfig: "dd-core-cfg/apikey/api-set-no-key/acfg.yaml", + }, + { + name: "dd-core-cfg/apikey/no-api-key-section", + provided: "dd-core-cfg/apikey/no-api-key-section/config.yaml", + expectedResult: "dd-core-cfg/apikey/no-api-key-section/config-result.yaml", + agentConfig: "dd-core-cfg/apikey/no-api-key-section/acfg.yaml", + }, + { + 
name: "dd-core-cfg/apikey/multiple-dd-exporter", + provided: "dd-core-cfg/apikey/multiple-dd-exporter/config.yaml", + expectedResult: "dd-core-cfg/apikey/multiple-dd-exporter/config-result.yaml", + agentConfig: "dd-core-cfg/apikey/multiple-dd-exporter/acfg.yaml", + }, + { + name: "dd-core-cfg/site/empty-string", + provided: "dd-core-cfg/site/empty-string/config.yaml", + expectedResult: "dd-core-cfg/site/empty-string/config-result.yaml", + agentConfig: "dd-core-cfg/site/empty-string/acfg.yaml", + }, + { + name: "dd-core-cfg/site/multiple-dd-exporter", + provided: "dd-core-cfg/site/multiple-dd-exporter/config.yaml", + expectedResult: "dd-core-cfg/site/multiple-dd-exporter/config-result.yaml", + agentConfig: "dd-core-cfg/site/multiple-dd-exporter/acfg.yaml", + }, + { + name: "dd-core-cfg/site/no-api-site-section", + provided: "dd-core-cfg/site/no-api-site-section/config.yaml", + expectedResult: "dd-core-cfg/site/no-api-site-section/config-result.yaml", + agentConfig: "dd-core-cfg/site/no-api-site-section/acfg.yaml", + }, + { + name: "dd-core-cfg/site/unset", + provided: "dd-core-cfg/site/unset/config.yaml", + expectedResult: "dd-core-cfg/site/unset/config-result.yaml", + agentConfig: "dd-core-cfg/site/unset/acfg.yaml", + }, + { + name: "dd-core-cfg/site/api-set-no-site", + provided: "dd-core-cfg/site/api-set-no-site/config.yaml", + expectedResult: "dd-core-cfg/site/api-set-no-site/config-result.yaml", + agentConfig: "dd-core-cfg/site/api-set-no-site/acfg.yaml", + }, + { + name: "dd-core-cfg/all/no-overrides", + provided: "dd-core-cfg/all/no-overrides/config.yaml", + expectedResult: "dd-core-cfg/all/no-overrides/config.yaml", + agentConfig: "dd-core-cfg/all/no-overrides/acfg.yaml", + }, + { + name: "dd-core-cfg/all/api-section", + provided: "dd-core-cfg/all/api-section/config.yaml", + expectedResult: "dd-core-cfg/all/api-section/config-result.yaml", + agentConfig: "dd-core-cfg/all/api-section/acfg.yaml", + }, + { + name: "dd-core-cfg/all/key-site-section", + provided: 
"dd-core-cfg/all/key-site-section/config.yaml", + expectedResult: "dd-core-cfg/all/key-site-section/config-result.yaml", + agentConfig: "dd-core-cfg/all/key-site-section/acfg.yaml", + }, + { + name: "dd-core-cfg/all/no-api-section", + provided: "dd-core-cfg/all/no-api-section/config.yaml", + expectedResult: "dd-core-cfg/all/no-api-section/config-result.yaml", + agentConfig: "dd-core-cfg/all/no-api-section/acfg.yaml", + }, + { + name: "dd-core-cfg/none", + provided: "dd-core-cfg/none/config.yaml", + expectedResult: "dd-core-cfg/none/config-result.yaml", + agentConfig: "dd-core-cfg/none/acfg.yaml", + }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - converter, err := NewConverterForAgent(Requires{}) + r := Requires{} + if tc.agentConfig != "" { + f, err := os.ReadFile(uriFromFile(tc.agentConfig)[0]) + require.NoError(t, err) + acfg := config.NewMockFromYAML(t, string(f)) + r.Conf = acfg + } + converter, err := NewConverterForAgent(r) assert.NoError(t, err) resolver, err := newResolver(uriFromFile(tc.provided)) @@ -166,9 +283,13 @@ func TestConvert(t *testing.T) { assert.Equal(t, confResult.ToStringMap(), conf.ToStringMap()) }) } + // test using newConverter function to simulate ocb environment nopLogger := zap.NewNop() for _, tc := range tests { + if tc.agentConfig != "" { + continue + } t.Run(tc.name, func(t *testing.T) { converter := newConverter(confmap.ConverterSettings{Logger: nopLogger}) diff --git a/comp/otelcol/converter/impl/prometheus.go b/comp/otelcol/converter/impl/prometheus.go index a26b0d7e5342a..9e3dc9e489ff8 100644 --- a/comp/otelcol/converter/impl/prometheus.go +++ b/comp/otelcol/converter/impl/prometheus.go @@ -116,9 +116,11 @@ func addPrometheusReceiver(conf *confmap.Conf, comp component) { return } if targetString == internalMetricsAddress { - if ddExporter := receiverInPipelineWithDatadogExporter(conf, receiver); ddExporter != "" { + if ddExporters := receiverInPipelineWithDatadogExporter(conf, receiver); ddExporters != nil { 
scrapeConfigMap["job_name"] = "datadog-agent" - delete(datadogExportersMap, ddExporter) + for _, ddExporter := range ddExporters { + delete(datadogExportersMap, ddExporter) + } } } } @@ -186,28 +188,29 @@ func addPrometheusReceiver(conf *confmap.Conf, comp component) { } } -func receiverInPipelineWithDatadogExporter(conf *confmap.Conf, receiverName string) string { +func receiverInPipelineWithDatadogExporter(conf *confmap.Conf, receiverName string) []string { + var ddExporters []string stringMapConf := conf.ToStringMap() service, ok := stringMapConf["service"] if !ok { - return "" + return nil } serviceMap, ok := service.(map[string]any) if !ok { - return "" + return nil } pipelines, ok := serviceMap["pipelines"] if !ok { - return "" + return nil } pipelinesMap, ok := pipelines.(map[string]any) if !ok { - return "" + return nil } for _, components := range pipelinesMap { componentsMap, ok := components.(map[string]any) if !ok { - return "" + return nil } exporters, ok := componentsMap["exporters"] if !ok { @@ -215,7 +218,7 @@ func receiverInPipelineWithDatadogExporter(conf *confmap.Conf, receiverName stri } exportersSlice, ok := exporters.([]any) if !ok { - return "" + return nil } for _, exporter := range exportersSlice { if exporterString, ok := exporter.(string); ok { @@ -227,26 +230,23 @@ func receiverInPipelineWithDatadogExporter(conf *confmap.Conf, receiverName stri } receiverSlice, ok := receivers.([]any) if !ok { - return "" + return nil } for _, receiver := range receiverSlice { receiverString, ok := receiver.(string) if !ok { - return "" + return nil } if receiverString == receiverName { - return exporterString + ddExporters = append(ddExporters, exporterString) } - } - } } } } - - return "" + return ddExporters } func getDatadogExporters(conf *confmap.Conf) map[string]any { diff --git a/comp/otelcol/converter/impl/testdata/connectors/already-set/config.yaml b/comp/otelcol/converter/impl/testdata/connectors/already-set/config.yaml index 
143029b8cef5b..4c06c79247850 100644 --- a/comp/otelcol/converter/impl/testdata/connectors/already-set/config.yaml +++ b/comp/otelcol/converter/impl/testdata/connectors/already-set/config.yaml @@ -11,7 +11,7 @@ receivers: exporters: datadog: api: - key: 12345 + key: abcde12345 extensions: pprof/user-defined: diff --git a/comp/otelcol/converter/impl/testdata/connectors/no-dd-connector/config.yaml b/comp/otelcol/converter/impl/testdata/connectors/no-dd-connector/config.yaml index 2a72d6336d491..dccb5d98e85f9 100644 --- a/comp/otelcol/converter/impl/testdata/connectors/no-dd-connector/config.yaml +++ b/comp/otelcol/converter/impl/testdata/connectors/no-dd-connector/config.yaml @@ -11,7 +11,7 @@ receivers: exporters: datadog: api: - key: 12345 + key: abcde12345 extensions: pprof/user-defined: diff --git a/comp/otelcol/converter/impl/testdata/connectors/set-default/config-result.yaml b/comp/otelcol/converter/impl/testdata/connectors/set-default/config-result.yaml index 9c87aac4819d8..4d11433766dcb 100644 --- a/comp/otelcol/converter/impl/testdata/connectors/set-default/config-result.yaml +++ b/comp/otelcol/converter/impl/testdata/connectors/set-default/config-result.yaml @@ -11,7 +11,7 @@ receivers: exporters: datadog: api: - key: 12345 + key: abcde12345 extensions: pprof/user-defined: diff --git a/comp/otelcol/converter/impl/testdata/connectors/set-default/config.yaml b/comp/otelcol/converter/impl/testdata/connectors/set-default/config.yaml index 61e1a2a871c6a..45994878a3ee1 100644 --- a/comp/otelcol/converter/impl/testdata/connectors/set-default/config.yaml +++ b/comp/otelcol/converter/impl/testdata/connectors/set-default/config.yaml @@ -11,7 +11,7 @@ receivers: exporters: datadog: api: - key: 12345 + key: abcde12345 extensions: pprof/user-defined: diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/all/api-section/acfg.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/all/api-section/acfg.yaml new file mode 100644 index 0000000000000..d5a8a57302902 --- 
/dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/all/api-section/acfg.yaml @@ -0,0 +1,2 @@ +api_key: ggggg77777 +site: datadoghq.eu \ No newline at end of file diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/all/api-section/config-result.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/all/api-section/config-result.yaml new file mode 100644 index 0000000000000..45c95534aae74 --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/all/api-section/config-result.yaml @@ -0,0 +1,41 @@ +receivers: + otlp: + prometheus/user-defined: + config: + scrape_configs: + - job_name: 'datadog-agent' + scrape_interval: 10s + static_configs: + - targets: ['0.0.0.0:8888'] + +processors: + infraattributes/user-defined: + +exporters: + datadog: + api: + key: ggggg77777 + site: datadoghq.eu + +extensions: + pprof/user-defined: + health_check/user-defined: + zpages/user-defined: + endpoint: "localhost:55679" + ddflare/user-defined: + +service: + extensions: [pprof/user-defined, zpages/user-defined, health_check/user-defined, ddflare/user-defined] + pipelines: + traces: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] + metrics: + receivers: [prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog] + logs: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/all/api-section/config.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/all/api-section/config.yaml new file mode 100644 index 0000000000000..58a150f658497 --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/all/api-section/config.yaml @@ -0,0 +1,39 @@ +receivers: + otlp: + prometheus/user-defined: + config: + scrape_configs: + - job_name: 'datadog-agent' + scrape_interval: 10s + static_configs: + - targets: ['0.0.0.0:8888'] + +processors: + infraattributes/user-defined: + +exporters: + 
datadog: + api: + +extensions: + pprof/user-defined: + health_check/user-defined: + zpages/user-defined: + endpoint: "localhost:55679" + ddflare/user-defined: + +service: + extensions: [pprof/user-defined, zpages/user-defined, health_check/user-defined, ddflare/user-defined] + pipelines: + traces: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] + metrics: + receivers: [prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog] + logs: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/all/key-site-section/acfg.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/all/key-site-section/acfg.yaml new file mode 100644 index 0000000000000..d5a8a57302902 --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/all/key-site-section/acfg.yaml @@ -0,0 +1,2 @@ +api_key: ggggg77777 +site: datadoghq.eu \ No newline at end of file diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/all/key-site-section/config-result.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/all/key-site-section/config-result.yaml new file mode 100644 index 0000000000000..45c95534aae74 --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/all/key-site-section/config-result.yaml @@ -0,0 +1,41 @@ +receivers: + otlp: + prometheus/user-defined: + config: + scrape_configs: + - job_name: 'datadog-agent' + scrape_interval: 10s + static_configs: + - targets: ['0.0.0.0:8888'] + +processors: + infraattributes/user-defined: + +exporters: + datadog: + api: + key: ggggg77777 + site: datadoghq.eu + +extensions: + pprof/user-defined: + health_check/user-defined: + zpages/user-defined: + endpoint: "localhost:55679" + ddflare/user-defined: + +service: + extensions: [pprof/user-defined, zpages/user-defined, health_check/user-defined, ddflare/user-defined] + pipelines: + traces: + receivers: [otlp] + 
processors: [infraattributes/user-defined] + exporters: [datadog] + metrics: + receivers: [prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog] + logs: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/all/key-site-section/config.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/all/key-site-section/config.yaml new file mode 100644 index 0000000000000..f9d9173c1238a --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/all/key-site-section/config.yaml @@ -0,0 +1,41 @@ +receivers: + otlp: + prometheus/user-defined: + config: + scrape_configs: + - job_name: 'datadog-agent' + scrape_interval: 10s + static_configs: + - targets: ['0.0.0.0:8888'] + +processors: + infraattributes/user-defined: + +exporters: + datadog: + api: + key: + site: + +extensions: + pprof/user-defined: + health_check/user-defined: + zpages/user-defined: + endpoint: "localhost:55679" + ddflare/user-defined: + +service: + extensions: [pprof/user-defined, zpages/user-defined, health_check/user-defined, ddflare/user-defined] + pipelines: + traces: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] + metrics: + receivers: [prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog] + logs: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/all/no-api-section/acfg.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/all/no-api-section/acfg.yaml new file mode 100644 index 0000000000000..d5a8a57302902 --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/all/no-api-section/acfg.yaml @@ -0,0 +1,2 @@ +api_key: ggggg77777 +site: datadoghq.eu \ No newline at end of file diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/all/no-api-section/config-result.yaml 
b/comp/otelcol/converter/impl/testdata/dd-core-cfg/all/no-api-section/config-result.yaml new file mode 100644 index 0000000000000..45c95534aae74 --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/all/no-api-section/config-result.yaml @@ -0,0 +1,41 @@ +receivers: + otlp: + prometheus/user-defined: + config: + scrape_configs: + - job_name: 'datadog-agent' + scrape_interval: 10s + static_configs: + - targets: ['0.0.0.0:8888'] + +processors: + infraattributes/user-defined: + +exporters: + datadog: + api: + key: ggggg77777 + site: datadoghq.eu + +extensions: + pprof/user-defined: + health_check/user-defined: + zpages/user-defined: + endpoint: "localhost:55679" + ddflare/user-defined: + +service: + extensions: [pprof/user-defined, zpages/user-defined, health_check/user-defined, ddflare/user-defined] + pipelines: + traces: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] + metrics: + receivers: [prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog] + logs: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/all/no-api-section/config.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/all/no-api-section/config.yaml new file mode 100644 index 0000000000000..c8919b426c09a --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/all/no-api-section/config.yaml @@ -0,0 +1,38 @@ +receivers: + otlp: + prometheus/user-defined: + config: + scrape_configs: + - job_name: 'datadog-agent' + scrape_interval: 10s + static_configs: + - targets: ['0.0.0.0:8888'] + +processors: + infraattributes/user-defined: + +exporters: + datadog: + +extensions: + pprof/user-defined: + health_check/user-defined: + zpages/user-defined: + endpoint: "localhost:55679" + ddflare/user-defined: + +service: + extensions: [pprof/user-defined, zpages/user-defined, health_check/user-defined, 
ddflare/user-defined] + pipelines: + traces: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] + metrics: + receivers: [prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog] + logs: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/all/no-overrides/acfg.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/all/no-overrides/acfg.yaml new file mode 100644 index 0000000000000..d5a8a57302902 --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/all/no-overrides/acfg.yaml @@ -0,0 +1,2 @@ +api_key: ggggg77777 +site: datadoghq.eu \ No newline at end of file diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/all/no-overrides/config.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/all/no-overrides/config.yaml new file mode 100644 index 0000000000000..efe926f1a8046 --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/all/no-overrides/config.yaml @@ -0,0 +1,41 @@ +receivers: + otlp: + prometheus/user-defined: + config: + scrape_configs: + - job_name: 'datadog-agent' + scrape_interval: 10s + static_configs: + - targets: ['0.0.0.0:8888'] + +processors: + infraattributes/user-defined: + +exporters: + datadog: + api: + key: abc123 + site: us1.datadoghq.com + +extensions: + pprof/user-defined: + health_check/user-defined: + zpages/user-defined: + endpoint: "localhost:55679" + ddflare/user-defined: + +service: + extensions: [pprof/user-defined, zpages/user-defined, health_check/user-defined, ddflare/user-defined] + pipelines: + traces: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] + metrics: + receivers: [prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog] + logs: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] diff --git 
a/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/api-set-no-key/acfg.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/api-set-no-key/acfg.yaml new file mode 100644 index 0000000000000..bd45eb60c4a4d --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/api-set-no-key/acfg.yaml @@ -0,0 +1,2 @@ +api_key: ggggg77777 +site: datadoghq.eu diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/api-set-no-key/config-result.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/api-set-no-key/config-result.yaml new file mode 100644 index 0000000000000..959a117fc8eef --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/api-set-no-key/config-result.yaml @@ -0,0 +1,41 @@ +receivers: + otlp: + prometheus/user-defined: + config: + scrape_configs: + - job_name: 'datadog-agent' + scrape_interval: 10s + static_configs: + - targets: ['0.0.0.0:8888'] + +processors: + infraattributes/user-defined: + +exporters: + datadog: + api: + key: ggggg77777 + site: datadoghq.eu + +extensions: + pprof/user-defined: + health_check/user-defined: + zpages/user-defined: + endpoint: "localhost:55679" + ddflare/user-defined: + +service: + extensions: [pprof/user-defined, zpages/user-defined, health_check/user-defined, ddflare/user-defined] + pipelines: + traces: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] + metrics: + receivers: [prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog] + logs: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/api-set-no-key/config.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/api-set-no-key/config.yaml new file mode 100644 index 0000000000000..63923dcf01ede --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/api-set-no-key/config.yaml @@ -0,0 +1,40 @@ 
+receivers: + otlp: + prometheus/user-defined: + config: + scrape_configs: + - job_name: 'datadog-agent' + scrape_interval: 10s + static_configs: + - targets: ['0.0.0.0:8888'] + +processors: + infraattributes/user-defined: + +exporters: + datadog: + api: + site: datadoghq.eu + +extensions: + pprof/user-defined: + health_check/user-defined: + zpages/user-defined: + endpoint: "localhost:55679" + ddflare/user-defined: + +service: + extensions: [pprof/user-defined, zpages/user-defined, health_check/user-defined, ddflare/user-defined] + pipelines: + traces: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] + metrics: + receivers: [prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog] + logs: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/empty-string/acfg.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/empty-string/acfg.yaml new file mode 100644 index 0000000000000..9bf23f6b554ed --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/empty-string/acfg.yaml @@ -0,0 +1 @@ +api_key: ggggg77777 diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/empty-string/config-result.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/empty-string/config-result.yaml new file mode 100644 index 0000000000000..0f4753e0690e5 --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/empty-string/config-result.yaml @@ -0,0 +1,40 @@ +receivers: + otlp: + prometheus/user-defined: + config: + scrape_configs: + - job_name: 'datadog-agent' + scrape_interval: 10s + static_configs: + - targets: ['0.0.0.0:8888'] + +processors: + infraattributes/user-defined: + +exporters: + datadog: + api: + key: ggggg77777 + +extensions: + pprof/user-defined: + health_check/user-defined: + zpages/user-defined: + endpoint: "localhost:55679" + 
ddflare/user-defined: + +service: + extensions: [pprof/user-defined, zpages/user-defined, health_check/user-defined, ddflare/user-defined] + pipelines: + traces: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] + metrics: + receivers: [prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog] + logs: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/empty-string/config.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/empty-string/config.yaml new file mode 100644 index 0000000000000..f31a6089fe240 --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/empty-string/config.yaml @@ -0,0 +1,40 @@ +receivers: + otlp: + prometheus/user-defined: + config: + scrape_configs: + - job_name: 'datadog-agent' + scrape_interval: 10s + static_configs: + - targets: ['0.0.0.0:8888'] + +processors: + infraattributes/user-defined: + +exporters: + datadog: + api: + key: "" + +extensions: + pprof/user-defined: + health_check/user-defined: + zpages/user-defined: + endpoint: "localhost:55679" + ddflare/user-defined: + +service: + extensions: [pprof/user-defined, zpages/user-defined, health_check/user-defined, ddflare/user-defined] + pipelines: + traces: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] + metrics: + receivers: [prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog] + logs: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/multiple-dd-exporter/acfg.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/multiple-dd-exporter/acfg.yaml new file mode 100644 index 0000000000000..9bf23f6b554ed --- /dev/null +++ 
b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/multiple-dd-exporter/acfg.yaml @@ -0,0 +1 @@ +api_key: ggggg77777 diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/multiple-dd-exporter/config-result.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/multiple-dd-exporter/config-result.yaml new file mode 100644 index 0000000000000..38413568a0848 --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/multiple-dd-exporter/config-result.yaml @@ -0,0 +1,45 @@ +receivers: + otlp: + prometheus/user-defined: + config: + scrape_configs: + - job_name: 'datadog-agent' + scrape_interval: 10s + static_configs: + - targets: ['0.0.0.0:8888'] +processors: + infraattributes/user-defined: + +exporters: + datadog: + api: + key: ggggg77777 + datadog/2: + api: + key: ggggg77777 +extensions: + pprof/user-defined: + health_check/user-defined: + zpages/user-defined: + endpoint: "localhost:55679" + ddflare/user-defined: + +service: + extensions: [pprof/user-defined, zpages/user-defined, health_check/user-defined, ddflare/user-defined] + pipelines: + traces: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] + metrics/1: + receivers: [prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog] + metrics: + receivers: [prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog/2] + logs: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/multiple-dd-exporter/config.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/multiple-dd-exporter/config.yaml new file mode 100644 index 0000000000000..e1a8d16c5ada3 --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/multiple-dd-exporter/config.yaml @@ -0,0 +1,47 @@ +receivers: + otlp: + prometheus/user-defined: + config: + scrape_configs: + - job_name: 
'datadog-agent' + scrape_interval: 10s + static_configs: + - targets: ['0.0.0.0:8888'] + +processors: + infraattributes/user-defined: + +exporters: + datadog: + api: + key: + datadog/2: + api: + key: + +extensions: + pprof/user-defined: + health_check/user-defined: + zpages/user-defined: + endpoint: "localhost:55679" + ddflare/user-defined: + +service: + extensions: [pprof/user-defined, zpages/user-defined, health_check/user-defined, ddflare/user-defined] + pipelines: + traces: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] + metrics/1: + receivers: [prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog] + metrics: + receivers: [prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog/2] + logs: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/no-api-key-section/acfg.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/no-api-key-section/acfg.yaml new file mode 100644 index 0000000000000..9bf23f6b554ed --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/no-api-key-section/acfg.yaml @@ -0,0 +1 @@ +api_key: ggggg77777 diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/no-api-key-section/config-result.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/no-api-key-section/config-result.yaml new file mode 100644 index 0000000000000..0f4753e0690e5 --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/no-api-key-section/config-result.yaml @@ -0,0 +1,40 @@ +receivers: + otlp: + prometheus/user-defined: + config: + scrape_configs: + - job_name: 'datadog-agent' + scrape_interval: 10s + static_configs: + - targets: ['0.0.0.0:8888'] + +processors: + infraattributes/user-defined: + +exporters: + datadog: + api: + key: ggggg77777 + +extensions: + pprof/user-defined: + 
health_check/user-defined: + zpages/user-defined: + endpoint: "localhost:55679" + ddflare/user-defined: + +service: + extensions: [pprof/user-defined, zpages/user-defined, health_check/user-defined, ddflare/user-defined] + pipelines: + traces: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] + metrics: + receivers: [prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog] + logs: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/no-api-key-section/config.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/no-api-key-section/config.yaml new file mode 100644 index 0000000000000..c8919b426c09a --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/no-api-key-section/config.yaml @@ -0,0 +1,38 @@ +receivers: + otlp: + prometheus/user-defined: + config: + scrape_configs: + - job_name: 'datadog-agent' + scrape_interval: 10s + static_configs: + - targets: ['0.0.0.0:8888'] + +processors: + infraattributes/user-defined: + +exporters: + datadog: + +extensions: + pprof/user-defined: + health_check/user-defined: + zpages/user-defined: + endpoint: "localhost:55679" + ddflare/user-defined: + +service: + extensions: [pprof/user-defined, zpages/user-defined, health_check/user-defined, ddflare/user-defined] + pipelines: + traces: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] + metrics: + receivers: [prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog] + logs: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/secret/acfg.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/secret/acfg.yaml new file mode 100644 index 0000000000000..9bf23f6b554ed --- /dev/null +++ 
b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/secret/acfg.yaml @@ -0,0 +1 @@ +api_key: ggggg77777 diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/secret/config-result.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/secret/config-result.yaml new file mode 100644 index 0000000000000..0f4753e0690e5 --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/secret/config-result.yaml @@ -0,0 +1,40 @@ +receivers: + otlp: + prometheus/user-defined: + config: + scrape_configs: + - job_name: 'datadog-agent' + scrape_interval: 10s + static_configs: + - targets: ['0.0.0.0:8888'] + +processors: + infraattributes/user-defined: + +exporters: + datadog: + api: + key: ggggg77777 + +extensions: + pprof/user-defined: + health_check/user-defined: + zpages/user-defined: + endpoint: "localhost:55679" + ddflare/user-defined: + +service: + extensions: [pprof/user-defined, zpages/user-defined, health_check/user-defined, ddflare/user-defined] + pipelines: + traces: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] + metrics: + receivers: [prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog] + logs: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/secret/config.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/secret/config.yaml new file mode 100644 index 0000000000000..7d51de23ea767 --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/secret/config.yaml @@ -0,0 +1,40 @@ +receivers: + otlp: + prometheus/user-defined: + config: + scrape_configs: + - job_name: 'datadog-agent' + scrape_interval: 10s + static_configs: + - targets: ['0.0.0.0:8888'] + +processors: + infraattributes/user-defined: + +exporters: + datadog: + api: + key: "ENC[my-secret]" + +extensions: + pprof/user-defined: + health_check/user-defined: 
+ zpages/user-defined: + endpoint: "localhost:55679" + ddflare/user-defined: + +service: + extensions: [pprof/user-defined, zpages/user-defined, health_check/user-defined, ddflare/user-defined] + pipelines: + traces: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] + metrics: + receivers: [prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog] + logs: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/unset/acfg.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/unset/acfg.yaml new file mode 100644 index 0000000000000..9bf23f6b554ed --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/unset/acfg.yaml @@ -0,0 +1 @@ +api_key: ggggg77777 diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/unset/config-result.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/unset/config-result.yaml new file mode 100644 index 0000000000000..0f4753e0690e5 --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/unset/config-result.yaml @@ -0,0 +1,40 @@ +receivers: + otlp: + prometheus/user-defined: + config: + scrape_configs: + - job_name: 'datadog-agent' + scrape_interval: 10s + static_configs: + - targets: ['0.0.0.0:8888'] + +processors: + infraattributes/user-defined: + +exporters: + datadog: + api: + key: ggggg77777 + +extensions: + pprof/user-defined: + health_check/user-defined: + zpages/user-defined: + endpoint: "localhost:55679" + ddflare/user-defined: + +service: + extensions: [pprof/user-defined, zpages/user-defined, health_check/user-defined, ddflare/user-defined] + pipelines: + traces: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] + metrics: + receivers: [prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog] + logs: + receivers: 
[otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/unset/config.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/unset/config.yaml new file mode 100644 index 0000000000000..b80558a7bc040 --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/unset/config.yaml @@ -0,0 +1,39 @@ +receivers: + otlp: + prometheus/user-defined: + config: + scrape_configs: + - job_name: 'datadog-agent' + scrape_interval: 10s + static_configs: + - targets: ['0.0.0.0:8888'] + +processors: + infraattributes/user-defined: + +exporters: + datadog: + api: + +extensions: + pprof/user-defined: + health_check/user-defined: + zpages/user-defined: + endpoint: "localhost:55679" + ddflare/user-defined: + +service: + extensions: [pprof/user-defined, zpages/user-defined, health_check/user-defined, ddflare/user-defined] + pipelines: + traces: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] + metrics: + receivers: [prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog] + logs: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/none/acfg.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/none/acfg.yaml new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/none/config-result.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/none/config-result.yaml new file mode 100644 index 0000000000000..f9d9173c1238a --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/none/config-result.yaml @@ -0,0 +1,41 @@ +receivers: + otlp: + prometheus/user-defined: + config: + scrape_configs: + - job_name: 'datadog-agent' + scrape_interval: 10s + static_configs: + - targets: ['0.0.0.0:8888'] + +processors: + infraattributes/user-defined: + 
+exporters: + datadog: + api: + key: + site: + +extensions: + pprof/user-defined: + health_check/user-defined: + zpages/user-defined: + endpoint: "localhost:55679" + ddflare/user-defined: + +service: + extensions: [pprof/user-defined, zpages/user-defined, health_check/user-defined, ddflare/user-defined] + pipelines: + traces: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] + metrics: + receivers: [prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog] + logs: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/none/config.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/none/config.yaml new file mode 100644 index 0000000000000..e293d7ca773fe --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/none/config.yaml @@ -0,0 +1,41 @@ +receivers: + otlp: + prometheus/user-defined: + config: + scrape_configs: + - job_name: 'datadog-agent' + scrape_interval: 10s + static_configs: + - targets: ['0.0.0.0:8888'] + +processors: + infraattributes/user-defined: + +exporters: + datadog: + api: + key: + site: + +extensions: + pprof/user-defined: + health_check/user-defined: + zpages/user-defined: + endpoint: "localhost:55679" + ddflare/user-defined: + +service: + extensions: [pprof/user-defined, zpages/user-defined, health_check/user-defined, ddflare/user-defined] + pipelines: + traces: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] + metrics: + receivers: [prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog] + logs: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/api-set-no-site/acfg.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/api-set-no-site/acfg.yaml new file mode 100644 index 
0000000000000..9e0316073c26b --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/api-set-no-site/acfg.yaml @@ -0,0 +1 @@ +site: datadoghq.eu diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/api-set-no-site/config-result.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/api-set-no-site/config-result.yaml new file mode 100644 index 0000000000000..959a117fc8eef --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/api-set-no-site/config-result.yaml @@ -0,0 +1,41 @@ +receivers: + otlp: + prometheus/user-defined: + config: + scrape_configs: + - job_name: 'datadog-agent' + scrape_interval: 10s + static_configs: + - targets: ['0.0.0.0:8888'] + +processors: + infraattributes/user-defined: + +exporters: + datadog: + api: + key: ggggg77777 + site: datadoghq.eu + +extensions: + pprof/user-defined: + health_check/user-defined: + zpages/user-defined: + endpoint: "localhost:55679" + ddflare/user-defined: + +service: + extensions: [pprof/user-defined, zpages/user-defined, health_check/user-defined, ddflare/user-defined] + pipelines: + traces: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] + metrics: + receivers: [prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog] + logs: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/api-set-no-site/config.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/api-set-no-site/config.yaml new file mode 100644 index 0000000000000..0f4753e0690e5 --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/api-set-no-site/config.yaml @@ -0,0 +1,40 @@ +receivers: + otlp: + prometheus/user-defined: + config: + scrape_configs: + - job_name: 'datadog-agent' + scrape_interval: 10s + static_configs: + - targets: ['0.0.0.0:8888'] + +processors: + infraattributes/user-defined: + 
+exporters: + datadog: + api: + key: ggggg77777 + +extensions: + pprof/user-defined: + health_check/user-defined: + zpages/user-defined: + endpoint: "localhost:55679" + ddflare/user-defined: + +service: + extensions: [pprof/user-defined, zpages/user-defined, health_check/user-defined, ddflare/user-defined] + pipelines: + traces: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] + metrics: + receivers: [prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog] + logs: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/empty-string/acfg.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/empty-string/acfg.yaml new file mode 100644 index 0000000000000..bd45eb60c4a4d --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/empty-string/acfg.yaml @@ -0,0 +1,2 @@ +api_key: ggggg77777 +site: datadoghq.eu diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/empty-string/config-result.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/empty-string/config-result.yaml new file mode 100644 index 0000000000000..959a117fc8eef --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/empty-string/config-result.yaml @@ -0,0 +1,41 @@ +receivers: + otlp: + prometheus/user-defined: + config: + scrape_configs: + - job_name: 'datadog-agent' + scrape_interval: 10s + static_configs: + - targets: ['0.0.0.0:8888'] + +processors: + infraattributes/user-defined: + +exporters: + datadog: + api: + key: ggggg77777 + site: datadoghq.eu + +extensions: + pprof/user-defined: + health_check/user-defined: + zpages/user-defined: + endpoint: "localhost:55679" + ddflare/user-defined: + +service: + extensions: [pprof/user-defined, zpages/user-defined, health_check/user-defined, ddflare/user-defined] + pipelines: + traces: + receivers: [otlp] + processors: 
[infraattributes/user-defined] + exporters: [datadog] + metrics: + receivers: [prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog] + logs: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/empty-string/config.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/empty-string/config.yaml new file mode 100644 index 0000000000000..1578ae633dbe1 --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/empty-string/config.yaml @@ -0,0 +1,41 @@ +receivers: + otlp: + prometheus/user-defined: + config: + scrape_configs: + - job_name: 'datadog-agent' + scrape_interval: 10s + static_configs: + - targets: ['0.0.0.0:8888'] + +processors: + infraattributes/user-defined: + +exporters: + datadog: + api: + key: ggggg77777 + site: "" + +extensions: + pprof/user-defined: + health_check/user-defined: + zpages/user-defined: + endpoint: "localhost:55679" + ddflare/user-defined: + +service: + extensions: [pprof/user-defined, zpages/user-defined, health_check/user-defined, ddflare/user-defined] + pipelines: + traces: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] + metrics: + receivers: [prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog] + logs: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/multiple-dd-exporter/acfg.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/multiple-dd-exporter/acfg.yaml new file mode 100644 index 0000000000000..d5a8a57302902 --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/multiple-dd-exporter/acfg.yaml @@ -0,0 +1,2 @@ +api_key: ggggg77777 +site: datadoghq.eu \ No newline at end of file diff --git 
a/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/multiple-dd-exporter/config-result.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/multiple-dd-exporter/config-result.yaml new file mode 100644 index 0000000000000..5018db5b7b3b2 --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/multiple-dd-exporter/config-result.yaml @@ -0,0 +1,47 @@ +receivers: + otlp: + prometheus/user-defined: + config: + scrape_configs: + - job_name: 'datadog-agent' + scrape_interval: 10s + static_configs: + - targets: ['0.0.0.0:8888'] +processors: + infraattributes/user-defined: + +exporters: + datadog: + api: + key: abcde12345 + site: datadoghq.eu + datadog/2: + api: + key: abcde12345 + site: datadoghq.eu +extensions: + pprof/user-defined: + health_check/user-defined: + zpages/user-defined: + endpoint: "localhost:55679" + ddflare/user-defined: + +service: + extensions: [pprof/user-defined, zpages/user-defined, health_check/user-defined, ddflare/user-defined] + pipelines: + traces: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] + metrics/1: + receivers: [prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog] + metrics: + receivers: [prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog/2] + logs: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/multiple-dd-exporter/config.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/multiple-dd-exporter/config.yaml new file mode 100644 index 0000000000000..7577e98dfdbfd --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/multiple-dd-exporter/config.yaml @@ -0,0 +1,47 @@ +receivers: + otlp: + prometheus/user-defined: + config: + scrape_configs: + - job_name: 'datadog-agent' + scrape_interval: 10s + static_configs: + - targets: ['0.0.0.0:8888'] + +processors: + 
infraattributes/user-defined: + +exporters: + datadog: + api: + key: abcde12345 + datadog/2: + api: + key: abcde12345 + +extensions: + pprof/user-defined: + health_check/user-defined: + zpages/user-defined: + endpoint: "localhost:55679" + ddflare/user-defined: + +service: + extensions: [pprof/user-defined, zpages/user-defined, health_check/user-defined, ddflare/user-defined] + pipelines: + traces: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] + metrics/1: + receivers: [prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog] + metrics: + receivers: [prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog/2] + logs: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/no-api-site-section/acfg.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/no-api-site-section/acfg.yaml new file mode 100644 index 0000000000000..bd45eb60c4a4d --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/no-api-site-section/acfg.yaml @@ -0,0 +1,2 @@ +api_key: ggggg77777 +site: datadoghq.eu diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/no-api-site-section/config-result.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/no-api-site-section/config-result.yaml new file mode 100644 index 0000000000000..959a117fc8eef --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/no-api-site-section/config-result.yaml @@ -0,0 +1,41 @@ +receivers: + otlp: + prometheus/user-defined: + config: + scrape_configs: + - job_name: 'datadog-agent' + scrape_interval: 10s + static_configs: + - targets: ['0.0.0.0:8888'] + +processors: + infraattributes/user-defined: + +exporters: + datadog: + api: + key: ggggg77777 + site: datadoghq.eu + +extensions: + pprof/user-defined: + health_check/user-defined: + zpages/user-defined: + 
endpoint: "localhost:55679" + ddflare/user-defined: + +service: + extensions: [pprof/user-defined, zpages/user-defined, health_check/user-defined, ddflare/user-defined] + pipelines: + traces: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] + metrics: + receivers: [prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog] + logs: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/no-api-site-section/config.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/no-api-site-section/config.yaml new file mode 100644 index 0000000000000..6c633ac386d33 --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/no-api-site-section/config.yaml @@ -0,0 +1,40 @@ +receivers: + otlp: + prometheus/user-defined: + config: + scrape_configs: + - job_name: 'datadog-agent' + scrape_interval: 10s + static_configs: + - targets: ['0.0.0.0:8888'] + +processors: + infraattributes/user-defined: + +exporters: + datadog: + api: + key: ggggg77777 + +extensions: + pprof/user-defined: + health_check/user-defined: + zpages/user-defined: + endpoint: "localhost:55679" + ddflare/user-defined: + +service: + extensions: [pprof/user-defined, zpages/user-defined, health_check/user-defined, ddflare/user-defined] + pipelines: + traces: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] + metrics: + receivers: [prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog] + logs: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/unset/acfg.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/unset/acfg.yaml new file mode 100644 index 0000000000000..d5a8a57302902 --- /dev/null +++ 
b/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/unset/acfg.yaml @@ -0,0 +1,2 @@ +api_key: ggggg77777 +site: datadoghq.eu \ No newline at end of file diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/unset/config-result.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/unset/config-result.yaml new file mode 100644 index 0000000000000..c2bacbcf1533d --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/unset/config-result.yaml @@ -0,0 +1,41 @@ +receivers: + otlp: + prometheus/user-defined: + config: + scrape_configs: + - job_name: 'datadog-agent' + scrape_interval: 10s + static_configs: + - targets: ['0.0.0.0:8888'] + +processors: + infraattributes/user-defined: + +exporters: + datadog: + api: + key: abcde12345 + site: datadoghq.eu + +extensions: + pprof/user-defined: + health_check/user-defined: + zpages/user-defined: + endpoint: "localhost:55679" + ddflare/user-defined: + +service: + extensions: [pprof/user-defined, zpages/user-defined, health_check/user-defined, ddflare/user-defined] + pipelines: + traces: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] + metrics: + receivers: [prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog] + logs: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/unset/config.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/unset/config.yaml new file mode 100644 index 0000000000000..99ee6d6f9faa3 --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/unset/config.yaml @@ -0,0 +1,41 @@ +receivers: + otlp: + prometheus/user-defined: + config: + scrape_configs: + - job_name: 'datadog-agent' + scrape_interval: 10s + static_configs: + - targets: ['0.0.0.0:8888'] + +processors: + infraattributes/user-defined: + +exporters: + datadog: + api: + key: abcde12345 + site: + 
+extensions: + pprof/user-defined: + health_check/user-defined: + zpages/user-defined: + endpoint: "localhost:55679" + ddflare/user-defined: + +service: + extensions: [pprof/user-defined, zpages/user-defined, health_check/user-defined, ddflare/user-defined] + pipelines: + traces: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] + metrics: + receivers: [prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog] + logs: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] diff --git a/comp/otelcol/converter/impl/testdata/dd/config-result.yaml b/comp/otelcol/converter/impl/testdata/dd/config-result.yaml index 9f02c5c92a25c..c311a477af6e1 100644 --- a/comp/otelcol/converter/impl/testdata/dd/config-result.yaml +++ b/comp/otelcol/converter/impl/testdata/dd/config-result.yaml @@ -7,7 +7,7 @@ processors: exporters: datadog: api: - key: 12345 + key: abcde12345 connectors: nop/connector: diff --git a/comp/otelcol/converter/impl/testdata/dd/config.yaml b/comp/otelcol/converter/impl/testdata/dd/config.yaml index 9f02c5c92a25c..c311a477af6e1 100644 --- a/comp/otelcol/converter/impl/testdata/dd/config.yaml +++ b/comp/otelcol/converter/impl/testdata/dd/config.yaml @@ -7,7 +7,7 @@ processors: exporters: datadog: api: - key: 12345 + key: abcde12345 connectors: nop/connector: diff --git a/comp/otelcol/converter/impl/testdata/processors/no-changes/config.yaml b/comp/otelcol/converter/impl/testdata/processors/no-changes/config.yaml index 756f84e487050..2ecc50630bc2a 100644 --- a/comp/otelcol/converter/impl/testdata/processors/no-changes/config.yaml +++ b/comp/otelcol/converter/impl/testdata/processors/no-changes/config.yaml @@ -11,7 +11,7 @@ receivers: exporters: datadog: api: - key: 12345 + key: abcde12345 extensions: pprof/user-defined: diff --git a/comp/otelcol/converter/impl/testdata/processors/no-processor-partial/config-result.yaml 
b/comp/otelcol/converter/impl/testdata/processors/no-processor-partial/config-result.yaml index 697c77659af90..3c350b584f7a9 100644 --- a/comp/otelcol/converter/impl/testdata/processors/no-processor-partial/config-result.yaml +++ b/comp/otelcol/converter/impl/testdata/processors/no-processor-partial/config-result.yaml @@ -11,7 +11,7 @@ receivers: exporters: datadog: api: - key: 12345 + key: abcde12345 processors: k8sattributes: diff --git a/comp/otelcol/converter/impl/testdata/processors/no-processor-partial/config.yaml b/comp/otelcol/converter/impl/testdata/processors/no-processor-partial/config.yaml index 4e82649cac147..513076b06b32a 100644 --- a/comp/otelcol/converter/impl/testdata/processors/no-processor-partial/config.yaml +++ b/comp/otelcol/converter/impl/testdata/processors/no-processor-partial/config.yaml @@ -11,7 +11,7 @@ receivers: exporters: datadog: api: - key: 12345 + key: abcde12345 processors: k8sattributes: diff --git a/comp/otelcol/converter/impl/testdata/processors/no-processors/config-result.yaml b/comp/otelcol/converter/impl/testdata/processors/no-processors/config-result.yaml index 6da189e4a2256..fa3868f86a597 100644 --- a/comp/otelcol/converter/impl/testdata/processors/no-processors/config-result.yaml +++ b/comp/otelcol/converter/impl/testdata/processors/no-processors/config-result.yaml @@ -11,7 +11,7 @@ receivers: exporters: datadog: api: - key: 12345 + key: abcde12345 extensions: pprof/user-defined: diff --git a/comp/otelcol/converter/impl/testdata/processors/no-processors/config.yaml b/comp/otelcol/converter/impl/testdata/processors/no-processors/config.yaml index e024121c05626..18c8bde47c533 100644 --- a/comp/otelcol/converter/impl/testdata/processors/no-processors/config.yaml +++ b/comp/otelcol/converter/impl/testdata/processors/no-processors/config.yaml @@ -11,7 +11,7 @@ receivers: exporters: datadog: api: - key: 12345 + key: abcde12345 extensions: pprof/user-defined: diff --git 
a/comp/otelcol/converter/impl/testdata/processors/other-processors/config-result.yaml b/comp/otelcol/converter/impl/testdata/processors/other-processors/config-result.yaml index e84cc09f734ce..bcb0096299ea8 100644 --- a/comp/otelcol/converter/impl/testdata/processors/other-processors/config-result.yaml +++ b/comp/otelcol/converter/impl/testdata/processors/other-processors/config-result.yaml @@ -11,7 +11,7 @@ receivers: exporters: datadog: api: - key: 12345 + key: abcde12345 extensions: pprof/user-defined: diff --git a/comp/otelcol/converter/impl/testdata/processors/other-processors/config.yaml b/comp/otelcol/converter/impl/testdata/processors/other-processors/config.yaml index de77e594c7f8f..f76d6b0e3a4a2 100644 --- a/comp/otelcol/converter/impl/testdata/processors/other-processors/config.yaml +++ b/comp/otelcol/converter/impl/testdata/processors/other-processors/config.yaml @@ -11,7 +11,7 @@ receivers: exporters: datadog: api: - key: 12345 + key: abcde12345 processors: k8sattributes: diff --git a/comp/otelcol/converter/impl/testdata/receivers/job-name-change/config-result.yaml b/comp/otelcol/converter/impl/testdata/receivers/job-name-change/config-result.yaml index 3fff816644e2d..c2f8aa89006de 100644 --- a/comp/otelcol/converter/impl/testdata/receivers/job-name-change/config-result.yaml +++ b/comp/otelcol/converter/impl/testdata/receivers/job-name-change/config-result.yaml @@ -11,7 +11,7 @@ receivers: exporters: datadog: api: - key: 12345 + key: abcde12345 extensions: pprof/user-defined: diff --git a/comp/otelcol/converter/impl/testdata/receivers/job-name-change/config.yaml b/comp/otelcol/converter/impl/testdata/receivers/job-name-change/config.yaml index 47e9b4f58463c..2dcc8524c58cf 100644 --- a/comp/otelcol/converter/impl/testdata/receivers/job-name-change/config.yaml +++ b/comp/otelcol/converter/impl/testdata/receivers/job-name-change/config.yaml @@ -11,7 +11,7 @@ receivers: exporters: datadog: api: - key: 12345 + key: abcde12345 extensions: pprof/user-defined: 
diff --git a/comp/otelcol/converter/impl/testdata/receivers/multi-dd-partial-prom/config-result.yaml b/comp/otelcol/converter/impl/testdata/receivers/multi-dd-partial-prom/config-result.yaml index baf1856351220..e4434fd09f952 100644 --- a/comp/otelcol/converter/impl/testdata/receivers/multi-dd-partial-prom/config-result.yaml +++ b/comp/otelcol/converter/impl/testdata/receivers/multi-dd-partial-prom/config-result.yaml @@ -18,13 +18,13 @@ receivers: exporters: datadog/1: api: - key: 12345 + key: abcde12345 datadog/2: api: - key: 12345 + key: abcde12345 datadog/3: api: - key: 12345 + key: abcde12345 extensions: pprof/user-defined: diff --git a/comp/otelcol/converter/impl/testdata/receivers/multi-dd-partial-prom/config.yaml b/comp/otelcol/converter/impl/testdata/receivers/multi-dd-partial-prom/config.yaml index 51b90562c7f44..a4e8c166d74d8 100644 --- a/comp/otelcol/converter/impl/testdata/receivers/multi-dd-partial-prom/config.yaml +++ b/comp/otelcol/converter/impl/testdata/receivers/multi-dd-partial-prom/config.yaml @@ -11,13 +11,13 @@ receivers: exporters: datadog/1: api: - key: 12345 + key: abcde12345 datadog/2: api: - key: 12345 + key: abcde12345 datadog/3: api: - key: 12345 + key: abcde12345 extensions: pprof/user-defined: diff --git a/comp/otelcol/converter/impl/testdata/receivers/no-changes-multiple-dd-same-pipeline/config.yaml b/comp/otelcol/converter/impl/testdata/receivers/no-changes-multiple-dd-same-pipeline/config.yaml new file mode 100644 index 0000000000000..c17a3fdbd4482 --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/receivers/no-changes-multiple-dd-same-pipeline/config.yaml @@ -0,0 +1,43 @@ +receivers: + otlp: + prometheus/user-defined: + config: + scrape_configs: + - job_name: 'datadog-agent' + scrape_interval: 10s + static_configs: + - targets: ['0.0.0.0:8888'] + +exporters: + datadog: + api: + key: abcde12345 + datadog/2: + api: + key: abcde12345 + +extensions: + pprof/user-defined: + health_check/user-defined: + zpages/user-defined: + 
endpoint: "localhost:55679" + ddflare/user-defined: + +processors: + infraattributes/user-defined: + +service: + extensions: [pprof/user-defined, zpages/user-defined, health_check/user-defined, ddflare/user-defined] + pipelines: + traces: + receivers: [nop] + processors: [infraattributes/user-defined] + exporters: [datadog] + metrics: + receivers: [nop, prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog, datadog/2] + logs: + receivers: [nop] + processors: [infraattributes/user-defined] + exporters: [datadog] diff --git a/comp/otelcol/converter/impl/testdata/receivers/no-changes-multiple-dd/config.yaml b/comp/otelcol/converter/impl/testdata/receivers/no-changes-multiple-dd/config.yaml new file mode 100644 index 0000000000000..8f48837890504 --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/receivers/no-changes-multiple-dd/config.yaml @@ -0,0 +1,47 @@ +receivers: + otlp: + prometheus/user-defined: + config: + scrape_configs: + - job_name: 'datadog-agent' + scrape_interval: 10s + static_configs: + - targets: ['0.0.0.0:8888'] + +exporters: + datadog: + api: + key: abcde12345 + datadog/2: + api: + key: abcde12345 + +extensions: + pprof/user-defined: + health_check/user-defined: + zpages/user-defined: + endpoint: "localhost:55679" + ddflare/user-defined: + +processors: + infraattributes/user-defined: + +service: + extensions: [pprof/user-defined, zpages/user-defined, health_check/user-defined, ddflare/user-defined] + pipelines: + traces: + receivers: [nop] + processors: [infraattributes/user-defined] + exporters: [datadog] + metrics: + receivers: [nop, prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog] + metrics/2: + receivers: [prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog/2] + logs: + receivers: [nop] + processors: [infraattributes/user-defined] + exporters: [datadog] diff --git 
a/comp/otelcol/converter/impl/testdata/receivers/no-changes/config.yaml b/comp/otelcol/converter/impl/testdata/receivers/no-changes/config.yaml index 3d814075872f2..3625ed308e769 100644 --- a/comp/otelcol/converter/impl/testdata/receivers/no-changes/config.yaml +++ b/comp/otelcol/converter/impl/testdata/receivers/no-changes/config.yaml @@ -11,7 +11,7 @@ receivers: exporters: datadog: api: - key: 12345 + key: abcde12345 extensions: pprof/user-defined: diff --git a/comp/otelcol/converter/impl/testdata/receivers/no-prom-multi-dd/config-result.yaml b/comp/otelcol/converter/impl/testdata/receivers/no-prom-multi-dd/config-result.yaml index a31a1402f3500..da86cf2366964 100644 --- a/comp/otelcol/converter/impl/testdata/receivers/no-prom-multi-dd/config-result.yaml +++ b/comp/otelcol/converter/impl/testdata/receivers/no-prom-multi-dd/config-result.yaml @@ -11,10 +11,10 @@ receivers: exporters: datadog/1: api: - key: 12345 + key: abcde12345 datadog/2: api: - key: 6789 + key: fghi6789 extensions: pprof/user-defined: diff --git a/comp/otelcol/converter/impl/testdata/receivers/no-prom-multi-dd/config.yaml b/comp/otelcol/converter/impl/testdata/receivers/no-prom-multi-dd/config.yaml index d0a052ac1e521..c454b7930f2b5 100644 --- a/comp/otelcol/converter/impl/testdata/receivers/no-prom-multi-dd/config.yaml +++ b/comp/otelcol/converter/impl/testdata/receivers/no-prom-multi-dd/config.yaml @@ -4,10 +4,10 @@ receivers: exporters: datadog/1: api: - key: 12345 + key: abcde12345 datadog/2: api: - key: 6789 + key: fghi6789 extensions: pprof/user-defined: diff --git a/comp/otelcol/converter/impl/testdata/receivers/no-prom-not-default-addr/config-result.yaml b/comp/otelcol/converter/impl/testdata/receivers/no-prom-not-default-addr/config-result.yaml index 7a6cd6cf2558d..f0c0de3c28d35 100644 --- a/comp/otelcol/converter/impl/testdata/receivers/no-prom-not-default-addr/config-result.yaml +++ b/comp/otelcol/converter/impl/testdata/receivers/no-prom-not-default-addr/config-result.yaml @@ -11,7 
+11,7 @@ receivers: exporters: datadog: api: - key: 12345 + key: abcde12345 extensions: pprof/user-defined: diff --git a/comp/otelcol/converter/impl/testdata/receivers/no-prom-not-default-addr/config.yaml b/comp/otelcol/converter/impl/testdata/receivers/no-prom-not-default-addr/config.yaml index 9da2bedafb8bc..ad61bd222f51b 100644 --- a/comp/otelcol/converter/impl/testdata/receivers/no-prom-not-default-addr/config.yaml +++ b/comp/otelcol/converter/impl/testdata/receivers/no-prom-not-default-addr/config.yaml @@ -4,7 +4,7 @@ receivers: exporters: datadog: api: - key: 12345 + key: abcde12345 extensions: pprof/user-defined: diff --git a/comp/otelcol/converter/impl/testdata/receivers/no-prometheus-receiver/config-result.yaml b/comp/otelcol/converter/impl/testdata/receivers/no-prometheus-receiver/config-result.yaml index 203185d2272ff..67f8f6463ff1a 100644 --- a/comp/otelcol/converter/impl/testdata/receivers/no-prometheus-receiver/config-result.yaml +++ b/comp/otelcol/converter/impl/testdata/receivers/no-prometheus-receiver/config-result.yaml @@ -11,7 +11,7 @@ receivers: exporters: datadog: api: - key: 12345 + key: abcde12345 extensions: pprof/user-defined: diff --git a/comp/otelcol/converter/impl/testdata/receivers/no-prometheus-receiver/config.yaml b/comp/otelcol/converter/impl/testdata/receivers/no-prometheus-receiver/config.yaml index 07452dca0f605..e5b635da1cb3a 100644 --- a/comp/otelcol/converter/impl/testdata/receivers/no-prometheus-receiver/config.yaml +++ b/comp/otelcol/converter/impl/testdata/receivers/no-prometheus-receiver/config.yaml @@ -4,7 +4,7 @@ receivers: exporters: datadog: api: - key: 12345 + key: abcde12345 extensions: pprof/user-defined: diff --git a/comp/otelcol/converter/impl/testdata/receivers/no-receivers-defined/config-result.yaml b/comp/otelcol/converter/impl/testdata/receivers/no-receivers-defined/config-result.yaml index 39e6cb511c45f..6e57ae36b8dda 100644 --- 
a/comp/otelcol/converter/impl/testdata/receivers/no-receivers-defined/config-result.yaml +++ b/comp/otelcol/converter/impl/testdata/receivers/no-receivers-defined/config-result.yaml @@ -10,7 +10,7 @@ receivers: exporters: datadog: api: - key: 12345 + key: abcde12345 extensions: pprof/user-defined: diff --git a/comp/otelcol/converter/impl/testdata/receivers/no-receivers-defined/config.yaml b/comp/otelcol/converter/impl/testdata/receivers/no-receivers-defined/config.yaml index 9a1ec2e6c0fa9..cf5430beb88cd 100644 --- a/comp/otelcol/converter/impl/testdata/receivers/no-receivers-defined/config.yaml +++ b/comp/otelcol/converter/impl/testdata/receivers/no-receivers-defined/config.yaml @@ -1,7 +1,7 @@ exporters: datadog: api: - key: 12345 + key: abcde12345 extensions: pprof/user-defined: diff --git a/pkg/collector/corechecks/oracle/oracle_integration_test.go b/pkg/collector/corechecks/oracle/oracle_integration_test.go index a94e716c5df91..c0df585ca2065 100644 --- a/pkg/collector/corechecks/oracle/oracle_integration_test.go +++ b/pkg/collector/corechecks/oracle/oracle_integration_test.go @@ -216,7 +216,7 @@ func TestChkRun(t *testing.T) { diff1 := (pgaAfter1StRun - pgaBefore) / 1024 var extremePGAUsage float64 if isDbVersionGreaterOrEqualThan(&c, "12.2") { - extremePGAUsage = 1024 + extremePGAUsage = 2048 } else { extremePGAUsage = 8192 } diff --git a/pkg/gpu/probe_test.go b/pkg/gpu/probe_test.go index 9739fc0eccd51..9b47b39998ab4 100644 --- a/pkg/gpu/probe_test.go +++ b/pkg/gpu/probe_test.go @@ -11,18 +11,15 @@ import ( "testing" "time" - "golang.org/x/exp/maps" - "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "golang.org/x/exp/maps" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/gpu/model" "github.com/DataDog/datadog-agent/pkg/ebpf/ebpftest" consumerstestutil "github.com/DataDog/datadog-agent/pkg/eventmonitor/consumers/testutil" "github.com/DataDog/datadog-agent/pkg/gpu/config" "github.com/DataDog/datadog-agent/pkg/gpu/testutil" - 
"github.com/DataDog/datadog-agent/pkg/network/usm/utils" - "github.com/DataDog/datadog-agent/pkg/util/testutil/flake" ) type probeTestSuite struct { @@ -72,9 +69,8 @@ func (s *probeTestSuite) TestCanReceiveEvents() { t := s.T() probe := s.getProbe() - cmd := testutil.RunSample(t, testutil.CudaSample) - - utils.WaitForProgramsToBeTraced(t, gpuModuleName, gpuAttacherName, cmd.Process.Pid, utils.ManualTracingFallbackDisabled) + cmd, err := testutil.RunSample(t, testutil.CudaSample) + require.NoError(t, err) var handlerStream, handlerGlobal *StreamHandler require.Eventually(t, func() bool { @@ -89,7 +85,7 @@ func (s *probeTestSuite) TestCanReceiveEvents() { } return handlerStream != nil && handlerGlobal != nil && len(handlerStream.kernelSpans) > 0 && len(handlerGlobal.allocations) > 0 - }, 10*time.Second, 500*time.Millisecond, "stream and global handlers not found: existing is %v", probe.consumer.streamHandlers) + }, 3*time.Second, 100*time.Millisecond, "stream and global handlers not found: existing is %v", probe.consumer.streamHandlers) // Check device assignments require.Contains(t, probe.consumer.sysCtx.selectedDeviceByPIDAndTID, cmd.Process.Pid) @@ -115,15 +111,14 @@ func (s *probeTestSuite) TestCanGenerateStats() { probe := s.getProbe() - cmd := testutil.RunSample(t, testutil.CudaSample) - - utils.WaitForProgramsToBeTraced(t, gpuModuleName, gpuAttacherName, cmd.Process.Pid, utils.ManualTracingFallbackDisabled) + cmd, err := testutil.RunSample(t, testutil.CudaSample) + require.NoError(t, err) - // Wait until the process finishes and we can get the stats. Run this instead of waiting for the process to finish - // so that we can time out correctly + //TODO: change this check to count telemetry counter of the consumer (once added). 
+ // we are expecting 2 different streamhandlers because cudasample generates 3 events in total for 2 different streams (stream 0 and stream 30) require.Eventually(t, func() bool { - return !utils.IsProgramTraced(gpuModuleName, gpuAttacherName, cmd.Process.Pid) - }, 20*time.Second, 500*time.Millisecond, "process not stopped") + return len(probe.consumer.streamHandlers) == 2 + }, 3*time.Second, 100*time.Millisecond, "stream handlers count mismatch: expected: 2, got: %d", len(probe.consumer.streamHandlers)) stats, err := probe.GetAndFlush() require.NoError(t, err) @@ -145,21 +140,20 @@ func (s *probeTestSuite) TestMultiGPUSupport() { sampleArgs := testutil.SampleArgs{ StartWaitTimeSec: 6, // default wait time for WaitForProgramsToBeTraced is 5 seconds, give margin to attach manually to avoid flakes - EndWaitTimeSec: 1, // We need the process to stay active a bit so we can inspect its environment variables, if it ends too quickly we get no information CudaVisibleDevicesEnv: "1,2", SelectedDevice: 1, } // Visible devices 1,2 -> selects 1 in that array -> global device index = 2 selectedGPU := testutil.GPUUUIDs[2] - cmd := testutil.RunSampleWithArgs(t, testutil.CudaSample, sampleArgs) - utils.WaitForProgramsToBeTraced(t, gpuModuleName, gpuAttacherName, cmd.Process.Pid, utils.ManualTracingFallbackEnabled) + cmd, err := testutil.RunSampleWithArgs(t, testutil.CudaSample, sampleArgs) + require.NoError(t, err) - // Wait until the process finishes and we can get the stats. Run this instead of waiting for the process to finish - // so that we can time out correctly + //TODO: change this check to count telemetry counter of the consumer (once added). 
+ // we are expecting 2 different streamhandlers because cudasample generates 3 events in total for 2 different streams (stream 0 and stream 30) require.Eventually(t, func() bool { - return !utils.IsProgramTraced(gpuModuleName, gpuAttacherName, cmd.Process.Pid) - }, 60*time.Second, 500*time.Millisecond, "process not stopped") + return len(probe.consumer.streamHandlers) == 2 + }, 3*time.Second, 100*time.Millisecond, "stream handlers count mismatch: expected: 2, got: %d", len(probe.consumer.streamHandlers)) stats, err := probe.GetAndFlush() require.NoError(t, err) @@ -175,22 +169,9 @@ func (s *probeTestSuite) TestMultiGPUSupport() { func (s *probeTestSuite) TestDetectsContainer() { t := s.T() - // Flaky test in CI, avoid failures on main for now. - flake.Mark(t) - probe := s.getProbe() - args := testutil.GetDefaultArgs() - args.EndWaitTimeSec = 1 - pid, cid := testutil.RunSampleInDockerWithArgs(t, testutil.CudaSample, testutil.MinimalDockerImage, args) - - utils.WaitForProgramsToBeTraced(t, gpuModuleName, gpuAttacherName, pid, utils.ManualTracingFallbackDisabled) - - // Wait until the process finishes and we can get the stats. 
Run this instead of waiting for the process to finish - // so that we can time out correctly - require.Eventually(t, func() bool { - return !utils.IsProgramTraced(gpuModuleName, gpuAttacherName, pid) - }, 20*time.Second, 500*time.Millisecond, "process not stopped") + pid, cid := testutil.RunSampleInDocker(t, testutil.CudaSample, testutil.MinimalDockerImage) // Check that the stream handlers have the correct container ID assigned for key, handler := range probe.consumer.streamHandlers { diff --git a/pkg/gpu/testdata/cudasample.c b/pkg/gpu/testdata/cudasample.c index aa65df206567b..870092a19afa0 100644 --- a/pkg/gpu/testdata/cudasample.c +++ b/pkg/gpu/testdata/cudasample.c @@ -39,15 +39,16 @@ cudaError_t cudaSetDevice(int device) { int main(int argc, char **argv) { cudaStream_t stream = 30; - if (argc != 4) { - fprintf(stderr, "Usage: %s \n", argv[0]); + if (argc != 3) { + fprintf(stderr, "Usage: %s \n", argv[0]); return 1; } int waitStart = atoi(argv[1]); - int waitEnd = atoi(argv[2]); - int device = atoi(argv[3]); + int device = atoi(argv[2]); + // This string is used by PatternScanner to validate a proper start of this sample program inside the container + fprintf(stderr, "Starting CudaSample program\n"); fprintf(stderr, "Waiting for %d seconds before starting\n", waitStart); // Give time for the eBPF program to load @@ -62,12 +63,12 @@ int main(int argc, char **argv) { cudaFree(ptr); cudaStreamSynchronize(stream); - fprintf(stderr, "CUDA calls made. 
Waiting for %d seconds before exiting\n", waitEnd); + // we don't exit to avoid flakiness when the process is terminated before it was hooked for gpu monitoring + // the expected usage is to send a kill signal to the process (or stop the container that is running it) - // Give time for the agent to inspect this process and check environment variables/etc before this exits - sleep(waitEnd); - - fprintf(stderr, "Exiting\n"); + // this line is used as a marker by patternScanner to indicate the end of the program + fprintf(stderr, "CUDA calls made.\n"); + pause(); // Wait for signal to finish the process return 0; } diff --git a/pkg/gpu/testutil/samplebins.go b/pkg/gpu/testutil/samplebins.go index 23ab544162e91..c38865a405909 100644 --- a/pkg/gpu/testutil/samplebins.go +++ b/pkg/gpu/testutil/samplebins.go @@ -8,11 +8,11 @@ package testutil import ( - "bufio" + "context" "fmt" - "io" "os/exec" "path/filepath" + "regexp" "strconv" "testing" "time" @@ -23,35 +23,33 @@ import ( "github.com/DataDog/datadog-agent/pkg/network/protocols/http/testutil" "github.com/DataDog/datadog-agent/pkg/security/utils" "github.com/DataDog/datadog-agent/pkg/util/log" + procutil "github.com/DataDog/datadog-agent/pkg/util/testutil" dockerutils "github.com/DataDog/datadog-agent/pkg/util/testutil/docker" ) -// SampleName represents the name of the sample binary. -type SampleName string +// sampleName represents the name of the sample binary. +type sampleName string const ( // CudaSample is the sample binary that uses CUDA. - CudaSample SampleName = "cudasample" + CudaSample sampleName = "cudasample" ) -// DockerImage represents the Docker image to use for running the sample binary. 
+type dockerImage string + +var startedPattern = regexp.MustCompile("Starting CudaSample program") +var finishedPattern = regexp.MustCompile("CUDA calls made") const ( // MinimalDockerImage is the minimal docker image, just used for running a binary - MinimalDockerImage DockerImage = "alpine:3.20.3" + MinimalDockerImage dockerImage = "alpine:3.20.3" ) type SampleArgs struct { //nolint:revive // TODO // StartWaitTimeSec represents the time in seconds to wait before the binary starting the CUDA calls StartWaitTimeSec int - // EndWaitTimeSec represents the time in seconds to wait before the binary stops after making the CUDA calls - // This is necessary because the mock CUDA calls are instant, which means that the binary will exit before the - // eBPF probe has a chance to read the events and inspect the binary. To make the behavior of the sample binary - // more predictable and avoid flakiness in the tests, we introduce a delay before the binary exits. - EndWaitTimeSec int - // CudaVisibleDevicesEnv represents the value of the CUDA_VISIBLE_DEVICES environment variable CudaVisibleDevicesEnv string @@ -60,34 +58,21 @@ type SampleArgs struct { //nolint:revive // TODO } func (a *SampleArgs) getEnv() []string { - env := []string{} if a.CudaVisibleDevicesEnv != "" { - env = append(env, fmt.Sprintf("CUDA_VISIBLE_DEVICES=%s", a.CudaVisibleDevicesEnv)) + return []string{fmt.Sprintf("CUDA_VISIBLE_DEVICES=%s", a.CudaVisibleDevicesEnv)} } - return env + return nil } func (a *SampleArgs) getCLIArgs() []string { return []string{ - strconv.Itoa(int(a.StartWaitTimeSec)), - strconv.Itoa(int(a.EndWaitTimeSec)), + strconv.Itoa(a.StartWaitTimeSec), strconv.Itoa(a.SelectedDevice), } } -// redirectReaderToLog reads from the reader and logs the output with the given prefix -func redirectReaderToLog(r io.Reader, prefix string) { - go func() { - scanner := bufio.NewScanner(r) - for scanner.Scan() { - log.Debugf("%s: %s", prefix, scanner.Text()) - } - // Automatically exits when the scanner 
reaches EOF, that is, when the command finishes - }() -} - // RunSampleWithArgs executes the sample binary and returns the command. Cleanup is configured automatically -func getBuiltSamplePath(t *testing.T, sample SampleName) string { +func getBuiltSamplePath(t *testing.T, sample sampleName) string { curDir, err := testutil.CurDir() require.NoError(t, err) @@ -100,91 +85,105 @@ func getBuiltSamplePath(t *testing.T, sample SampleName) string { return builtBin } -// GetDefaultArgs returns the default arguments for the sample binary -func GetDefaultArgs() SampleArgs { +// getDefaultArgs returns the default arguments for the sample binary +func getDefaultArgs() SampleArgs { return SampleArgs{ StartWaitTimeSec: 5, - EndWaitTimeSec: 1, // We need the process to stay active a bit so we can inspect its environment variables, if it ends too quickly we get no information CudaVisibleDevicesEnv: "", SelectedDevice: 0, } } -func runCommandAndPipeOutput(t *testing.T, command []string, args SampleArgs, logName string) *exec.Cmd { +func runCommandAndPipeOutput(t *testing.T, command []string, args SampleArgs) (cmd *exec.Cmd, err error) { command = append(command, args.getCLIArgs()...) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) - cmd := exec.Command(command[0], command[1:]...) + cmd = exec.CommandContext(ctx, command[0], command[1:]...) t.Cleanup(func() { if cmd.Process != nil { _ = cmd.Process.Kill() + _ = cmd.Wait() } }) + scanner, err := procutil.NewScanner(startedPattern, finishedPattern) + require.NoError(t, err, "failed to create pattern scanner") + defer func() { + //print the cudasample log in case there was an error + if err != nil { + scanner.PrintLogs(t) + } + }() env := args.getEnv() cmd.Env = append(cmd.Env, env...) 
+ cmd.Stdout = scanner + cmd.Stderr = scanner - stdout, err := cmd.StdoutPipe() - require.NoError(t, err) - stderr, err := cmd.StderrPipe() - require.NoError(t, err) - - redirectReaderToLog(stdout, fmt.Sprintf("%s stdout", logName)) - redirectReaderToLog(stderr, fmt.Sprintf("%s stderr", logName)) - - log.Debugf("Running command %v, env=%v", command, env) err = cmd.Start() - require.NoError(t, err) + if err != nil { + return nil, err + } - return cmd + for { + select { + case <-ctx.Done(): + if err = ctx.Err(); err != nil { + return nil, fmt.Errorf("failed to start the process %s due to: %w", command[0], err) + } + case <-scanner.DoneChan: + t.Logf("%s command succeeded", command) + return cmd, nil + case <-time.After(dockerutils.DefaultTimeout): + //setting the error explicitly to trigger the defer function + err = fmt.Errorf("%s execution attempt reached timeout %v ", CudaSample, dockerutils.DefaultTimeout) + return nil, err + } + } } // RunSample executes the sample binary and returns the command. Cleanup is configured automatically -func RunSample(t *testing.T, name SampleName) *exec.Cmd { - return RunSampleWithArgs(t, name, GetDefaultArgs()) +func RunSample(t *testing.T, name sampleName) (*exec.Cmd, error) { + return RunSampleWithArgs(t, name, getDefaultArgs()) } // RunSampleWithArgs executes the sample binary with args and returns the command. 
Cleanup is configured automatically -func RunSampleWithArgs(t *testing.T, name SampleName, args SampleArgs) *exec.Cmd { +func RunSampleWithArgs(t *testing.T, name sampleName, args SampleArgs) (*exec.Cmd, error) { builtBin := getBuiltSamplePath(t, name) - - return runCommandAndPipeOutput(t, []string{builtBin}, args, string(name)) + return runCommandAndPipeOutput(t, []string{builtBin}, args) } // RunSampleInDocker executes the sample binary in a Docker container and returns the PID of the main container process, and the container ID -func RunSampleInDocker(t *testing.T, name SampleName, image DockerImage) (int, string) { - return RunSampleInDockerWithArgs(t, name, image, GetDefaultArgs()) +func RunSampleInDocker(t *testing.T, name sampleName, image dockerImage) (int, string) { + return RunSampleInDockerWithArgs(t, name, image, getDefaultArgs()) } // RunSampleInDockerWithArgs executes the sample binary in a Docker container and returns the PID of the main container process, and the container ID -func RunSampleInDockerWithArgs(t *testing.T, name SampleName, image DockerImage, args SampleArgs) (int, string) { +func RunSampleInDockerWithArgs(t *testing.T, name sampleName, image dockerImage, args SampleArgs) (int, string) { builtBin := getBuiltSamplePath(t, name) containerName := fmt.Sprintf("gpu-testutil-%s", utils.RandString(10)) - mountArg := fmt.Sprintf("%s:%s", builtBin, builtBin) - - command := []string{"docker", "run", "--rm", "-v", mountArg, "--name", containerName} + scanner, err := procutil.NewScanner(startedPattern, finishedPattern) + require.NoError(t, err, "failed to create pattern scanner") - // Pass environment variables to the container as docker args - for _, env := range args.getEnv() { - command = append(command, "-e", env) - } + dockerConfig := dockerutils.NewRunConfig(containerName, + dockerutils.DefaultTimeout, + dockerutils.DefaultRetries, + scanner, + args.getEnv(), + string(image), + builtBin, + args.getCLIArgs(), + map[string]string{builtBin: 
builtBin}) - command = append(command, string(image), builtBin) - - _ = runCommandAndPipeOutput(t, command, args, string(name)) + require.NoError(t, dockerutils.Run(t, dockerConfig)) var dockerPID int64 var dockerContainerID string - var err error - // The docker container might take a bit to start, so we retry until we get the PID - require.EventuallyWithT(t, func(c *assert.CollectT) { - dockerPID, err = dockerutils.GetMainPID(containerName) - assert.NoError(c, err) - }, 1*time.Second, 100*time.Millisecond, "failed to get docker PID") - - require.EventuallyWithT(t, func(c *assert.CollectT) { - dockerContainerID, err = dockerutils.GetContainerID(containerName) - assert.NoError(c, err) - }, 1*time.Second, 100*time.Millisecond, "failed to get docker container ID") + + dockerPID, err = dockerutils.GetMainPID(containerName) + assert.NoError(t, err, "failed to get docker PID") + dockerContainerID, err = dockerutils.GetContainerID(containerName) + assert.NoError(t, err, "failed to get docker container ID") log.Debugf("Sample binary %s running in Docker container %s (CID=%s) with PID %d", name, containerName, dockerContainerID, dockerPID) diff --git a/pkg/network/usm/usm_http2_monitor_test.go b/pkg/network/usm/usm_http2_monitor_test.go index 2e92c2647989b..438dd4cf8ccd4 100644 --- a/pkg/network/usm/usm_http2_monitor_test.go +++ b/pkg/network/usm/usm_http2_monitor_test.go @@ -78,8 +78,7 @@ type usmHTTP2Suite struct { } func (s *usmHTTP2Suite) getCfg() *config.Config { - cfg := config.New() - cfg.EnableIstioMonitoring = false + cfg := utils.NewUSMEmptyConfig() cfg.EnableHTTP2Monitoring = true cfg.EnableGoTLSSupport = s.isTLS cfg.GoTLSExcludeSelf = s.isTLS @@ -1510,9 +1509,7 @@ func (s *usmHTTP2Suite) TestRawHuffmanEncoding() { func TestHTTP2InFlightMapCleaner(t *testing.T) { skipIfKernelNotSupported(t) - cfg := config.New() - cfg.EnableGoTLSSupport = false - cfg.EnableIstioMonitoring = false + cfg := utils.NewUSMEmptyConfig() cfg.EnableHTTP2Monitoring = true 
cfg.HTTP2DynamicTableMapCleanerInterval = 5 * time.Second cfg.HTTPIdleConnectionTTL = time.Second diff --git a/tasks/libs/common/junit_upload_core.py b/tasks/libs/common/junit_upload_core.py index 54d01fc2af0a4..b7282d9bf4ebc 100644 --- a/tasks/libs/common/junit_upload_core.py +++ b/tasks/libs/common/junit_upload_core.py @@ -29,14 +29,17 @@ E2E_INTERNAL_ERROR_STRING = "E2E INTERNAL ERROR" CODEOWNERS_ORG_PREFIX = "@DataDog/" REPO_NAME_PREFIX = "github.com/DataDog/datadog-agent/" -if platform.system() == "Windows": - DATADOG_CI_COMMAND = [r"c:\devtools\datadog-ci\datadog-ci", "junit", "upload"] -else: - DATADOG_CI_COMMAND = [which("datadog-ci"), "junit", "upload"] JOB_ENV_FILE_NAME = "job_env.txt" TAGS_FILE_NAME = "tags.txt" +def get_datadog_ci_command(): + path_datadog_ci = which("datadog-ci") + if path_datadog_ci is None: + raise FileNotFoundError("datadog-ci command not found") + return path_datadog_ci + + def enrich_junitxml(xml_path: str, flavor: AgentFlavor): """ Modifies the JUnit XML file: @@ -232,6 +235,7 @@ def upload_junitxmls(team_dir: Path): """ Upload all per-team split JUnit XMLs from given directory. 
""" + datadog_ci_command = [get_datadog_ci_command(), "junit", "upload"] additional_tags = read_additional_tags(team_dir.parent) process_env = _update_environ(team_dir.parent) processes = [] @@ -242,7 +246,7 @@ def upload_junitxmls(team_dir: Path): for flags, files in xml_files.items(): args = set_tags(owner, flavor, flags, additional_tags, files[0]) args.extend(files) - processes.append(Popen(DATADOG_CI_COMMAND + args, bufsize=-1, env=process_env, stdout=PIPE, stderr=PIPE)) + processes.append(Popen(datadog_ci_command + args, bufsize=-1, env=process_env, stdout=PIPE, stderr=PIPE)) for process in processes: stdout, stderr = process.communicate() @@ -250,7 +254,7 @@ def upload_junitxmls(team_dir: Path): print(f" Uploaded {len(tuple(team_dir.iterdir()))} files for {team_dir.name}") if stderr: print(f"Failed uploading junit:\n{stderr.decode()}", file=sys.stderr) - raise CalledProcessError(process.returncode, DATADOG_CI_COMMAND) + raise CalledProcessError(process.returncode, datadog_ci_command) return "" # For ThreadPoolExecutor.map. Without this it prints None in the log output. diff --git a/test/new-e2e/pkg/environments/aws/kubernetes/eks.go b/test/new-e2e/pkg/environments/aws/kubernetes/eks.go index 22971ae20f9ed..92262aa9d7380 100644 --- a/test/new-e2e/pkg/environments/aws/kubernetes/eks.go +++ b/test/new-e2e/pkg/environments/aws/kubernetes/eks.go @@ -112,6 +112,15 @@ func EKSRunFunc(ctx *pulumi.Context, env *environments.Kubernetes, params *Provi // Deploy the agent if params.agentOptions != nil { params.agentOptions = append(params.agentOptions, kubernetesagentparams.WithPulumiResourceOptions(utils.PulumiDependsOn(cluster)), kubernetesagentparams.WithFakeintake(fakeIntake), kubernetesagentparams.WithTags([]string{"stackid:" + ctx.Stack()})) + + eksParams, err := eks.NewParams(params.eksOptions...) 
+ if err != nil { + return err + } + if eksParams.WindowsNodeGroup { + params.agentOptions = append(params.agentOptions, kubernetesagentparams.WithDeployWindows()) + } + kubernetesAgent, err := helm.NewKubernetesAgent(&awsEnv, "eks", cluster.KubeProvider, params.agentOptions...) if err != nil { return err @@ -126,7 +135,7 @@ func EKSRunFunc(ctx *pulumi.Context, env *environments.Kubernetes, params *Provi } // Deploy standalone dogstatsd if params.deployDogstatsd { - if _, err := dogstatsdstandalone.K8sAppDefinition(&awsEnv, cluster.KubeProvider, "dogstatsd-standalone", fakeIntake, true, ""); err != nil { + if _, err := dogstatsdstandalone.K8sAppDefinition(&awsEnv, cluster.KubeProvider, "dogstatsd-standalone", fakeIntake, true, "", utils.PulumiDependsOn(cluster)); err != nil { return err } } @@ -138,7 +147,7 @@ func EKSRunFunc(ctx *pulumi.Context, env *environments.Kubernetes, params *Provi } // dogstatsd clients that report to the Agent - if _, err := dogstatsd.K8sAppDefinition(&awsEnv, cluster.KubeProvider, "workload-dogstatsd", 8125, "/var/run/datadog/dsd.socket", utils.PulumiDependsOn(cluster)); err != nil { + if _, err := dogstatsd.K8sAppDefinition(&awsEnv, cluster.KubeProvider, "workload-dogstatsd", 8125, "/var/run/datadog/dsd.socket", utils.PulumiDependsOn(workloadWithCRDDeps...)); err != nil { return err } @@ -157,7 +166,7 @@ func EKSRunFunc(ctx *pulumi.Context, env *environments.Kubernetes, params *Provi return err } - if _, err := mutatedbyadmissioncontroller.K8sAppDefinition(&awsEnv, cluster.KubeProvider, "workload-mutated", "workload-mutated-lib-injection", utils.PulumiDependsOn(cluster)); err != nil { + if _, err := mutatedbyadmissioncontroller.K8sAppDefinition(&awsEnv, cluster.KubeProvider, "workload-mutated", "workload-mutated-lib-injection", utils.PulumiDependsOn(workloadWithCRDDeps...)); err != nil { return err } diff --git a/test/new-e2e/pkg/environments/aws/kubernetes/kind.go b/test/new-e2e/pkg/environments/aws/kubernetes/kind.go index 
60da0620167f9..2230f1a9c1736 100644 --- a/test/new-e2e/pkg/environments/aws/kubernetes/kind.go +++ b/test/new-e2e/pkg/environments/aws/kubernetes/kind.go @@ -129,19 +129,17 @@ func KindRunFunc(ctx *pulumi.Context, env *environments.Kubernetes, params *Prov var dependsOnCrd []pulumi.Resource if params.agentOptions != nil { - kindClusterName := ctx.Stack() - helmValues := fmt.Sprintf(` + helmValues := ` datadog: kubelet: tlsVerify: false - clusterName: "%s" agents: useHostNetwork: true -`, kindClusterName) +` - newOpts := []kubernetesagentparams.Option{kubernetesagentparams.WithHelmValues(helmValues), kubernetesagentparams.WithTags([]string{"stackid:" + ctx.Stack()})} + newOpts := []kubernetesagentparams.Option{kubernetesagentparams.WithHelmValues(helmValues), kubernetesagentparams.WithClusterName(kindCluster.ClusterName), kubernetesagentparams.WithTags([]string{"stackid:" + ctx.Stack()})} params.agentOptions = append(newOpts, params.agentOptions...) - agent, err := helm.NewKubernetesAgent(&awsEnv, kindClusterName, kubeProvider, params.agentOptions...) + agent, err := helm.NewKubernetesAgent(&awsEnv, "kind", kubeProvider, params.agentOptions...) 
if err != nil { return err } @@ -182,7 +180,7 @@ agents: return err } - if _, err := mutatedbyadmissioncontroller.K8sAppDefinition(&awsEnv, kubeProvider, "workload-mutated", "workload-mutated-lib-injection"); err != nil { + if _, err := mutatedbyadmissioncontroller.K8sAppDefinition(&awsEnv, kubeProvider, "workload-mutated", "workload-mutated-lib-injection", utils.PulumiDependsOn(dependsOnCrd...)); err != nil { return err } diff --git a/test/new-e2e/pkg/utils/e2e/client/agent_commands.go b/test/new-e2e/pkg/utils/e2e/client/agent_commands.go index d20813afe89b2..c812050d986c0 100644 --- a/test/new-e2e/pkg/utils/e2e/client/agent_commands.go +++ b/test/new-e2e/pkg/utils/e2e/client/agent_commands.go @@ -175,6 +175,15 @@ func (agent *agentCommandRunner) StatusWithError(commandArgs ...agentclient.Agen }, err } +// JMX run the jmx command and returns a Status struct and error +func (agent *agentCommandRunner) JMX(commandArgs ...agentclient.AgentArgsOption) (*agentclient.Status, error) { + status, err := agent.executeCommandWithError("jmx", commandArgs...) + + return &agentclient.Status{ + Content: status, + }, err +} + // waitForReadyTimeout blocks up to timeout waiting for agent to be ready. // Retries every 100 ms up to timeout. // Returns error on failure. 
diff --git a/test/new-e2e/pkg/utils/e2e/client/agentclient/agent.go b/test/new-e2e/pkg/utils/e2e/client/agentclient/agent.go index d6f94f9c3222f..e3a3f11bd6219 100644 --- a/test/new-e2e/pkg/utils/e2e/client/agentclient/agent.go +++ b/test/new-e2e/pkg/utils/e2e/client/agentclient/agent.go @@ -59,6 +59,9 @@ type Agent interface { // StatusWithError runs status command and returns a Status struct and error StatusWithError(commandArgs ...AgentArgsOption) (*Status, error) + + // JMX run the jmx command and returns a Status struct and error + JMX(commandArgs ...AgentArgsOption) (*Status, error) } // Status contains the Agent status content diff --git a/test/new-e2e/tests/agent-metrics-logs/jmxfetch/docs.go b/test/new-e2e/tests/agent-metrics-logs/jmxfetch/docs.go new file mode 100644 index 0000000000000..1005b5a84b028 --- /dev/null +++ b/test/new-e2e/tests/agent-metrics-logs/jmxfetch/docs.go @@ -0,0 +1,7 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package jmxfetch contains e2e tests for the JMXFetch. +package jmxfetch diff --git a/test/new-e2e/tests/agent-metrics-logs/jmxfetch/jmxfetch_nix_test.go b/test/new-e2e/tests/agent-metrics-logs/jmxfetch/jmxfetch_nix_test.go new file mode 100644 index 0000000000000..c8f573f95307d --- /dev/null +++ b/test/new-e2e/tests/agent-metrics-logs/jmxfetch/jmxfetch_nix_test.go @@ -0,0 +1,137 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package jmxfetch + +import ( + _ "embed" + "strings" + "testing" + "time" + + "github.com/DataDog/datadog-agent/test/fakeintake/client" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" + awsdocker "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/docker" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client/agentclient" + + "github.com/DataDog/test-infra-definitions/components/datadog/apps/jmxfetch" + "github.com/DataDog/test-infra-definitions/components/datadog/dockeragentparams" + "github.com/DataDog/test-infra-definitions/components/docker" + + "github.com/pulumi/pulumi/sdk/v3/go/pulumi" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +//go:embed testdata/docker-labels.yaml +var jmxFetchADLabels string + +var jmxFetchADLabelsDockerComposeManifest = docker.ComposeInlineManifest{ + Name: "jmx-test-app-labels", + Content: pulumi.String(jmxFetchADLabels), +} + +type jmxfetchNixTest struct { + e2e.BaseSuite[environments.DockerHost] +} + +func TestJMXFetchNix(t *testing.T) { + t.Parallel() + suiteParams := []e2e.SuiteOption{e2e.WithProvisioner( + awsdocker.Provisioner( + awsdocker.WithAgentOptions( + dockeragentparams.WithLogs(), + dockeragentparams.WithJMX(), + dockeragentparams.WithExtraComposeInlineManifest( + jmxfetch.DockerComposeManifest, + jmxFetchADLabelsDockerComposeManifest, + ), + )))} + + e2e.Run(t, + &jmxfetchNixTest{}, + suiteParams..., + ) +} + +func (j *jmxfetchNixTest) Test_FakeIntakeReceivesJMXFetchMetrics() { + metricNames := []string{ + "test.e2e.jmxfetch.counter_100", + "test.e2e.jmxfetch.gauge_200", + "test.e2e.jmxfetch.increment_counter", + } + start := time.Now() + j.EventuallyWithT(func(c *assert.CollectT) { + for _, metricName := range metricNames { + metrics, err := j.Env().FakeIntake.Client(). 
+ FilterMetrics(metricName, client.WithMetricValueHigherThan(0)) + assert.NoError(c, err) + assert.NotEmpty(j.T(), metrics, "no metrics found for", metricName) + } + }, 5*time.Minute, 10*time.Second) + j.T().Logf("Started: %v and took %v", start, time.Since(start)) + + // Helpful debug when things fail + if j.T().Failed() { + names, err := j.Env().FakeIntake.Client().GetMetricNames() + assert.NoError(j.T(), err) + for _, name := range names { + j.T().Logf("Got metric: %q", name) + } + for _, metricName := range metricNames { + tjc, err := j.Env().FakeIntake.Client().FilterMetrics(metricName) + assert.NoError(j.T(), err) + assert.NotEmpty(j.T(), tjc, "Filter metrics was empty for", metricName) + if len(tjc) > 0 { + for _, point := range tjc[0].Points { + j.T().Logf("Found metrics: %q \n%v - %v \n%q", tjc[0].Metric, point, point.Value, tjc[0].Type) + } + } + } + } +} + +func (j *jmxfetchNixTest) TestJMXListCollectedWithRateMetrics() { + status, err := j.Env().Agent.Client.JMX(agentclient.WithArgs([]string{"list", "collected", "with-rate-metrics"})) + require.NoError(j.T(), err) + assert.NotEmpty(j.T(), status.Content) + + lines := strings.Split(status.Content, "\n") + var consoleReporterOut []string + var foundShouldBe100, foundShouldBe200, foundIncrementCounter bool + for _, line := range lines { + if strings.Contains(line, "ConsoleReporter") { + consoleReporterOut = append(consoleReporterOut, line) + if strings.Contains(line, "dd.test.sample:name=default,type=simple") { + if strings.Contains(line, "ShouldBe100") { + foundShouldBe100 = true + } + if strings.Contains(line, "ShouldBe200") { + foundShouldBe200 = true + } + if strings.Contains(line, "IncrementCounter") { + foundIncrementCounter = true + } + } + } + } + + assert.NotEmpty(j.T(), consoleReporterOut, "Did not find ConsoleReporter output in status") + assert.True(j.T(), foundShouldBe100, + "Did not find bean name: dd.test.sample:name=default,type=simple - Attribute name: ShouldBe100 - Attribute type: 
java.lang.Integer") + assert.True(j.T(), foundShouldBe200, + "Did not find bean name: dd.test.sample:name=default,type=simple - Attribute name: ShouldBe200 - Attribute type: java.lang.Double") + assert.True(j.T(), foundIncrementCounter, + "Did not find bean name: dd.test.sample:name=default,type=simple - Attribute name: IncrementCounter - Attribute type: java.lang.Integer") + + // Helpful debug when things fail + if j.T().Failed() { + for _, line := range consoleReporterOut { + j.T().Log(line) + } + } +} diff --git a/test/new-e2e/tests/agent-metrics-logs/jmxfetch/testdata/docker-labels.yaml b/test/new-e2e/tests/agent-metrics-logs/jmxfetch/testdata/docker-labels.yaml new file mode 100644 index 0000000000000..487a8a3dba6b5 --- /dev/null +++ b/test/new-e2e/tests/agent-metrics-logs/jmxfetch/testdata/docker-labels.yaml @@ -0,0 +1,43 @@ +--- +services: + + jmx-test-app: + labels: + com.datadoghq.ad.checks: | + { + "test": { + "init_config": { + "is_jmx": true, + "collect_default_metrics": true, + "new_gc_metrics": true, + "conf": [ + { + "include": { + "domain": "dd.test.sample", + "type": "simple", + "attribute": { + "ShouldBe100": { + "metric_type": "gauge", + "alias": "test.e2e.jmxfetch.counter_100" + }, + "ShouldBe200": { + "metric_type": "gauge", + "alias": "test.e2e.jmxfetch.gauge_200" + }, + "IncrementCounter": { + "metric_type": "counter", + "alias": "test.e2e.jmxfetch.increment_counter" + } + } + } + } + ] + }, + "instances": [ + { + "host": "%%host%%", + "port": "9010" + } + ] + } + } diff --git a/test/new-e2e/tests/containers/base_test.go b/test/new-e2e/tests/containers/base_test.go index 84bce8ea70197..7bc4735a43dd2 100644 --- a/test/new-e2e/tests/containers/base_test.go +++ b/test/new-e2e/tests/containers/base_test.go @@ -14,7 +14,6 @@ import ( "github.com/samber/lo" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/suite" "gopkg.in/yaml.v3" "gopkg.in/zorkian/go-datadog-api.v2" @@ -23,39 +22,21 @@ import ( 
"github.com/DataDog/datadog-agent/pkg/util/pointer" "github.com/DataDog/datadog-agent/test/fakeintake/aggregator" fakeintake "github.com/DataDog/datadog-agent/test/fakeintake/client" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner/parameters" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" ) -type baseSuite struct { - suite.Suite +type baseSuite[Env any] struct { + e2e.BaseSuite[Env] - startTime time.Time - endTime time.Time - datadogClient *datadog.Client - Fakeintake *fakeintake.Client - clusterName string + Fakeintake *fakeintake.Client + clusterName string } -func (suite *baseSuite) SetupSuite() { - apiKey, err := runner.GetProfile().SecretStore().Get(parameters.APIKey) - suite.Require().NoError(err) - appKey, err := runner.GetProfile().SecretStore().Get(parameters.APPKey) - suite.Require().NoError(err) - suite.datadogClient = datadog.NewClient(apiKey, appKey) - - suite.startTime = time.Now() -} - -func (suite *baseSuite) TearDownSuite() { - suite.endTime = time.Now() -} - -func (suite *baseSuite) BeforeTest(suiteName, testName string) { +func (suite *baseSuite[Env]) BeforeTest(suiteName, testName string) { suite.T().Logf("START %s/%s %s", suiteName, testName, time.Now()) } -func (suite *baseSuite) AfterTest(suiteName, testName string) { +func (suite *baseSuite[Env]) AfterTest(suiteName, testName string) { suite.T().Logf("FINISH %s/%s %s", suiteName, testName, time.Now()) } @@ -98,7 +79,7 @@ func (mc *myCollectT) Errorf(format string, args ...interface{}) { mc.CollectT.Errorf(format, args...) 
} -func (suite *baseSuite) testMetric(args *testMetricArgs) { +func (suite *baseSuite[Env]) testMetric(args *testMetricArgs) { prettyMetricQuery := fmt.Sprintf("%s{%s}", args.Filter.Name, strings.Join(args.Filter.Tags, ",")) suite.Run("metric "+prettyMetricQuery, func() { @@ -107,7 +88,7 @@ func (suite *baseSuite) testMetric(args *testMetricArgs) { expectedTags = lo.Map(*args.Expect.Tags, func(tag string, _ int) *regexp.Regexp { return regexp.MustCompile(tag) }) } - var optionalTags []*regexp.Regexp + optionalTags := []*regexp.Regexp{regexp.MustCompile("stackid:.*")} // The stackid tag is added by the framework itself to allow filtering on the stack id if args.Optional.Tags != nil { optionalTags = lo.Map(*args.Optional.Tags, func(tag string, _ int) *regexp.Regexp { return regexp.MustCompile(tag) }) } @@ -120,7 +101,7 @@ func (suite *baseSuite) testMetric(args *testMetricArgs) { return "filter_tag_" + tag }) - if _, err := suite.datadogClient.PostEvent(&datadog.Event{ + if _, err := suite.DatadogClient().PostEvent(&datadog.Event{ Title: pointer.Ptr(fmt.Sprintf("testMetric %s", prettyMetricQuery)), Text: pointer.Ptr(fmt.Sprintf(`%%%%%% ### Result @@ -227,7 +208,7 @@ type testLogExpectArgs struct { Message string } -func (suite *baseSuite) testLog(args *testLogArgs) { +func (suite *baseSuite[Env]) testLog(args *testLogArgs) { prettyLogQuery := fmt.Sprintf("%s{%s}", args.Filter.Service, strings.Join(args.Filter.Tags, ",")) suite.Run("log "+prettyLogQuery, func() { @@ -249,7 +230,7 @@ func (suite *baseSuite) testLog(args *testLogArgs) { return "filter_tag_" + tag }) - if _, err := suite.datadogClient.PostEvent(&datadog.Event{ + if _, err := suite.DatadogClient().PostEvent(&datadog.Event{ Title: pointer.Ptr(fmt.Sprintf("testLog %s", prettyLogQuery)), Text: pointer.Ptr(fmt.Sprintf(`%%%%%% ### Result @@ -356,7 +337,7 @@ type testCheckRunExpectArgs struct { AcceptUnexpectedTags bool } -func (suite *baseSuite) testCheckRun(args *testCheckRunArgs) { +func (suite 
*baseSuite[Env]) testCheckRun(args *testCheckRunArgs) { prettyCheckRunQuery := fmt.Sprintf("%s{%s}", args.Filter.Name, strings.Join(args.Filter.Tags, ",")) suite.Run("checkRun "+prettyCheckRunQuery, func() { @@ -378,7 +359,7 @@ func (suite *baseSuite) testCheckRun(args *testCheckRunArgs) { return "filter_tag_" + tag }) - if _, err := suite.datadogClient.PostEvent(&datadog.Event{ + if _, err := suite.DatadogClient().PostEvent(&datadog.Event{ Title: pointer.Ptr(fmt.Sprintf("testCheckRun %s", prettyCheckRunQuery)), Text: pointer.Ptr(fmt.Sprintf(`%%%%%% ### Result diff --git a/test/new-e2e/tests/containers/docker_test.go b/test/new-e2e/tests/containers/docker_test.go index a7d27f4fa2987..985d80e46b757 100644 --- a/test/new-e2e/tests/containers/docker_test.go +++ b/test/new-e2e/tests/containers/docker_test.go @@ -6,53 +6,24 @@ package containers import ( - "context" - "encoding/json" - "fmt" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/components" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/infra" - "github.com/DataDog/test-infra-definitions/scenarios/aws/ec2" - "github.com/pulumi/pulumi/sdk/v3/go/auto" - "github.com/stretchr/testify/suite" - "os" "testing" + + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" + awsdocker "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/docker" ) type DockerSuite struct { - baseSuite + baseSuite[environments.DockerHost] } func TestDockerSuite(t *testing.T) { - suite.Run(t, &DockerSuite{}) + e2e.Run(t, &DockerSuite{}, e2e.WithProvisioner(awsdocker.Provisioner(awsdocker.WithTestingWorkload()))) } func (suite *DockerSuite) SetupSuite() { - ctx := context.Background() - - stackConfig := runner.ConfigMap{ - "ddagent:deploy": auto.ConfigValue{Value: "true"}, - "ddagent:fakeintake": auto.ConfigValue{Value: "true"}, - } - - _, stackOutput, err := 
infra.GetStackManager().GetStack(ctx, "dockerstack", stackConfig, ec2.VMRunWithDocker, false) - suite.Require().NoError(err) - - var fakeintake components.FakeIntake - fiSerialized, err := json.Marshal(stackOutput.Outputs["dd-Fakeintake-aws-aws-vm"].Value) - suite.Require().NoError(err) - suite.Require().NoError(fakeintake.Import(fiSerialized, &fakeintake)) - suite.Require().NoError(fakeintake.Init(suite)) - suite.Fakeintake = fakeintake.Client() - - var host components.RemoteHost - hostSerialized, err := json.Marshal(stackOutput.Outputs["dd-Host-aws-vm"].Value) - suite.Require().NoError(err) - suite.Require().NoError(host.Import(hostSerialized, &host)) - suite.Require().NoError(host.Init(suite)) - suite.clusterName = fmt.Sprintf("%s-%v", os.Getenv("USER"), host.Address) - suite.baseSuite.SetupSuite() + suite.Fakeintake = suite.Env().FakeIntake.Client() } func (suite *DockerSuite) TestDSDWithUDS() { diff --git a/test/new-e2e/tests/containers/dump_cluster_state.go b/test/new-e2e/tests/containers/dump_cluster_state.go deleted file mode 100644 index ddf546d889d09..0000000000000 --- a/test/new-e2e/tests/containers/dump_cluster_state.go +++ /dev/null @@ -1,341 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2023-present Datadog, Inc. 
- -package containers - -import ( - "bytes" - "context" - "encoding/base64" - "fmt" - "io" - "net" - "os" - "os/user" - "strings" - "sync" - - "github.com/DataDog/datadog-agent/pkg/util/pointer" - awsconfig "github.com/aws/aws-sdk-go-v2/config" - awsec2 "github.com/aws/aws-sdk-go-v2/service/ec2" - awsec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" - awseks "github.com/aws/aws-sdk-go-v2/service/eks" - awsekstypes "github.com/aws/aws-sdk-go-v2/service/eks/types" - "golang.org/x/crypto/ssh" - "golang.org/x/crypto/ssh/agent" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/cli-runtime/pkg/genericiooptions" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/clientcmd" - clientcmdapi "k8s.io/client-go/tools/clientcmd/api" - kubectlget "k8s.io/kubectl/pkg/cmd/get" - kubectlutil "k8s.io/kubectl/pkg/cmd/util" -) - -func dumpEKSClusterState(ctx context.Context, name string) (ret string) { - var out strings.Builder - defer func() { ret = out.String() }() - - cfg, err := awsconfig.LoadDefaultConfig(ctx) - if err != nil { - fmt.Fprintf(&out, "Failed to load AWS config: %v\n", err) - return - } - - client := awseks.NewFromConfig(cfg) - - clusterDescription, err := client.DescribeCluster(ctx, &awseks.DescribeClusterInput{ - Name: &name, - }) - if err != nil { - fmt.Fprintf(&out, "Failed to describe cluster %s: %v\n", name, err) - return - } - - cluster := clusterDescription.Cluster - if cluster.Status != awsekstypes.ClusterStatusActive { - fmt.Fprintf(&out, "EKS cluster %s is not in active state. 
Current status: %s\n", name, cluster.Status) - return - } - - kubeconfig := clientcmdapi.NewConfig() - kubeconfig.Clusters[name] = &clientcmdapi.Cluster{ - Server: *cluster.Endpoint, - } - if kubeconfig.Clusters[name].CertificateAuthorityData, err = base64.StdEncoding.DecodeString(*cluster.CertificateAuthority.Data); err != nil { - fmt.Fprintf(&out, "Failed to decode certificate authority: %v\n", err) - } - kubeconfig.AuthInfos[name] = &clientcmdapi.AuthInfo{ - Exec: &clientcmdapi.ExecConfig{ - APIVersion: "client.authentication.k8s.io/v1beta1", - Command: "aws", - Args: []string{ - "--region", - cfg.Region, - "eks", - "get-token", - "--cluster-name", - name, - "--output", - "json", - }, - }, - } - kubeconfig.Contexts[name] = &clientcmdapi.Context{ - Cluster: name, - AuthInfo: name, - } - kubeconfig.CurrentContext = name - - dumpK8sClusterState(ctx, kubeconfig, &out) - - return -} - -func dumpKindClusterState(ctx context.Context, name string) (ret string) { - var out strings.Builder - defer func() { ret = out.String() }() - - cfg, err := awsconfig.LoadDefaultConfig(ctx) - if err != nil { - fmt.Fprintf(&out, "Failed to load AWS config: %v\n", err) - return - } - - ec2Client := awsec2.NewFromConfig(cfg) - - user, _ := user.Current() - instancesDescription, err := ec2Client.DescribeInstances(ctx, &awsec2.DescribeInstancesInput{ - Filters: []awsec2types.Filter{ - { - Name: pointer.Ptr("tag:managed-by"), - Values: []string{"pulumi"}, - }, - { - Name: pointer.Ptr("tag:username"), - Values: []string{user.Username}, - }, - { - Name: pointer.Ptr("tag:Name"), - Values: []string{name + "-aws-kind"}, - }, - }, - }) - if err != nil { - fmt.Fprintf(&out, "Failed to describe instances: %v\n", err) - return - } - - if instancesDescription == nil || (len(instancesDescription.Reservations) != 1 && len(instancesDescription.Reservations[0].Instances) != 1) { - fmt.Fprintf(&out, "Didn’t find exactly one instance for cluster %s\n", name) - return - } - - instanceIP := 
instancesDescription.Reservations[0].Instances[0].PrivateIpAddress - - auth := []ssh.AuthMethod{} - - if sshAgentSocket, found := os.LookupEnv("SSH_AUTH_SOCK"); found { - sshAgent, err := net.Dial("unix", sshAgentSocket) - if err != nil { - fmt.Fprintf(&out, "Failed to connect to SSH agent: %v\n", err) - return - } - defer sshAgent.Close() - - auth = append(auth, ssh.PublicKeysCallback(agent.NewClient(sshAgent).Signers)) - } - - if sshKeyPath, found := os.LookupEnv("E2E_PRIVATE_KEY_PATH"); found { - sshKey, err := os.ReadFile(sshKeyPath) - if err != nil { - fmt.Fprintf(&out, "Failed to read SSH key: %v\n", err) - return - } - - signer, err := ssh.ParsePrivateKey(sshKey) - if err != nil { - fmt.Fprintf(&out, "Failed to parse SSH key: %v\n", err) - return - } - - auth = append(auth, ssh.PublicKeys(signer)) - } - - var sshClient *ssh.Client - err = nil - for _, user := range []string{"ec2-user", "ubuntu"} { - sshClient, err = ssh.Dial("tcp", *instanceIP+":22", &ssh.ClientConfig{ - User: user, - Auth: auth, - HostKeyCallback: ssh.InsecureIgnoreHostKey(), - }) - if err == nil { - break - } - } - if err != nil { - fmt.Fprintf(&out, "Failed to dial SSH server %s: %v\n", *instanceIP, err) - return - } - defer sshClient.Close() - - sshSession, err := sshClient.NewSession() - if err != nil { - fmt.Fprintf(&out, "Failed to create SSH session: %v\n", err) - return - } - defer sshSession.Close() - - stdout, err := sshSession.StdoutPipe() - if err != nil { - fmt.Fprintf(&out, "Failed to create stdout pipe: %v\n", err) - return - } - - stderr, err := sshSession.StderrPipe() - if err != nil { - fmt.Fprintf(&out, "Failed to create stderr pipe: %v\n", err) - return - } - - err = sshSession.Start("kind get kubeconfig --name \"$(kind get clusters | head -n 1)\"") - if err != nil { - fmt.Fprintf(&out, "Failed to start remote command: %v\n", err) - return - } - - var stdoutBuf bytes.Buffer - - var wg sync.WaitGroup - wg.Add(2) - - go func() { - if _, err := io.Copy(&stdoutBuf, stdout); 
err != nil { - fmt.Fprintf(&out, "Failed to read stdout: %v\n", err) - } - wg.Done() - }() - - go func() { - if _, err := io.Copy(&out, stderr); err != nil { - fmt.Fprintf(&out, "Failed to read stderr: %v\n", err) - } - wg.Done() - }() - - err = sshSession.Wait() - wg.Wait() - if err != nil { - fmt.Fprintf(&out, "Remote command exited with error: %v\n", err) - return - } - - kubeconfig, err := clientcmd.Load(stdoutBuf.Bytes()) - if err != nil { - fmt.Fprintf(&out, "Failed to parse kubeconfig: %v\n", err) - return - } - - for _, cluster := range kubeconfig.Clusters { - cluster.Server = strings.Replace(cluster.Server, "0.0.0.0", *instanceIP, 1) - cluster.CertificateAuthorityData = nil - cluster.InsecureSkipTLSVerify = true - } - - dumpK8sClusterState(ctx, kubeconfig, &out) - - return -} - -func dumpK8sClusterState(ctx context.Context, kubeconfig *clientcmdapi.Config, out *strings.Builder) { - kubeconfigFile, err := os.CreateTemp("", "kubeconfig") - if err != nil { - fmt.Fprintf(out, "Failed to create kubeconfig temporary file: %v\n", err) - return - } - defer os.Remove(kubeconfigFile.Name()) - - if err := clientcmd.WriteToFile(*kubeconfig, kubeconfigFile.Name()); err != nil { - fmt.Fprintf(out, "Failed to write kubeconfig file: %v\n", err) - return - } - - if err := kubeconfigFile.Close(); err != nil { - fmt.Fprintf(out, "Failed to close kubeconfig file: %v\n", err) - } - - fmt.Fprintf(out, "\n") - - configFlags := genericclioptions.NewConfigFlags(false) - kubeconfigFileName := kubeconfigFile.Name() - configFlags.KubeConfig = &kubeconfigFileName - - factory := kubectlutil.NewFactory(configFlags) - - streams := genericiooptions.IOStreams{ - Out: out, - ErrOut: out, - } - - getCmd := kubectlget.NewCmdGet("", factory, streams) - getCmd.SetOut(out) - getCmd.SetErr(out) - getCmd.SetContext(ctx) - getCmd.SetArgs([]string{ - "nodes,all", - "--all-namespaces", - "-o", - "wide", - }) - if err := getCmd.ExecuteContext(ctx); err != nil { - fmt.Fprintf(out, "Failed to execute 
Get command: %v\n", err) - return - } - - // Get the logs of containers that have restarted - config, err := clientcmd.BuildConfigFromFlags("", kubeconfigFile.Name()) - if err != nil { - fmt.Fprintf(out, "Failed to build Kubernetes config: %v\n", err) - return - } - k8sClient, err := kubernetes.NewForConfig(config) - if err != nil { - fmt.Fprintf(out, "Failed to create Kubernetes client: %v\n", err) - return - } - - pods, err := k8sClient.CoreV1().Pods("").List(ctx, metav1.ListOptions{}) - if err != nil { - fmt.Fprintf(out, "Failed to list pods: %v\n", err) - return - } - - for _, pod := range pods.Items { - for _, containerStatus := range pod.Status.ContainerStatuses { - if containerStatus.RestartCount > 0 { - fmt.Fprintf(out, "\nLOGS FOR POD %s/%s CONTAINER %s:\n", pod.Namespace, pod.Name, containerStatus.Name) - logs, err := k8sClient.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &corev1.PodLogOptions{ - Container: containerStatus.Name, - Previous: true, - // TailLines: pointer.Ptr(int64(100)), - }).Stream(ctx) - if err != nil { - fmt.Fprintf(out, "Failed to get logs: %v\n", err) - continue - } - defer logs.Close() - - _, err = io.Copy(out, logs) - if err != nil { - fmt.Fprintf(out, "Failed to copy logs: %v\n", err) - continue - } - } - } - } -} diff --git a/test/new-e2e/tests/containers/ecs_test.go b/test/new-e2e/tests/containers/ecs_test.go index 9498aa9414deb..f5603278f017e 100644 --- a/test/new-e2e/tests/containers/ecs_test.go +++ b/test/new-e2e/tests/containers/ecs_test.go @@ -7,29 +7,26 @@ package containers import ( "context" - "encoding/json" "regexp" "strings" "testing" "time" - ecsComp "github.com/DataDog/test-infra-definitions/components/ecs" - "github.com/DataDog/test-infra-definitions/scenarios/aws/ecs" - "github.com/DataDog/datadog-agent/pkg/util/pointer" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/components" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/infra" + 
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" awsconfig "github.com/aws/aws-sdk-go-v2/config" awsecs "github.com/aws/aws-sdk-go-v2/service/ecs" awsecstypes "github.com/aws/aws-sdk-go-v2/service/ecs/types" "github.com/fatih/color" - "github.com/pulumi/pulumi/sdk/v3/go/auto" "github.com/samber/lo" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" + + tifecs "github.com/DataDog/test-infra-definitions/scenarios/aws/ecs" + + envecs "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/ecs" ) const ( @@ -41,52 +38,27 @@ const ( ) type ecsSuite struct { - baseSuite - + baseSuite[environments.ECS] ecsClusterName string } func TestECSSuite(t *testing.T) { - suite.Run(t, &ecsSuite{}) + e2e.Run(t, &ecsSuite{}, e2e.WithProvisioner(envecs.Provisioner( + envecs.WithECSOptions( + tifecs.WithFargateCapacityProvider(), + tifecs.WithLinuxNodeGroup(), + tifecs.WithWindowsNodeGroup(), + tifecs.WithLinuxBottleRocketNodeGroup(), + ), + envecs.WithTestingWorkload(), + ))) } func (suite *ecsSuite) SetupSuite() { - ctx := context.Background() - - // Creating the stack - stackConfig := runner.ConfigMap{ - "ddinfra:aws/ecs/linuxECSOptimizedNodeGroup": auto.ConfigValue{Value: "true"}, - "ddinfra:aws/ecs/linuxBottlerocketNodeGroup": auto.ConfigValue{Value: "true"}, - "ddinfra:aws/ecs/windowsLTSCNodeGroup": auto.ConfigValue{Value: "true"}, - "ddagent:deploy": auto.ConfigValue{Value: "true"}, - "ddagent:fakeintake": auto.ConfigValue{Value: "true"}, - "ddtestworkload:deploy": auto.ConfigValue{Value: "true"}, - } - - _, stackOutput, err := infra.GetStackManager().GetStackNoDeleteOnFailure( - ctx, - "ecs-cluster", - ecs.Run, - infra.WithConfigMap(stackConfig), - ) - suite.Require().NoError(err) - - fakeintake := &components.FakeIntake{} - fiSerialized, err := json.Marshal(stackOutput.Outputs["dd-Fakeintake-aws-ecs"].Value) - 
suite.Require().NoError(err) - suite.Require().NoError(fakeintake.Import(fiSerialized, fakeintake)) - suite.Require().NoError(fakeintake.Init(suite)) - suite.Fakeintake = fakeintake.Client() - - clusterSerialized, err := json.Marshal(stackOutput.Outputs["dd-Cluster-ecs"].Value) - suite.Require().NoError(err) - ecsCluster := &ecsComp.ClusterOutput{} - suite.Require().NoError(ecsCluster.Import(clusterSerialized, ecsCluster)) - - suite.ecsClusterName = ecsCluster.ClusterName - suite.clusterName = suite.ecsClusterName - suite.baseSuite.SetupSuite() + suite.Fakeintake = suite.Env().FakeIntake.Client() + suite.ecsClusterName = suite.Env().ECSCluster.ClusterName + suite.clusterName = suite.Env().ECSCluster.ClusterName } func (suite *ecsSuite) TearDownSuite() { @@ -99,8 +71,8 @@ func (suite *ecsSuite) TearDownSuite() { suite.T().Log(c("https://dddev.datadoghq.com/dashboard/mnw-tdr-jd8/e2e-tests-containers-ecs?refresh_mode=paused&tpl_var_ecs_cluster_name%%5B0%%5D=%s&tpl_var_fake_intake_task_family%%5B0%%5D=%s-fakeintake-ecs&from_ts=%d&to_ts=%d&live=false", suite.ecsClusterName, strings.TrimSuffix(suite.ecsClusterName, "-ecs"), - suite.startTime.UnixMilli(), - suite.endTime.UnixMilli(), + suite.StartTime().UnixMilli(), + suite.EndTime().UnixMilli(), )) } diff --git a/test/new-e2e/tests/containers/eks_test.go b/test/new-e2e/tests/containers/eks_test.go index 6562eff6abb80..163bcdba0f0b4 100644 --- a/test/new-e2e/tests/containers/eks_test.go +++ b/test/new-e2e/tests/containers/eks_test.go @@ -6,104 +6,32 @@ package containers import ( - "context" - "encoding/json" "testing" - "github.com/DataDog/test-infra-definitions/scenarios/aws/eks" + tifeks "github.com/DataDog/test-infra-definitions/scenarios/aws/eks" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/components" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner/parameters" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/infra" - - 
"github.com/pulumi/pulumi/sdk/v3/go/auto" - "github.com/stretchr/testify/suite" - "k8s.io/client-go/tools/clientcmd" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" + awskubernetes "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/kubernetes" ) type eksSuite struct { k8sSuite - initOnly bool } func TestEKSSuite(t *testing.T) { - var initOnly bool - initOnlyParam, err := runner.GetProfile().ParamStore().GetBoolWithDefault(parameters.InitOnly, false) - if err == nil { - initOnly = initOnlyParam - } - suite.Run(t, &eksSuite{initOnly: initOnly}) + e2e.Run(t, &eksSuite{}, e2e.WithProvisioner(awskubernetes.EKSProvisioner( + awskubernetes.WithEKSOptions( + tifeks.WithLinuxNodeGroup(), + tifeks.WithWindowsNodeGroup(), + tifeks.WithBottlerocketNodeGroup(), + tifeks.WithLinuxARMNodeGroup(), + ), + awskubernetes.WithDeployDogstatsd(), + awskubernetes.WithDeployTestWorkload(), + ))) } func (suite *eksSuite) SetupSuite() { - ctx := context.Background() - - stackConfig := runner.ConfigMap{ - "ddagent:deploy": auto.ConfigValue{Value: "true"}, - "ddagent:fakeintake": auto.ConfigValue{Value: "true"}, - "ddtestworkload:deploy": auto.ConfigValue{Value: "true"}, - "dddogstatsd:deploy": auto.ConfigValue{Value: "true"}, - } - - _, stackOutput, err := infra.GetStackManager().GetStackNoDeleteOnFailure( - ctx, - "eks-cluster", - eks.Run, - infra.WithConfigMap(stackConfig), - ) - - if !suite.Assert().NoError(err) { - stackName, err := infra.GetStackManager().GetPulumiStackName("eks-cluster") - suite.Require().NoError(err) - suite.T().Log(dumpEKSClusterState(ctx, stackName)) - if !runner.GetProfile().AllowDevMode() || !*keepStacks { - infra.GetStackManager().DeleteStack(ctx, "eks-cluster", nil) - } - suite.T().FailNow() - } - - if suite.initOnly { - suite.T().Skip("E2E_INIT_ONLY is set, skipping tests") - } - - fakeintake := &components.FakeIntake{} - fiSerialized, err := json.Marshal(stackOutput.Outputs["dd-Fakeintake-aws-ecs"].Value) - 
suite.Require().NoError(err) - suite.Require().NoError(fakeintake.Import(fiSerialized, &fakeintake)) - suite.Require().NoError(fakeintake.Init(suite)) - suite.Fakeintake = fakeintake.Client() - - kubeCluster := &components.KubernetesCluster{} - kubeSerialized, err := json.Marshal(stackOutput.Outputs["dd-Cluster-eks"].Value) - suite.Require().NoError(err) - suite.Require().NoError(kubeCluster.Import(kubeSerialized, &kubeCluster)) - suite.Require().NoError(kubeCluster.Init(suite)) - suite.KubeClusterName = kubeCluster.ClusterName - suite.K8sClient = kubeCluster.Client() - suite.K8sConfig, err = clientcmd.RESTConfigFromKubeConfig([]byte(kubeCluster.KubeConfig)) - suite.Require().NoError(err) - - kubernetesAgent := &components.KubernetesAgent{} - kubernetesAgentSerialized, err := json.Marshal(stackOutput.Outputs["dd-KubernetesAgent-aws-datadog-agent"].Value) - suite.Require().NoError(err) - suite.Require().NoError(kubernetesAgent.Import(kubernetesAgentSerialized, &kubernetesAgent)) - - suite.KubernetesAgentRef = kubernetesAgent - suite.k8sSuite.SetupSuite() -} - -func (suite *eksSuite) TearDownSuite() { - if suite.initOnly { - suite.T().Logf("E2E_INIT_ONLY is set, skipping deletion") - return - } - - suite.k8sSuite.TearDownSuite() - - ctx := context.Background() - stackName, err := infra.GetStackManager().GetPulumiStackName("eks-cluster") - suite.Require().NoError(err) - suite.T().Log(dumpEKSClusterState(ctx, stackName)) + suite.Fakeintake = suite.Env().FakeIntake.Client() } diff --git a/test/new-e2e/tests/containers/k8s_test.go b/test/new-e2e/tests/containers/k8s_test.go index fb235f979c951..6290166cf6055 100644 --- a/test/new-e2e/tests/containers/k8s_test.go +++ b/test/new-e2e/tests/containers/k8s_test.go @@ -21,7 +21,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/pointer" "github.com/DataDog/datadog-agent/test/fakeintake/aggregator" fakeintake "github.com/DataDog/datadog-agent/test/fakeintake/client" - 
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/components" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" "github.com/fatih/color" "github.com/samber/lo" @@ -31,9 +31,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" - restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/remotecommand" ) @@ -51,21 +49,12 @@ const ( var GitCommit string type k8sSuite struct { - baseSuite - - KubeClusterName string - AgentLinuxHelmInstallName string - AgentWindowsHelmInstallName string - KubernetesAgentRef *components.KubernetesAgent - - K8sConfig *restclient.Config - K8sClient kubernetes.Interface + baseSuite[environments.Kubernetes] } func (suite *k8sSuite) SetupSuite() { - suite.clusterName = suite.KubeClusterName - suite.baseSuite.SetupSuite() + suite.clusterName = suite.Env().KubernetesCluster.ClusterName } func (suite *k8sSuite) TearDownSuite() { @@ -76,10 +65,10 @@ func (suite *k8sSuite) TearDownSuite() { suite.T().Log(c("The data produced and asserted by these tests can be viewed on this dashboard:")) c = color.New(color.Bold, color.FgBlue).SprintfFunc() suite.T().Log(c("https://dddev.datadoghq.com/dashboard/qcp-brm-ysc/e2e-tests-containers-k8s?refresh_mode=paused&tpl_var_kube_cluster_name%%5B0%%5D=%s&tpl_var_fake_intake_task_family%%5B0%%5D=%s-fakeintake-ecs&from_ts=%d&to_ts=%d&live=false", - suite.KubeClusterName, - suite.KubeClusterName, - suite.startTime.UnixMilli(), - suite.endTime.UnixMilli(), + suite.clusterName, + suite.clusterName, + suite.StartTime().UnixMilli(), + suite.EndTime().UnixMilli(), )) } @@ -119,7 +108,7 @@ func (suite *k8sSuite) testUpAndRunning(waitFor time.Duration) { suite.Run("agent pods are ready and not restarting", func() { suite.EventuallyWithTf(func(c *assert.CollectT) { - linuxNodes, err := suite.K8sClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{ + linuxNodes, err := 
suite.Env().KubernetesCluster.Client().CoreV1().Nodes().List(ctx, metav1.ListOptions{ LabelSelector: fields.OneTermEqualSelector("kubernetes.io/os", "linux").String(), }) // Can be replaced by require.NoErrorf(…) once https://github.com/stretchr/testify/pull/1481 is merged @@ -127,7 +116,7 @@ func (suite *k8sSuite) testUpAndRunning(waitFor time.Duration) { return } - windowsNodes, err := suite.K8sClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{ + windowsNodes, err := suite.Env().KubernetesCluster.Client().CoreV1().Nodes().List(ctx, metav1.ListOptions{ LabelSelector: fields.OneTermEqualSelector("kubernetes.io/os", "windows").String(), }) // Can be replaced by require.NoErrorf(…) once https://github.com/stretchr/testify/pull/1481 is merged @@ -135,39 +124,39 @@ func (suite *k8sSuite) testUpAndRunning(waitFor time.Duration) { return } - linuxPods, err := suite.K8sClient.CoreV1().Pods("datadog").List(ctx, metav1.ListOptions{ - LabelSelector: fields.OneTermEqualSelector("app", suite.KubernetesAgentRef.LinuxNodeAgent.LabelSelectors["app"]).String(), + linuxPods, err := suite.Env().KubernetesCluster.Client().CoreV1().Pods("datadog").List(ctx, metav1.ListOptions{ + LabelSelector: fields.OneTermEqualSelector("app", suite.Env().Agent.LinuxNodeAgent.LabelSelectors["app"]).String(), }) // Can be replaced by require.NoErrorf(…) once https://github.com/stretchr/testify/pull/1481 is merged if !assert.NoErrorf(c, err, "Failed to list Linux datadog agent pods") { return } - windowsPods, err := suite.K8sClient.CoreV1().Pods("datadog").List(ctx, metav1.ListOptions{ - LabelSelector: fields.OneTermEqualSelector("app", suite.KubernetesAgentRef.WindowsNodeAgent.LabelSelectors["app"]).String(), + windowsPods, err := suite.Env().KubernetesCluster.Client().CoreV1().Pods("datadog").List(ctx, metav1.ListOptions{ + LabelSelector: fields.OneTermEqualSelector("app", suite.Env().Agent.WindowsNodeAgent.LabelSelectors["app"]).String(), }) // Can be replaced by require.NoErrorf(…) once 
https://github.com/stretchr/testify/pull/1481 is merged if !assert.NoErrorf(c, err, "Failed to list Windows datadog agent pods") { return } - clusterAgentPods, err := suite.K8sClient.CoreV1().Pods("datadog").List(ctx, metav1.ListOptions{ - LabelSelector: fields.OneTermEqualSelector("app", suite.KubernetesAgentRef.LinuxClusterAgent.LabelSelectors["app"]).String(), + clusterAgentPods, err := suite.Env().KubernetesCluster.Client().CoreV1().Pods("datadog").List(ctx, metav1.ListOptions{ + LabelSelector: fields.OneTermEqualSelector("app", suite.Env().Agent.LinuxClusterAgent.LabelSelectors["app"]).String(), }) // Can be replaced by require.NoErrorf(…) once https://github.com/stretchr/testify/pull/1481 is merged if !assert.NoErrorf(c, err, "Failed to list datadog cluster agent pods") { return } - clusterChecksPods, err := suite.K8sClient.CoreV1().Pods("datadog").List(ctx, metav1.ListOptions{ - LabelSelector: fields.OneTermEqualSelector("app", suite.KubernetesAgentRef.LinuxClusterChecks.LabelSelectors["app"]).String(), + clusterChecksPods, err := suite.Env().KubernetesCluster.Client().CoreV1().Pods("datadog").List(ctx, metav1.ListOptions{ + LabelSelector: fields.OneTermEqualSelector("app", suite.Env().Agent.LinuxClusterChecks.LabelSelectors["app"]).String(), }) // Can be replaced by require.NoErrorf(…) once https://github.com/stretchr/testify/pull/1481 is merged if !assert.NoErrorf(c, err, "Failed to list datadog cluster checks runner pods") { return } - dogstatsdPods, err := suite.K8sClient.CoreV1().Pods("dogstatsd-standalone").List(ctx, metav1.ListOptions{ + dogstatsdPods, err := suite.Env().KubernetesCluster.Client().CoreV1().Pods("dogstatsd-standalone").List(ctx, metav1.ListOptions{ LabelSelector: fields.OneTermEqualSelector("app", "dogstatsd-standalone").String(), }) // Can be replaced by require.NoErrorf(…) once https://github.com/stretchr/testify/pull/1481 is merged @@ -198,13 +187,13 @@ func (suite *k8sSuite) TestAdmissionControllerWebhooksExist() { 
expectedWebhookName := "datadog-webhook" suite.Run("agent registered mutating webhook configuration", func() { - mutatingConfig, err := suite.K8sClient.AdmissionregistrationV1().MutatingWebhookConfigurations().Get(ctx, expectedWebhookName, metav1.GetOptions{}) + mutatingConfig, err := suite.Env().KubernetesCluster.Client().AdmissionregistrationV1().MutatingWebhookConfigurations().Get(ctx, expectedWebhookName, metav1.GetOptions{}) suite.Require().NoError(err) suite.NotNilf(mutatingConfig, "None of the mutating webhook configurations have the name '%s'", expectedWebhookName) }) suite.Run("agent registered validating webhook configuration", func() { - validatingConfig, err := suite.K8sClient.AdmissionregistrationV1().ValidatingWebhookConfigurations().Get(ctx, expectedWebhookName, metav1.GetOptions{}) + validatingConfig, err := suite.Env().KubernetesCluster.Client().AdmissionregistrationV1().ValidatingWebhookConfigurations().Get(ctx, expectedWebhookName, metav1.GetOptions{}) suite.Require().NoError(err) suite.NotNilf(validatingConfig, "None of the validating webhook configurations have the name '%s'", expectedWebhookName) }) @@ -221,27 +210,27 @@ func (suite *k8sSuite) TestVersion() { }{ { "Linux agent", - suite.KubernetesAgentRef.LinuxNodeAgent.LabelSelectors["app"], + suite.Env().Agent.LinuxNodeAgent.LabelSelectors["app"], "agent", }, { "Windows agent", - suite.KubernetesAgentRef.WindowsNodeAgent.LabelSelectors["app"], + suite.Env().Agent.WindowsNodeAgent.LabelSelectors["app"], "agent", }, { "cluster agent", - suite.KubernetesAgentRef.LinuxClusterAgent.LabelSelectors["app"], + suite.Env().Agent.LinuxClusterAgent.LabelSelectors["app"], "cluster-agent", }, { "cluster checks", - suite.KubernetesAgentRef.LinuxClusterChecks.LabelSelectors["app"], + suite.Env().Agent.LinuxClusterChecks.LabelSelectors["app"], "agent", }, } { suite.Run(tt.podType+" pods are running the good version", func() { - linuxPods, err := suite.K8sClient.CoreV1().Pods("datadog").List(ctx, 
metav1.ListOptions{ + linuxPods, err := suite.Env().KubernetesCluster.Client().CoreV1().Pods("datadog").List(ctx, metav1.ListOptions{ LabelSelector: fields.OneTermEqualSelector("app", tt.appSelector).String(), Limit: 1, }) @@ -276,8 +265,8 @@ func (suite *k8sSuite) TestCLI() { func (suite *k8sSuite) testAgentCLI() { ctx := context.Background() - pod, err := suite.K8sClient.CoreV1().Pods("datadog").List(ctx, metav1.ListOptions{ - LabelSelector: fields.OneTermEqualSelector("app", suite.KubernetesAgentRef.LinuxNodeAgent.LabelSelectors["app"]).String(), + pod, err := suite.Env().KubernetesCluster.Client().CoreV1().Pods("datadog").List(ctx, metav1.ListOptions{ + LabelSelector: fields.OneTermEqualSelector("app", suite.Env().Agent.LinuxNodeAgent.LabelSelectors["app"]).String(), Limit: 1, }) suite.Require().NoError(err) @@ -383,8 +372,8 @@ func (suite *k8sSuite) testAgentCLI() { func (suite *k8sSuite) testClusterAgentCLI() { ctx := context.Background() - pod, err := suite.K8sClient.CoreV1().Pods("datadog").List(ctx, metav1.ListOptions{ - LabelSelector: fields.OneTermEqualSelector("app", suite.KubernetesAgentRef.LinuxClusterAgent.LabelSelectors["app"]).String(), + pod, err := suite.Env().KubernetesCluster.Client().CoreV1().Pods("datadog").List(ctx, metav1.ListOptions{ + LabelSelector: fields.OneTermEqualSelector("app", suite.Env().Agent.LinuxClusterAgent.LabelSelectors["app"]).String(), Limit: 1, }) suite.Require().NoError(err) @@ -844,7 +833,7 @@ func (suite *k8sSuite) testDogstatsdExternalData(kubeNamespace, kubeDeployment s ctx := context.Background() // Record old pod, so we can be sure we are not looking at the incorrect one after deletion - oldPods, err := suite.K8sClient.CoreV1().Pods(kubeNamespace).List(ctx, metav1.ListOptions{ + oldPods, err := suite.Env().KubernetesCluster.KubernetesClient.K8sClient.CoreV1().Pods(kubeNamespace).List(ctx, metav1.ListOptions{ LabelSelector: fields.OneTermEqualSelector("app", kubeDeployment).String(), }) suite.Require().NoError(err) 
@@ -852,7 +841,7 @@ func (suite *k8sSuite) testDogstatsdExternalData(kubeNamespace, kubeDeployment s oldPod := oldPods.Items[0] // Delete the pod to ensure it is recreated after the admission controller is deployed - err = suite.K8sClient.CoreV1().Pods(kubeNamespace).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{ + err = suite.Env().KubernetesCluster.KubernetesClient.K8sClient.CoreV1().Pods(kubeNamespace).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{ LabelSelector: fields.OneTermEqualSelector("app", kubeDeployment).String(), }) suite.Require().NoError(err) @@ -860,7 +849,7 @@ func (suite *k8sSuite) testDogstatsdExternalData(kubeNamespace, kubeDeployment s // Wait for the fresh pod to be created var pod corev1.Pod suite.Require().EventuallyWithTf(func(c *assert.CollectT) { - pods, err := suite.K8sClient.CoreV1().Pods(kubeNamespace).List(ctx, metav1.ListOptions{ + pods, err := suite.Env().KubernetesCluster.KubernetesClient.K8sClient.CoreV1().Pods(kubeNamespace).List(ctx, metav1.ListOptions{ LabelSelector: fields.OneTermEqualSelector("app", kubeDeployment).String(), }) if !assert.NoError(c, err) { @@ -1004,7 +993,7 @@ func (suite *k8sSuite) testAdmissionControllerPod(namespace string, name string, // libraries for the detected language are injected if languageShouldBeAutoDetected { suite.Require().EventuallyWithTf(func(c *assert.CollectT) { - deployment, err := suite.K8sClient.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{}) + deployment, err := suite.Env().KubernetesCluster.Client().AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{}) if !assert.NoError(c, err) { return } @@ -1022,7 +1011,7 @@ func (suite *k8sSuite) testAdmissionControllerPod(namespace string, name string, } // Record old pod, so we can be sure we are not looking at the incorrect one after deletion - oldPods, err := suite.K8sClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{ + oldPods, err := 
suite.Env().KubernetesCluster.Client().CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{ LabelSelector: fields.OneTermEqualSelector("app", name).String(), }) suite.Require().NoError(err) @@ -1030,7 +1019,7 @@ func (suite *k8sSuite) testAdmissionControllerPod(namespace string, name string, oldPod := oldPods.Items[0] // Delete the pod to ensure it is recreated after the admission controller is deployed - err = suite.K8sClient.CoreV1().Pods(namespace).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{ + err = suite.Env().KubernetesCluster.Client().CoreV1().Pods(namespace).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{ LabelSelector: fields.OneTermEqualSelector("app", name).String(), }) suite.Require().NoError(err) @@ -1038,7 +1027,7 @@ func (suite *k8sSuite) testAdmissionControllerPod(namespace string, name string, // Wait for the fresh pod to be created var pod corev1.Pod suite.Require().EventuallyWithTf(func(c *assert.CollectT) { - pods, err := suite.K8sClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{ + pods, err := suite.Env().KubernetesCluster.Client().CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{ LabelSelector: fields.OneTermEqualSelector("app", name).String(), }) if !assert.NoError(c, err) { @@ -1134,7 +1123,7 @@ func (suite *k8sSuite) testAdmissionControllerPod(namespace string, name string, func (suite *k8sSuite) TestContainerImage() { sendEvent := func(alertType, text string) { - if _, err := suite.datadogClient.PostEvent(&datadog.Event{ + if _, err := suite.DatadogClient().PostEvent(&datadog.Event{ Title: pointer.Ptr(suite.T().Name()), Text: pointer.Ptr(fmt.Sprintf(`%%%%%% `+"```"+` @@ -1204,7 +1193,7 @@ func (suite *k8sSuite) TestContainerImage() { func (suite *k8sSuite) TestSBOM() { sendEvent := func(alertType, text string) { - if _, err := suite.datadogClient.PostEvent(&datadog.Event{ + if _, err := suite.DatadogClient().PostEvent(&datadog.Event{ Title: pointer.Ptr(suite.T().Name()), Text: 
pointer.Ptr(fmt.Sprintf(`%%%%%% `+"```"+` @@ -1329,7 +1318,7 @@ func (suite *k8sSuite) TestSBOM() { func (suite *k8sSuite) TestContainerLifecycleEvents() { sendEvent := func(alertType, text string) { - if _, err := suite.datadogClient.PostEvent(&datadog.Event{ + if _, err := suite.DatadogClient().PostEvent(&datadog.Event{ Title: pointer.Ptr(suite.T().Name()), Text: pointer.Ptr(fmt.Sprintf(`%%%%%% `+"```"+` @@ -1359,7 +1348,7 @@ func (suite *k8sSuite) TestContainerLifecycleEvents() { var nginxPod corev1.Pod suite.Require().EventuallyWithTf(func(c *assert.CollectT) { - pods, err := suite.K8sClient.CoreV1().Pods("workload-nginx").List(context.Background(), metav1.ListOptions{ + pods, err := suite.Env().KubernetesCluster.Client().CoreV1().Pods("workload-nginx").List(context.Background(), metav1.ListOptions{ LabelSelector: fields.OneTermEqualSelector("app", "nginx").String(), FieldSelector: fields.OneTermEqualSelector("status.phase", "Running").String(), }) @@ -1379,7 +1368,7 @@ func (suite *k8sSuite) TestContainerLifecycleEvents() { }) }, 1*time.Minute, 10*time.Second, "Failed to find an nginx pod") - err := suite.K8sClient.CoreV1().Pods("workload-nginx").Delete(context.Background(), nginxPod.Name, metav1.DeleteOptions{}) + err := suite.Env().KubernetesCluster.Client().CoreV1().Pods("workload-nginx").Delete(context.Background(), nginxPod.Name, metav1.DeleteOptions{}) suite.Require().NoError(err) suite.EventuallyWithTf(func(collect *assert.CollectT) { @@ -1422,7 +1411,7 @@ func (suite *k8sSuite) TestContainerLifecycleEvents() { func (suite *k8sSuite) testHPA(namespace, deployment string) { suite.Run(fmt.Sprintf("hpa kubernetes_state.deployment.replicas_available{kube_namespace:%s,kube_deployment:%s}", namespace, deployment), func() { sendEvent := func(alertType, text string, time *int) { - if _, err := suite.datadogClient.PostEvent(&datadog.Event{ + if _, err := suite.DatadogClient().PostEvent(&datadog.Event{ Title: pointer.Ptr(fmt.Sprintf("testHPA %s/%s", namespace, 
deployment)), Text: pointer.Ptr(fmt.Sprintf(`%%%%%% %s @@ -1510,7 +1499,7 @@ func (suite *k8sSuite) testHPA(namespace, deployment string) { type podExecOption func(*corev1.PodExecOptions) func (suite *k8sSuite) podExec(namespace, pod, container string, cmd []string, podOptions ...podExecOption) (stdout, stderr string, err error) { - req := suite.K8sClient.CoreV1().RESTClient().Post().Resource("pods").Namespace(namespace).Name(pod).SubResource("exec") + req := suite.Env().KubernetesCluster.Client().CoreV1().RESTClient().Post().Resource("pods").Namespace(namespace).Name(pod).SubResource("exec") option := &corev1.PodExecOptions{ Stdin: false, Stdout: true, @@ -1529,7 +1518,7 @@ func (suite *k8sSuite) podExec(namespace, pod, container string, cmd []string, p scheme.ParameterCodec, ) - exec, err := remotecommand.NewSPDYExecutor(suite.K8sConfig, "POST", req.URL()) + exec, err := remotecommand.NewSPDYExecutor(suite.Env().KubernetesCluster.KubernetesClient.K8sConfig, "POST", req.URL()) if err != nil { return "", "", err } diff --git a/test/new-e2e/tests/containers/kindvm_test.go b/test/new-e2e/tests/containers/kindvm_test.go index 5282e6fd65e34..01d10f2b5ea1a 100644 --- a/test/new-e2e/tests/containers/kindvm_test.go +++ b/test/new-e2e/tests/containers/kindvm_test.go @@ -6,19 +6,13 @@ package containers import ( - "context" - "encoding/json" "testing" - "github.com/DataDog/test-infra-definitions/scenarios/aws/kindvm" + "github.com/DataDog/test-infra-definitions/scenarios/aws/ec2" + "github.com/DataDog/test-infra-definitions/scenarios/aws/fakeintake" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/components" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/infra" - - "github.com/pulumi/pulumi/sdk/v3/go/auto" - "github.com/stretchr/testify/suite" - "k8s.io/client-go/tools/clientcmd" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" + awskubernetes 
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/kubernetes" ) type kindSuite struct { @@ -26,69 +20,19 @@ type kindSuite struct { } func TestKindSuite(t *testing.T) { - suite.Run(t, &kindSuite{}) + e2e.Run(t, &kindSuite{}, e2e.WithProvisioner(awskubernetes.KindProvisioner( + awskubernetes.WithEC2VMOptions( + ec2.WithInstanceType("t3.xlarge"), + ), + awskubernetes.WithFakeIntakeOptions(fakeintake.WithMemory(2048)), + awskubernetes.WithDeployDogstatsd(), + awskubernetes.WithDeployTestWorkload(), + ))) } func (suite *kindSuite) SetupSuite() { - ctx := context.Background() - - stackConfig := runner.ConfigMap{ - "ddinfra:aws/defaultInstanceType": auto.ConfigValue{Value: "t3.xlarge"}, - "ddagent:deploy": auto.ConfigValue{Value: "true"}, - "ddagent:fakeintake": auto.ConfigValue{Value: "true"}, - "ddtestworkload:deploy": auto.ConfigValue{Value: "true"}, - "dddogstatsd:deploy": auto.ConfigValue{Value: "true"}, - } - - _, stackOutput, err := infra.GetStackManager().GetStackNoDeleteOnFailure( - ctx, - "kind-cluster", - kindvm.Run, - infra.WithConfigMap(stackConfig), - ) - if !suite.Assert().NoError(err) { - stackName, err := infra.GetStackManager().GetPulumiStackName("kind-cluster") - suite.Require().NoError(err) - suite.T().Log(dumpKindClusterState(ctx, stackName)) - if !runner.GetProfile().AllowDevMode() || !*keepStacks { - infra.GetStackManager().DeleteStack(ctx, "kind-cluster", nil) - } - suite.T().FailNow() - } - - var fakeintake components.FakeIntake - fiSerialized, err := json.Marshal(stackOutput.Outputs["dd-Fakeintake-aws-kind"].Value) - suite.Require().NoError(err) - suite.Require().NoError(fakeintake.Import(fiSerialized, &fakeintake)) - suite.Require().NoError(fakeintake.Init(suite)) - suite.Fakeintake = fakeintake.Client() - - var kubeCluster components.KubernetesCluster - kubeSerialized, err := json.Marshal(stackOutput.Outputs["dd-Cluster-kind"].Value) - suite.Require().NoError(err) - suite.Require().NoError(kubeCluster.Import(kubeSerialized, 
&kubeCluster)) - suite.Require().NoError(kubeCluster.Init(suite)) - suite.KubeClusterName = kubeCluster.ClusterName - suite.K8sClient = kubeCluster.Client() - suite.K8sConfig, err = clientcmd.RESTConfigFromKubeConfig([]byte(kubeCluster.KubeConfig)) - suite.Require().NoError(err) - - kubernetesAgent := &components.KubernetesAgent{} - kubernetesAgentSerialized, err := json.Marshal(stackOutput.Outputs["dd-KubernetesAgent-aws-datadog-agent"].Value) - suite.Require().NoError(err) - suite.Require().NoError(kubernetesAgent.Import(kubernetesAgentSerialized, &kubernetesAgent)) - suite.KubernetesAgentRef = kubernetesAgent - suite.k8sSuite.SetupSuite() -} - -func (suite *kindSuite) TearDownSuite() { - suite.k8sSuite.TearDownSuite() - - ctx := context.Background() - stackName, err := infra.GetStackManager().GetPulumiStackName("kind-cluster") - suite.Require().NoError(err) - suite.T().Log(dumpKindClusterState(ctx, stackName)) + suite.Fakeintake = suite.Env().FakeIntake.Client() } func (suite *kindSuite) TestControlPlane() {