From e6c945e2246e4144238c59dc5cb750311a783879 Mon Sep 17 00:00:00 2001 From: Kevin Fairise <132568982+KevinFairise2@users.noreply.github.com> Date: Mon, 23 Dec 2024 11:46:16 +0100 Subject: [PATCH] =?UTF-8?q?Revert=20"Revert=20"Move=20containers=20integra?= =?UTF-8?q?tion=20tests=20to=20e2e=20provisioners=E2=80=A6=20(#32055)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../pkg/provisioners/aws/kubernetes/eks.go | 32 +- .../pkg/provisioners/aws/kubernetes/kind.go | 33 +- test/new-e2e/tests/containers/base_test.go | 47 +-- test/new-e2e/tests/containers/docker_test.go | 43 +-- .../tests/containers/dump_cluster_state.go | 342 ------------------ test/new-e2e/tests/containers/ecs_test.go | 70 ++-- test/new-e2e/tests/containers/eks_test.go | 103 +----- test/new-e2e/tests/containers/k8s_test.go | 89 ++--- test/new-e2e/tests/containers/kindvm_test.go | 85 +---- 9 files changed, 154 insertions(+), 690 deletions(-) delete mode 100644 test/new-e2e/tests/containers/dump_cluster_state.go diff --git a/test/new-e2e/pkg/provisioners/aws/kubernetes/eks.go b/test/new-e2e/pkg/provisioners/aws/kubernetes/eks.go index 1e44a7019a711..d65388d9746cb 100644 --- a/test/new-e2e/pkg/provisioners/aws/kubernetes/eks.go +++ b/test/new-e2e/pkg/provisioners/aws/kubernetes/eks.go @@ -22,6 +22,7 @@ import ( dogstatsdstandalone "github.com/DataDog/test-infra-definitions/components/datadog/dogstatsd-standalone" fakeintakeComp "github.com/DataDog/test-infra-definitions/components/datadog/fakeintake" "github.com/DataDog/test-infra-definitions/components/datadog/kubernetesagentparams" + "github.com/DataDog/test-infra-definitions/components/kubernetes/vpa" "github.com/DataDog/test-infra-definitions/resources/aws" "github.com/DataDog/test-infra-definitions/scenarios/aws/eks" "github.com/DataDog/test-infra-definitions/scenarios/aws/fakeintake" @@ -88,6 +89,12 @@ func EKSRunFunc(ctx *pulumi.Context, env *environments.Kubernetes, params *Provi return nil } + vpaCrd, err := vpa.DeployCRD(&awsEnv, cluster.KubeProvider) + if err != nil { + return err + } + dependsOnVPA := utils.PulumiDependsOn(vpaCrd) + var fakeIntake *fakeintakeComp.Fakeintake if params.fakeintakeOptions != nil { fakeIntakeOptions := []fakeintake.Option{ @@ -108,10 +115,19 @@ func EKSRunFunc(ctx *pulumi.Context, env *environments.Kubernetes, params *Provi env.FakeIntake = nil } - workloadWithCRDDeps := []pulumi.Resource{cluster} + var dependsOnDDAgent pulumi.ResourceOption // Deploy the agent if params.agentOptions != nil { params.agentOptions = append(params.agentOptions, kubernetesagentparams.WithPulumiResourceOptions(utils.PulumiDependsOn(cluster)), kubernetesagentparams.WithFakeintake(fakeIntake), kubernetesagentparams.WithTags([]string{"stackid:" + ctx.Stack()})) + + eksParams, err := eks.NewParams(params.eksOptions...) + if err != nil { + return err + } + if eksParams.WindowsNodeGroup { + params.agentOptions = append(params.agentOptions, kubernetesagentparams.WithDeployWindows()) + } + kubernetesAgent, err := helm.NewKubernetesAgent(&awsEnv, "eks", cluster.KubeProvider, params.agentOptions...) 
if err != nil { return err @@ -120,7 +136,7 @@ func EKSRunFunc(ctx *pulumi.Context, env *environments.Kubernetes, params *Provi if err != nil { return err } - workloadWithCRDDeps = append(workloadWithCRDDeps, kubernetesAgent) + dependsOnDDAgent = utils.PulumiDependsOn(kubernetesAgent) } else { env.Agent = nil } @@ -133,18 +149,18 @@ func EKSRunFunc(ctx *pulumi.Context, env *environments.Kubernetes, params *Provi if params.deployTestWorkload { - if _, err := cpustress.K8sAppDefinition(&awsEnv, cluster.KubeProvider, "workload-cpustress", utils.PulumiDependsOn(cluster)); err != nil { + if _, err := cpustress.K8sAppDefinition(&awsEnv, cluster.KubeProvider, "workload-cpustress"); err != nil { return err } // dogstatsd clients that report to the Agent - if _, err := dogstatsd.K8sAppDefinition(&awsEnv, cluster.KubeProvider, "workload-dogstatsd", 8125, "/var/run/datadog/dsd.socket", utils.PulumiDependsOn(cluster)); err != nil { + if _, err := dogstatsd.K8sAppDefinition(&awsEnv, cluster.KubeProvider, "workload-dogstatsd", 8125, "/var/run/datadog/dsd.socket", dependsOnDDAgent /* for admission */); err != nil { return err } if params.deployDogstatsd { // dogstatsd clients that report to the dogstatsd standalone deployment - if _, err := dogstatsd.K8sAppDefinition(&awsEnv, cluster.KubeProvider, "workload-dogstatsd-standalone", dogstatsdstandalone.HostPort, dogstatsdstandalone.Socket, utils.PulumiDependsOn(cluster)); err != nil { + if _, err := dogstatsd.K8sAppDefinition(&awsEnv, cluster.KubeProvider, "workload-dogstatsd-standalone", dogstatsdstandalone.HostPort, dogstatsdstandalone.Socket, dependsOnDDAgent /* for admission */); err != nil { return err } } @@ -157,17 +173,17 @@ func EKSRunFunc(ctx *pulumi.Context, env *environments.Kubernetes, params *Provi return err } - if _, err := mutatedbyadmissioncontroller.K8sAppDefinition(&awsEnv, cluster.KubeProvider, "workload-mutated", "workload-mutated-lib-injection", utils.PulumiDependsOn(cluster)); err != nil { + if _, err := mutatedbyadmissioncontroller.K8sAppDefinition(&awsEnv, cluster.KubeProvider, "workload-mutated", "workload-mutated-lib-injection", dependsOnDDAgent /* for admission */); err != nil { return err } // These resources cannot be deployed if the Agent is not installed, it requires some CRDs provided by the Helm chart if params.agentOptions != nil { - if _, err := nginx.K8sAppDefinition(&awsEnv, cluster.KubeProvider, "workload-nginx", "", true, utils.PulumiDependsOn(workloadWithCRDDeps...)); err != nil { + if _, err := nginx.K8sAppDefinition(&awsEnv, cluster.KubeProvider, "workload-nginx", "", true, dependsOnDDAgent /* for DDM */, dependsOnVPA); err != nil { return err } - if _, err := redis.K8sAppDefinition(&awsEnv, cluster.KubeProvider, "workload-redis", true, utils.PulumiDependsOn(workloadWithCRDDeps...)); err != nil { + if _, err := redis.K8sAppDefinition(&awsEnv, cluster.KubeProvider, "workload-redis", true, dependsOnDDAgent /* for DDM */, dependsOnVPA); err != nil { return err } } diff --git a/test/new-e2e/pkg/provisioners/aws/kubernetes/kind.go b/test/new-e2e/pkg/provisioners/aws/kubernetes/kind.go index 44f5716bc92fa..b1c958cde8200 100644 --- a/test/new-e2e/pkg/provisioners/aws/kubernetes/kind.go +++ b/test/new-e2e/pkg/provisioners/aws/kubernetes/kind.go @@ -28,6 +28,7 @@ import ( fakeintakeComp "github.com/DataDog/test-infra-definitions/components/datadog/fakeintake" "github.com/DataDog/test-infra-definitions/components/datadog/kubernetesagentparams" kubeComp "github.com/DataDog/test-infra-definitions/components/kubernetes" + 
"github.com/DataDog/test-infra-definitions/components/kubernetes/vpa" "github.com/DataDog/test-infra-definitions/resources/aws" "github.com/DataDog/test-infra-definitions/scenarios/aws/ec2" "github.com/DataDog/test-infra-definitions/scenarios/aws/fakeintake" @@ -106,6 +107,12 @@ func KindRunFunc(ctx *pulumi.Context, env *environments.Kubernetes, params *Prov return err } + vpaCrd, err := vpa.DeployCRD(&awsEnv, kubeProvider) + if err != nil { + return err + } + dependsOnVPA := utils.PulumiDependsOn(vpaCrd) + var fakeIntake *fakeintakeComp.Fakeintake if params.fakeintakeOptions != nil { fakeintakeOpts := []fakeintake.Option{fakeintake.WithLoadBalancer()} @@ -127,21 +134,19 @@ func KindRunFunc(ctx *pulumi.Context, env *environments.Kubernetes, params *Prov env.FakeIntake = nil } - var dependsOnCrd []pulumi.Resource + var dependsOnDDAgent pulumi.ResourceOption if params.agentOptions != nil { - kindClusterName := ctx.Stack() - helmValues := fmt.Sprintf(` + helmValues := ` datadog: kubelet: tlsVerify: false - clusterName: "%s" agents: useHostNetwork: true -`, kindClusterName) +` - newOpts := []kubernetesagentparams.Option{kubernetesagentparams.WithHelmValues(helmValues), kubernetesagentparams.WithTags([]string{"stackid:" + ctx.Stack()})} + newOpts := []kubernetesagentparams.Option{kubernetesagentparams.WithHelmValues(helmValues), kubernetesagentparams.WithClusterName(kindCluster.ClusterName), kubernetesagentparams.WithTags([]string{"stackid:" + ctx.Stack()})} params.agentOptions = append(newOpts, params.agentOptions...) - agent, err := helm.NewKubernetesAgent(&awsEnv, kindClusterName, kubeProvider, params.agentOptions...) + agent, err := helm.NewKubernetesAgent(&awsEnv, "kind", kubeProvider, params.agentOptions...) if err != nil { return err } @@ -149,7 +154,7 @@ agents: if err != nil { return err } - dependsOnCrd = append(dependsOnCrd, agent) + dependsOnDDAgent = utils.PulumiDependsOn(agent) } else { env.Agent = nil } @@ -163,13 +168,13 @@ agents: // Deploy testing workload if params.deployTestWorkload { // dogstatsd clients that report to the Agent - if _, err := dogstatsd.K8sAppDefinition(&awsEnv, kubeProvider, "workload-dogstatsd", 8125, "/var/run/datadog/dsd.socket"); err != nil { + if _, err := dogstatsd.K8sAppDefinition(&awsEnv, kubeProvider, "workload-dogstatsd", 8125, "/var/run/datadog/dsd.socket", dependsOnDDAgent /* for admission */); err != nil { return err } if params.deployDogstatsd { // dogstatsd clients that report to the dogstatsd standalone deployment - if _, err := dogstatsd.K8sAppDefinition(&awsEnv, kubeProvider, "workload-dogstatsd-standalone", dogstatsdstandalone.HostPort, dogstatsdstandalone.Socket); err != nil { + if _, err := dogstatsd.K8sAppDefinition(&awsEnv, kubeProvider, "workload-dogstatsd-standalone", dogstatsdstandalone.HostPort, dogstatsdstandalone.Socket, dependsOnDDAgent /* for admission */); err != nil { return err } } @@ -182,21 +187,21 @@ agents: return err } - if _, err := mutatedbyadmissioncontroller.K8sAppDefinition(&awsEnv, kubeProvider, "workload-mutated", "workload-mutated-lib-injection"); err != nil { + if _, err := mutatedbyadmissioncontroller.K8sAppDefinition(&awsEnv, kubeProvider, "workload-mutated", "workload-mutated-lib-injection", dependsOnDDAgent /* for admission */); err != nil { return err } // These workloads can be deployed only if the agent is installed, they rely on CRDs installed by Agent helm chart if params.agentOptions != nil { - if _, err := nginx.K8sAppDefinition(&awsEnv, kubeProvider, "workload-nginx", "", true, 
utils.PulumiDependsOn(dependsOnCrd...)); err != nil { + if _, err := nginx.K8sAppDefinition(&awsEnv, kubeProvider, "workload-nginx", "", true, dependsOnDDAgent /* for DDM */, dependsOnVPA); err != nil { return err } - if _, err := redis.K8sAppDefinition(&awsEnv, kubeProvider, "workload-redis", true, utils.PulumiDependsOn(dependsOnCrd...)); err != nil { + if _, err := redis.K8sAppDefinition(&awsEnv, kubeProvider, "workload-redis", true, dependsOnDDAgent /* for DDM */, dependsOnVPA); err != nil { return err } - if _, err := cpustress.K8sAppDefinition(&awsEnv, kubeProvider, "workload-cpustress", utils.PulumiDependsOn(dependsOnCrd...)); err != nil { + if _, err := cpustress.K8sAppDefinition(&awsEnv, kubeProvider, "workload-cpustress"); err != nil { return err } } diff --git a/test/new-e2e/tests/containers/base_test.go b/test/new-e2e/tests/containers/base_test.go index 84bce8ea70197..7bc4735a43dd2 100644 --- a/test/new-e2e/tests/containers/base_test.go +++ b/test/new-e2e/tests/containers/base_test.go @@ -14,7 +14,6 @@ import ( "github.com/samber/lo" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/suite" "gopkg.in/yaml.v3" "gopkg.in/zorkian/go-datadog-api.v2" @@ -23,39 +22,21 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/pointer" "github.com/DataDog/datadog-agent/test/fakeintake/aggregator" fakeintake "github.com/DataDog/datadog-agent/test/fakeintake/client" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner/parameters" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" ) -type baseSuite struct { - suite.Suite +type baseSuite[Env any] struct { + e2e.BaseSuite[Env] - startTime time.Time - endTime time.Time - datadogClient *datadog.Client - Fakeintake *fakeintake.Client - clusterName string + Fakeintake *fakeintake.Client + clusterName string } -func (suite *baseSuite) SetupSuite() { - apiKey, err := runner.GetProfile().SecretStore().Get(parameters.APIKey) - suite.Require().NoError(err) - appKey, err := runner.GetProfile().SecretStore().Get(parameters.APPKey) - suite.Require().NoError(err) - suite.datadogClient = datadog.NewClient(apiKey, appKey) - - suite.startTime = time.Now() -} - -func (suite *baseSuite) TearDownSuite() { - suite.endTime = time.Now() -} - -func (suite *baseSuite) BeforeTest(suiteName, testName string) { +func (suite *baseSuite[Env]) BeforeTest(suiteName, testName string) { suite.T().Logf("START %s/%s %s", suiteName, testName, time.Now()) } -func (suite *baseSuite) AfterTest(suiteName, testName string) { +func (suite *baseSuite[Env]) AfterTest(suiteName, testName string) { suite.T().Logf("FINISH %s/%s %s", suiteName, testName, time.Now()) } @@ -98,7 +79,7 @@ func (mc *myCollectT) Errorf(format string, args ...interface{}) { mc.CollectT.Errorf(format, args...) 
} -func (suite *baseSuite) testMetric(args *testMetricArgs) { +func (suite *baseSuite[Env]) testMetric(args *testMetricArgs) { prettyMetricQuery := fmt.Sprintf("%s{%s}", args.Filter.Name, strings.Join(args.Filter.Tags, ",")) suite.Run("metric "+prettyMetricQuery, func() { @@ -107,7 +88,7 @@ func (suite *baseSuite) testMetric(args *testMetricArgs) { expectedTags = lo.Map(*args.Expect.Tags, func(tag string, _ int) *regexp.Regexp { return regexp.MustCompile(tag) }) } - var optionalTags []*regexp.Regexp + optionalTags := []*regexp.Regexp{regexp.MustCompile("stackid:.*")} // The stackid tag is added by the framework itself to allow filtering on the stack id if args.Optional.Tags != nil { optionalTags = lo.Map(*args.Optional.Tags, func(tag string, _ int) *regexp.Regexp { return regexp.MustCompile(tag) }) } @@ -120,7 +101,7 @@ func (suite *baseSuite) testMetric(args *testMetricArgs) { return "filter_tag_" + tag }) - if _, err := suite.datadogClient.PostEvent(&datadog.Event{ + if _, err := suite.DatadogClient().PostEvent(&datadog.Event{ Title: pointer.Ptr(fmt.Sprintf("testMetric %s", prettyMetricQuery)), Text: pointer.Ptr(fmt.Sprintf(`%%%%%% ### Result @@ -227,7 +208,7 @@ type testLogExpectArgs struct { Message string } -func (suite *baseSuite) testLog(args *testLogArgs) { +func (suite *baseSuite[Env]) testLog(args *testLogArgs) { prettyLogQuery := fmt.Sprintf("%s{%s}", args.Filter.Service, strings.Join(args.Filter.Tags, ",")) suite.Run("log "+prettyLogQuery, func() { @@ -249,7 +230,7 @@ func (suite *baseSuite) testLog(args *testLogArgs) { return "filter_tag_" + tag }) - if _, err := suite.datadogClient.PostEvent(&datadog.Event{ + if _, err := suite.DatadogClient().PostEvent(&datadog.Event{ Title: pointer.Ptr(fmt.Sprintf("testLog %s", prettyLogQuery)), Text: pointer.Ptr(fmt.Sprintf(`%%%%%% ### Result @@ -356,7 +337,7 @@ type testCheckRunExpectArgs struct { AcceptUnexpectedTags bool } -func (suite *baseSuite) testCheckRun(args *testCheckRunArgs) { +func (suite *baseSuite[Env]) testCheckRun(args *testCheckRunArgs) { prettyCheckRunQuery := fmt.Sprintf("%s{%s}", args.Filter.Name, strings.Join(args.Filter.Tags, ",")) suite.Run("checkRun "+prettyCheckRunQuery, func() { @@ -378,7 +359,7 @@ func (suite *baseSuite) testCheckRun(args *testCheckRunArgs) { return "filter_tag_" + tag }) - if _, err := suite.datadogClient.PostEvent(&datadog.Event{ + if _, err := suite.DatadogClient().PostEvent(&datadog.Event{ Title: pointer.Ptr(fmt.Sprintf("testCheckRun %s", prettyCheckRunQuery)), Text: pointer.Ptr(fmt.Sprintf(`%%%%%% ### Result diff --git a/test/new-e2e/tests/containers/docker_test.go b/test/new-e2e/tests/containers/docker_test.go index 329fd2ca1a68b..46c2cd83fe1b4 100644 --- a/test/new-e2e/tests/containers/docker_test.go +++ b/test/new-e2e/tests/containers/docker_test.go @@ -6,55 +6,24 @@ package containers import ( - "context" - "encoding/json" - "fmt" - "os" "testing" - "github.com/DataDog/test-infra-definitions/scenarios/aws/ec2" - "github.com/pulumi/pulumi/sdk/v3/go/auto" - "github.com/stretchr/testify/suite" - - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/components" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/infra" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" + awsdocker "github.com/DataDog/datadog-agent/test/new-e2e/pkg/provisioners/aws/docker" ) type DockerSuite struct { - baseSuite + baseSuite[environments.DockerHost] } func 
TestDockerSuite(t *testing.T) { - suite.Run(t, &DockerSuite{}) + e2e.Run(t, &DockerSuite{}, e2e.WithProvisioner(awsdocker.Provisioner(awsdocker.WithTestingWorkload()))) } func (suite *DockerSuite) SetupSuite() { - ctx := context.Background() - - stackConfig := runner.ConfigMap{ - "ddagent:deploy": auto.ConfigValue{Value: "true"}, - "ddagent:fakeintake": auto.ConfigValue{Value: "true"}, - } - - _, stackOutput, err := infra.GetStackManager().GetStack(ctx, "dockerstack", stackConfig, ec2.VMRunWithDocker, false) - suite.Require().NoError(err) - - var fakeintake components.FakeIntake - fiSerialized, err := json.Marshal(stackOutput.Outputs["dd-Fakeintake-aws-aws-vm"].Value) - suite.Require().NoError(err) - suite.Require().NoError(fakeintake.Import(fiSerialized, &fakeintake)) - suite.Require().NoError(fakeintake.Init(suite)) - suite.Fakeintake = fakeintake.Client() - - var host components.RemoteHost - hostSerialized, err := json.Marshal(stackOutput.Outputs["dd-Host-aws-vm"].Value) - suite.Require().NoError(err) - suite.Require().NoError(host.Import(hostSerialized, &host)) - suite.Require().NoError(host.Init(suite)) - suite.clusterName = fmt.Sprintf("%s-%v", os.Getenv("USER"), host.Address) - suite.baseSuite.SetupSuite() + suite.Fakeintake = suite.Env().FakeIntake.Client() } func (suite *DockerSuite) TestDSDWithUDS() { diff --git a/test/new-e2e/tests/containers/dump_cluster_state.go b/test/new-e2e/tests/containers/dump_cluster_state.go deleted file mode 100644 index a1d0eeb97928c..0000000000000 --- a/test/new-e2e/tests/containers/dump_cluster_state.go +++ /dev/null @@ -1,342 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2023-present Datadog, Inc. - -package containers - -import ( - "bytes" - "context" - "encoding/base64" - "fmt" - "io" - "net" - "os" - "os/user" - "strings" - "sync" - - awsconfig "github.com/aws/aws-sdk-go-v2/config" - awsec2 "github.com/aws/aws-sdk-go-v2/service/ec2" - awsec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" - awseks "github.com/aws/aws-sdk-go-v2/service/eks" - awsekstypes "github.com/aws/aws-sdk-go-v2/service/eks/types" - "golang.org/x/crypto/ssh" - "golang.org/x/crypto/ssh/agent" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/cli-runtime/pkg/genericiooptions" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/clientcmd" - clientcmdapi "k8s.io/client-go/tools/clientcmd/api" - kubectlget "k8s.io/kubectl/pkg/cmd/get" - kubectlutil "k8s.io/kubectl/pkg/cmd/util" - - "github.com/DataDog/datadog-agent/pkg/util/pointer" -) - -func dumpEKSClusterState(ctx context.Context, name string) (ret string) { - var out strings.Builder - defer func() { ret = out.String() }() - - cfg, err := awsconfig.LoadDefaultConfig(ctx) - if err != nil { - fmt.Fprintf(&out, "Failed to load AWS config: %v\n", err) - return - } - - client := awseks.NewFromConfig(cfg) - - clusterDescription, err := client.DescribeCluster(ctx, &awseks.DescribeClusterInput{ - Name: &name, - }) - if err != nil { - fmt.Fprintf(&out, "Failed to describe cluster %s: %v\n", name, err) - return - } - - cluster := clusterDescription.Cluster - if cluster.Status != awsekstypes.ClusterStatusActive { - fmt.Fprintf(&out, "EKS cluster %s is not in active state. 
Current status: %s\n", name, cluster.Status) - return - } - - kubeconfig := clientcmdapi.NewConfig() - kubeconfig.Clusters[name] = &clientcmdapi.Cluster{ - Server: *cluster.Endpoint, - } - if kubeconfig.Clusters[name].CertificateAuthorityData, err = base64.StdEncoding.DecodeString(*cluster.CertificateAuthority.Data); err != nil { - fmt.Fprintf(&out, "Failed to decode certificate authority: %v\n", err) - } - kubeconfig.AuthInfos[name] = &clientcmdapi.AuthInfo{ - Exec: &clientcmdapi.ExecConfig{ - APIVersion: "client.authentication.k8s.io/v1beta1", - Command: "aws", - Args: []string{ - "--region", - cfg.Region, - "eks", - "get-token", - "--cluster-name", - name, - "--output", - "json", - }, - }, - } - kubeconfig.Contexts[name] = &clientcmdapi.Context{ - Cluster: name, - AuthInfo: name, - } - kubeconfig.CurrentContext = name - - dumpK8sClusterState(ctx, kubeconfig, &out) - - return -} - -func dumpKindClusterState(ctx context.Context, name string) (ret string) { - var out strings.Builder - defer func() { ret = out.String() }() - - cfg, err := awsconfig.LoadDefaultConfig(ctx) - if err != nil { - fmt.Fprintf(&out, "Failed to load AWS config: %v\n", err) - return - } - - ec2Client := awsec2.NewFromConfig(cfg) - - user, _ := user.Current() - instancesDescription, err := ec2Client.DescribeInstances(ctx, &awsec2.DescribeInstancesInput{ - Filters: []awsec2types.Filter{ - { - Name: pointer.Ptr("tag:managed-by"), - Values: []string{"pulumi"}, - }, - { - Name: pointer.Ptr("tag:username"), - Values: []string{user.Username}, - }, - { - Name: pointer.Ptr("tag:Name"), - Values: []string{name + "-aws-kind"}, - }, - }, - }) - if err != nil { - fmt.Fprintf(&out, "Failed to describe instances: %v\n", err) - return - } - - if instancesDescription == nil || (len(instancesDescription.Reservations) != 1 && len(instancesDescription.Reservations[0].Instances) != 1) { - fmt.Fprintf(&out, "Didn’t find exactly one instance for cluster %s\n", name) - return - } - - instanceIP := instancesDescription.Reservations[0].Instances[0].PrivateIpAddress - - auth := []ssh.AuthMethod{} - - if sshAgentSocket, found := os.LookupEnv("SSH_AUTH_SOCK"); found { - sshAgent, err := net.Dial("unix", sshAgentSocket) - if err != nil { - fmt.Fprintf(&out, "Failed to connect to SSH agent: %v\n", err) - return - } - defer sshAgent.Close() - - auth = append(auth, ssh.PublicKeysCallback(agent.NewClient(sshAgent).Signers)) - } - - if sshKeyPath, found := os.LookupEnv("E2E_PRIVATE_KEY_PATH"); found { - sshKey, err := os.ReadFile(sshKeyPath) - if err != nil { - fmt.Fprintf(&out, "Failed to read SSH key: %v\n", err) - return - } - - signer, err := ssh.ParsePrivateKey(sshKey) - if err != nil { - fmt.Fprintf(&out, "Failed to parse SSH key: %v\n", err) - return - } - - auth = append(auth, ssh.PublicKeys(signer)) - } - - var sshClient *ssh.Client - err = nil - for _, user := range []string{"ec2-user", "ubuntu"} { - sshClient, err = ssh.Dial("tcp", *instanceIP+":22", &ssh.ClientConfig{ - User: user, - Auth: auth, - HostKeyCallback: ssh.InsecureIgnoreHostKey(), - }) - if err == nil { - break - } - } - if err != nil { - fmt.Fprintf(&out, "Failed to dial SSH server %s: %v\n", *instanceIP, err) - return - } - defer sshClient.Close() - - sshSession, err := sshClient.NewSession() - if err != nil { - fmt.Fprintf(&out, "Failed to create SSH session: %v\n", err) - return - } - defer sshSession.Close() - - stdout, err := sshSession.StdoutPipe() - if err != nil { - fmt.Fprintf(&out, "Failed to create stdout pipe: %v\n", err) - return - } - - stderr, err := 
sshSession.StderrPipe() - if err != nil { - fmt.Fprintf(&out, "Failed to create stderr pipe: %v\n", err) - return - } - - err = sshSession.Start("kind get kubeconfig --name \"$(kind get clusters | head -n 1)\"") - if err != nil { - fmt.Fprintf(&out, "Failed to start remote command: %v\n", err) - return - } - - var stdoutBuf bytes.Buffer - - var wg sync.WaitGroup - wg.Add(2) - - go func() { - if _, err := io.Copy(&stdoutBuf, stdout); err != nil { - fmt.Fprintf(&out, "Failed to read stdout: %v\n", err) - } - wg.Done() - }() - - go func() { - if _, err := io.Copy(&out, stderr); err != nil { - fmt.Fprintf(&out, "Failed to read stderr: %v\n", err) - } - wg.Done() - }() - - err = sshSession.Wait() - wg.Wait() - if err != nil { - fmt.Fprintf(&out, "Remote command exited with error: %v\n", err) - return - } - - kubeconfig, err := clientcmd.Load(stdoutBuf.Bytes()) - if err != nil { - fmt.Fprintf(&out, "Failed to parse kubeconfig: %v\n", err) - return - } - - for _, cluster := range kubeconfig.Clusters { - cluster.Server = strings.Replace(cluster.Server, "0.0.0.0", *instanceIP, 1) - cluster.CertificateAuthorityData = nil - cluster.InsecureSkipTLSVerify = true - } - - dumpK8sClusterState(ctx, kubeconfig, &out) - - return -} - -func dumpK8sClusterState(ctx context.Context, kubeconfig *clientcmdapi.Config, out *strings.Builder) { - kubeconfigFile, err := os.CreateTemp("", "kubeconfig") - if err != nil { - fmt.Fprintf(out, "Failed to create kubeconfig temporary file: %v\n", err) - return - } - defer os.Remove(kubeconfigFile.Name()) - - if err := clientcmd.WriteToFile(*kubeconfig, kubeconfigFile.Name()); err != nil { - fmt.Fprintf(out, "Failed to write kubeconfig file: %v\n", err) - return - } - - if err := kubeconfigFile.Close(); err != nil { - fmt.Fprintf(out, "Failed to close kubeconfig file: %v\n", err) - } - - fmt.Fprintf(out, "\n") - - configFlags := genericclioptions.NewConfigFlags(false) - kubeconfigFileName := kubeconfigFile.Name() - configFlags.KubeConfig = &kubeconfigFileName - - factory := kubectlutil.NewFactory(configFlags) - - streams := genericiooptions.IOStreams{ - Out: out, - ErrOut: out, - } - - getCmd := kubectlget.NewCmdGet("", factory, streams) - getCmd.SetOut(out) - getCmd.SetErr(out) - getCmd.SetContext(ctx) - getCmd.SetArgs([]string{ - "nodes,mutatingwebhookconfiguration,validatingwebhookconfiguration,all", - "--all-namespaces", - "-o", - "wide", - }) - if err := getCmd.ExecuteContext(ctx); err != nil { - fmt.Fprintf(out, "Failed to execute Get command: %v\n", err) - return - } - - // Get the logs of containers that have restarted - config, err := clientcmd.BuildConfigFromFlags("", kubeconfigFile.Name()) - if err != nil { - fmt.Fprintf(out, "Failed to build Kubernetes config: %v\n", err) - return - } - k8sClient, err := kubernetes.NewForConfig(config) - if err != nil { - fmt.Fprintf(out, "Failed to create Kubernetes client: %v\n", err) - return - } - - pods, err := k8sClient.CoreV1().Pods("").List(ctx, metav1.ListOptions{}) - if err != nil { - fmt.Fprintf(out, "Failed to list pods: %v\n", err) - return - } - - for _, pod := range pods.Items { - for _, containerStatus := range pod.Status.ContainerStatuses { - if containerStatus.RestartCount > 0 { - fmt.Fprintf(out, "\nLOGS FOR POD %s/%s CONTAINER %s:\n", pod.Namespace, pod.Name, containerStatus.Name) - logs, err := k8sClient.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &corev1.PodLogOptions{ - Container: containerStatus.Name, - Previous: true, - // TailLines: pointer.Ptr(int64(100)), - }).Stream(ctx) - if err != nil { - 
fmt.Fprintf(out, "Failed to get logs: %v\n", err) - continue - } - defer logs.Close() - - _, err = io.Copy(out, logs) - if err != nil { - fmt.Fprintf(out, "Failed to copy logs: %v\n", err) - continue - } - } - } - } -} diff --git a/test/new-e2e/tests/containers/ecs_test.go b/test/new-e2e/tests/containers/ecs_test.go index 9498aa9414deb..cdf19396f01ec 100644 --- a/test/new-e2e/tests/containers/ecs_test.go +++ b/test/new-e2e/tests/containers/ecs_test.go @@ -7,29 +7,26 @@ package containers import ( "context" - "encoding/json" "regexp" "strings" "testing" "time" - ecsComp "github.com/DataDog/test-infra-definitions/components/ecs" - "github.com/DataDog/test-infra-definitions/scenarios/aws/ecs" - "github.com/DataDog/datadog-agent/pkg/util/pointer" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/components" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/infra" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" awsconfig "github.com/aws/aws-sdk-go-v2/config" awsecs "github.com/aws/aws-sdk-go-v2/service/ecs" awsecstypes "github.com/aws/aws-sdk-go-v2/service/ecs/types" "github.com/fatih/color" - "github.com/pulumi/pulumi/sdk/v3/go/auto" "github.com/samber/lo" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" + + tifecs "github.com/DataDog/test-infra-definitions/scenarios/aws/ecs" + + envecs "github.com/DataDog/datadog-agent/test/new-e2e/pkg/provisioners/aws/ecs" ) const ( @@ -41,52 +38,27 @@ const ( ) type ecsSuite struct { - baseSuite - + baseSuite[environments.ECS] ecsClusterName string } func TestECSSuite(t *testing.T) { - suite.Run(t, &ecsSuite{}) + e2e.Run(t, &ecsSuite{}, e2e.WithProvisioner(envecs.Provisioner( + envecs.WithECSOptions( + tifecs.WithFargateCapacityProvider(), + tifecs.WithLinuxNodeGroup(), + tifecs.WithWindowsNodeGroup(), + tifecs.WithLinuxBottleRocketNodeGroup(), + ), + envecs.WithTestingWorkload(), + ))) } func (suite *ecsSuite) SetupSuite() { - ctx := context.Background() - - // Creating the stack - stackConfig := runner.ConfigMap{ - "ddinfra:aws/ecs/linuxECSOptimizedNodeGroup": auto.ConfigValue{Value: "true"}, - "ddinfra:aws/ecs/linuxBottlerocketNodeGroup": auto.ConfigValue{Value: "true"}, - "ddinfra:aws/ecs/windowsLTSCNodeGroup": auto.ConfigValue{Value: "true"}, - "ddagent:deploy": auto.ConfigValue{Value: "true"}, - "ddagent:fakeintake": auto.ConfigValue{Value: "true"}, - "ddtestworkload:deploy": auto.ConfigValue{Value: "true"}, - } - - _, stackOutput, err := infra.GetStackManager().GetStackNoDeleteOnFailure( - ctx, - "ecs-cluster", - ecs.Run, - infra.WithConfigMap(stackConfig), - ) - suite.Require().NoError(err) - - fakeintake := &components.FakeIntake{} - fiSerialized, err := json.Marshal(stackOutput.Outputs["dd-Fakeintake-aws-ecs"].Value) - suite.Require().NoError(err) - suite.Require().NoError(fakeintake.Import(fiSerialized, fakeintake)) - suite.Require().NoError(fakeintake.Init(suite)) - suite.Fakeintake = fakeintake.Client() - - clusterSerialized, err := json.Marshal(stackOutput.Outputs["dd-Cluster-ecs"].Value) - suite.Require().NoError(err) - ecsCluster := &ecsComp.ClusterOutput{} - suite.Require().NoError(ecsCluster.Import(clusterSerialized, ecsCluster)) - - suite.ecsClusterName = ecsCluster.ClusterName - suite.clusterName = suite.ecsClusterName - suite.baseSuite.SetupSuite() + suite.Fakeintake = suite.Env().FakeIntake.Client() + suite.ecsClusterName = 
suite.Env().ECSCluster.ClusterName + suite.clusterName = suite.Env().ECSCluster.ClusterName } func (suite *ecsSuite) TearDownSuite() { @@ -99,8 +71,8 @@ func (suite *ecsSuite) TearDownSuite() { suite.T().Log(c("https://dddev.datadoghq.com/dashboard/mnw-tdr-jd8/e2e-tests-containers-ecs?refresh_mode=paused&tpl_var_ecs_cluster_name%%5B0%%5D=%s&tpl_var_fake_intake_task_family%%5B0%%5D=%s-fakeintake-ecs&from_ts=%d&to_ts=%d&live=false", suite.ecsClusterName, strings.TrimSuffix(suite.ecsClusterName, "-ecs"), - suite.startTime.UnixMilli(), - suite.endTime.UnixMilli(), + suite.StartTime().UnixMilli(), + suite.EndTime().UnixMilli(), )) } diff --git a/test/new-e2e/tests/containers/eks_test.go b/test/new-e2e/tests/containers/eks_test.go index 02ddc3cf4e1d4..40446d85f6398 100644 --- a/test/new-e2e/tests/containers/eks_test.go +++ b/test/new-e2e/tests/containers/eks_test.go @@ -6,105 +6,34 @@ package containers import ( - "context" - "encoding/json" "testing" - "github.com/DataDog/test-infra-definitions/scenarios/aws/eks" + "github.com/DataDog/test-infra-definitions/components/datadog/kubernetesagentparams" + tifeks "github.com/DataDog/test-infra-definitions/scenarios/aws/eks" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/components" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner/parameters" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/infra" - - "github.com/pulumi/pulumi/sdk/v3/go/auto" - "github.com/stretchr/testify/suite" - "k8s.io/client-go/tools/clientcmd" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" + awskubernetes "github.com/DataDog/datadog-agent/test/new-e2e/pkg/provisioners/aws/kubernetes" ) type eksSuite struct { k8sSuite - initOnly bool } func TestEKSSuite(t *testing.T) { - var initOnly bool - initOnlyParam, err := runner.GetProfile().ParamStore().GetBoolWithDefault(parameters.InitOnly, false) - if err == nil { - initOnly = initOnlyParam - } - suite.Run(t, &eksSuite{initOnly: initOnly}) + e2e.Run(t, &eksSuite{}, e2e.WithProvisioner(awskubernetes.EKSProvisioner( + awskubernetes.WithEKSOptions( + tifeks.WithLinuxNodeGroup(), + tifeks.WithWindowsNodeGroup(), + tifeks.WithBottlerocketNodeGroup(), + tifeks.WithLinuxARMNodeGroup(), + ), + awskubernetes.WithDeployDogstatsd(), + awskubernetes.WithDeployTestWorkload(), + awskubernetes.WithAgentOptions(kubernetesagentparams.WithDualShipping()), + ))) } func (suite *eksSuite) SetupSuite() { - ctx := context.Background() - - stackConfig := runner.ConfigMap{ - "ddagent:deploy": auto.ConfigValue{Value: "true"}, - "ddagent:fakeintake": auto.ConfigValue{Value: "true"}, - "ddtestworkload:deploy": auto.ConfigValue{Value: "true"}, - "dddogstatsd:deploy": auto.ConfigValue{Value: "true"}, - "ddagent:dualshipping": auto.ConfigValue{Value: "true"}, - } - - _, stackOutput, err := infra.GetStackManager().GetStackNoDeleteOnFailure( - ctx, - "eks-cluster", - eks.Run, - infra.WithConfigMap(stackConfig), - ) - - if !suite.Assert().NoError(err) { - stackName, err := infra.GetStackManager().GetPulumiStackName("eks-cluster") - suite.Require().NoError(err) - suite.T().Log(dumpEKSClusterState(ctx, stackName)) - if !runner.GetProfile().AllowDevMode() || !*keepStacks { - infra.GetStackManager().DeleteStack(ctx, "eks-cluster", nil) - } - suite.T().FailNow() - } - - if suite.initOnly { - suite.T().Skip("E2E_INIT_ONLY is set, skipping tests") - } - - fakeintake := &components.FakeIntake{} - fiSerialized, err := 
json.Marshal(stackOutput.Outputs["dd-Fakeintake-aws-ecs"].Value) - suite.Require().NoError(err) - suite.Require().NoError(fakeintake.Import(fiSerialized, &fakeintake)) - suite.Require().NoError(fakeintake.Init(suite)) - suite.Fakeintake = fakeintake.Client() - - kubeCluster := &components.KubernetesCluster{} - kubeSerialized, err := json.Marshal(stackOutput.Outputs["dd-Cluster-eks"].Value) - suite.Require().NoError(err) - suite.Require().NoError(kubeCluster.Import(kubeSerialized, &kubeCluster)) - suite.Require().NoError(kubeCluster.Init(suite)) - suite.KubeClusterName = kubeCluster.ClusterName - suite.K8sClient = kubeCluster.Client() - suite.K8sConfig, err = clientcmd.RESTConfigFromKubeConfig([]byte(kubeCluster.KubeConfig)) - suite.Require().NoError(err) - - kubernetesAgent := &components.KubernetesAgent{} - kubernetesAgentSerialized, err := json.Marshal(stackOutput.Outputs["dd-KubernetesAgent-aws-datadog-agent"].Value) - suite.Require().NoError(err) - suite.Require().NoError(kubernetesAgent.Import(kubernetesAgentSerialized, &kubernetesAgent)) - - suite.KubernetesAgentRef = kubernetesAgent - suite.k8sSuite.SetupSuite() -} - -func (suite *eksSuite) TearDownSuite() { - if suite.initOnly { - suite.T().Logf("E2E_INIT_ONLY is set, skipping deletion") - return - } - - suite.k8sSuite.TearDownSuite() - - ctx := context.Background() - stackName, err := infra.GetStackManager().GetPulumiStackName("eks-cluster") - suite.Require().NoError(err) - suite.T().Log(dumpEKSClusterState(ctx, stackName)) + suite.Fakeintake = suite.Env().FakeIntake.Client() } diff --git a/test/new-e2e/tests/containers/k8s_test.go b/test/new-e2e/tests/containers/k8s_test.go index 9561b71fb7a26..ceb84e59d7319 100644 --- a/test/new-e2e/tests/containers/k8s_test.go +++ b/test/new-e2e/tests/containers/k8s_test.go @@ -21,7 +21,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/pointer" "github.com/DataDog/datadog-agent/test/fakeintake/aggregator" fakeintake "github.com/DataDog/datadog-agent/test/fakeintake/client" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/components" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" "github.com/fatih/color" "github.com/samber/lo" @@ -31,9 +31,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" - restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/remotecommand" ) @@ -52,21 +50,12 @@ const ( var GitCommit string type k8sSuite struct { - baseSuite - - KubeClusterName string - AgentLinuxHelmInstallName string - AgentWindowsHelmInstallName string - KubernetesAgentRef *components.KubernetesAgent - - K8sConfig *restclient.Config - K8sClient kubernetes.Interface + baseSuite[environments.Kubernetes] } func (suite *k8sSuite) SetupSuite() { - suite.clusterName = suite.KubeClusterName - suite.baseSuite.SetupSuite() + suite.clusterName = suite.Env().KubernetesCluster.ClusterName } func (suite *k8sSuite) TearDownSuite() { @@ -77,10 +66,10 @@ func (suite *k8sSuite) TearDownSuite() { suite.T().Log(c("The data produced and asserted by these tests can be viewed on this dashboard:")) c = color.New(color.Bold, color.FgBlue).SprintfFunc() suite.T().Log(c("https://dddev.datadoghq.com/dashboard/qcp-brm-ysc/e2e-tests-containers-k8s?refresh_mode=paused&tpl_var_kube_cluster_name%%5B0%%5D=%s&tpl_var_fake_intake_task_family%%5B0%%5D=%s-fakeintake-ecs&from_ts=%d&to_ts=%d&live=false", - suite.KubeClusterName, - suite.KubeClusterName, - 
suite.startTime.UnixMilli(), - suite.endTime.UnixMilli(), + suite.clusterName, + suite.clusterName, + suite.StartTime().UnixMilli(), + suite.EndTime().UnixMilli(), )) } @@ -120,7 +109,7 @@ func (suite *k8sSuite) testUpAndRunning(waitFor time.Duration) { suite.Run("agent pods are ready and not restarting", func() { suite.EventuallyWithTf(func(c *assert.CollectT) { - linuxNodes, err := suite.K8sClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{ + linuxNodes, err := suite.Env().KubernetesCluster.Client().CoreV1().Nodes().List(ctx, metav1.ListOptions{ LabelSelector: fields.OneTermEqualSelector("kubernetes.io/os", "linux").String(), }) // Can be replaced by require.NoErrorf(…) once https://github.com/stretchr/testify/pull/1481 is merged @@ -128,7 +117,7 @@ func (suite *k8sSuite) testUpAndRunning(waitFor time.Duration) { return } - windowsNodes, err := suite.K8sClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{ + windowsNodes, err := suite.Env().KubernetesCluster.Client().CoreV1().Nodes().List(ctx, metav1.ListOptions{ LabelSelector: fields.OneTermEqualSelector("kubernetes.io/os", "windows").String(), }) // Can be replaced by require.NoErrorf(…) once https://github.com/stretchr/testify/pull/1481 is merged @@ -136,39 +125,39 @@ func (suite *k8sSuite) testUpAndRunning(waitFor time.Duration) { return } - linuxPods, err := suite.K8sClient.CoreV1().Pods("datadog").List(ctx, metav1.ListOptions{ - LabelSelector: fields.OneTermEqualSelector("app", suite.KubernetesAgentRef.LinuxNodeAgent.LabelSelectors["app"]).String(), + linuxPods, err := suite.Env().KubernetesCluster.Client().CoreV1().Pods("datadog").List(ctx, metav1.ListOptions{ + LabelSelector: fields.OneTermEqualSelector("app", suite.Env().Agent.LinuxNodeAgent.LabelSelectors["app"]).String(), }) // Can be replaced by require.NoErrorf(…) once https://github.com/stretchr/testify/pull/1481 is merged if !assert.NoErrorf(c, err, "Failed to list Linux datadog agent pods") { return } - windowsPods, err := suite.K8sClient.CoreV1().Pods("datadog").List(ctx, metav1.ListOptions{ - LabelSelector: fields.OneTermEqualSelector("app", suite.KubernetesAgentRef.WindowsNodeAgent.LabelSelectors["app"]).String(), + windowsPods, err := suite.Env().KubernetesCluster.Client().CoreV1().Pods("datadog").List(ctx, metav1.ListOptions{ + LabelSelector: fields.OneTermEqualSelector("app", suite.Env().Agent.WindowsNodeAgent.LabelSelectors["app"]).String(), }) // Can be replaced by require.NoErrorf(…) once https://github.com/stretchr/testify/pull/1481 is merged if !assert.NoErrorf(c, err, "Failed to list Windows datadog agent pods") { return } - clusterAgentPods, err := suite.K8sClient.CoreV1().Pods("datadog").List(ctx, metav1.ListOptions{ - LabelSelector: fields.OneTermEqualSelector("app", suite.KubernetesAgentRef.LinuxClusterAgent.LabelSelectors["app"]).String(), + clusterAgentPods, err := suite.Env().KubernetesCluster.Client().CoreV1().Pods("datadog").List(ctx, metav1.ListOptions{ + LabelSelector: fields.OneTermEqualSelector("app", suite.Env().Agent.LinuxClusterAgent.LabelSelectors["app"]).String(), }) // Can be replaced by require.NoErrorf(…) once https://github.com/stretchr/testify/pull/1481 is merged if !assert.NoErrorf(c, err, "Failed to list datadog cluster agent pods") { return } - clusterChecksPods, err := suite.K8sClient.CoreV1().Pods("datadog").List(ctx, metav1.ListOptions{ - LabelSelector: fields.OneTermEqualSelector("app", suite.KubernetesAgentRef.LinuxClusterChecks.LabelSelectors["app"]).String(), + clusterChecksPods, err := 
suite.Env().KubernetesCluster.Client().CoreV1().Pods("datadog").List(ctx, metav1.ListOptions{ + LabelSelector: fields.OneTermEqualSelector("app", suite.Env().Agent.LinuxClusterChecks.LabelSelectors["app"]).String(), }) // Can be replaced by require.NoErrorf(…) once https://github.com/stretchr/testify/pull/1481 is merged if !assert.NoErrorf(c, err, "Failed to list datadog cluster checks runner pods") { return } - dogstatsdPods, err := suite.K8sClient.CoreV1().Pods("dogstatsd-standalone").List(ctx, metav1.ListOptions{ + dogstatsdPods, err := suite.Env().KubernetesCluster.Client().CoreV1().Pods("dogstatsd-standalone").List(ctx, metav1.ListOptions{ LabelSelector: fields.OneTermEqualSelector("app", "dogstatsd-standalone").String(), }) // Can be replaced by require.NoErrorf(…) once https://github.com/stretchr/testify/pull/1481 is merged @@ -199,13 +188,13 @@ func (suite *k8sSuite) TestAdmissionControllerWebhooksExist() { expectedWebhookName := "datadog-webhook" suite.Run("agent registered mutating webhook configuration", func() { - mutatingConfig, err := suite.K8sClient.AdmissionregistrationV1().MutatingWebhookConfigurations().Get(ctx, expectedWebhookName, metav1.GetOptions{}) + mutatingConfig, err := suite.Env().KubernetesCluster.Client().AdmissionregistrationV1().MutatingWebhookConfigurations().Get(ctx, expectedWebhookName, metav1.GetOptions{}) suite.Require().NoError(err) suite.NotNilf(mutatingConfig, "None of the mutating webhook configurations have the name '%s'", expectedWebhookName) }) suite.Run("agent registered validating webhook configuration", func() { - validatingConfig, err := suite.K8sClient.AdmissionregistrationV1().ValidatingWebhookConfigurations().Get(ctx, expectedWebhookName, metav1.GetOptions{}) + validatingConfig, err := suite.Env().KubernetesCluster.Client().AdmissionregistrationV1().ValidatingWebhookConfigurations().Get(ctx, expectedWebhookName, metav1.GetOptions{}) suite.Require().NoError(err) suite.NotNilf(validatingConfig, "None of the validating webhook configurations have the name '%s'", expectedWebhookName) }) @@ -222,27 +211,27 @@ func (suite *k8sSuite) TestVersion() { }{ { "Linux agent", - suite.KubernetesAgentRef.LinuxNodeAgent.LabelSelectors["app"], + suite.Env().Agent.LinuxNodeAgent.LabelSelectors["app"], "agent", }, { "Windows agent", - suite.KubernetesAgentRef.WindowsNodeAgent.LabelSelectors["app"], + suite.Env().Agent.WindowsNodeAgent.LabelSelectors["app"], "agent", }, { "cluster agent", - suite.KubernetesAgentRef.LinuxClusterAgent.LabelSelectors["app"], + suite.Env().Agent.LinuxClusterAgent.LabelSelectors["app"], "cluster-agent", }, { "cluster checks", - suite.KubernetesAgentRef.LinuxClusterChecks.LabelSelectors["app"], + suite.Env().Agent.LinuxClusterChecks.LabelSelectors["app"], "agent", }, } { suite.Run(tt.podType+" pods are running the good version", func() { - linuxPods, err := suite.K8sClient.CoreV1().Pods("datadog").List(ctx, metav1.ListOptions{ + linuxPods, err := suite.Env().KubernetesCluster.Client().CoreV1().Pods("datadog").List(ctx, metav1.ListOptions{ LabelSelector: fields.OneTermEqualSelector("app", tt.appSelector).String(), Limit: 1, }) @@ -277,8 +266,8 @@ func (suite *k8sSuite) TestCLI() { func (suite *k8sSuite) testAgentCLI() { ctx := context.Background() - pod, err := suite.K8sClient.CoreV1().Pods("datadog").List(ctx, metav1.ListOptions{ - LabelSelector: fields.OneTermEqualSelector("app", suite.KubernetesAgentRef.LinuxNodeAgent.LabelSelectors["app"]).String(), + pod, err := 
suite.Env().KubernetesCluster.Client().CoreV1().Pods("datadog").List(ctx, metav1.ListOptions{ + LabelSelector: fields.OneTermEqualSelector("app", suite.Env().Agent.LinuxNodeAgent.LabelSelectors["app"]).String(), Limit: 1, }) suite.Require().NoError(err) @@ -384,8 +373,8 @@ func (suite *k8sSuite) testAgentCLI() { func (suite *k8sSuite) testClusterAgentCLI() { ctx := context.Background() - pod, err := suite.K8sClient.CoreV1().Pods("datadog").List(ctx, metav1.ListOptions{ - LabelSelector: fields.OneTermEqualSelector("app", suite.KubernetesAgentRef.LinuxClusterAgent.LabelSelectors["app"]).String(), + pod, err := suite.Env().KubernetesCluster.Client().CoreV1().Pods("datadog").List(ctx, metav1.ListOptions{ + LabelSelector: fields.OneTermEqualSelector("app", suite.Env().Agent.LinuxClusterAgent.LabelSelectors["app"]).String(), Limit: 1, }) suite.Require().NoError(err) @@ -931,7 +920,7 @@ func (suite *k8sSuite) testAdmissionControllerPod(namespace string, name string, // libraries for the detected language are injected if languageShouldBeAutoDetected { suite.Require().EventuallyWithTf(func(c *assert.CollectT) { - deployment, err := suite.K8sClient.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{}) + deployment, err := suite.Env().KubernetesCluster.Client().AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{}) if !assert.NoError(c, err) { return } @@ -948,7 +937,7 @@ func (suite *k8sSuite) testAdmissionControllerPod(namespace string, name string, }, 5*time.Minute, 10*time.Second, "The deployment with name %s in namespace %s does not exist or does not have the auto detected languages annotation", name, namespace) } - pods, err := suite.K8sClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{ + pods, err := suite.Env().KubernetesCluster.Client().CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{ LabelSelector: fields.OneTermEqualSelector("app", name).String(), }) suite.Require().NoError(err) @@ -1036,7 +1025,7 @@ func (suite *k8sSuite) testAdmissionControllerPod(namespace string, name string, func (suite *k8sSuite) TestContainerImage() { sendEvent := func(alertType, text string) { - if _, err := suite.datadogClient.PostEvent(&datadog.Event{ + if _, err := suite.DatadogClient().PostEvent(&datadog.Event{ Title: pointer.Ptr(suite.T().Name()), Text: pointer.Ptr(fmt.Sprintf(`%%%%%% `+"```"+` @@ -1106,7 +1095,7 @@ func (suite *k8sSuite) TestContainerImage() { func (suite *k8sSuite) TestSBOM() { sendEvent := func(alertType, text string) { - if _, err := suite.datadogClient.PostEvent(&datadog.Event{ + if _, err := suite.DatadogClient().PostEvent(&datadog.Event{ Title: pointer.Ptr(suite.T().Name()), Text: pointer.Ptr(fmt.Sprintf(`%%%%%% `+"```"+` @@ -1231,7 +1220,7 @@ func (suite *k8sSuite) TestSBOM() { func (suite *k8sSuite) TestContainerLifecycleEvents() { sendEvent := func(alertType, text string) { - if _, err := suite.datadogClient.PostEvent(&datadog.Event{ + if _, err := suite.DatadogClient().PostEvent(&datadog.Event{ Title: pointer.Ptr(suite.T().Name()), Text: pointer.Ptr(fmt.Sprintf(`%%%%%% `+"```"+` @@ -1261,7 +1250,7 @@ func (suite *k8sSuite) TestContainerLifecycleEvents() { var nginxPod corev1.Pod suite.Require().EventuallyWithTf(func(c *assert.CollectT) { - pods, err := suite.K8sClient.CoreV1().Pods("workload-nginx").List(context.Background(), metav1.ListOptions{ + pods, err := suite.Env().KubernetesCluster.Client().CoreV1().Pods("workload-nginx").List(context.Background(), metav1.ListOptions{ LabelSelector: fields.OneTermEqualSelector("app", 
"nginx").String(), FieldSelector: fields.OneTermEqualSelector("status.phase", "Running").String(), }) @@ -1281,7 +1270,7 @@ func (suite *k8sSuite) TestContainerLifecycleEvents() { }) }, 1*time.Minute, 10*time.Second, "Failed to find an nginx pod") - err := suite.K8sClient.CoreV1().Pods("workload-nginx").Delete(context.Background(), nginxPod.Name, metav1.DeleteOptions{}) + err := suite.Env().KubernetesCluster.Client().CoreV1().Pods("workload-nginx").Delete(context.Background(), nginxPod.Name, metav1.DeleteOptions{}) suite.Require().NoError(err) suite.EventuallyWithTf(func(collect *assert.CollectT) { @@ -1324,7 +1313,7 @@ func (suite *k8sSuite) TestContainerLifecycleEvents() { func (suite *k8sSuite) testHPA(namespace, deployment string) { suite.Run(fmt.Sprintf("hpa kubernetes_state.deployment.replicas_available{kube_namespace:%s,kube_deployment:%s}", namespace, deployment), func() { sendEvent := func(alertType, text string, time *int) { - if _, err := suite.datadogClient.PostEvent(&datadog.Event{ + if _, err := suite.DatadogClient().PostEvent(&datadog.Event{ Title: pointer.Ptr(fmt.Sprintf("testHPA %s/%s", namespace, deployment)), Text: pointer.Ptr(fmt.Sprintf(`%%%%%% %s @@ -1412,7 +1401,7 @@ func (suite *k8sSuite) testHPA(namespace, deployment string) { type podExecOption func(*corev1.PodExecOptions) func (suite *k8sSuite) podExec(namespace, pod, container string, cmd []string, podOptions ...podExecOption) (stdout, stderr string, err error) { - req := suite.K8sClient.CoreV1().RESTClient().Post().Resource("pods").Namespace(namespace).Name(pod).SubResource("exec") + req := suite.Env().KubernetesCluster.Client().CoreV1().RESTClient().Post().Resource("pods").Namespace(namespace).Name(pod).SubResource("exec") option := &corev1.PodExecOptions{ Stdin: false, Stdout: true, @@ -1431,7 +1420,7 @@ func (suite *k8sSuite) podExec(namespace, pod, container string, cmd []string, p scheme.ParameterCodec, ) - exec, err := remotecommand.NewSPDYExecutor(suite.K8sConfig, "POST", req.URL()) + exec, err := remotecommand.NewSPDYExecutor(suite.Env().KubernetesCluster.KubernetesClient.K8sConfig, "POST", req.URL()) if err != nil { return "", "", err } diff --git a/test/new-e2e/tests/containers/kindvm_test.go b/test/new-e2e/tests/containers/kindvm_test.go index 84be75e550403..bad94d3094d30 100644 --- a/test/new-e2e/tests/containers/kindvm_test.go +++ b/test/new-e2e/tests/containers/kindvm_test.go @@ -6,19 +6,14 @@ package containers import ( - "context" - "encoding/json" "testing" - "github.com/DataDog/test-infra-definitions/scenarios/aws/kindvm" + "github.com/DataDog/test-infra-definitions/components/datadog/kubernetesagentparams" + "github.com/DataDog/test-infra-definitions/scenarios/aws/ec2" + "github.com/DataDog/test-infra-definitions/scenarios/aws/fakeintake" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/components" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/infra" - - "github.com/pulumi/pulumi/sdk/v3/go/auto" - "github.com/stretchr/testify/suite" - "k8s.io/client-go/tools/clientcmd" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" + awskubernetes "github.com/DataDog/datadog-agent/test/new-e2e/pkg/provisioners/aws/kubernetes" ) type kindSuite struct { @@ -26,70 +21,20 @@ type kindSuite struct { } func TestKindSuite(t *testing.T) { - suite.Run(t, &kindSuite{}) + e2e.Run(t, &kindSuite{}, e2e.WithProvisioner(awskubernetes.KindProvisioner( + awskubernetes.WithEC2VMOptions( + ec2.WithInstanceType("t3.xlarge"), + ), + 
awskubernetes.WithFakeIntakeOptions(fakeintake.WithMemory(2048)), + awskubernetes.WithDeployDogstatsd(), + awskubernetes.WithDeployTestWorkload(), + awskubernetes.WithAgentOptions(kubernetesagentparams.WithDualShipping()), + ))) } func (suite *kindSuite) SetupSuite() { - ctx := context.Background() - - stackConfig := runner.ConfigMap{ - "ddinfra:aws/defaultInstanceType": auto.ConfigValue{Value: "t3.xlarge"}, - "ddagent:deploy": auto.ConfigValue{Value: "true"}, - "ddagent:fakeintake": auto.ConfigValue{Value: "true"}, - "ddtestworkload:deploy": auto.ConfigValue{Value: "true"}, - "dddogstatsd:deploy": auto.ConfigValue{Value: "true"}, - "ddagent:dualshipping": auto.ConfigValue{Value: "true"}, - } - - _, stackOutput, err := infra.GetStackManager().GetStackNoDeleteOnFailure( - ctx, - "kind-cluster", - kindvm.Run, - infra.WithConfigMap(stackConfig), - ) - if !suite.Assert().NoError(err) { - stackName, err := infra.GetStackManager().GetPulumiStackName("kind-cluster") - suite.Require().NoError(err) - suite.T().Log(dumpKindClusterState(ctx, stackName)) - if !runner.GetProfile().AllowDevMode() || !*keepStacks { - infra.GetStackManager().DeleteStack(ctx, "kind-cluster", nil) - } - suite.T().FailNow() - } - - var fakeintake components.FakeIntake - fiSerialized, err := json.Marshal(stackOutput.Outputs["dd-Fakeintake-aws-kind"].Value) - suite.Require().NoError(err) - suite.Require().NoError(fakeintake.Import(fiSerialized, &fakeintake)) - suite.Require().NoError(fakeintake.Init(suite)) - suite.Fakeintake = fakeintake.Client() - - var kubeCluster components.KubernetesCluster - kubeSerialized, err := json.Marshal(stackOutput.Outputs["dd-Cluster-kind"].Value) - suite.Require().NoError(err) - suite.Require().NoError(kubeCluster.Import(kubeSerialized, &kubeCluster)) - suite.Require().NoError(kubeCluster.Init(suite)) - suite.KubeClusterName = kubeCluster.ClusterName - suite.K8sClient = kubeCluster.Client() - suite.K8sConfig, err = clientcmd.RESTConfigFromKubeConfig([]byte(kubeCluster.KubeConfig)) - suite.Require().NoError(err) - - kubernetesAgent := &components.KubernetesAgent{} - kubernetesAgentSerialized, err := json.Marshal(stackOutput.Outputs["dd-KubernetesAgent-aws-datadog-agent"].Value) - suite.Require().NoError(err) - suite.Require().NoError(kubernetesAgent.Import(kubernetesAgentSerialized, &kubernetesAgent)) - suite.KubernetesAgentRef = kubernetesAgent - suite.k8sSuite.SetupSuite() -} - -func (suite *kindSuite) TearDownSuite() { - suite.k8sSuite.TearDownSuite() - - ctx := context.Background() - stackName, err := infra.GetStackManager().GetPulumiStackName("kind-cluster") - suite.Require().NoError(err) - suite.T().Log(dumpKindClusterState(ctx, stackName)) + suite.Fakeintake = suite.Env().FakeIntake.Client() } func (suite *kindSuite) TestControlPlane() {
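
A minimal sketch of the consumption pattern this patch introduces, assuming the wiring shown in the diff above. exampleKindSuite and TestExampleKindSuite are hypothetical names introduced only for illustration; e2e.Run, e2e.WithProvisioner, awskubernetes.KindProvisioner, awskubernetes.WithDeployTestWorkload, environments.Kubernetes, baseSuite, and suite.Env().FakeIntake.Client() all appear in the patch itself. The explicit suite.BaseSuite.SetupSuite() chaining call is an assumption about how the embedded e2e suite expects setup to run, not something this diff shows.

package containers

import (
	"testing"

	"github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
	"github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments"
	awskubernetes "github.com/DataDog/datadog-agent/test/new-e2e/pkg/provisioners/aws/kubernetes"
)

// exampleKindSuite declares its whole stack (Kind cluster, Agent, fake intake)
// through the provisioner passed to e2e.Run, instead of building a Pulumi
// stack by hand in SetupSuite the way the code deleted above did.
type exampleKindSuite struct {
	baseSuite[environments.Kubernetes]
}

func TestExampleKindSuite(t *testing.T) {
	e2e.Run(t, &exampleKindSuite{}, e2e.WithProvisioner(awskubernetes.KindProvisioner(
		awskubernetes.WithDeployTestWorkload(),
	)))
}

func (suite *exampleKindSuite) SetupSuite() {
	// Assumption: chain the embedded BaseSuite setup so the environment is
	// provisioned before components are read back from suite.Env().
	suite.BaseSuite.SetupSuite()
	suite.Fakeintake = suite.Env().FakeIntake.Client()
}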