From 3848b87ed0299d6f6df9e938ee87553712bc8f89 Mon Sep 17 00:00:00 2001 From: Jack Francis Date: Thu, 15 Dec 2022 15:54:20 -0800 Subject: [PATCH] E2E: verify daemonset pods after machines --- test/e2e/azure_test.go | 72 +++++++++++++++++++++ test/e2e/cloud-provider-azure.go | 10 --- test/e2e/cni.go | 19 ------ test/e2e/daemonsets.go | 103 +++++++++++++++++++++++++++++++ 4 files changed, 175 insertions(+), 29 deletions(-) create mode 100644 test/e2e/daemonsets.go diff --git a/test/e2e/azure_test.go b/test/e2e/azure_test.go index a64259490bc1..fd1fb891a962 100644 --- a/test/e2e/azure_test.go +++ b/test/e2e/azure_test.go @@ -184,6 +184,15 @@ var _ = Describe("Workload cluster creation", func() { ControlPlaneWaiters: clusterctl.ControlPlaneWaiters{ WaitForControlPlaneInitialized: EnsureControlPlaneInitialized, }, + PostMachinesProvisioned: func() { + EnsureDaemonsets(ctx, func() DaemonsetsSpecInput { + return DaemonsetsSpecInput{ + BootstrapClusterProxy: bootstrapClusterProxy, + Namespace: namespace, + ClusterName: clusterName, + } + }) + }, }, result) By("Creating a private cluster from the management cluster", func() { @@ -236,6 +245,15 @@ var _ = Describe("Workload cluster creation", func() { ControlPlaneWaiters: clusterctl.ControlPlaneWaiters{ WaitForControlPlaneInitialized: EnsureControlPlaneInitialized, }, + PostMachinesProvisioned: func() { + EnsureDaemonsets(ctx, func() DaemonsetsSpecInput { + return DaemonsetsSpecInput{ + BootstrapClusterProxy: bootstrapClusterProxy, + Namespace: namespace, + ClusterName: clusterName, + } + }) + }, }, result) By("Verifying expected VM extensions are present on the node", func() { @@ -320,6 +338,15 @@ var _ = Describe("Workload cluster creation", func() { ControlPlaneWaiters: clusterctl.ControlPlaneWaiters{ WaitForControlPlaneInitialized: EnsureControlPlaneInitialized, }, + PostMachinesProvisioned: func() { + EnsureDaemonsets(ctx, func() DaemonsetsSpecInput { + return DaemonsetsSpecInput{ + BootstrapClusterProxy: 
bootstrapClusterProxy, + Namespace: namespace, + ClusterName: clusterName, + } + }) + }, }, result) By("Verifying expected VM extensions are present on the node", func() { @@ -379,6 +406,15 @@ var _ = Describe("Workload cluster creation", func() { ControlPlaneWaiters: clusterctl.ControlPlaneWaiters{ WaitForControlPlaneInitialized: EnsureControlPlaneInitialized, }, + PostMachinesProvisioned: func() { + EnsureDaemonsets(ctx, func() DaemonsetsSpecInput { + return DaemonsetsSpecInput{ + BootstrapClusterProxy: bootstrapClusterProxy, + Namespace: namespace, + ClusterName: clusterName, + } + }) + }, }, result) By("Verifying expected VM extensions are present on the node", func() { @@ -464,6 +500,15 @@ var _ = Describe("Workload cluster creation", func() { // The workaround is to use server side apply by passing `--server-side` flag to kubectl apply. // More on server side apply here: https://kubernetes.io/docs/reference/using-api/server-side-apply/ Args: []string{"--server-side"}, + PostMachinesProvisioned: func() { + EnsureDaemonsets(ctx, func() DaemonsetsSpecInput { + return DaemonsetsSpecInput{ + BootstrapClusterProxy: bootstrapClusterProxy, + Namespace: namespace, + ClusterName: clusterName, + } + }) + }, }, result) By("Verifying expected VM extensions are present on the node", func() { @@ -518,6 +563,15 @@ var _ = Describe("Workload cluster creation", func() { ControlPlaneWaiters: clusterctl.ControlPlaneWaiters{ WaitForControlPlaneInitialized: EnsureControlPlaneInitialized, }, + PostMachinesProvisioned: func() { + EnsureDaemonsets(ctx, func() DaemonsetsSpecInput { + return DaemonsetsSpecInput{ + BootstrapClusterProxy: bootstrapClusterProxy, + Namespace: namespace, + ClusterName: clusterName, + } + }) + }, }, result) By("Verifying expected VM extensions are present on the node", func() { @@ -672,6 +726,15 @@ var _ = Describe("Workload cluster creation", func() { ControlPlaneWaiters: clusterctl.ControlPlaneWaiters{ WaitForControlPlaneInitialized: 
EnsureControlPlaneInitialized, }, + PostMachinesProvisioned: func() { + EnsureDaemonsets(ctx, func() DaemonsetsSpecInput { + return DaemonsetsSpecInput{ + BootstrapClusterProxy: bootstrapClusterProxy, + Namespace: namespace, + ClusterName: clusterName, + } + }) + }, }, result) By("Verifying expected VM extensions are present on the node", func() { @@ -745,6 +808,15 @@ var _ = Describe("Workload cluster creation", func() { ControlPlaneWaiters: clusterctl.ControlPlaneWaiters{ WaitForControlPlaneInitialized: EnsureControlPlaneInitialized, }, + PostMachinesProvisioned: func() { + EnsureDaemonsets(ctx, func() DaemonsetsSpecInput { + return DaemonsetsSpecInput{ + BootstrapClusterProxy: bootstrapClusterProxy, + Namespace: namespace, + ClusterName: clusterName, + } + }) + }, }, result) By("Verifying expected VM extensions are present on the node", func() { diff --git a/test/e2e/cloud-provider-azure.go b/test/e2e/cloud-provider-azure.go index 59b97f58941f..6d0a9208cf12 100644 --- a/test/e2e/cloud-provider-azure.go +++ b/test/e2e/cloud-provider-azure.go @@ -58,11 +58,6 @@ func InstallCalicoAndCloudProviderAzureHelmChart(ctx context.Context, input clus waitInput := GetWaitForDeploymentsAvailableInput(ctx, clusterProxy, d, kubesystem, specName) WaitForDeploymentsAvailable(ctx, waitInput, e2eConfig.GetIntervals(specName, "wait-deployment")...) } - By("Waiting for Ready cloud-node-manager daemonset pods") - for _, ds := range []string{"cloud-node-manager", "cloud-node-manager-windows"} { - waitInput := GetWaitForDaemonsetAvailableInput(ctx, clusterProxy, ds, kubesystem, specName) - WaitForDaemonset(ctx, waitInput, e2eConfig.GetIntervals(specName, "wait-daemonset")...) 
- } } // InstallAzureDiskCSIDriverHelmChart installs the official azure-disk CSI driver helm chart @@ -79,9 +74,4 @@ func InstallAzureDiskCSIDriverHelmChart(ctx context.Context, input clusterctl.Ap waitInput := GetWaitForDeploymentsAvailableInput(ctx, clusterProxy, d, kubesystem, specName) WaitForDeploymentsAvailable(ctx, waitInput, e2eConfig.GetIntervals(specName, "wait-deployment")...) } - By("Waiting for Running azure-disk-csi node pods") - for _, ds := range []string{"csi-azuredisk-node", "csi-azuredisk-node-win"} { - waitInput := GetWaitForDaemonsetAvailableInput(ctx, clusterProxy, ds, kubesystem, specName) - WaitForDaemonset(ctx, waitInput, e2eConfig.GetIntervals(specName, "wait-daemonset")...) - } } diff --git a/test/e2e/cni.go b/test/e2e/cni.go index e5ef24bd5bfe..0b763a5279ca 100644 --- a/test/e2e/cni.go +++ b/test/e2e/cni.go @@ -81,25 +81,6 @@ func InstallCalicoHelmChart(ctx context.Context, input clusterctl.ApplyClusterTe waitInput := GetWaitForDeploymentsAvailableInput(ctx, clusterProxy, d, CalicoAPIServerNamespace, specName) WaitForDeploymentsAvailable(ctx, waitInput, e2eConfig.GetIntervals(specName, "wait-deployment")...) } - By("Waiting for Ready calico-node daemonset pods") - for _, ds := range []string{"calico-node"} { - waitInput := GetWaitForDaemonsetAvailableInput(ctx, clusterProxy, ds, CalicoSystemNamespace, specName) - WaitForDaemonset(ctx, waitInput, e2eConfig.GetIntervals(specName, "wait-daemonset")...) - } - // TODO: enable this for all clusters once calico for windows is part of the helm chart. - if hasWindows { - By("Waiting for Ready calico windows pods") - for _, ds := range []string{"calico-node-windows"} { - waitInput := GetWaitForDaemonsetAvailableInput(ctx, clusterProxy, ds, CalicoSystemNamespace, specName) - WaitForDaemonset(ctx, waitInput, e2eConfig.GetIntervals(specName, "wait-daemonset")...) 
- } - - By("Waiting for Ready calico windows pods") - for _, ds := range []string{"kube-proxy-windows"} { - waitInput := GetWaitForDaemonsetAvailableInput(ctx, clusterProxy, ds, kubesystem, specName) - WaitForDaemonset(ctx, waitInput, e2eConfig.GetIntervals(specName, "wait-daemonset")...) - } - } } func getCalicoValues(cidrBlocks []string) *helmVals.Options { diff --git a/test/e2e/daemonsets.go b/test/e2e/daemonsets.go new file mode 100644 index 000000000000..b8f8260511b2 --- /dev/null +++ b/test/e2e/daemonsets.go @@ -0,0 +1,103 @@ +//go:build e2e +// +build e2e + +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + kubeadmv1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + "sigs.k8s.io/cluster-api/test/framework" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// DaemonsetsSpecInput is the input for EnsureDaemonsets. +type DaemonsetsSpecInput struct { + BootstrapClusterProxy framework.ClusterProxy + Namespace *corev1.Namespace + ClusterName string +} + +// EnsureDaemonsets implements a test that verifies expected Daemonset Pods are running. 
+func EnsureDaemonsets(ctx context.Context, inputGetter func() DaemonsetsSpecInput) { + var ( + specName = "daemonsets" + input DaemonsetsSpecInput + ) + + Expect(ctx).NotTo(BeNil(), "ctx is required for %s spec", specName) + + input = inputGetter() + Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. input.BootstrapClusterProxy can't be nil when calling %s spec", specName) + Expect(input.Namespace).ToNot(BeNil(), "Invalid argument. input.Namespace can't be nil when calling %s spec", specName) + Expect(input.ClusterName).ToNot(BeEmpty(), "Invalid argument. input.ClusterName can't be empty when calling %s spec", specName) + + mgmtClient := input.BootstrapClusterProxy.GetClient() + Expect(mgmtClient).NotTo(BeNil()) + cluster := framework.GetClusterByName(ctx, framework.GetClusterByNameInput{ + Getter: mgmtClient, + Name: input.ClusterName, + Namespace: input.Namespace.Name, + }) + kubeadmControlPlane := &kubeadmv1.KubeadmControlPlane{} + key := client.ObjectKey{ + Namespace: cluster.Spec.ControlPlaneRef.Namespace, + Name: cluster.Spec.ControlPlaneRef.Name, + } + Eventually(func() error { + return mgmtClient.Get(ctx, key, kubeadmControlPlane) + }, e2eConfig.GetIntervals(specName, "wait-daemonset")...).Should(Succeed(), "Failed to get KubeadmControlPlane object %s/%s", cluster.Spec.ControlPlaneRef.Namespace, cluster.Spec.ControlPlaneRef.Name) + + workloadClusterProxy := input.BootstrapClusterProxy.GetWorkloadCluster(ctx, input.Namespace.Name, input.ClusterName) + By("Waiting for Ready calico-node daemonset pods") + for _, ds := range []string{"calico-node"} { + waitInput := GetWaitForDaemonsetAvailableInput(ctx, workloadClusterProxy, ds, CalicoSystemNamespace, specName) + WaitForDaemonset(ctx, waitInput, e2eConfig.GetIntervals(specName, "wait-daemonset")...) + } + // TODO: enable this for all clusters once calico for windows is part of the helm chart. 
+ if _, hasWindows := cluster.Labels["cni-windows"]; hasWindows { + By("Waiting for Ready calico windows pods") + for _, ds := range []string{"calico-node-windows"} { + waitInput := GetWaitForDaemonsetAvailableInput(ctx, workloadClusterProxy, ds, CalicoSystemNamespace, specName) + WaitForDaemonset(ctx, waitInput, e2eConfig.GetIntervals(specName, "wait-daemonset")...) + } + + By("Waiting for Ready kube-proxy windows pods") + for _, ds := range []string{"kube-proxy-windows"} { + waitInput := GetWaitForDaemonsetAvailableInput(ctx, workloadClusterProxy, ds, kubesystem, specName) + WaitForDaemonset(ctx, waitInput, e2eConfig.GetIntervals(specName, "wait-daemonset")...) + } + } + if kubeadmControlPlane.Spec.KubeadmConfigSpec.ClusterConfiguration.ControllerManager.ExtraArgs["cloud-provider"] == "external" { + By("Waiting for Ready cloud-node-manager daemonset pods") + for _, ds := range []string{"cloud-node-manager", "cloud-node-manager-windows"} { + waitInput := GetWaitForDaemonsetAvailableInput(ctx, workloadClusterProxy, ds, kubesystem, specName) + WaitForDaemonset(ctx, waitInput, e2eConfig.GetIntervals(specName, "wait-daemonset")...) + } + } + + By("Waiting for Running azure-disk-csi node pods") + for _, ds := range []string{"csi-azuredisk-node", "csi-azuredisk-node-win"} { + waitInput := GetWaitForDaemonsetAvailableInput(ctx, workloadClusterProxy, ds, kubesystem, specName) + WaitForDaemonset(ctx, waitInput, e2eConfig.GetIntervals(specName, "wait-daemonset")...) + } +}