Skip to content

Commit

Permalink
E2E: verify daemonset pods after machines
Browse files Browse the repository at this point in the history
  • Loading branch information
jackfrancis committed Jan 5, 2023
1 parent c7f9bf4 commit 3848b87
Show file tree
Hide file tree
Showing 4 changed files with 175 additions and 29 deletions.
72 changes: 72 additions & 0 deletions test/e2e/azure_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -184,6 +184,15 @@ var _ = Describe("Workload cluster creation", func() {
ControlPlaneWaiters: clusterctl.ControlPlaneWaiters{
WaitForControlPlaneInitialized: EnsureControlPlaneInitialized,
},
PostMachinesProvisioned: func() {
EnsureDaemonsets(ctx, func() DaemonsetsSpecInput {
return DaemonsetsSpecInput{
BootstrapClusterProxy: bootstrapClusterProxy,
Namespace: namespace,
ClusterName: clusterName,
}
})
},
}, result)

By("Creating a private cluster from the management cluster", func() {
Expand Down Expand Up @@ -236,6 +245,15 @@ var _ = Describe("Workload cluster creation", func() {
ControlPlaneWaiters: clusterctl.ControlPlaneWaiters{
WaitForControlPlaneInitialized: EnsureControlPlaneInitialized,
},
PostMachinesProvisioned: func() {
EnsureDaemonsets(ctx, func() DaemonsetsSpecInput {
return DaemonsetsSpecInput{
BootstrapClusterProxy: bootstrapClusterProxy,
Namespace: namespace,
ClusterName: clusterName,
}
})
},
}, result)

By("Verifying expected VM extensions are present on the node", func() {
Expand Down Expand Up @@ -320,6 +338,15 @@ var _ = Describe("Workload cluster creation", func() {
ControlPlaneWaiters: clusterctl.ControlPlaneWaiters{
WaitForControlPlaneInitialized: EnsureControlPlaneInitialized,
},
PostMachinesProvisioned: func() {
EnsureDaemonsets(ctx, func() DaemonsetsSpecInput {
return DaemonsetsSpecInput{
BootstrapClusterProxy: bootstrapClusterProxy,
Namespace: namespace,
ClusterName: clusterName,
}
})
},
}, result)

By("Verifying expected VM extensions are present on the node", func() {
Expand Down Expand Up @@ -379,6 +406,15 @@ var _ = Describe("Workload cluster creation", func() {
ControlPlaneWaiters: clusterctl.ControlPlaneWaiters{
WaitForControlPlaneInitialized: EnsureControlPlaneInitialized,
},
PostMachinesProvisioned: func() {
EnsureDaemonsets(ctx, func() DaemonsetsSpecInput {
return DaemonsetsSpecInput{
BootstrapClusterProxy: bootstrapClusterProxy,
Namespace: namespace,
ClusterName: clusterName,
}
})
},
}, result)

By("Verifying expected VM extensions are present on the node", func() {
Expand Down Expand Up @@ -464,6 +500,15 @@ var _ = Describe("Workload cluster creation", func() {
// The workaround is to use server side apply by passing `--server-side` flag to kubectl apply.
// More on server side apply here: https://kubernetes.io/docs/reference/using-api/server-side-apply/
Args: []string{"--server-side"},
PostMachinesProvisioned: func() {
EnsureDaemonsets(ctx, func() DaemonsetsSpecInput {
return DaemonsetsSpecInput{
BootstrapClusterProxy: bootstrapClusterProxy,
Namespace: namespace,
ClusterName: clusterName,
}
})
},
}, result)

By("Verifying expected VM extensions are present on the node", func() {
Expand Down Expand Up @@ -518,6 +563,15 @@ var _ = Describe("Workload cluster creation", func() {
ControlPlaneWaiters: clusterctl.ControlPlaneWaiters{
WaitForControlPlaneInitialized: EnsureControlPlaneInitialized,
},
PostMachinesProvisioned: func() {
EnsureDaemonsets(ctx, func() DaemonsetsSpecInput {
return DaemonsetsSpecInput{
BootstrapClusterProxy: bootstrapClusterProxy,
Namespace: namespace,
ClusterName: clusterName,
}
})
},
}, result)

By("Verifying expected VM extensions are present on the node", func() {
Expand Down Expand Up @@ -672,6 +726,15 @@ var _ = Describe("Workload cluster creation", func() {
ControlPlaneWaiters: clusterctl.ControlPlaneWaiters{
WaitForControlPlaneInitialized: EnsureControlPlaneInitialized,
},
PostMachinesProvisioned: func() {
EnsureDaemonsets(ctx, func() DaemonsetsSpecInput {
return DaemonsetsSpecInput{
BootstrapClusterProxy: bootstrapClusterProxy,
Namespace: namespace,
ClusterName: clusterName,
}
})
},
}, result)

By("Verifying expected VM extensions are present on the node", func() {
Expand Down Expand Up @@ -745,6 +808,15 @@ var _ = Describe("Workload cluster creation", func() {
ControlPlaneWaiters: clusterctl.ControlPlaneWaiters{
WaitForControlPlaneInitialized: EnsureControlPlaneInitialized,
},
PostMachinesProvisioned: func() {
EnsureDaemonsets(ctx, func() DaemonsetsSpecInput {
return DaemonsetsSpecInput{
BootstrapClusterProxy: bootstrapClusterProxy,
Namespace: namespace,
ClusterName: clusterName,
}
})
},
}, result)

By("Verifying expected VM extensions are present on the node", func() {
Expand Down
10 changes: 0 additions & 10 deletions test/e2e/cloud-provider-azure.go
Original file line number Diff line number Diff line change
Expand Up @@ -58,11 +58,6 @@ func InstallCalicoAndCloudProviderAzureHelmChart(ctx context.Context, input clus
waitInput := GetWaitForDeploymentsAvailableInput(ctx, clusterProxy, d, kubesystem, specName)
WaitForDeploymentsAvailable(ctx, waitInput, e2eConfig.GetIntervals(specName, "wait-deployment")...)
}
By("Waiting for Ready cloud-node-manager daemonset pods")
for _, ds := range []string{"cloud-node-manager", "cloud-node-manager-windows"} {
waitInput := GetWaitForDaemonsetAvailableInput(ctx, clusterProxy, ds, kubesystem, specName)
WaitForDaemonset(ctx, waitInput, e2eConfig.GetIntervals(specName, "wait-daemonset")...)
}
}

// InstallAzureDiskCSIDriverHelmChart installs the official azure-disk CSI driver helm chart
Expand All @@ -79,9 +74,4 @@ func InstallAzureDiskCSIDriverHelmChart(ctx context.Context, input clusterctl.Ap
waitInput := GetWaitForDeploymentsAvailableInput(ctx, clusterProxy, d, kubesystem, specName)
WaitForDeploymentsAvailable(ctx, waitInput, e2eConfig.GetIntervals(specName, "wait-deployment")...)
}
By("Waiting for Running azure-disk-csi node pods")
for _, ds := range []string{"csi-azuredisk-node", "csi-azuredisk-node-win"} {
waitInput := GetWaitForDaemonsetAvailableInput(ctx, clusterProxy, ds, kubesystem, specName)
WaitForDaemonset(ctx, waitInput, e2eConfig.GetIntervals(specName, "wait-daemonset")...)
}
}
19 changes: 0 additions & 19 deletions test/e2e/cni.go
Original file line number Diff line number Diff line change
Expand Up @@ -81,25 +81,6 @@ func InstallCalicoHelmChart(ctx context.Context, input clusterctl.ApplyClusterTe
waitInput := GetWaitForDeploymentsAvailableInput(ctx, clusterProxy, d, CalicoAPIServerNamespace, specName)
WaitForDeploymentsAvailable(ctx, waitInput, e2eConfig.GetIntervals(specName, "wait-deployment")...)
}
By("Waiting for Ready calico-node daemonset pods")
for _, ds := range []string{"calico-node"} {
waitInput := GetWaitForDaemonsetAvailableInput(ctx, clusterProxy, ds, CalicoSystemNamespace, specName)
WaitForDaemonset(ctx, waitInput, e2eConfig.GetIntervals(specName, "wait-daemonset")...)
}
// TODO: enable this for all clusters once calico for windows is part of the helm chart.
if hasWindows {
By("Waiting for Ready calico windows pods")
for _, ds := range []string{"calico-node-windows"} {
waitInput := GetWaitForDaemonsetAvailableInput(ctx, clusterProxy, ds, CalicoSystemNamespace, specName)
WaitForDaemonset(ctx, waitInput, e2eConfig.GetIntervals(specName, "wait-daemonset")...)
}

By("Waiting for Ready calico windows pods")
for _, ds := range []string{"kube-proxy-windows"} {
waitInput := GetWaitForDaemonsetAvailableInput(ctx, clusterProxy, ds, kubesystem, specName)
WaitForDaemonset(ctx, waitInput, e2eConfig.GetIntervals(specName, "wait-daemonset")...)
}
}
}

func getCalicoValues(cidrBlocks []string) *helmVals.Options {
Expand Down
103 changes: 103 additions & 0 deletions test/e2e/daemonsets.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,103 @@
//go:build e2e
// +build e2e

/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
"context"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
kubeadmv1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
"sigs.k8s.io/cluster-api/test/framework"
"sigs.k8s.io/controller-runtime/pkg/client"
)

// DaemonsetsSpecInput is the input for EnsureDaemonsets.
type DaemonsetsSpecInput struct {
	// BootstrapClusterProxy is a proxy to the management (bootstrap) cluster
	// used to look up the workload cluster and its kubeconfig.
	BootstrapClusterProxy framework.ClusterProxy
	// Namespace is the namespace on the management cluster that holds the
	// workload cluster resources.
	Namespace *corev1.Namespace
	// ClusterName is the name of the workload cluster whose daemonsets are verified.
	ClusterName string
}

// EnsureDaemonsets implements a test that verifies expected Daemonset Pods are
// running on the workload cluster identified by the input: the calico-node CNI
// daemonsets (plus the Windows variants when the cluster is labeled for Windows
// CNI), the cloud-node-manager daemonsets when the cluster runs the external
// cloud provider, and the azure-disk CSI node daemonsets.
func EnsureDaemonsets(ctx context.Context, inputGetter func() DaemonsetsSpecInput) {
	var (
		specName = "daemonsets"
		input    DaemonsetsSpecInput
	)

	Expect(ctx).NotTo(BeNil(), "ctx is required for %s spec", specName)

	input = inputGetter()
	Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. input.BootstrapClusterProxy can't be nil when calling %s spec", specName)
	Expect(input.Namespace).ToNot(BeNil(), "Invalid argument. input.Namespace can't be nil when calling %s spec", specName)
	Expect(input.ClusterName).ToNot(BeEmpty(), "Invalid argument. input.ClusterName can't be empty when calling %s spec", specName)

	// Use the proxy supplied via input rather than the package-level
	// bootstrapClusterProxy global; otherwise the nil-check above is
	// meaningless and callers can't substitute a different proxy.
	mgmtClient := input.BootstrapClusterProxy.GetClient()
	Expect(mgmtClient).NotTo(BeNil())
	cluster := framework.GetClusterByName(ctx, framework.GetClusterByNameInput{
		Getter:    mgmtClient,
		Name:      input.ClusterName,
		Namespace: input.Namespace.Name,
	})
	// Guard before dereferencing: ControlPlaneRef is a pointer field.
	Expect(cluster.Spec.ControlPlaneRef).NotTo(BeNil(), "Cluster %s/%s has no control plane reference", input.Namespace.Name, input.ClusterName)
	kubeadmControlPlane := &kubeadmv1.KubeadmControlPlane{}
	key := client.ObjectKey{
		Namespace: cluster.Spec.ControlPlaneRef.Namespace,
		Name:      cluster.Spec.ControlPlaneRef.Name,
	}
	Eventually(func() error {
		return mgmtClient.Get(ctx, key, kubeadmControlPlane)
	}, e2eConfig.GetIntervals(specName, "wait-daemonset")...).Should(Succeed(), "Failed to get KubeadmControlPlane object %s/%s", key.Namespace, key.Name)

	// GetWorkloadCluster resolves the workload kubeconfig from the
	// "<name>-kubeconfig" secret, so it must be keyed by the Cluster's
	// namespace/name, not by the KubeadmControlPlane reference.
	workloadClusterProxy := input.BootstrapClusterProxy.GetWorkloadCluster(ctx, input.Namespace.Name, input.ClusterName)
	By("Waiting for Ready calico-node daemonset pods")
	for _, ds := range []string{"calico-node"} {
		waitInput := GetWaitForDaemonsetAvailableInput(ctx, workloadClusterProxy, ds, CalicoSystemNamespace, specName)
		WaitForDaemonset(ctx, waitInput, e2eConfig.GetIntervals(specName, "wait-daemonset")...)
	}
	// TODO: enable this for all clusters once calico for windows is part of the helm chart.
	if _, hasWindows := cluster.Labels["cni-windows"]; hasWindows {
		By("Waiting for Ready calico windows pods")
		for _, ds := range []string{"calico-node-windows"} {
			waitInput := GetWaitForDaemonsetAvailableInput(ctx, workloadClusterProxy, ds, CalicoSystemNamespace, specName)
			WaitForDaemonset(ctx, waitInput, e2eConfig.GetIntervals(specName, "wait-daemonset")...)
		}

		By("Waiting for Ready kube-proxy windows pods")
		for _, ds := range []string{"kube-proxy-windows"} {
			waitInput := GetWaitForDaemonsetAvailableInput(ctx, workloadClusterProxy, ds, kubesystem, specName)
			WaitForDaemonset(ctx, waitInput, e2eConfig.GetIntervals(specName, "wait-daemonset")...)
		}
	}
	// ClusterConfiguration is a pointer in the kubeadm v1beta1 API and may be
	// nil on minimal specs; guard before reading the cloud-provider extra arg.
	if clusterConfig := kubeadmControlPlane.Spec.KubeadmConfigSpec.ClusterConfiguration; clusterConfig != nil &&
		clusterConfig.ControllerManager.ExtraArgs["cloud-provider"] == "external" {
		By("Waiting for Ready cloud-node-manager daemonset pods")
		for _, ds := range []string{"cloud-node-manager", "cloud-node-manager-windows"} {
			waitInput := GetWaitForDaemonsetAvailableInput(ctx, workloadClusterProxy, ds, kubesystem, specName)
			WaitForDaemonset(ctx, waitInput, e2eConfig.GetIntervals(specName, "wait-daemonset")...)
		}
	}

	By("Waiting for Running azure-disk-csi node pods")
	for _, ds := range []string{"csi-azuredisk-node", "csi-azuredisk-node-win"} {
		waitInput := GetWaitForDaemonsetAvailableInput(ctx, workloadClusterProxy, ds, kubesystem, specName)
		WaitForDaemonset(ctx, waitInput, e2eConfig.GetIntervals(specName, "wait-daemonset")...)
	}
}

0 comments on commit 3848b87

Please sign in to comment.