Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

E2E: verify daemonset pods after machines #2950

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
72 changes: 72 additions & 0 deletions test/e2e/azure_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -184,6 +184,15 @@ var _ = Describe("Workload cluster creation", func() {
ControlPlaneWaiters: clusterctl.ControlPlaneWaiters{
WaitForControlPlaneInitialized: EnsureControlPlaneInitialized,
},
PostMachinesProvisioned: func() {
EnsureDaemonsets(ctx, func() DaemonsetsSpecInput {
return DaemonsetsSpecInput{
BootstrapClusterProxy: bootstrapClusterProxy,
Namespace: namespace,
ClusterName: clusterName,
}
})
},
}, result)

By("Creating a private cluster from the management cluster", func() {
Expand Down Expand Up @@ -236,6 +245,15 @@ var _ = Describe("Workload cluster creation", func() {
ControlPlaneWaiters: clusterctl.ControlPlaneWaiters{
WaitForControlPlaneInitialized: EnsureControlPlaneInitialized,
},
PostMachinesProvisioned: func() {
EnsureDaemonsets(ctx, func() DaemonsetsSpecInput {
return DaemonsetsSpecInput{
BootstrapClusterProxy: bootstrapClusterProxy,
Namespace: namespace,
ClusterName: clusterName,
}
})
},
}, result)

By("Verifying expected VM extensions are present on the node", func() {
Expand Down Expand Up @@ -320,6 +338,15 @@ var _ = Describe("Workload cluster creation", func() {
ControlPlaneWaiters: clusterctl.ControlPlaneWaiters{
WaitForControlPlaneInitialized: EnsureControlPlaneInitialized,
},
PostMachinesProvisioned: func() {
EnsureDaemonsets(ctx, func() DaemonsetsSpecInput {
return DaemonsetsSpecInput{
BootstrapClusterProxy: bootstrapClusterProxy,
Namespace: namespace,
ClusterName: clusterName,
}
})
},
}, result)

By("Verifying expected VM extensions are present on the node", func() {
Expand Down Expand Up @@ -379,6 +406,15 @@ var _ = Describe("Workload cluster creation", func() {
ControlPlaneWaiters: clusterctl.ControlPlaneWaiters{
WaitForControlPlaneInitialized: EnsureControlPlaneInitialized,
},
PostMachinesProvisioned: func() {
EnsureDaemonsets(ctx, func() DaemonsetsSpecInput {
return DaemonsetsSpecInput{
BootstrapClusterProxy: bootstrapClusterProxy,
Namespace: namespace,
ClusterName: clusterName,
}
})
},
}, result)

By("Verifying expected VM extensions are present on the node", func() {
Expand Down Expand Up @@ -464,6 +500,15 @@ var _ = Describe("Workload cluster creation", func() {
// The workaround is to use server side apply by passing `--server-side` flag to kubectl apply.
// More on server side apply here: https://kubernetes.io/docs/reference/using-api/server-side-apply/
Args: []string{"--server-side"},
PostMachinesProvisioned: func() {
EnsureDaemonsets(ctx, func() DaemonsetsSpecInput {
return DaemonsetsSpecInput{
BootstrapClusterProxy: bootstrapClusterProxy,
Namespace: namespace,
ClusterName: clusterName,
}
})
},
}, result)

By("Verifying expected VM extensions are present on the node", func() {
Expand Down Expand Up @@ -582,6 +627,15 @@ var _ = Describe("Workload cluster creation", func() {
ControlPlaneWaiters: clusterctl.ControlPlaneWaiters{
WaitForControlPlaneInitialized: EnsureControlPlaneInitialized,
},
PostMachinesProvisioned: func() {
EnsureDaemonsets(ctx, func() DaemonsetsSpecInput {
return DaemonsetsSpecInput{
BootstrapClusterProxy: bootstrapClusterProxy,
Namespace: namespace,
ClusterName: clusterName,
}
})
},
}, result)

By("Verifying expected VM extensions are present on the node", func() {
Expand Down Expand Up @@ -745,6 +799,15 @@ var _ = Describe("Workload cluster creation", func() {
ControlPlaneWaiters: clusterctl.ControlPlaneWaiters{
WaitForControlPlaneInitialized: EnsureControlPlaneInitialized,
},
PostMachinesProvisioned: func() {
EnsureDaemonsets(ctx, func() DaemonsetsSpecInput {
return DaemonsetsSpecInput{
BootstrapClusterProxy: bootstrapClusterProxy,
Namespace: namespace,
ClusterName: clusterName,
}
})
},
}, result)

By("Verifying expected VM extensions are present on the node", func() {
Expand Down Expand Up @@ -818,6 +881,15 @@ var _ = Describe("Workload cluster creation", func() {
ControlPlaneWaiters: clusterctl.ControlPlaneWaiters{
WaitForControlPlaneInitialized: EnsureControlPlaneInitialized,
},
PostMachinesProvisioned: func() {
EnsureDaemonsets(ctx, func() DaemonsetsSpecInput {
return DaemonsetsSpecInput{
BootstrapClusterProxy: bootstrapClusterProxy,
Namespace: namespace,
ClusterName: clusterName,
}
})
},
}, result)

By("Verifying expected VM extensions are present on the node", func() {
Expand Down
10 changes: 0 additions & 10 deletions test/e2e/cloud-provider-azure.go
Original file line number Diff line number Diff line change
Expand Up @@ -58,11 +58,6 @@ func InstallCalicoAndCloudProviderAzureHelmChart(ctx context.Context, input clus
waitInput := GetWaitForDeploymentsAvailableInput(ctx, clusterProxy, d, kubesystem, specName)
WaitForDeploymentsAvailable(ctx, waitInput, e2eConfig.GetIntervals(specName, "wait-deployment")...)
}
By("Waiting for Ready cloud-node-manager daemonset pods")
for _, ds := range []string{"cloud-node-manager", "cloud-node-manager-windows"} {
waitInput := GetWaitForDaemonsetAvailableInput(ctx, clusterProxy, ds, kubesystem, specName)
WaitForDaemonset(ctx, waitInput, e2eConfig.GetIntervals(specName, "wait-daemonset")...)
}
}

// InstallAzureDiskCSIDriverHelmChart installs the official azure-disk CSI driver helm chart
Expand All @@ -83,9 +78,4 @@ func InstallAzureDiskCSIDriverHelmChart(ctx context.Context, input clusterctl.Ap
waitInput := GetWaitForDeploymentsAvailableInput(ctx, clusterProxy, d, kubesystem, specName)
WaitForDeploymentsAvailable(ctx, waitInput, e2eConfig.GetIntervals(specName, "wait-deployment")...)
}
By("Waiting for Running azure-disk-csi node pods")
for _, ds := range []string{"csi-azuredisk-node", "csi-azuredisk-node-win"} {
waitInput := GetWaitForDaemonsetAvailableInput(ctx, clusterProxy, ds, kubesystem, specName)
WaitForDaemonset(ctx, waitInput, e2eConfig.GetIntervals(specName, "wait-daemonset")...)
}
}
19 changes: 0 additions & 19 deletions test/e2e/cni.go
Original file line number Diff line number Diff line change
Expand Up @@ -81,25 +81,6 @@ func InstallCalicoHelmChart(ctx context.Context, input clusterctl.ApplyClusterTe
waitInput := GetWaitForDeploymentsAvailableInput(ctx, clusterProxy, d, CalicoAPIServerNamespace, specName)
WaitForDeploymentsAvailable(ctx, waitInput, e2eConfig.GetIntervals(specName, "wait-deployment")...)
}
By("Waiting for Ready calico-node daemonset pods")
for _, ds := range []string{"calico-node"} {
waitInput := GetWaitForDaemonsetAvailableInput(ctx, clusterProxy, ds, CalicoSystemNamespace, specName)
WaitForDaemonset(ctx, waitInput, e2eConfig.GetIntervals(specName, "wait-daemonset")...)
}
// TODO: enable this for all clusters once calico for windows is part of the helm chart.
if hasWindows {
By("Waiting for Ready calico windows pods")
for _, ds := range []string{"calico-node-windows"} {
waitInput := GetWaitForDaemonsetAvailableInput(ctx, clusterProxy, ds, CalicoSystemNamespace, specName)
WaitForDaemonset(ctx, waitInput, e2eConfig.GetIntervals(specName, "wait-daemonset")...)
}

By("Waiting for Ready calico windows pods")
for _, ds := range []string{"kube-proxy-windows"} {
waitInput := GetWaitForDaemonsetAvailableInput(ctx, clusterProxy, ds, kubesystem, specName)
WaitForDaemonset(ctx, waitInput, e2eConfig.GetIntervals(specName, "wait-daemonset")...)
}
}
}

func getCalicoValues(cidrBlocks []string) *helmVals.Options {
Expand Down
73 changes: 73 additions & 0 deletions test/e2e/daemonsets.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,73 @@
//go:build e2e
// +build e2e

/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
"context"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
kubeadmv1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
"sigs.k8s.io/cluster-api/test/framework"
"sigs.k8s.io/controller-runtime/pkg/client"
)

// DaemonsetsSpecInput is the input for EnsureDaemonsets.
type DaemonsetsSpecInput struct {
	// BootstrapClusterProxy is the management (bootstrap) cluster proxy used
	// to look up the Cluster object and to reach the workload cluster.
	BootstrapClusterProxy framework.ClusterProxy
	// Namespace is the namespace on the management cluster containing the
	// Cluster object for the workload cluster under test.
	Namespace *corev1.Namespace
	// ClusterName is the name of the workload cluster whose daemonset pods
	// are verified.
	ClusterName string
}

// EnsureDaemonsets implements a test that verifies expected Daemonset Pods are running.
func EnsureDaemonsets(ctx context.Context, inputGetter func() DaemonsetsSpecInput) {
jackfrancis marked this conversation as resolved.
Show resolved Hide resolved
var (
specName = "daemonsets"
input DaemonsetsSpecInput
)

Expect(ctx).NotTo(BeNil(), "ctx is required for %s spec", specName)

input = inputGetter()
Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. input.BootstrapClusterProxy can't be nil when calling %s spec", specName)
Expect(input.Namespace).ToNot(BeNil(), "Invalid argument. input.Namespace can't be nil when calling %s spec", specName)
Expect(input.ClusterName).ToNot(BeEmpty(), "Invalid argument. input.ClusterName can't be empty when calling %s spec", specName)

mgmtClient := bootstrapClusterProxy.GetClient()
Expect(mgmtClient).NotTo(BeNil())
cluster := framework.GetClusterByName(ctx, framework.GetClusterByNameInput{
Getter: mgmtClient,
Name: input.ClusterName,
Namespace: input.Namespace.Name,
})
kubeadmControlPlane := &kubeadmv1.KubeadmControlPlane{}
key := client.ObjectKey{
Namespace: cluster.Spec.ControlPlaneRef.Namespace,
Name: cluster.Spec.ControlPlaneRef.Name,
}
Eventually(func() error {
return mgmtClient.Get(ctx, key, kubeadmControlPlane)
}, e2eConfig.GetIntervals(specName, "wait-daemonset")...).Should(Succeed(), "Failed to get KubeadmControlPlane object %s/%s", cluster.Spec.ControlPlaneRef.Namespace, cluster.Spec.ControlPlaneRef.Name)

workloadClusterProxy := input.BootstrapClusterProxy.GetWorkloadCluster(ctx, input.Namespace.Name, input.ClusterName)
By("Waiting for all DaemonSet Pods to be Running")
WaitForDaemonsets(ctx, workloadClusterProxy, specName, e2eConfig.GetIntervals(specName, "wait-daemonset")...)
}
34 changes: 20 additions & 14 deletions test/e2e/helpers.go
Original file line number Diff line number Diff line change
Expand Up @@ -270,33 +270,39 @@ type WaitForDaemonsetInput struct {
func WaitForDaemonset(ctx context.Context, input WaitForDaemonsetInput, intervals ...interface{}) {
start := time.Now()
namespace, name := input.DaemonSet.GetNamespace(), input.DaemonSet.GetName()
Byf("waiting for daemonset %s/%s to be complete", namespace, name)
Logf("waiting for daemonset %s/%s to be complete", namespace, name)
Eventually(func() bool {
key := client.ObjectKey{Namespace: namespace, Name: name}
if err := input.Getter.Get(ctx, key, input.DaemonSet); err == nil {
if input.DaemonSet.Status.DesiredNumberScheduled == input.DaemonSet.Status.NumberReady {
Logf("%d daemonset %s/%s pods are running, took %v", input.DaemonSet.Status.NumberReady, namespace, name, time.Since(start))
if input.DaemonSet.Status.DesiredNumberScheduled > 0 {
Byf("waiting for %d daemonset %s/%s pods to be Running", input.DaemonSet.Status.DesiredNumberScheduled, namespace, name)
if input.DaemonSet.Status.DesiredNumberScheduled == input.DaemonSet.Status.NumberReady {
Logf("%d daemonset %s/%s pods are running, took %v", input.DaemonSet.Status.NumberReady, namespace, name, time.Since(start))
return true
}
} else {
Byf("daemonset %s/%s has no schedulable nodes, will skip", namespace, name)
return true
}
}
return false
}, intervals...).Should(BeTrue(), func() string { return DescribeFailedDaemonset(ctx, input) })
}

// GetWaitForDaemonsetAvailableInput is a convenience func to compose a WaitForDaemonsetInput
func GetWaitForDaemonsetAvailableInput(ctx context.Context, clusterProxy framework.ClusterProxy, name, namespace string, specName string) WaitForDaemonsetInput {
// WaitForDaemonsets retries during E2E until all daemonsets pods are all Running.
func WaitForDaemonsets(ctx context.Context, clusterProxy framework.ClusterProxy, specName string, intervals ...interface{}) {
Expect(clusterProxy).NotTo(BeNil())
cl := clusterProxy.GetClient()
var ds = &appsv1.DaemonSet{}
var dsList = &appsv1.DaemonSetList{}
Eventually(func() error {
return cl.Get(ctx, client.ObjectKey{Name: name, Namespace: namespace}, ds)
}, e2eConfig.GetIntervals(specName, "wait-daemonset")...).Should(Succeed())
clientset := clusterProxy.GetClientSet()
return WaitForDaemonsetInput{
DaemonSet: ds,
Clientset: clientset,
Getter: cl,
return cl.List(ctx, dsList)
}, intervals...).Should(Succeed())
for i := range dsList.Items {
waitForDaemonsetInput := WaitForDaemonsetInput{
DaemonSet: &dsList.Items[i],
Clientset: clusterProxy.GetClientSet(),
Getter: cl,
}
WaitForDaemonset(ctx, waitForDaemonsetInput, intervals...)
}
}

Expand Down