generalize helm install during E2E testing
jackfrancis committed May 20, 2022
1 parent 2a9b44c commit 23666ad
Showing 2 changed files with 124 additions and 106 deletions.
137 changes: 31 additions & 106 deletions test/e2e/cloud-provider-azure.go
@@ -23,122 +23,47 @@ import (
"context"
"fmt"

"github.com/Azure/go-autorest/autorest/to"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
helmAction "helm.sh/helm/v3/pkg/action"
helmLoader "helm.sh/helm/v3/pkg/chart/loader"
helmCli "helm.sh/helm/v3/pkg/cli"
helmVals "helm.sh/helm/v3/pkg/cli/values"
helmGetter "helm.sh/helm/v3/pkg/getter"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"sigs.k8s.io/cluster-api/test/framework"
"sigs.k8s.io/cluster-api/test/framework/clusterctl"
crclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client"
)

const (
cloudProviderAzureHelmRepoURL = "https://raw.githubusercontent.com/kubernetes-sigs/cloud-provider-azure/master/helm/repo"
cloudProviderAzureChartName = "cloud-provider-azure"
cloudProviderAzureHelmReleaseName = "cloud-provider-azure-oot"
)

// InstallCloudProviderAzureHelmChart installs the official cloud-provider-azure helm chart
// Fulfills the clusterctl.Waiter type so that it can be used as ApplyClusterTemplateAndWaitInput data
// in the flow of a clusterctl.ApplyClusterTemplateAndWait E2E test scenario
func InstallCloudProviderAzureHelmChart(ctx context.Context, input clusterctl.ApplyClusterTemplateAndWaitInput, result *clusterctl.ApplyClusterTemplateAndWaitResult) {
By("Waiting for workload cluster kubeconfig secret")
Eventually(func() error {
client := input.ClusterProxy.GetClient()
secret := &corev1.Secret{}
key := crclient.ObjectKey{
Name: fmt.Sprintf("%s-kubeconfig", input.ConfigCluster.ClusterName),
Namespace: input.ConfigCluster.Namespace,
}
return client.Get(ctx, key, secret)
}, input.WaitForControlPlaneIntervals...).Should(Succeed())
clusterProxy := input.ClusterProxy.GetWorkloadCluster(ctx, input.ConfigCluster.Namespace, input.ConfigCluster.ClusterName)
By("Waiting for nodes to come online indicating that the cluster is ready to accept work")
Eventually(func() error {
clientSet := clusterProxy.GetClientSet()
var runningNodes int
list, err := clientSet.CoreV1().Nodes().List(ctx, v1.ListOptions{})
if err != nil {
return err
}
for _, n := range list.Items {
if n.Status.Phase == corev1.NodeRunning {
runningNodes++
}
}
if runningNodes > 0 {
return nil
}
return err
}, input.WaitForControlPlaneIntervals...).Should(Succeed())
By(fmt.Sprintf("Ensuring the kubeconfig secret for cluster %s/%s exists before installing cloud-provider-azure components", input.ConfigCluster.Namespace, input.ConfigCluster.ClusterName))
WaitForWorkloadClusterKubeconfigSecret(ctx, input)
By("Installing the correct version of cloud-provider-azure components via helm")
kubeConfigPath := clusterProxy.GetKubeconfigPath()
clusterName := input.ClusterProxy.GetName()
settings := helmCli.New()
settings.KubeConfig = kubeConfigPath
actionConfig := new(helmAction.Configuration)
err := actionConfig.Init(settings.RESTClientGetter(), "default", "secret", Logf)
Expect(err).To(BeNil())
i := helmAction.NewInstall(actionConfig)
i.RepoURL = "https://raw.githubusercontent.com/kubernetes-sigs/cloud-provider-azure/master/helm/repo"
i.ReleaseName = "cloud-provider-azure-oot"
Eventually(func() error {
cp, err := i.ChartPathOptions.LocateChart("cloud-provider-azure", helmCli.New())
if err != nil {
return err
}
p := helmGetter.All(settings)
valueOpts := &helmVals.Options{}
valueOpts.Values = []string{fmt.Sprintf("infra.clusterName=%s", clusterName)}
vals, err := valueOpts.MergeValues(p)
if err != nil {
return err
}
chartRequested, err := helmLoader.Load(cp)
if err != nil {
return err
}
release, err := i.RunWithContext(ctx, chartRequested, vals)
if err != nil {
return err
}
Logf(release.Info.Description)
return nil
}, input.WaitForControlPlaneIntervals...).Should(Succeed())
values := []string{fmt.Sprintf("infra.clusterName=%s", input.ConfigCluster.ClusterName)}
InstallHelmChart(ctx, input, cloudProviderAzureHelmRepoURL, cloudProviderAzureChartName, cloudProviderAzureHelmReleaseName, values)
By("Waiting for a Running cloud-controller-manager pod")
Eventually(func() bool {
clusterProxy := input.ClusterProxy.GetWorkloadCluster(ctx, input.ConfigCluster.Namespace, input.ConfigCluster.ClusterName)
clientSet := clusterProxy.GetClientSet()
var runningPods int
list, err := clientSet.CoreV1().Pods("kube-system").List(ctx, v1.ListOptions{
LabelSelector: "component=cloud-controller-manager",
})
if err != nil {
return false
}
for _, p := range list.Items {
if p.Status.Phase == corev1.PodRunning {
runningPods++
}
}
return runningPods > 0
}, input.WaitForControlPlaneIntervals...).Should(BeTrue())
By("Waiting for Running cloud-node-manager pods")
Eventually(func() bool {
clusterProxy := input.ClusterProxy.GetWorkloadCluster(ctx, input.ConfigCluster.Namespace, input.ConfigCluster.ClusterName)
clientSet := clusterProxy.GetClientSet()
var runningPods int64
list, err := clientSet.CoreV1().Pods("kube-system").List(ctx, v1.ListOptions{
LabelSelector: "k8s-app=cloud-node-manager",
})
if err != nil {
return false
}
for _, p := range list.Items {
if p.Status.Phase == corev1.PodRunning {
runningPods++
}
}
return runningPods >= to.Int64(input.ConfigCluster.ControlPlaneMachineCount)
}, input.WaitForControlPlaneIntervals...).Should(BeTrue())
clusterProxy := input.ClusterProxy.GetWorkloadCluster(ctx, input.ConfigCluster.Namespace, input.ConfigCluster.ClusterName)
workloadClusterClient := clusterProxy.GetClient()
cloudControllerManagerPodLabel, err := labels.Parse("component=cloud-controller-manager")
Expect(err).ToNot(HaveOccurred())
framework.WaitForPodListCondition(ctx, framework.WaitForPodListConditionInput{
Lister: workloadClusterClient,
ListOptions: &client.ListOptions{
LabelSelector: cloudControllerManagerPodLabel,
Namespace: "kube-system",
},
Condition: podListHasNumPods(1),
}, input.WaitForControlPlaneIntervals...)
Expect(err).ToNot(HaveOccurred())
By("Waiting for Ready cloud-node-manager daemonset pods")
for _, ds := range []string{"cloud-node-manager", "cloud-node-manager-windows"} {
WaitForDaemonset(ctx, input, workloadClusterClient, ds, "kube-system")
}
By("Done installing cloud-provider-azure components, ensuring control plane is initialized")
result.ControlPlane = framework.DiscoveryAndWaitForControlPlaneInitialized(ctx, framework.DiscoveryAndWaitForControlPlaneInitializedInput{
Lister: input.ClusterProxy.GetClient(),
93 changes: 93 additions & 0 deletions test/e2e/helpers.go
@@ -66,6 +66,12 @@ import (
"sigs.k8s.io/cluster-api/test/framework/kubernetesversions"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/controller-runtime/pkg/client"

helmAction "helm.sh/helm/v3/pkg/action"
helmLoader "helm.sh/helm/v3/pkg/chart/loader"
helmCli "helm.sh/helm/v3/pkg/cli"
helmVals "helm.sh/helm/v3/pkg/cli/values"
helmGetter "helm.sh/helm/v3/pkg/getter"
)

const (
@@ -823,3 +829,90 @@ func getPodLogs(ctx context.Context, clientset *kubernetes.Clientset, pod corev1
}
return b.String()
}

// InstallHelmChart takes a helm repo URL, a chart name, and a release name, and installs a helm release onto the E2E workload cluster
func InstallHelmChart(ctx context.Context, input clusterctl.ApplyClusterTemplateAndWaitInput, repoURL, chartName, releaseName string, values []string) {
clusterProxy := input.ClusterProxy.GetWorkloadCluster(ctx, input.ConfigCluster.Namespace, input.ConfigCluster.ClusterName)
kubeConfigPath := clusterProxy.GetKubeconfigPath()
settings := helmCli.New()
settings.KubeConfig = kubeConfigPath
actionConfig := new(helmAction.Configuration)
err := actionConfig.Init(settings.RESTClientGetter(), "default", "secret", Logf)
Expect(err).To(BeNil())
i := helmAction.NewInstall(actionConfig)
i.RepoURL = repoURL
i.ReleaseName = releaseName
Eventually(func() error {
cp, err := i.ChartPathOptions.LocateChart(chartName, helmCli.New())
if err != nil {
return err
}
p := helmGetter.All(settings)
valueOpts := &helmVals.Options{}
valueOpts.Values = values
vals, err := valueOpts.MergeValues(p)
if err != nil {
return err
}
chartRequested, err := helmLoader.Load(cp)
if err != nil {
return err
}
release, err := i.RunWithContext(ctx, chartRequested, vals)
if err != nil {
return err
}
Logf(release.Info.Description)
return nil
}, input.WaitForControlPlaneIntervals...).Should(Succeed())
}

// WaitForWorkloadClusterKubeconfigSecret retries during E2E until the workload cluster kubeconfig secret exists
func WaitForWorkloadClusterKubeconfigSecret(ctx context.Context, input clusterctl.ApplyClusterTemplateAndWaitInput) {
// Ensure the workload cluster kubeconfig secret exists before getting the workload cluster clusterProxy object
Eventually(func() error {
cl := input.ClusterProxy.GetClient()
secret := &corev1.Secret{}
key := client.ObjectKey{
Name: fmt.Sprintf("%s-kubeconfig", input.ConfigCluster.ClusterName),
Namespace: input.ConfigCluster.Namespace,
}
err := cl.Get(ctx, key, secret)
if err != nil {
return err
}
return nil
}, input.WaitForControlPlaneIntervals...).Should(Succeed())
}

// WaitForDaemonset retries during E2E until all of a daemonset's scheduled pods are Ready
func WaitForDaemonset(ctx context.Context, input clusterctl.ApplyClusterTemplateAndWaitInput, cl client.Client, name, namespace string) {
// Poll the daemonset until its number of Ready pods matches the number of scheduled pods
Eventually(func() bool {
ds := &appsv1.DaemonSet{}
if err := cl.Get(ctx, client.ObjectKey{Name: name, Namespace: namespace}, ds); err != nil {
return false
}
if ds.Status.DesiredNumberScheduled == ds.Status.NumberReady {
return true
}
return false
}, input.WaitForControlPlaneIntervals...).Should(Equal(true))
}

// podListHasNumPods fulfills the cluster-api PodListCondition type spec
// given a list of pods, we validate for an exact number of those pods in a Running state
func podListHasNumPods(numPods int) func(pl *corev1.PodList) error {
return func(pl *corev1.PodList) error {
var runningPods int
for _, p := range pl.Items {
if p.Status.Phase == corev1.PodRunning {
runningPods++
}
}
if runningPods != numPods {
return errors.Errorf("expected %d Running pods, got %d", numPods, runningPods)
}
return nil
}
}
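
A minimal sketch of how the generalized InstallHelmChart helper introduced above could be reused for a different chart in another E2E scenario, assuming the same test/e2e package. The repo URL, chart name, release name, and values key below are hypothetical placeholders; only the InstallHelmChart and WaitForWorkloadClusterKubeconfigSecret signatures are taken from the diff.

package e2e

import (
	"context"
	"fmt"

	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
)

const (
	// Hypothetical chart coordinates, for illustration only.
	exampleHelmRepoURL     = "https://example.com/helm/charts"
	exampleChartName       = "example-chart"
	exampleHelmReleaseName = "example-release"
)

// InstallExampleHelmChart follows the same pattern as InstallCloudProviderAzureHelmChart:
// wait for the workload cluster kubeconfig secret, then delegate to the generic helper.
func InstallExampleHelmChart(ctx context.Context, input clusterctl.ApplyClusterTemplateAndWaitInput) {
	WaitForWorkloadClusterKubeconfigSecret(ctx, input)
	values := []string{fmt.Sprintf("cluster.name=%s", input.ConfigCluster.ClusterName)}
	InstallHelmChart(ctx, input, exampleHelmRepoURL, exampleChartName, exampleHelmReleaseName, values)
}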
