diff --git a/test/e2e/cloud-provider-azure.go b/test/e2e/cloud-provider-azure.go
index 59b97f58941..a0fb6a19d44 100644
--- a/test/e2e/cloud-provider-azure.go
+++ b/test/e2e/cloud-provider-azure.go
@@ -43,7 +43,7 @@ const (
 // and validates that expected pods exist and are Ready.
 func InstallCalicoAndCloudProviderAzureHelmChart(ctx context.Context, input clusterctl.ApplyClusterTemplateAndWaitInput, cidrBlocks []string, hasWindows bool) {
 	specName := "cloud-provider-azure-install"
-	By("Installing the correct version of cloud-provider-azure components via helm")
+	By("Installing cloud-provider-azure components via helm")
 	options := &helmVals.Options{
 		Values: []string{fmt.Sprintf("infra.clusterName=%s", input.ConfigCluster.ClusterName), fmt.Sprintf("cloudControllerManager.clusterCIDR=%s", strings.Join(cidrBlocks, `,`))},
 	}
@@ -66,11 +66,15 @@ func InstallCalicoAndCloudProviderAzureHelmChart(ctx context.Context, input clus
 }
 
 // InstallAzureDiskCSIDriverHelmChart installs the official azure-disk CSI driver helm chart
-func InstallAzureDiskCSIDriverHelmChart(ctx context.Context, input clusterctl.ApplyClusterTemplateAndWaitInput) {
+func InstallAzureDiskCSIDriverHelmChart(ctx context.Context, input clusterctl.ApplyClusterTemplateAndWaitInput, hasWindows bool) {
 	specName := "azuredisk-csi-drivers-install"
-	By("Installing the correct version of azure-disk CSI driver components via helm")
+	By("Installing azure-disk CSI driver components via helm")
 	options := &helmVals.Options{
-		Values: []string{"windows.useHostProcessContainers=true", "controller.replicas=1", "controller.runOnControlPlane=true"},
+		Values: []string{"controller.replicas=1", "controller.runOnControlPlane=true"},
+	}
+	// TODO: make this always true once HostProcessContainers are on for all supported k8s versions.
+	if hasWindows {
+		options.Values = append(options.Values, "windows.useHostProcessContainers=true")
 	}
 	InstallHelmChart(ctx, input, kubesystem, azureDiskCSIDriverHelmRepoURL, azureDiskCSIDriverChartName, azureDiskCSIDriverHelmReleaseName, options)
 	clusterProxy := input.ClusterProxy.GetWorkloadCluster(ctx, input.ConfigCluster.Namespace, input.ConfigCluster.ClusterName)
diff --git a/test/e2e/common.go b/test/e2e/common.go
index 0307f9b1ab7..8dac76a2089 100644
--- a/test/e2e/common.go
+++ b/test/e2e/common.go
@@ -30,6 +30,7 @@ import (
 
 	"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2020-10-01/resources"
 	"github.com/Azure/go-autorest/autorest/azure/auth"
+	"github.com/blang/semver"
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 	corev1 "k8s.io/api/core/v1"
@@ -261,7 +262,13 @@ func EnsureControlPlaneInitialized(ctx context.Context, input clusterctl.ApplyCl
 		InstallCalicoHelmChart(ctx, input, cluster.Spec.ClusterNetwork.Pods.CIDRBlocks, hasWindows)
 	}
 	controlPlane := discoveryAndWaitForControlPlaneInitialized(ctx, input, result)
-	InstallAzureDiskCSIDriverHelmChart(ctx, input)
+	v, err := semver.ParseTolerant(input.ConfigCluster.KubernetesVersion)
+	Expect(err).NotTo(HaveOccurred())
+	if v.GTE(semver.MustParse("1.23.0")) {
+		InstallAzureDiskCSIDriverHelmChart(ctx, input, hasWindows)
+	} else {
+		Logf("Skipping Azure Disk CSI Driver installation for Kubernetes version %s", input.ConfigCluster.KubernetesVersion)
+	}
 	result.ControlPlane = controlPlane
 }
 
diff --git a/test/e2e/csi_migration_test.go b/test/e2e/csi_migration_test.go
index 8642dac7a6d..1fe4aed2f93 100644
--- a/test/e2e/csi_migration_test.go
+++ b/test/e2e/csi_migration_test.go
@@ -140,9 +140,6 @@ var _ = Describe("[K8s-Upgrade] Running the CSI migration tests", func() {
 		Expect(os.Unsetenv(ClusterIdentitySecretName)).To(Succeed())
 		Expect(os.Unsetenv(ClusterIdentitySecretNamespace)).To(Succeed())
 
-		Expect(os.Unsetenv("WINDOWS_WORKER_MACHINE_COUNT")).To(Succeed())
-		Expect(os.Unsetenv("K8S_FEATURE_GATES")).To(Succeed())
-
 		logCheckpoint(specTimes)
 	})
 
@@ -243,7 +240,7 @@ var _ = Describe("[K8s-Upgrade] Running the CSI migration tests", func() {
 			configCluster.KubernetesVersion = postCSIKubernetesVersion
 			// This flavour uses external csi driver and in tree cloud provider
 			configCluster.Flavor = "external-azurediskcsi-driver"
-			upgradedCluster, kcp := createClusterWithControlPlaneWaiters(ctx, configCluster, clusterctl.ControlPlaneWaiters{}, result)
+			upgradedCluster, kcp := createClusterWithControlPlaneWaiters(ctx, configCluster, clusterctl.ControlPlaneWaiters{WaitForControlPlaneInitialized: EnsureControlPlaneInitialized}, result)
 			// Wait for control plane to be upgraded successfully
 			By("Waiting for control-plane machines to have the upgraded kubernetes version")
 			framework.WaitForControlPlaneMachinesToBeUpgraded(ctx, framework.WaitForControlPlaneMachinesToBeUpgradedInput{
@@ -276,7 +273,7 @@
 	})
 
 	Context("CSI=external CCM=external AzureDiskCSIMigration=true: upgrade to v1.23", func() {
-		It("should create volumes dynamically with intree cloud provider", func() {
+		It("should create volumes dynamically with out-of-tree cloud provider", func() {
 			By("Creating workload cluster v1.22 using user-assigned identity")
 			clusterName = getClusterName(clusterNamePrefix, "external-providers")
 			configCluster := defaultConfigCluster(clusterName, namespace.Name)
@@ -301,7 +298,7 @@
 			By("Upgrade the workload cluster to v1.23")
 			configCluster.KubernetesVersion = postCSIKubernetesVersion
 			configCluster.Flavor = "external-cloud-provider"
-			upgradedCluster, kcp := createClusterWithControlPlaneWaiters(ctx, configCluster, clusterctl.ControlPlaneWaiters{}, result)
+			upgradedCluster, kcp := createClusterWithControlPlaneWaiters(ctx, configCluster, clusterctl.ControlPlaneWaiters{WaitForControlPlaneInitialized: EnsureControlPlaneInitialized}, result)
 			// Wait for control plane to be upgraded successfully
 			By("Waiting for control-plane machines to have the upgraded kubernetes version")
 			framework.WaitForControlPlaneMachinesToBeUpgraded(ctx, framework.WaitForControlPlaneMachinesToBeUpgradedInput{
diff --git a/test/e2e/helpers.go b/test/e2e/helpers.go
index 2635fba0b24..74bfe67298f 100644
--- a/test/e2e/helpers.go
+++ b/test/e2e/helpers.go
@@ -48,9 +48,11 @@ import (
 	helmCli "helm.sh/helm/v3/pkg/cli"
 	helmVals "helm.sh/helm/v3/pkg/cli/values"
 	helmGetter "helm.sh/helm/v3/pkg/getter"
+	"helm.sh/helm/v3/pkg/storage/driver"
 	appsv1 "k8s.io/api/apps/v1"
 	batchv1 "k8s.io/api/batch/v1"
 	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes"
 	typedappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
@@ -826,38 +828,58 @@ func InstallHelmChart(ctx context.Context, input clusterctl.ApplyClusterTemplate
 	actionConfig := new(helmAction.Configuration)
 	err := actionConfig.Init(settings.RESTClientGetter(), namespace, "secret", Logf)
 	Expect(err).To(BeNil())
-	i := helmAction.NewInstall(actionConfig)
-	if repoURL != "" {
-		i.RepoURL = repoURL
-	}
-	i.ReleaseName = releaseName
-	i.Namespace = namespace
-	i.CreateNamespace = true
+
+	// If the release does not exist, install it.
+	histClient := helmAction.NewHistory(actionConfig)
+	histClient.Max = 1
+	var releaseExists bool
 	Eventually(func() error {
-		cp, err := i.ChartPathOptions.LocateChart(chartName, helmCli.New())
-		if err != nil {
-			return err
-		}
-		p := helmGetter.All(settings)
-		if options == nil {
-			options = &helmVals.Options{}
-		}
-		valueOpts := options
-		vals, err := valueOpts.MergeValues(p)
-		if err != nil {
-			return err
-		}
-		chartRequested, err := helmLoader.Load(cp)
-		if err != nil {
-			return err
-		}
-		release, err := i.RunWithContext(ctx, chartRequested, vals)
-		if err != nil {
-			return err
+		_, err := histClient.Run(releaseName)
+		if err == driver.ErrReleaseNotFound {
+			releaseExists = false
+			return nil
+		} else if err == nil {
+			releaseExists = true
 		}
-		Logf(release.Info.Description)
-		return nil
+		return err
 	}, input.WaitForControlPlaneIntervals...).Should(Succeed())
+	if releaseExists {
+		Logf("Release %s already exists, skipping install", releaseName)
+	} else {
+		Logf("Release %s does not exist, installing it", releaseName)
+		i := helmAction.NewInstall(actionConfig)
+		if repoURL != "" {
+			i.RepoURL = repoURL
+		}
+		i.ReleaseName = releaseName
+		i.Namespace = namespace
+		i.CreateNamespace = true
+		Eventually(func() error {
+			cp, err := i.ChartPathOptions.LocateChart(chartName, helmCli.New())
+			if err != nil {
+				return err
+			}
+			p := helmGetter.All(settings)
+			if options == nil {
+				options = &helmVals.Options{}
+			}
+			valueOpts := options
+			vals, err := valueOpts.MergeValues(p)
+			if err != nil {
+				return err
+			}
+			chartRequested, err := helmLoader.Load(cp)
+			if err != nil {
+				return err
+			}
+			release, err := i.RunWithContext(ctx, chartRequested, vals)
+			if err != nil {
+				return err
+			}
+			Logf(release.Info.Description)
+			return nil
+		}, input.WaitForControlPlaneIntervals...).Should(Succeed())
+	}
 }
 
 func defaultConfigCluster(clusterName, namespace string) clusterctl.ConfigClusterInput {
@@ -896,5 +918,8 @@ func CopyConfigMap(ctx context.Context, cl client.Client, cmName, fromNamespace,
 	cm.SetNamespace(toNamespace)
 	cm.SetResourceVersion("")
 	framework.EnsureNamespace(ctx, cl, toNamespace)
-	Expect(cl.Create(ctx, cm.DeepCopy())).To(Succeed())
+	err := cl.Create(ctx, cm.DeepCopy())
+	if !apierrors.IsAlreadyExists(err) {
+		Expect(err).To(Succeed())
+	}
 }