diff --git a/test/e2e/aks_autoscaler.go b/test/e2e/aks_autoscaler.go
index 0a5a887209f..be3653f0d8b 100644
--- a/test/e2e/aks_autoscaler.go
+++ b/test/e2e/aks_autoscaler.go
@@ -36,9 +36,10 @@ import (
 )
 
 type AKSAutoscaleSpecInput struct {
-	Cluster       *clusterv1.Cluster
-	MachinePool   *expv1.MachinePool
-	WaitIntervals []interface{}
+	Cluster        *clusterv1.Cluster
+	MachinePool    *expv1.MachinePool
+	WaitIntervals  []interface{}
+	isClusterClass bool
 }
 
 func AKSAutoscaleSpec(ctx context.Context, inputGetter func() AKSAutoscaleSpecInput) {
@@ -62,7 +63,14 @@ func AKSAutoscaleSpec(ctx context.Context, inputGetter func() AKSAutoscaleSpecIn
 	Expect(err).NotTo(HaveOccurred())
 
 	ammp := &infrav1.AzureManagedMachinePool{}
-	err = mgmtClient.Get(ctx, client.ObjectKeyFromObject(input.MachinePool), ammp)
+	if input.isClusterClass {
+		err = bootstrapClusterProxy.GetClient().Get(ctx, types.NamespacedName{
+			Namespace: input.MachinePool.Spec.Template.Spec.InfrastructureRef.Namespace,
+			Name:      input.MachinePool.Spec.Template.Spec.InfrastructureRef.Name,
+		}, ammp)
+	} else {
+		err = mgmtClient.Get(ctx, client.ObjectKeyFromObject(input.MachinePool), ammp)
+	}
 	Expect(err).NotTo(HaveOccurred())
 
 	resourceGroupName := amcp.Spec.ResourceGroupName
diff --git a/test/e2e/azure_test.go b/test/e2e/azure_test.go
index 1625f26b851..9dfd95f1bfe 100644
--- a/test/e2e/azure_test.go
+++ b/test/e2e/azure_test.go
@@ -844,6 +844,135 @@ var _ = Describe("Workload cluster creation", func() {
 		})
 	})
 
+	Context("Creating an AKS cluster using ClusterClass [Managed Kubernetes]", func() {
+		It("with a single control plane node, one linux worker node, and one windows worker node", func() {
+			// Use "default" as the ClusterClass name so the test infra can find the ClusterClass template.
+			os.Setenv("CLUSTER_CLASS_NAME", "default")
+
+			// Use "cc" as the spec name because the NAT gateway public IP name would otherwise exceed the length limit.
+			clusterName = getClusterName(clusterNamePrefix, "cc")
+			kubernetesVersionUpgradeFrom, err := GetAKSKubernetesVersion(ctx, e2eConfig, AKSKubernetesVersionUpgradeFrom)
+			Expect(err).To(BeNil())
+			kubernetesVersion, err := GetAKSKubernetesVersion(ctx, e2eConfig, AKSKubernetesVersion)
+			Expect(err).To(BeNil())
+
+			// Opt in to using Windows with the prow template.
+			Expect(os.Setenv("WINDOWS_WORKER_MACHINE_COUNT", "1")).To(Succeed())
+
+			// Create a cluster using the ClusterClass created above.
+			clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput(
+				specName,
+				withFlavor("aks-clusterclass"),
+				withNamespace(namespace.Name),
+				withClusterName(clusterName),
+				withKubernetesVersion(kubernetesVersionUpgradeFrom),
+				withControlPlaneMachineCount(1),
+				withWorkerMachineCount(1),
+				withMachineDeploymentInterval(specName, ""),
+				withMachinePoolInterval(specName, "wait-worker-nodes"),
+				withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{
+					WaitForControlPlaneInitialized:   WaitForAKSControlPlaneInitialized,
+					WaitForControlPlaneMachinesReady: WaitForAKSControlPlaneReady,
+				}),
+			), result)
+
+			By("Upgrading the Kubernetes version of the cluster", func() {
+				AKSUpgradeSpec(ctx, func() AKSUpgradeSpecInput {
+					return AKSUpgradeSpecInput{
+						Cluster:                    result.Cluster,
+						MachinePools:               result.MachinePools,
+						KubernetesVersionUpgradeTo: kubernetesVersion,
+						WaitForControlPlane:        e2eConfig.GetIntervals(specName, "wait-machine-upgrade"),
+						WaitForMachinePools:        e2eConfig.GetIntervals(specName, "wait-machine-pool-upgrade"),
+					}
+				})
+			})
+
+			By("Exercising machine pools", func() {
+				AKSMachinePoolSpec(ctx, func() AKSMachinePoolSpecInput {
+					return AKSMachinePoolSpecInput{
+						Cluster:       result.Cluster,
+						MachinePools:  result.MachinePools,
+						WaitIntervals: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
+					}
+				})
+			})
+
+			By("creating a machine pool with public IP addresses from a prefix", func() {
+				// This test is also currently serving as the canonical
+				// "create/delete node pool" test. Eventually, that should be
+				// made more distinct from this public IP prefix test.
+				AKSPublicIPPrefixSpec(ctx, func() AKSPublicIPPrefixSpecInput {
+					return AKSPublicIPPrefixSpecInput{
+						Cluster:           result.Cluster,
+						KubernetesVersion: kubernetesVersion,
+						WaitIntervals:     e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
+					}
+				})
+			})
+
+			By("creating a machine pool with spot max price and scale down mode", func() {
+				AKSSpotSpec(ctx, func() AKSSpotSpecInput {
+					return AKSSpotSpecInput{
+						Cluster:           result.Cluster,
+						KubernetesVersion: kubernetesVersion,
+						WaitIntervals:     e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
+					}
+				})
+			})
+
+			By("modifying nodepool autoscaling configuration", func() {
+				AKSAutoscaleSpec(ctx, func() AKSAutoscaleSpecInput {
+					return AKSAutoscaleSpecInput{
+						Cluster:        result.Cluster,
+						MachinePool:    result.MachinePools[0],
+						WaitIntervals:  e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
+						isClusterClass: true,
+					}
+				})
+			})
+
+			By("modifying additionalTags configuration", func() {
+				AKSAdditionalTagsSpec(ctx, func() AKSAdditionalTagsSpecInput {
+					return AKSAdditionalTagsSpecInput{
+						Cluster:       result.Cluster,
+						MachinePools:  result.MachinePools,
+						WaitForUpdate: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
+					}
+				})
+			})
+
+			By("modifying the azure cluster-autoscaler settings", func() {
+				AKSAzureClusterAutoscalerSettingsSpec(ctx, func() AKSAzureClusterAutoscalerSettingsSpecInput {
+					return AKSAzureClusterAutoscalerSettingsSpecInput{
+						Cluster:       result.Cluster,
+						WaitIntervals: e2eConfig.GetIntervals(specName, "wait-control-plane"),
+					}
+				})
+			})
+
+			By("modifying node labels configuration", func() {
+				AKSNodeLabelsSpec(ctx, func() AKSNodeLabelsSpecInput {
+					return AKSNodeLabelsSpecInput{
+						Cluster:       result.Cluster,
+						MachinePools:  result.MachinePools,
+						WaitForUpdate: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
+					}
+				})
+			})
+
+			By("modifying taints configuration", func() {
+				AKSNodeTaintsSpec(ctx, func() AKSNodeTaintsSpecInput {
+					return AKSNodeTaintsSpecInput{
+						Cluster:       result.Cluster,
+						MachinePools:  result.MachinePools,
+						WaitForUpdate: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
+					}
+				})
+			})
+		})
+	})
+
 	// ci-e2e.sh and Prow CI skip this test by default. To include this test, set `GINKGO_SKIP=""`.
 	// This spec expects a user-assigned identity named "cloud-provider-user-identity" in a "capz-ci"
 	// resource group. Override these defaults by setting the USER_IDENTITY and CI_RG environment variables.
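
Note on the isClusterClass branch in test/e2e/aks_autoscaler.go above: when a cluster's topology is managed by a ClusterClass, the AzureManagedMachinePool objects are generated by the topology controller and do not share the MachinePool's object key, so the spec has to follow the MachinePool's infrastructureRef instead of reusing the MachinePool's own name. A minimal standalone sketch of that lookup; the resolveAMMP helper name is hypothetical (the PR inlines this logic in AKSAutoscaleSpec):

// Assumed imports, matching those used in the diff above:
//   "context"
//   "k8s.io/apimachinery/pkg/types"
//   infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
//   expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
//   "sigs.k8s.io/controller-runtime/pkg/client"

// resolveAMMP fetches the AzureManagedMachinePool backing a MachinePool by
// following its infrastructureRef rather than assuming matching object keys.
// Hypothetical helper, shown for illustration only.
func resolveAMMP(ctx context.Context, c client.Client, mp *expv1.MachinePool) (*infrav1.AzureManagedMachinePool, error) {
	ammp := &infrav1.AzureManagedMachinePool{}
	ref := mp.Spec.Template.Spec.InfrastructureRef
	if err := c.Get(ctx, types.NamespacedName{Namespace: ref.Namespace, Name: ref.Name}, ammp); err != nil {
		return nil, err
	}
	return ammp, nil
}
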
diff --git a/test/e2e/common.go b/test/e2e/common.go
index 3556c62f3b3..b690f6926e1 100644
--- a/test/e2e/common.go
+++ b/test/e2e/common.go
@@ -39,6 +39,7 @@ import (
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
 	"k8s.io/utils/ptr"
+	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
 	"sigs.k8s.io/cluster-api-provider-azure/azure"
 	e2e_namespace "sigs.k8s.io/cluster-api-provider-azure/test/e2e/kubernetes/namespace"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
@@ -291,7 +292,7 @@ func ensureControlPlaneInitialized(ctx context.Context, input clusterctl.ApplyCu
 	}, input.WaitForControlPlaneIntervals...).Should(Succeed(), "API Server was not reachable in time")
 
 	_, hasWindows := cluster.Labels["cni-windows"]
-	if kubeadmControlPlane.Spec.KubeadmConfigSpec.ClusterConfiguration.ControllerManager.ExtraArgs["cloud-provider"] != "azure" {
+	if kubeadmControlPlane.Spec.KubeadmConfigSpec.ClusterConfiguration.ControllerManager.ExtraArgs["cloud-provider"] != infrav1.CloudProviderName {
 		// There is a co-dependency between cloud-provider and CNI so we install both together if cloud-provider is external.
 		InstallCNIAndCloudProviderAzureHelmChart(ctx, input, installHelmCharts, cluster.Spec.ClusterNetwork.Pods.CIDRBlocks, hasWindows)
 	} else {
diff --git a/test/e2e/config/azure-dev.yaml b/test/e2e/config/azure-dev.yaml
index 6a6fa1ec4b3..acb3dffa831 100644
--- a/test/e2e/config/azure-dev.yaml
+++ b/test/e2e/config/azure-dev.yaml
@@ -3,11 +3,11 @@ managementClusterName: capz-e2e
 images:
   - name: ${MANAGER_IMAGE}
     loadBehavior: mustLoad
-  - name: registry.k8s.io/cluster-api/cluster-api-controller:v1.5.2
+  - name: gcr.io/k8s-staging-cluster-api/cluster-api-controller:nightly_main_20230929
     loadBehavior: tryLoad
-  - name: registry.k8s.io/cluster-api/kubeadm-bootstrap-controller:v1.5.2
+  - name: gcr.io/k8s-staging-cluster-api/kubeadm-bootstrap-controller:nightly_main_20230929
     loadBehavior: tryLoad
-  - name: registry.k8s.io/cluster-api/kubeadm-control-plane-controller:v1.5.2
+  - name: gcr.io/k8s-staging-cluster-api/kubeadm-control-plane-controller:nightly_main_20230929
     loadBehavior: tryLoad
   - name: registry.k8s.io/cluster-api-helm/cluster-api-helm-controller:v0.1.0-alpha.9
     loadBehavior: tryLoad
@@ -26,7 +26,7 @@ providers:
       files:
         - sourcePath: "../data/shared/v1beta1/metadata.yaml"
   - name: v1.5.2
-    value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.2/core-components.yaml
+    value: "https://storage.googleapis.com/artifacts.k8s-staging-cluster-api.appspot.com/components/nightly_main_20230929/core-components.yaml"
     type: url
     contract: v1beta1
     files:
@@ -49,7 +49,7 @@ providers:
      files:
        - sourcePath: "../data/shared/v1beta1/metadata.yaml"
  - name: v1.5.2
-    value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.2/bootstrap-components.yaml
+    value: https://storage.googleapis.com/artifacts.k8s-staging-cluster-api.appspot.com/components/nightly_main_20230929/bootstrap-components.yaml
     type: url
     contract: v1beta1
     files:
@@ -71,7 +71,7 @@ providers:
      files:
        - sourcePath: "../data/shared/v1beta1/metadata.yaml"
  - name: v1.5.2
-    value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.2/control-plane-components.yaml
+    value: https://storage.googleapis.com/artifacts.k8s-staging-cluster-api.appspot.com/components/nightly_main_20230929/control-plane-components.yaml
     type: url
     contract: v1beta1
     files:
@@ -132,12 +132,16 @@ providers:
           targetName: "cluster-template-workload-identity.yaml"
"${PWD}/templates/test/ci/cluster-template-prow-aks.yaml" targetName: "cluster-template-aks.yaml" + - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-aks-clusterclass.yaml" + targetName: "cluster-template-aks-clusterclass.yaml" - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-custom-vnet.yaml" targetName: "cluster-template-custom-vnet.yaml" - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-dual-stack.yaml" targetName: "cluster-template-dual-stack.yaml" - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-clusterclass-ci-default.yaml" targetName: "clusterclass-ci-default.yaml" + - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-aks-clusterclass.yaml" + targetName: "clusterclass-default.yaml" - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-topology.yaml" targetName: "cluster-template-topology.yaml" - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-flatcar.yaml"