E2E tests

willie-yao committed Oct 19, 2023
1 parent 13d00b4 commit 9ce228c
Showing 4 changed files with 155 additions and 13 deletions.
16 changes: 12 additions & 4 deletions test/e2e/aks_autoscaler.go
@@ -36,9 +36,10 @@ import (
)

type AKSAutoscaleSpecInput struct {
-	Cluster       *clusterv1.Cluster
-	MachinePool   *expv1.MachinePool
-	WaitIntervals []interface{}
+	Cluster        *clusterv1.Cluster
+	MachinePool    *expv1.MachinePool
+	WaitIntervals  []interface{}
+	isClusterClass bool
}

func AKSAutoscaleSpec(ctx context.Context, inputGetter func() AKSAutoscaleSpecInput) {
@@ -62,7 +63,14 @@ func AKSAutoscaleSpec(ctx context.Context, inputGetter func() AKSAutoscaleSpecIn
	Expect(err).NotTo(HaveOccurred())

	ammp := &infrav1.AzureManagedMachinePool{}
-	err = mgmtClient.Get(ctx, client.ObjectKeyFromObject(input.MachinePool), ammp)
+	if input.isClusterClass {
+		err = bootstrapClusterProxy.GetClient().Get(ctx, types.NamespacedName{
+			Namespace: input.MachinePool.Spec.Template.Spec.InfrastructureRef.Namespace,
+			Name:      input.MachinePool.Spec.Template.Spec.InfrastructureRef.Name,
+		}, ammp)
+	} else {
+		err = mgmtClient.Get(ctx, client.ObjectKeyFromObject(input.MachinePool), ammp)
+	}
	Expect(err).NotTo(HaveOccurred())

	resourceGroupName := amcp.Spec.ResourceGroupName
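For illustration only (not part of this commit): a minimal, self-contained sketch of why the two lookups above differ. A non-ClusterClass AzureManagedMachinePool shares its MachinePool's namespace/name, so client.ObjectKeyFromObject suffices, while a ClusterClass topology generates the infrastructure object under a different name that must be read from the infrastructureRef. All object names below are hypothetical.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func main() {
	// A MachinePool roughly as the topology controller might generate it: the
	// pool's own name differs from the name in its infrastructure reference.
	mp := &expv1.MachinePool{
		ObjectMeta: metav1.ObjectMeta{Name: "my-cluster-pool0-abc12", Namespace: "default"},
		Spec: expv1.MachinePoolSpec{
			Template: clusterv1.MachineTemplateSpec{
				Spec: clusterv1.MachineSpec{
					InfrastructureRef: corev1.ObjectReference{
						Kind:      "AzureManagedMachinePool",
						Namespace: "default",
						Name:      "my-cluster-pool0-xyz89",
					},
				},
			},
		},
	}

	// Non-ClusterClass lookup: the key is the MachinePool's own namespace/name.
	fmt.Println(client.ObjectKeyFromObject(mp)) // default/my-cluster-pool0-abc12

	// ClusterClass lookup: the key follows the infrastructure reference instead.
	fmt.Println(types.NamespacedName{
		Namespace: mp.Spec.Template.Spec.InfrastructureRef.Namespace,
		Name:      mp.Spec.Template.Spec.InfrastructureRef.Name,
	}) // default/my-cluster-pool0-xyz89
}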
133 changes: 131 additions & 2 deletions test/e2e/azure_test.go
@@ -844,6 +844,135 @@ var _ = Describe("Workload cluster creation", func() {
		})
	})

+	Context("Creating an AKS cluster using ClusterClass [Managed Kubernetes]", func() {
+		It("with a single control plane node, one linux worker node, and one windows worker node", func() {
+			// Use default as the clusterclass name so test infra can find the clusterclass template
+			os.Setenv("CLUSTER_CLASS_NAME", "default")
+
+			// Use "cc" as spec name because NAT gateway pip name exceeds limit.
+			clusterName = getClusterName(clusterNamePrefix, "cc")
+			kubernetesVersionUpgradeFrom, err := GetAKSKubernetesVersion(ctx, e2eConfig, AKSKubernetesVersionUpgradeFrom)
+			Expect(err).To(BeNil())
+			kubernetesVersion, err := GetAKSKubernetesVersion(ctx, e2eConfig, AKSKubernetesVersion)
+			Expect(err).To(BeNil())
+
+			// Opt into using windows with prow template
+			Expect(os.Setenv("WINDOWS_WORKER_MACHINE_COUNT", "1")).To(Succeed())
+
+			// Create a cluster using the cluster class created above
+			clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput(
+				specName,
+				withFlavor("aks-clusterclass"),
+				withNamespace(namespace.Name),
+				withClusterName(clusterName),
+				withKubernetesVersion(kubernetesVersionUpgradeFrom),
+				withControlPlaneMachineCount(1),
+				withWorkerMachineCount(1),
+				withMachineDeploymentInterval(specName, ""),
+				withMachinePoolInterval(specName, "wait-worker-nodes"),
+				withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{
+					WaitForControlPlaneInitialized:   WaitForAKSControlPlaneInitialized,
+					WaitForControlPlaneMachinesReady: WaitForAKSControlPlaneReady,
+				}),
+			), result)
+
+			By("Upgrading the Kubernetes version of the cluster", func() {
+				AKSUpgradeSpec(ctx, func() AKSUpgradeSpecInput {
+					return AKSUpgradeSpecInput{
+						Cluster:                    result.Cluster,
+						MachinePools:               result.MachinePools,
+						KubernetesVersionUpgradeTo: kubernetesVersion,
+						WaitForControlPlane:        e2eConfig.GetIntervals(specName, "wait-machine-upgrade"),
+						WaitForMachinePools:        e2eConfig.GetIntervals(specName, "wait-machine-pool-upgrade"),
+					}
+				})
+			})
+
+			By("Exercising machine pools", func() {
+				AKSMachinePoolSpec(ctx, func() AKSMachinePoolSpecInput {
+					return AKSMachinePoolSpecInput{
+						Cluster:       result.Cluster,
+						MachinePools:  result.MachinePools,
+						WaitIntervals: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
+					}
+				})
+			})
+
+			By("creating a machine pool with public IP addresses from a prefix", func() {
+				// This test is also currently serving as the canonical
+				// "create/delete node pool" test. Eventually, that should be
+				// made more distinct from this public IP prefix test.
+				AKSPublicIPPrefixSpec(ctx, func() AKSPublicIPPrefixSpecInput {
+					return AKSPublicIPPrefixSpecInput{
+						Cluster:           result.Cluster,
+						KubernetesVersion: kubernetesVersion,
+						WaitIntervals:     e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
+					}
+				})
+			})
+
+			By("creating a machine pool with spot max price and scale down mode", func() {
+				AKSSpotSpec(ctx, func() AKSSpotSpecInput {
+					return AKSSpotSpecInput{
+						Cluster:           result.Cluster,
+						KubernetesVersion: kubernetesVersion,
+						WaitIntervals:     e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
+					}
+				})
+			})
+
+			By("modifying nodepool autoscaling configuration", func() {
+				AKSAutoscaleSpec(ctx, func() AKSAutoscaleSpecInput {
+					return AKSAutoscaleSpecInput{
+						Cluster:        result.Cluster,
+						MachinePool:    result.MachinePools[0],
+						WaitIntervals:  e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
+						isClusterClass: true,
+					}
+				})
+			})
+
+			By("modifying additionalTags configuration", func() {
+				AKSAdditionalTagsSpec(ctx, func() AKSAdditionalTagsSpecInput {
+					return AKSAdditionalTagsSpecInput{
+						Cluster:       result.Cluster,
+						MachinePools:  result.MachinePools,
+						WaitForUpdate: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
+					}
+				})
+			})
+
+			By("modifying the azure cluster-autoscaler settings", func() {
+				AKSAzureClusterAutoscalerSettingsSpec(ctx, func() AKSAzureClusterAutoscalerSettingsSpecInput {
+					return AKSAzureClusterAutoscalerSettingsSpecInput{
+						Cluster:       result.Cluster,
+						WaitIntervals: e2eConfig.GetIntervals(specName, "wait-control-plane"),
+					}
+				})
+			})
+
+			By("modifying node labels configuration", func() {
+				AKSNodeLabelsSpec(ctx, func() AKSNodeLabelsSpecInput {
+					return AKSNodeLabelsSpecInput{
+						Cluster:       result.Cluster,
+						MachinePools:  result.MachinePools,
+						WaitForUpdate: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
+					}
+				})
+			})
+
+			By("modifying taints configuration", func() {
+				AKSNodeTaintsSpec(ctx, func() AKSNodeTaintsSpecInput {
+					return AKSNodeTaintsSpecInput{
+						Cluster:       result.Cluster,
+						MachinePools:  result.MachinePools,
+						WaitForUpdate: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
+					}
+				})
+			})
+		})
+	})

	// ci-e2e.sh and Prow CI skip this test by default. To include this test, set `GINKGO_SKIP=""`.
	// This spec expects a user-assigned identity named "cloud-provider-user-identity" in a "capz-ci"
	// resource group. Override these defaults by setting the USER_IDENTITY and CI_RG environment variables.
@@ -917,10 +1046,10 @@ var _ = Describe("Workload cluster creation", func() {

Context("Creating clusters using clusterclass [OPTIONAL]", func() {
It("with a single control plane node, one linux worker node, and one windows worker node", func() {
// use ci-default as the clusterclass name so test infra can find the clusterclass template
// Use ci-default as the clusterclass name so test infra can find the clusterclass template
os.Setenv("CLUSTER_CLASS_NAME", "ci-default")

// use "cc" as spec name because natgw pip name exceeds limit.
// Use "cc" as spec name because NAT gateway pip name exceeds limit.
clusterName = getClusterName(clusterNamePrefix, "cc")

// Opt into using windows with prow template
3 changes: 2 additions & 1 deletion test/e2e/common.go
@@ -39,6 +39,7 @@ import (
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/utils/ptr"
infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
"sigs.k8s.io/cluster-api-provider-azure/azure"
e2e_namespace "sigs.k8s.io/cluster-api-provider-azure/test/e2e/kubernetes/namespace"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
@@ -291,7 +292,7 @@ func ensureControlPlaneInitialized(ctx context.Context, input clusterctl.ApplyCu
	}, input.WaitForControlPlaneIntervals...).Should(Succeed(), "API Server was not reachable in time")

	_, hasWindows := cluster.Labels["cni-windows"]
-	if kubeadmControlPlane.Spec.KubeadmConfigSpec.ClusterConfiguration.ControllerManager.ExtraArgs["cloud-provider"] != "azure" {
+	if kubeadmControlPlane.Spec.KubeadmConfigSpec.ClusterConfiguration.ControllerManager.ExtraArgs["cloud-provider"] != infrav1.CloudProviderName {
		// There is a co-dependency between cloud-provider and CNI so we install both together if cloud-provider is external.
		InstallCNIAndCloudProviderAzureHelmChart(ctx, input, installHelmCharts, cluster.Spec.ClusterNetwork.Pods.CIDRBlocks, hasWindows)
	} else {
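The constant swap above is behavior-preserving only if infrav1.CloudProviderName has the value "azure", which the unchanged comparison semantics imply; a minimal sketch of the branch under that assumption (the map contents are hypothetical):

package main

import (
	"fmt"

	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
)

func main() {
	// controller-manager extra args as they might appear on a KubeadmControlPlane.
	extraArgs := map[string]string{"cloud-provider": "external"}

	// Same decision as the old literal comparison, without hard-coding "azure".
	if extraArgs["cloud-provider"] != infrav1.CloudProviderName {
		fmt.Println("external cloud-provider: install CNI and cloud-provider-azure together")
	} else {
		fmt.Println("in-tree azure cloud-provider")
	}
}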
16 changes: 10 additions & 6 deletions test/e2e/config/azure-dev.yaml
@@ -3,11 +3,11 @@ managementClusterName: capz-e2e
images:
  - name: ${MANAGER_IMAGE}
    loadBehavior: mustLoad
-  - name: registry.k8s.io/cluster-api/cluster-api-controller:v1.5.2
+  - name: gcr.io/k8s-staging-cluster-api/cluster-api-controller:nightly_main_20230929 # We are using a nightly build temporarily for ClusterClass machinepool support. Revert once CAPI v1.6 is released.
    loadBehavior: tryLoad
-  - name: registry.k8s.io/cluster-api/kubeadm-bootstrap-controller:v1.5.2
+  - name: gcr.io/k8s-staging-cluster-api/kubeadm-bootstrap-controller:nightly_main_20230929
    loadBehavior: tryLoad
-  - name: registry.k8s.io/cluster-api/kubeadm-control-plane-controller:v1.5.2
+  - name: gcr.io/k8s-staging-cluster-api/kubeadm-control-plane-controller:nightly_main_20230929
    loadBehavior: tryLoad
  - name: registry.k8s.io/cluster-api-helm/cluster-api-helm-controller:v0.1.0-alpha.9
    loadBehavior: tryLoad
@@ -26,7 +26,7 @@ providers:
        files:
          - sourcePath: "../data/shared/v1beta1/metadata.yaml"
      - name: v1.5.2
-        value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.2/core-components.yaml
+        value: "https://storage.googleapis.com/artifacts.k8s-staging-cluster-api.appspot.com/components/nightly_main_20230929/core-components.yaml"
        type: url
        contract: v1beta1
        files:
@@ -49,7 +49,7 @@ providers:
        files:
          - sourcePath: "../data/shared/v1beta1/metadata.yaml"
      - name: v1.5.2
-        value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.2/bootstrap-components.yaml
+        value: https://storage.googleapis.com/artifacts.k8s-staging-cluster-api.appspot.com/components/nightly_main_20230929/bootstrap-components.yaml
        type: url
        contract: v1beta1
        files:
@@ -71,7 +71,7 @@ providers:
        files:
          - sourcePath: "../data/shared/v1beta1/metadata.yaml"
      - name: v1.5.2
-        value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.2/control-plane-components.yaml
+        value: https://storage.googleapis.com/artifacts.k8s-staging-cluster-api.appspot.com/components/nightly_main_20230929/control-plane-components.yaml
        type: url
        contract: v1beta1
        files:
@@ -132,12 +132,16 @@ providers:
            targetName: "cluster-template-workload-identity.yaml"
          - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-aks.yaml"
            targetName: "cluster-template-aks.yaml"
+         - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-aks-clusterclass.yaml"
+           targetName: "cluster-template-aks-clusterclass.yaml"
          - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-custom-vnet.yaml"
            targetName: "cluster-template-custom-vnet.yaml"
          - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-dual-stack.yaml"
            targetName: "cluster-template-dual-stack.yaml"
          - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-clusterclass-ci-default.yaml"
            targetName: "clusterclass-ci-default.yaml"
+         - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-aks-clusterclass.yaml"
+           targetName: "clusterclass-default.yaml"
          - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-topology.yaml"
            targetName: "cluster-template-topology.yaml"
          - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-flatcar.yaml"
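The new targets tie the test code to this config: withFlavor("aks-clusterclass") selects the cluster-template-aks-clusterclass.yaml target, and CLUSTER_CLASS_NAME="default" lines up with the clusterclass-default.yaml target, both registered above. A rough sketch of the flavor-to-filename convention the clusterctl E2E framework follows (hypothetical helper, not the framework's actual code):

package main

import "fmt"

// templateTargetForFlavor mirrors the naming convention: an empty flavor
// selects "cluster-template.yaml"; any other flavor selects
// "cluster-template-<flavor>.yaml".
func templateTargetForFlavor(flavor string) string {
	if flavor == "" {
		return "cluster-template.yaml"
	}
	return fmt.Sprintf("cluster-template-%s.yaml", flavor)
}

func main() {
	fmt.Println(templateTargetForFlavor("aks-clusterclass")) // cluster-template-aks-clusterclass.yaml
}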
