diff --git a/Makefile b/Makefile index 82d359ee49d..c983837d579 100644 --- a/Makefile +++ b/Makefile @@ -176,6 +176,9 @@ GINKGO_ARGS ?= ARTIFACTS ?= $(ROOT_DIR)/_artifacts E2E_CONF_FILE ?= $(ROOT_DIR)/test/e2e/config/azure-dev.yaml E2E_CONF_FILE_ENVSUBST := $(ROOT_DIR)/test/e2e/config/azure-dev-envsubst.yaml +E2E_CLOUD_PROVIDER_AZURE_PATH ?= $(ROOT_DIR)/templates/caaph/cloud-provider-azure.yaml +E2E_CLOUD_PROVIDER_AZURE_CI_PATH ?= $(ROOT_DIR)/templates/caaph/cloud-provider-azure-ci.yaml + SKIP_CLEANUP ?= false SKIP_LOG_COLLECTION ?= false # @sonasingh46: Skip creating mgmt cluster for ci as workload identity needs kind cluster @@ -690,6 +693,8 @@ test-e2e-run: generate-e2e-templates install-tools kind-create-bootstrap ## Run $(GINKGO) -v --trace --timeout=4h --tags=e2e --focus="$(GINKGO_FOCUS)" --skip="$(GINKGO_SKIP)" --nodes=$(GINKGO_NODES) --no-color=$(GINKGO_NOCOLOR) --output-dir="$(ARTIFACTS)" --junit-report="junit.e2e_suite.1.xml" $(GINKGO_ARGS) ./test/e2e -- \ -e2e.artifacts-folder="$(ARTIFACTS)" \ -e2e.config="$(E2E_CONF_FILE_ENVSUBST)" \ + -e2e.cloud-provider-azure="$(E2E_CLOUD_PROVIDER_AZURE_PATH)" \ + -e2e.cloud-provider-azure-ci="$(E2E_CLOUD_PROVIDER_AZURE_CI_PATH)" \ -e2e.skip-log-collection="$(SKIP_LOG_COLLECTION)" \ -e2e.skip-resource-cleanup=$(SKIP_CLEANUP) -e2e.use-existing-cluster=$(SKIP_CREATE_MGMT_CLUSTER) $(E2E_ARGS) $(MAKE) clean-release-git diff --git a/go.mod b/go.mod index 3fd79d61c82..787fada011b 100644 --- a/go.mod +++ b/go.mod @@ -18,6 +18,7 @@ require ( github.com/Azure/go-autorest/tracing v0.6.0 github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d github.com/blang/semver v3.5.1+incompatible + github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46 github.com/go-logr/logr v1.2.4 github.com/google/go-cmp v0.5.9 github.com/google/uuid v1.3.1 @@ -41,15 +42,16 @@ require ( golang.org/x/crypto v0.12.0 golang.org/x/mod v0.12.0 golang.org/x/text v0.13.0 - k8s.io/api v0.27.2 - k8s.io/apimachinery v0.27.2 - k8s.io/client-go v0.27.2 - k8s.io/component-base v0.27.2 - k8s.io/klog/v2 v2.90.1 - k8s.io/kubectl v0.27.2 + k8s.io/api v0.27.3 + k8s.io/apimachinery v0.27.3 + k8s.io/client-go v0.27.3 + k8s.io/component-base v0.27.3 + k8s.io/klog/v2 v2.100.1 + k8s.io/kubectl v0.27.3 k8s.io/utils v0.0.0-20230726121419-3b25d923346b sigs.k8s.io/cloud-provider-azure v1.27.7 sigs.k8s.io/cluster-api v1.5.1 + sigs.k8s.io/cluster-api-addon-provider-helm v0.1.0-alpha.9 sigs.k8s.io/cluster-api/test v1.5.1 sigs.k8s.io/controller-runtime v0.15.1 sigs.k8s.io/kind v0.20.0 @@ -90,7 +92,6 @@ require ( github.com/docker/docker v24.0.5+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect - github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46 // indirect github.com/emicklei/go-restful/v3 v3.10.2 // indirect github.com/evanphx/json-patch v5.6.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect @@ -191,9 +192,9 @@ require ( gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.27.2 // indirect - k8s.io/apiserver v0.27.2 // indirect - k8s.io/cli-runtime v0.27.2 // indirect + k8s.io/apiextensions-apiserver v0.27.3 // indirect + k8s.io/apiserver v0.27.3 // indirect + k8s.io/cli-runtime v0.27.3 // indirect k8s.io/cloud-provider v0.27.1 // indirect k8s.io/cluster-bootstrap v0.27.2 // indirect k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect diff --git a/go.sum b/go.sum 
index 9166d41d458..fc3de705a24 100644 --- a/go.sum +++ b/go.sum @@ -1064,30 +1064,30 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.27.2 h1:+H17AJpUMvl+clT+BPnKf0E3ksMAzoBBg7CntpSuADo= -k8s.io/api v0.27.2/go.mod h1:ENmbocXfBT2ADujUXcBhHV55RIT31IIEvkntP6vZKS4= -k8s.io/apiextensions-apiserver v0.27.2 h1:iwhyoeS4xj9Y7v8YExhUwbVuBhMr3Q4bd/laClBV6Bo= -k8s.io/apiextensions-apiserver v0.27.2/go.mod h1:Oz9UdvGguL3ULgRdY9QMUzL2RZImotgxvGjdWRq6ZXQ= -k8s.io/apimachinery v0.27.2 h1:vBjGaKKieaIreI+oQwELalVG4d8f3YAMNpWLzDXkxeg= -k8s.io/apimachinery v0.27.2/go.mod h1:XNfZ6xklnMCOGGFNqXG7bUrQCoR04dh/E7FprV6pb+E= -k8s.io/apiserver v0.27.2 h1:p+tjwrcQEZDrEorCZV2/qE8osGTINPuS5ZNqWAvKm5E= -k8s.io/apiserver v0.27.2/go.mod h1:EsOf39d75rMivgvvwjJ3OW/u9n1/BmUMK5otEOJrb1Y= -k8s.io/cli-runtime v0.27.2 h1:9HI8gfReNujKXt16tGOAnb8b4NZ5E+e0mQQHKhFGwYw= -k8s.io/cli-runtime v0.27.2/go.mod h1:9UecpyPDTkhiYY4d9htzRqN+rKomJgyb4wi0OfrmCjw= -k8s.io/client-go v0.27.2 h1:vDLSeuYvCHKeoQRhCXjxXO45nHVv2Ip4Fe0MfioMrhE= -k8s.io/client-go v0.27.2/go.mod h1:tY0gVmUsHrAmjzHX9zs7eCjxcBsf8IiNe7KQ52biTcQ= +k8s.io/api v0.27.3 h1:yR6oQXXnUEBWEWcvPWS0jQL575KoAboQPfJAuKNrw5Y= +k8s.io/api v0.27.3/go.mod h1:C4BNvZnQOF7JA/0Xed2S+aUyJSfTGkGFxLXz9MnpIpg= +k8s.io/apiextensions-apiserver v0.27.3 h1:xAwC1iYabi+TDfpRhxh4Eapl14Hs2OftM2DN5MpgKX4= +k8s.io/apiextensions-apiserver v0.27.3/go.mod h1:BH3wJ5NsB9XE1w+R6SSVpKmYNyIiyIz9xAmBl8Mb+84= +k8s.io/apimachinery v0.27.3 h1:Ubye8oBufD04l9QnNtW05idcOe9Z3GQN8+7PqmuVcUM= +k8s.io/apimachinery v0.27.3/go.mod h1:XNfZ6xklnMCOGGFNqXG7bUrQCoR04dh/E7FprV6pb+E= +k8s.io/apiserver v0.27.3 h1:AxLvq9JYtveYWK+D/Dz/uoPCfz8JC9asR5z7+I/bbQ4= +k8s.io/apiserver v0.27.3/go.mod h1:Y61+EaBMVWUBJtxD5//cZ48cHZbQD+yIyV/4iEBhhNA= +k8s.io/cli-runtime v0.27.3 h1:h592I+2eJfXj/4jVYM+tu9Rv8FEc/dyCoD80UJlMW2Y= +k8s.io/cli-runtime v0.27.3/go.mod h1:LzXud3vFFuDFXn2LIrWnscPgUiEj7gQQcYZE2UPn9Kw= +k8s.io/client-go v0.27.3 h1:7dnEGHZEJld3lYwxvLl7WoehK6lAq7GvgjxpA3nv1E8= +k8s.io/client-go v0.27.3/go.mod h1:2MBEKuTo6V1lbKy3z1euEGnhPfGZLKTS9tiJ2xodM48= k8s.io/cloud-provider v0.27.1 h1:482W9e2Yp8LDgTUKrXAxT+nH4pHS2TiBElI/CnfGWac= k8s.io/cloud-provider v0.27.1/go.mod h1:oN7Zci2Ls2dorwSNd2fMiW/6DA40+F4o2QL70p63bqo= k8s.io/cluster-bootstrap v0.27.2 h1:OL3onrOwrUD7NQxBUqQwTl1Uu2GQKCkw9BMHpc4PbiA= k8s.io/cluster-bootstrap v0.27.2/go.mod h1:b++PF0mjUOiTKdPQFlDw7p4V2VquANZ8SfhAwzxZJFM= -k8s.io/component-base v0.27.2 h1:neju+7s/r5O4x4/txeUONNTS9r1HsPbyoPBAtHsDCpo= -k8s.io/component-base v0.27.2/go.mod h1:5UPk7EjfgrfgRIuDBFtsEFAe4DAvP3U+M8RTzoSJkpo= -k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= -k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/component-base v0.27.3 h1:g078YmdcdTfrCE4fFobt7qmVXwS8J/3cI1XxRi/2+6k= +k8s.io/component-base v0.27.3/go.mod h1:JNiKYcGImpQ44iwSYs6dysxzR9SxIIgQalk4HaCNVUY= +k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= +k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg= k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg= -k8s.io/kubectl v0.27.2 
h1:sSBM2j94MHBFRWfHIWtEXWCicViQzZsb177rNsKBhZg= -k8s.io/kubectl v0.27.2/go.mod h1:GCOODtxPcrjh+EC611MqREkU8RjYBh10ldQCQ6zpFKw= +k8s.io/kubectl v0.27.3 h1:HyC4o+8rCYheGDWrkcOQHGwDmyLKR5bxXFgpvF82BOw= +k8s.io/kubectl v0.27.3/go.mod h1:g9OQNCC2zxT+LT3FS09ZYqnDhlvsKAfFq76oyarBcq4= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= @@ -1097,6 +1097,8 @@ sigs.k8s.io/cloud-provider-azure v1.27.7 h1:X6DdOA1aUlnBfSffTdq1zGWph7L62VY6ZS1E sigs.k8s.io/cloud-provider-azure v1.27.7/go.mod h1:ytwtCpVGJFvIc+n9q6CB14QD+AiiuXnsqz3DkWG7bBI= sigs.k8s.io/cluster-api v1.5.1 h1:+oO4EbVQcbBJr5wjqmdjvewPHSTbVLigXZqPk3ZO8t0= sigs.k8s.io/cluster-api v1.5.1/go.mod h1:EGJUNpFWi7dF426tO8MG/jE+w7T0UO5KyMnOwQ5riUY= +sigs.k8s.io/cluster-api-addon-provider-helm v0.1.0-alpha.9 h1:swS0SD/RPF/J29ZOUCtC58QSb7vt3+P8nPxgFayeERE= +sigs.k8s.io/cluster-api-addon-provider-helm v0.1.0-alpha.9/go.mod h1:6v/V4h932kBJq2wZ78nRfWy9Nuu8U/52RsklokzU67w= sigs.k8s.io/cluster-api/test v1.5.1 h1:kYUfzE6RFsopXek+l/LDnh4gtfjTi2FUghEY8zx0Z9U= sigs.k8s.io/cluster-api/test v1.5.1/go.mod h1:mFlsY1y0lApBgQyXbmVprdzCK+9MQNp1C38K+aZdn5A= sigs.k8s.io/controller-runtime v0.15.1 h1:9UvgKD4ZJGcj24vefUFgZFP3xej/3igL9BsOUTb/+4c= diff --git a/internal/test/env/env.go b/internal/test/env/env.go index b70ab948bfb..ce39013e2c7 100644 --- a/internal/test/env/env.go +++ b/internal/test/env/env.go @@ -35,6 +35,7 @@ import ( "k8s.io/client-go/rest" "k8s.io/klog/v2" "k8s.io/utils/ptr" + addonsv1alpha1 "sigs.k8s.io/cluster-api-addon-provider-helm/api/v1alpha1" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/internal/test/record" @@ -59,6 +60,7 @@ func init() { utilruntime.Must(expv1.AddToScheme(scheme)) utilruntime.Must(infrav1.AddToScheme(scheme)) utilruntime.Must(infrav1exp.AddToScheme(scheme)) + utilruntime.Must(addonsv1alpha1.AddToScheme(scheme)) // Get the root of the current file to use in CRD paths. _, filename, _, _ := goruntime.Caller(0) //nolint:dogsled // Ignore "declaration has 3 blank identifiers" check. 
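Note on the scheme changes above: registering the CAAPH API group (as done in the env.go hunk here and the main.go hunk below) is what lets controller-runtime clients in the test environment and the manager read and write HelmChartProxy objects. A minimal, self-contained sketch of that pattern follows; the client construction and List call are illustrative only and are not part of this change.

```go
package main

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	addonsv1alpha1 "sigs.k8s.io/cluster-api-addon-provider-helm/api/v1alpha1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/config"
)

func main() {
	// Register the CAAPH types, mirroring the AddToScheme calls added in env.go and main.go.
	scheme := runtime.NewScheme()
	utilruntime.Must(addonsv1alpha1.AddToScheme(scheme))

	// With the types on the scheme, a client pointed at the management cluster
	// can work with HelmChartProxy objects like any other API resource.
	c, err := client.New(config.GetConfigOrDie(), client.Options{Scheme: scheme})
	if err != nil {
		panic(err)
	}

	var proxies addonsv1alpha1.HelmChartProxyList
	if err := c.List(context.Background(), &proxies); err != nil {
		panic(err)
	}
	for _, p := range proxies.Items {
		fmt.Println(p.Name, p.Spec.ChartName)
	}
}
```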
diff --git a/main.go b/main.go index b81da918138..bb7479ceb76 100644 --- a/main.go +++ b/main.go @@ -37,6 +37,7 @@ import ( cgrecord "k8s.io/client-go/tools/record" "k8s.io/klog/v2" "k8s.io/klog/v2/klogr" + addonsv1alpha1 "sigs.k8s.io/cluster-api-addon-provider-helm/api/v1alpha1" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/controllers" infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" @@ -72,6 +73,7 @@ func init() { _ = infrav1exp.AddToScheme(scheme) _ = clusterv1.AddToScheme(scheme) _ = expv1.AddToScheme(scheme) + _ = addonsv1alpha1.AddToScheme(scheme) _ = kubeadmv1.AddToScheme(scheme) _ = asoresourcesv1.AddToScheme(scheme) // +kubebuilder:scaffold:scheme diff --git a/templates/caaph/cloud-provider-azure-ci.yaml b/templates/caaph/cloud-provider-azure-ci.yaml new file mode 100644 index 00000000000..6d71cf0155b --- /dev/null +++ b/templates/caaph/cloud-provider-azure-ci.yaml @@ -0,0 +1,25 @@ +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: cloud-provider-azure-chart-ci +spec: + clusterSelector: + matchLabels: + installCloudProviderAzureChart: "true" + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/cloud-provider-azure/master/helm/repo + chartName: cloud-provider-azure + releaseName: cloud-provider-azure + valuesTemplate: | + infra: + clusterName: {{ .Cluster.metadata.name }} + cloudControllerManager: + clusterCIDR: {{ .Cluster.spec.clusterNetwork.pods.cidrBlocks | join "," }} + imageName: ${CCM_IMAGE_NAME} + imageRepository: ${CCM_IMAGE_REGISTRY} + imageTag: ${IMAGE_TAG_CCM} + logVerbosity: 4 + cloudNodeManager: + imageName: ${CNM_IMAGE_NAME} + imageRepository: ${CNM_IMAGE_REGISTRY} + imageTag: ${IMAGE_TAG_CCM} + diff --git a/templates/caaph/cloud-provider-azure.yaml b/templates/caaph/cloud-provider-azure.yaml new file mode 100644 index 00000000000..de480814739 --- /dev/null +++ b/templates/caaph/cloud-provider-azure.yaml @@ -0,0 +1,17 @@ +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: cloud-provider-azure-chart +spec: + clusterSelector: + matchLabels: + installCloudProviderAzureChart: "true" + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/cloud-provider-azure/master/helm/repo + chartName: cloud-provider-azure + releaseName: cloud-provider-azure + valuesTemplate: | + infra: + clusterName: {{ .Cluster.metadata.name }} + cloudControllerManager: + clusterCIDR: {{ .Cluster.spec.clusterNetwork.pods.cidrBlocks | join "," }} + logVerbosity: 4 diff --git a/templates/cluster-template-aad.yaml b/templates/cluster-template-aad.yaml index 07f530abb2a..4f42a4ae070 100644 --- a/templates/cluster-template-aad.yaml +++ b/templates/cluster-template-aad.yaml @@ -1,6 +1,8 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: + labels: + installCloudProviderAzureChart: "true" name: ${CLUSTER_NAME} namespace: default spec: diff --git a/templates/cluster-template-azure-bastion.yaml b/templates/cluster-template-azure-bastion.yaml index 98ffb2f540c..1658d2cfe61 100644 --- a/templates/cluster-template-azure-bastion.yaml +++ b/templates/cluster-template-azure-bastion.yaml @@ -1,6 +1,8 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: + labels: + installCloudProviderAzureChart: "true" name: ${CLUSTER_NAME} namespace: default spec: diff --git a/templates/cluster-template-azure-cni-v1.yaml b/templates/cluster-template-azure-cni-v1.yaml index 4de37fdcc5c..f64e657fb98 100644 --- 
a/templates/cluster-template-azure-cni-v1.yaml +++ b/templates/cluster-template-azure-cni-v1.yaml @@ -1,6 +1,8 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: + labels: + installCloudProviderAzureChart: "true" name: ${CLUSTER_NAME} namespace: default spec: diff --git a/templates/cluster-template-dual-stack.yaml b/templates/cluster-template-dual-stack.yaml index 1a72e1a0eca..f1eba4a7af0 100644 --- a/templates/cluster-template-dual-stack.yaml +++ b/templates/cluster-template-dual-stack.yaml @@ -3,6 +3,7 @@ kind: Cluster metadata: labels: cni: calico-dual-stack + installCloudProviderAzureChart: "true" name: ${CLUSTER_NAME} namespace: default spec: diff --git a/templates/cluster-template-edgezone.yaml b/templates/cluster-template-edgezone.yaml index 54b93177496..c7e8f67671f 100644 --- a/templates/cluster-template-edgezone.yaml +++ b/templates/cluster-template-edgezone.yaml @@ -1,6 +1,8 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: + labels: + installCloudProviderAzureChart: "true" name: ${CLUSTER_NAME} namespace: default spec: diff --git a/templates/cluster-template-ephemeral.yaml b/templates/cluster-template-ephemeral.yaml index 55ec76cf701..196388d4968 100644 --- a/templates/cluster-template-ephemeral.yaml +++ b/templates/cluster-template-ephemeral.yaml @@ -1,6 +1,8 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: + labels: + installCloudProviderAzureChart: "true" name: ${CLUSTER_NAME} namespace: default spec: diff --git a/templates/cluster-template-flatcar.yaml b/templates/cluster-template-flatcar.yaml index 2978263dab6..f270ed1be10 100644 --- a/templates/cluster-template-flatcar.yaml +++ b/templates/cluster-template-flatcar.yaml @@ -1,6 +1,8 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: + labels: + installCloudProviderAzureChart: "true" name: ${CLUSTER_NAME} namespace: default spec: diff --git a/templates/cluster-template-ipv6.yaml b/templates/cluster-template-ipv6.yaml index caa7a12f41b..e0d22cbf6df 100644 --- a/templates/cluster-template-ipv6.yaml +++ b/templates/cluster-template-ipv6.yaml @@ -1,6 +1,9 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: + labels: + cni: calico-ipv6 + installCloudProviderAzureChart: "true" name: ${CLUSTER_NAME} namespace: default spec: diff --git a/templates/cluster-template-machinepool-flex.yaml b/templates/cluster-template-machinepool-flex.yaml new file mode 100644 index 00000000000..9c13ab8a578 --- /dev/null +++ b/templates/cluster-template-machinepool-flex.yaml @@ -0,0 +1,699 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cni-windows: ${CLUSTER_NAME}-calico + containerd-logger: enabled + csi-proxy: enabled + windows: enabled + name: ${CLUSTER_NAME} + namespace: default +spec: + clusterNetwork: + pods: + cidrBlocks: + - 192.168.0.0/16 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: ${CLUSTER_NAME}-control-plane + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureCluster + name: ${CLUSTER_NAME} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureCluster +metadata: + name: ${CLUSTER_NAME} + namespace: default +spec: + additionalTags: + buildProvenance: ${BUILD_PROVENANCE} + creationTimestamp: ${TIMESTAMP} + jobName: ${JOB_NAME} + identityRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureClusterIdentity + name: ${CLUSTER_IDENTITY_NAME} + location: ${AZURE_LOCATION} + networkSpec: + subnets: + - name: 
control-plane-subnet + role: control-plane + - name: node-subnet + role: node + vnet: + name: ${AZURE_VNET_NAME:=${CLUSTER_NAME}-vnet} + resourceGroup: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}} + subscriptionID: ${AZURE_SUBSCRIPTION_ID} +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: ${CLUSTER_NAME}-control-plane + namespace: default +spec: + kubeadmConfigSpec: + clusterConfiguration: + apiServer: + extraArgs: + cloud-provider: external + timeoutForControlPlane: 20m + controllerManager: + extraArgs: + allocate-node-cidrs: "false" + cloud-provider: external + cluster-name: ${CLUSTER_NAME} + v: "4" + etcd: + local: + dataDir: /var/lib/etcddisk/etcd + extraArgs: + quota-backend-bytes: "8589934592" + diskSetup: + filesystems: + - device: /dev/disk/azure/scsi1/lun0 + extraOpts: + - -E + - lazy_itable_init=1,lazy_journal_init=1 + filesystem: ext4 + label: etcd_disk + - device: ephemeral0.1 + filesystem: ext4 + label: ephemeral0 + replaceFS: ntfs + partitions: + - device: /dev/disk/azure/scsi1/lun0 + layout: true + overwrite: false + tableType: gpt + files: + - contentFrom: + secret: + key: control-plane-azure.json + name: ${CLUSTER_NAME}-control-plane-azure-json + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" + initConfiguration: + nodeRegistration: + kubeletExtraArgs: + azure-container-registry-config: /etc/kubernetes/azure.json + cloud-provider: external + name: '{{ ds.meta_data["local_hostname"] }}' + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + azure-container-registry-config: /etc/kubernetes/azure.json + cloud-provider: external + name: '{{ ds.meta_data["local_hostname"] }}' + mounts: + - - LABEL=etcd_disk + - /var/lib/etcddisk + postKubeadmCommands: [] + preKubeadmCommands: [] + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureMachineTemplate + name: ${CLUSTER_NAME}-control-plane + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + version: ${KUBERNETES_VERSION} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureMachineTemplate +metadata: + name: ${CLUSTER_NAME}-control-plane + namespace: default +spec: + template: + spec: + dataDisks: + - diskSizeGB: 256 + lun: 0 + nameSuffix: etcddisk + osDisk: + diskSizeGB: 128 + osType: Linux + sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""} + vmSize: ${AZURE_CONTROL_PLANE_MACHINE_TYPE} +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachinePool +metadata: + name: ${CLUSTER_NAME}-mp-0 + namespace: default +spec: + clusterName: ${CLUSTER_NAME} + replicas: ${WORKER_MACHINE_COUNT} + template: + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfig + name: ${CLUSTER_NAME}-mp-0 + clusterName: ${CLUSTER_NAME} + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureMachinePool + name: ${CLUSTER_NAME}-mp-0 + version: ${KUBERNETES_VERSION} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureMachinePool +metadata: + name: ${CLUSTER_NAME}-mp-0 + namespace: default +spec: + location: ${AZURE_LOCATION} + orchestrationMode: Flexible + strategy: + rollingUpdate: + maxSurge: 0% + maxUnavailable: 0 + type: RollingUpdate + template: + osDisk: + diskSizeGB: 30 + managedDisk: + storageAccountType: Premium_LRS + osType: Linux + sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""} + vmExtensions: + - name: CustomScript + protectedSettings: + commandToExecute: | + #!/bin/sh + echo "This script is a no-op used for extension 
testing purposes ..." + touch test_file + publisher: Microsoft.Azure.Extensions + version: "2.1" + vmSize: ${AZURE_NODE_MACHINE_TYPE} +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfig +metadata: + name: ${CLUSTER_NAME}-mp-0 + namespace: default +spec: + files: + - contentFrom: + secret: + key: worker-node-azure.json + name: ${CLUSTER_NAME}-mp-0-azure-json + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + azure-container-registry-config: /etc/kubernetes/azure.json + cloud-provider: external + name: '{{ ds.meta_data["local_hostname"] }}' +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureClusterIdentity +metadata: + labels: + clusterctl.cluster.x-k8s.io/move-hierarchy: "true" + name: ${CLUSTER_IDENTITY_NAME} + namespace: default +spec: + allowedNamespaces: {} + clientID: ${AZURE_CLIENT_ID} + clientSecret: + name: ${AZURE_CLUSTER_IDENTITY_SECRET_NAME} + namespace: ${AZURE_CLUSTER_IDENTITY_SECRET_NAMESPACE} + tenantID: ${AZURE_TENANT_ID} + type: ServicePrincipal +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachinePool +metadata: + name: ${CLUSTER_NAME}-mp-win + namespace: default +spec: + clusterName: ${CLUSTER_NAME} + replicas: ${WINDOWS_WORKER_MACHINE_COUNT:-0} + template: + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfig + name: ${CLUSTER_NAME}-mp-win + clusterName: ${CLUSTER_NAME} + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureMachinePool + name: ${CLUSTER_NAME}-mp-win + version: ${KUBERNETES_VERSION} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureMachinePool +metadata: + annotations: + runtime: containerd + windowsServerVersion: ${WINDOWS_SERVER_VERSION:=""} + name: ${CLUSTER_NAME}-mp-win + namespace: default +spec: + location: ${AZURE_LOCATION} + orchestrationMode: Flexible + strategy: + rollingUpdate: + maxSurge: 0% + maxUnavailable: 0 + type: RollingUpdate + template: + osDisk: + diskSizeGB: 128 + managedDisk: + storageAccountType: Premium_LRS + osType: Windows + sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""} + vmSize: ${AZURE_NODE_MACHINE_TYPE} +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfig +metadata: + name: ${CLUSTER_NAME}-mp-win + namespace: default +spec: + files: + - contentFrom: + secret: + key: worker-node-azure.json + name: ${CLUSTER_NAME}-mp-win-azure-json + owner: root:root + path: c:/k/azure.json + permissions: "0644" + - content: Add-MpPreference -ExclusionProcess C:/opt/cni/bin/calico.exe + path: C:/defender-exclude-calico.ps1 + permissions: "0744" + joinConfiguration: + nodeRegistration: + criSocket: npipe:////./pipe/containerd-containerd + kubeletExtraArgs: + azure-container-registry-config: c:/k/azure.json + cloud-provider: external + pod-infra-container-image: mcr.microsoft.com/oss/kubernetes/pause:3.9 + name: '{{ ds.meta_data["local_hostname"] }}' + postKubeadmCommands: + - nssm set kubelet start SERVICE_AUTO_START + - powershell C:/defender-exclude-calico.ps1 + preKubeadmCommands: + - powershell c:/create-external-network.ps1 + users: + - groups: Administrators + name: capi + sshAuthorizedKeys: + - ${AZURE_SSH_PUBLIC_KEY:=""} +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: ${CLUSTER_NAME}-calico-windows + namespace: default +spec: + clusterSelector: + matchLabels: + cni-windows: ${CLUSTER_NAME}-calico + resources: + - kind: ConfigMap + name: 
cni-${CLUSTER_NAME}-calico-windows + strategy: ApplyOnce +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: csi-proxy + namespace: default +spec: + clusterSelector: + matchLabels: + csi-proxy: enabled + resources: + - kind: ConfigMap + name: csi-proxy-addon + strategy: ApplyOnce +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: containerd-logger-${CLUSTER_NAME} + namespace: default +spec: + clusterSelector: + matchLabels: + containerd-logger: enabled + resources: + - kind: ConfigMap + name: containerd-logger-${CLUSTER_NAME} + strategy: ApplyOnce +--- +apiVersion: v1 +data: + proxy: | + apiVersion: apps/v1 + kind: DaemonSet + metadata: + labels: + k8s-app: kube-proxy + name: kube-proxy-windows + namespace: kube-system + spec: + selector: + matchLabels: + k8s-app: kube-proxy-windows + template: + metadata: + labels: + k8s-app: kube-proxy-windows + spec: + serviceAccountName: kube-proxy + securityContext: + windowsOptions: + hostProcess: true + runAsUserName: "NT AUTHORITY\\system" + hostNetwork: true + containers: + - image: sigwindowstools/kube-proxy:${KUBERNETES_VERSION/+/_}-calico-hostprocess + args: ["$env:CONTAINER_SANDBOX_MOUNT_POINT/kube-proxy/start.ps1"] + workingDir: "$env:CONTAINER_SANDBOX_MOUNT_POINT/kube-proxy/" + name: kube-proxy + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: KUBEPROXY_PATH + valueFrom: + configMapKeyRef: + name: windows-kubeproxy-ci + key: KUBEPROXY_PATH + optional: true + volumeMounts: + - mountPath: /var/lib/kube-proxy + name: kube-proxy + nodeSelector: + kubernetes.io/os: windows + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - operator: Exists + volumes: + - configMap: + name: kube-proxy + name: kube-proxy + updateStrategy: + type: RollingUpdate + windows-cni: "# strictAffinity required for windows\napiVersion: crd.projectcalico.org/v1\nkind: + IPAMConfig\nmetadata:\n name: default\nspec:\n autoAllocateBlocks: true\n strictAffinity: + true\n---\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: calico-static-rules\n + \ namespace: calico-system\n labels:\n tier: node\n app: calico\ndata:\n + \ static-rules.json: |\n {\n \"Provider\": \"azure\",\n \"Version\": + \"0.1\",\n \"Rules\": [\n {\n \"Name\": \"EndpointPolicy\",\n + \ \"Rule\": {\n \"Id\": \"wireserver\",\n \"Type\": + \"ACL\",\n \"Protocol\": 6,\n \"Action\": \"Block\",\n + \ \"Direction\": \"Out\",\n \"RemoteAddresses\": \"168.63.129.16/32\",\n + \ \"RemotePorts\": \"80\",\n \"Priority\": 200,\n \"RuleType\": + \"Switch\"\n }\n }\n ]\n } \n---\nkind: ConfigMap\napiVersion: + v1\nmetadata:\n name: calico-config-windows\n namespace: calico-system\n labels:\n + \ tier: node\n app: calico\ndata:\n veth_mtu: \"1350\"\n \n cni_network_config: + |\n {\n \"name\": \"Calico\",\n \"cniVersion\": \"0.3.1\",\n \"plugins\": + [\n {\n \"windows_use_single_network\": true,\n \"type\": + \"calico\",\n \"mode\": \"vxlan\",\n \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n + \ \"nodename_file_optional\": true,\n \"log_file_path\": \"c:/cni.log\",\n + \ \"log_level\": \"debug\",\n\n \"vxlan_mac_prefix\": \"0E-2A\",\n + \ \"vxlan_vni\": 4096,\n \"mtu\": __CNI_MTU__,\n \"policy\": + {\n \"type\": \"k8s\"\n },\n\n \"log_level\": \"info\",\n\n + \ \"capabilities\": {\"dns\": true},\n \"DNS\": {\n \"Search\": + \ [\n \"svc.cluster.local\"\n ]\n },\n\n \"datastore_type\": + \"kubernetes\",\n\n 
\"kubernetes\": {\n \"kubeconfig\": \"__KUBECONFIG_FILEPATH__\"\n + \ },\n\n \"ipam\": {\n \"type\": \"calico-ipam\",\n + \ \"subnet\": \"usePodCidr\"\n },\n\n \"policies\": + \ [\n {\n \"Name\": \"EndpointPolicy\",\n \"Value\": + \ {\n \"Type\": \"OutBoundNAT\",\n \"ExceptionList\": + \ [\n \"__K8S_SERVICE_CIDR__\"\n ]\n }\n + \ },\n {\n \"Name\": \"EndpointPolicy\",\n + \ \"Value\": {\n \"Type\": \"SDNROUTE\",\n \"DestinationPrefix\": + \ \"__K8S_SERVICE_CIDR__\",\n \"NeedEncap\": true\n }\n + \ }\n ]\n }\n ]\n\n }\n---\napiVersion: apps/v1\nkind: + DaemonSet\nmetadata:\n name: calico-node-windows\n labels:\n tier: node\n + \ app: calico\n namespace: calico-system\nspec:\n selector:\n matchLabels:\n + \ app: calico\n template:\n metadata:\n labels:\n tier: node\n + \ app: calico\n spec:\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n + \ nodeSelectorTerms:\n - matchExpressions:\n - + key: kubernetes.io/os\n operator: In\n values:\n + \ - windows\n - key: kubernetes.io/arch\n + \ operator: In\n values:\n - + amd64\n securityContext:\n windowsOptions:\n hostProcess: + true\n runAsUserName: \"NT AUTHORITY\\\\system\"\n hostNetwork: + true\n serviceAccountName: calico-node\n tolerations:\n - operator: + Exists\n effect: NoSchedule\n # Mark the pod as a critical add-on + for rescheduling.\n - key: CriticalAddonsOnly\n operator: Exists\n + \ - effect: NoExecute\n operator: Exists\n initContainers:\n # + This container installs the CNI binaries\n # and CNI network config file + on each node.\n - name: install-cni\n image: sigwindowstools/calico-install:v3.25.0-hostprocess\n + \ args: [\"$env:CONTAINER_SANDBOX_MOUNT_POINT/calico/install.ps1\"]\n + \ imagePullPolicy: Always\n env:\n # Name of the CNI + config file to create.\n - name: CNI_CONF_NAME\n value: + \"10-calico.conflist\"\n # The CNI network config to install on each + node.\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n + \ name: calico-config-windows\n key: cni_network_config\n + \ # Set the hostname based on the k8s node name.\n - name: + KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: + spec.nodeName\n # CNI MTU Config variable\n - name: CNI_MTU\n + \ valueFrom:\n configMapKeyRef:\n name: + calico-config-windows\n key: veth_mtu\n # Prevents + the container from sleeping forever.\n - name: SLEEP\n value: + \"false\"\n - name: K8S_SERVICE_CIDR\n value: \"10.96.0.0/12\"\n + \ volumeMounts:\n - mountPath: /host/opt/cni/bin\n name: + cni-bin-dir\n - mountPath: /host/etc/cni/net.d\n name: + cni-net-dir\n - name: kubeadm-config\n mountPath: /etc/kubeadm-config/\n + \ securityContext:\n windowsOptions:\n hostProcess: + true\n runAsUserName: \"NT AUTHORITY\\\\system\"\n containers:\n + \ - name: calico-node-startup\n image: sigwindowstools/calico-node:v3.25.0-hostprocess\n + \ args: [\"$env:CONTAINER_SANDBOX_MOUNT_POINT/calico/node-service.ps1\"]\n + \ workingDir: \"$env:CONTAINER_SANDBOX_MOUNT_POINT/calico/\"\n imagePullPolicy: + Always\n volumeMounts:\n - name: calico-config-windows\n mountPath: + /etc/kube-calico-windows/\n env:\n - name: POD_NAME\n valueFrom:\n + \ fieldRef:\n apiVersion: v1\n fieldPath: + metadata.name\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n + \ apiVersion: v1\n fieldPath: metadata.namespace\n - + name: CNI_IPAM_TYPE\n value: \"calico-ipam\"\n - name: CALICO_NETWORKING_BACKEND\n + \ value: \"vxlan\"\n - name: KUBECONFIG\n value: \"C:/etc/cni/net.d/calico-kubeconfig\"\n + \ - name: VXLAN_VNI\n value: \"4096\"\n - name: calico-node-felix\n + \ image: 
sigwindowstools/calico-node:v3.25.0-hostprocess\n args: + [\"$env:CONTAINER_SANDBOX_MOUNT_POINT/calico/felix-service.ps1\"]\n imagePullPolicy: + Always\n workingDir: \"$env:CONTAINER_SANDBOX_MOUNT_POINT/calico/\"\n volumeMounts:\n + \ - name: calico-config-windows\n mountPath: /etc/kube-calico-windows/\n + \ - name: calico-static-rules\n mountPath: /calico/static-rules.json\n + \ subPath: static-rules.json\n env:\n - name: POD_NAME\n + \ valueFrom:\n fieldRef:\n apiVersion: v1\n fieldPath: + metadata.name\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n + \ apiVersion: v1\n fieldPath: metadata.namespace\n - + name: VXLAN_VNI\n value: \"4096\"\n - name: KUBECONFIG\n value: + \"C:/etc/cni/net.d/calico-kubeconfig\"\n volumes:\n - name: calico-config-windows\n + \ configMap:\n name: calico-config-windows\n - name: calico-static-rules\n + \ configMap:\n name: calico-static-rules\n # Used to install + CNI.\n - name: cni-bin-dir\n hostPath:\n path: /opt/cni/bin\n + \ - name: cni-net-dir\n hostPath:\n path: /etc/cni/net.d\n + \ - name: kubeadm-config\n configMap:\n name: kubeadm-config\n---\napiVersion: + apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n name: ipamconfigs.crd.projectcalico.org\nspec:\n + \ group: crd.projectcalico.org\n names:\n kind: IPAMConfig\n listKind: + IPAMConfigList\n plural: ipamconfigs\n singular: ipamconfig\n preserveUnknownFields: + false\n scope: Cluster\n versions:\n - name: v1\n schema:\n openAPIV3Schema:\n + \ properties:\n apiVersion:\n description: 'APIVersion + defines the versioned schema of this representation\n of an object. + Servers should convert recognized schemas to the latest\n internal + value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'\n + \ type: string\n kind:\n description: 'Kind is a + string value representing the REST resource this\n object represents. + Servers may infer this from the endpoint the client\n submits requests + to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'\n + \ type: string\n metadata:\n type: object\n spec:\n + \ description: IPAMConfigSpec contains the specification for an IPAMConfig\n + \ resource.\n properties:\n autoAllocateBlocks:\n + \ type: boolean\n maxBlocksPerHost:\n description: + MaxBlocksPerHost, if non-zero, is the max number of blocks\n that + can be affine to each host.\n maximum: 2147483647\n minimum: + 0\n type: integer\n strictAffinity:\n type: + boolean\n required:\n - autoAllocateBlocks\n - + strictAffinity\n type: object\n type: object\n served: true\n + \ storage: true\nstatus:\n acceptedNames:\n kind: \"\"\n plural: \"\"\n + \ conditions: []\n storedVersions: []\n" +kind: ConfigMap +metadata: + annotations: + note: generated + labels: + type: generated + name: cni-${CLUSTER_NAME}-calico-windows + namespace: default +--- +apiVersion: v1 +data: + csi-proxy: | + apiVersion: apps/v1 + kind: DaemonSet + metadata: + labels: + k8s-app: csi-proxy + name: csi-proxy + namespace: kube-system + spec: + selector: + matchLabels: + k8s-app: csi-proxy + template: + metadata: + labels: + k8s-app: csi-proxy + spec: + nodeSelector: + "kubernetes.io/os": windows + securityContext: + windowsOptions: + hostProcess: true + runAsUserName: "NT AUTHORITY\\SYSTEM" + hostNetwork: true + containers: + - name: csi-proxy + image: ghcr.io/kubernetes-sigs/sig-windows/csi-proxy:v1.0.2 +kind: ConfigMap +metadata: + annotations: + note: generated + labels: + type: generated + name: csi-proxy-addon + namespace: default +--- +apiVersion: v1 +data: + containerd-windows-logger: | + apiVersion: apps/v1 + kind: DaemonSet + metadata: + labels: + k8s-app: containerd-logger + name: containerd-logger + namespace: kube-system + spec: + selector: + matchLabels: + k8s-app: containerd-logger + template: + metadata: + labels: + k8s-app: containerd-logger + spec: + securityContext: + windowsOptions: + hostProcess: true + runAsUserName: "NT AUTHORITY\\system" + hostNetwork: true + containers: + - image: ghcr.io/kubernetes-sigs/sig-windows/eventflow-logger:v0.1.0 + args: [ "config.json" ] + name: containerd-logger + imagePullPolicy: Always + volumeMounts: + - name: containerd-logger-config + mountPath: /config.json + subPath: config.json + nodeSelector: + kubernetes.io/os: windows + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - operator: Exists + volumes: + - configMap: + name: containerd-logger-config + name: containerd-logger-config + updateStrategy: + type: RollingUpdate + --- + kind: ConfigMap + apiVersion: v1 + metadata: + name: containerd-logger-config + namespace: kube-system + data: + config.json: | + { + "inputs": [ + { + "type": "ETW", + "sessionNamePrefix": "containerd", + "cleanupOldSessions": true, + "reuseExistingSession": true, + "providers": [ + { + "providerName": "Microsoft.Virtualization.RunHCS", + "providerGuid": "0B52781F-B24D-5685-DDF6-69830ED40EC3", + "level": "Verbose" + }, + { + "providerName": "ContainerD", + "providerGuid": "2acb92c0-eb9b-571a-69cf-8f3410f383ad", + "level": "Verbose" + } + ] + } + ], + "filters": [ + { + "type": "drop", + "include": "ProviderName == Microsoft.Virtualization.RunHCS && name == Stats && hasnoproperty error" + }, + { + "type": "drop", + "include": "ProviderName == Microsoft.Virtualization.RunHCS && name == hcsshim::LayerID && hasnoproperty error" + }, + { + "type": "drop", + "include": "ProviderName == Microsoft.Virtualization.RunHCS && name == hcsshim::NameToGuid && hasnoproperty error" + 
}, + { + "type": "drop", + "include": "ProviderName == Microsoft.Virtualization.RunHCS && name == containerd.task.v2.Task.Stats && hasnoproperty error" + }, + { + "type": "drop", + "include": "ProviderName == Microsoft.Virtualization.RunHCS && name == containerd.task.v2.Task.State && hasnoproperty error" + }, + { + "type": "drop", + "include": "ProviderName == Microsoft.Virtualization.RunHCS && name == HcsGetProcessProperties && hasnoproperty error" + }, + { + "type": "drop", + "include": "ProviderName == Microsoft.Virtualization.RunHCS && name == HcsGetComputeSystemProperties && hasnoproperty error" + } + ], + "outputs": [ + { + "type": "StdOutput" + } + ], + "schemaVersion": "2016-08-11" + } +kind: ConfigMap +metadata: + annotations: + note: generated + labels: + type: generated + name: containerd-logger-${CLUSTER_NAME} + namespace: default diff --git a/templates/cluster-template-machinepool-windows.yaml b/templates/cluster-template-machinepool-windows.yaml index d691f0bdf97..22671e03038 100644 --- a/templates/cluster-template-machinepool-windows.yaml +++ b/templates/cluster-template-machinepool-windows.yaml @@ -4,6 +4,7 @@ metadata: labels: cni-windows: calico csi-proxy: enabled + installCloudProviderAzureChart: "true" windows: enabled name: ${CLUSTER_NAME} namespace: default diff --git a/templates/cluster-template-machinepool.yaml b/templates/cluster-template-machinepool.yaml index b4f9bd97f1e..347d22480ff 100644 --- a/templates/cluster-template-machinepool.yaml +++ b/templates/cluster-template-machinepool.yaml @@ -1,6 +1,8 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: + labels: + installCloudProviderAzureChart: "true" name: ${CLUSTER_NAME} namespace: default spec: diff --git a/templates/cluster-template-nvidia-gpu.yaml b/templates/cluster-template-nvidia-gpu.yaml index bb758313b47..1320d0a3653 100644 --- a/templates/cluster-template-nvidia-gpu.yaml +++ b/templates/cluster-template-nvidia-gpu.yaml @@ -1,6 +1,8 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: + labels: + installCloudProviderAzureChart: "true" name: ${CLUSTER_NAME} namespace: default spec: diff --git a/templates/cluster-template-private.yaml b/templates/cluster-template-private.yaml index 252b03eab1a..9d340b00200 100644 --- a/templates/cluster-template-private.yaml +++ b/templates/cluster-template-private.yaml @@ -1,6 +1,8 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: + labels: + installCloudProviderAzureChart: "true" name: ${CLUSTER_NAME} namespace: default spec: diff --git a/templates/cluster-template-windows.yaml b/templates/cluster-template-windows.yaml index c84d2a1b6af..bd234c01bd9 100644 --- a/templates/cluster-template-windows.yaml +++ b/templates/cluster-template-windows.yaml @@ -4,6 +4,7 @@ metadata: labels: cni-windows: calico csi-proxy: enabled + installCloudProviderAzureChart: "true" windows: enabled name: ${CLUSTER_NAME} namespace: default diff --git a/templates/cluster-template.yaml b/templates/cluster-template.yaml index 270dd7b3758..b4c961cab77 100644 --- a/templates/cluster-template.yaml +++ b/templates/cluster-template.yaml @@ -1,6 +1,8 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: + labels: + installCloudProviderAzureChart: "true" name: ${CLUSTER_NAME} namespace: default spec: diff --git a/templates/flavors/base/cluster-template.yaml b/templates/flavors/base/cluster-template.yaml index 95f645b7153..120f4cc27a9 100644 --- a/templates/flavors/base/cluster-template.yaml +++ b/templates/flavors/base/cluster-template.yaml @@ -3,6 +3,8 @@ 
apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: name: ${CLUSTER_NAME} + labels: + installCloudProviderAzureChart: "true" spec: clusterNetwork: pods: diff --git a/templates/flavors/ipv6/patches/ipv6.yaml b/templates/flavors/ipv6/patches/ipv6.yaml index ea0eff7286d..1731bfa559a 100644 --- a/templates/flavors/ipv6/patches/ipv6.yaml +++ b/templates/flavors/ipv6/patches/ipv6.yaml @@ -3,6 +3,8 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: name: ${CLUSTER_NAME} + labels: + cni: "calico-ipv6" spec: clusterNetwork: pods: diff --git a/templates/test/ci/cluster-template-prow-azure-cni-v1.yaml b/templates/test/ci/cluster-template-prow-azure-cni-v1.yaml index b6f02f0153c..ea60dcfbfca 100644 --- a/templates/test/ci/cluster-template-prow-azure-cni-v1.yaml +++ b/templates/test/ci/cluster-template-prow-azure-cni-v1.yaml @@ -1,6 +1,8 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: + labels: + installCloudProviderAzureChart: "true" name: ${CLUSTER_NAME} namespace: default spec: diff --git a/templates/test/ci/cluster-template-prow-ci-version-windows-containerd-2022.yaml b/templates/test/ci/cluster-template-prow-ci-version-windows-containerd-2022.yaml index 98a029b0438..92ddcfafed2 100644 --- a/templates/test/ci/cluster-template-prow-ci-version-windows-containerd-2022.yaml +++ b/templates/test/ci/cluster-template-prow-ci-version-windows-containerd-2022.yaml @@ -5,6 +5,7 @@ metadata: cni-windows: ${CLUSTER_NAME}-calico containerd-logger: enabled csi-proxy: enabled + installCloudProviderAzureChart: "true" metrics-server: enabled name: ${CLUSTER_NAME} namespace: default diff --git a/templates/test/ci/cluster-template-prow-ci-version.yaml b/templates/test/ci/cluster-template-prow-ci-version.yaml index 94159b73bc9..daa1c476a51 100644 --- a/templates/test/ci/cluster-template-prow-ci-version.yaml +++ b/templates/test/ci/cluster-template-prow-ci-version.yaml @@ -5,6 +5,7 @@ metadata: cni-windows: ${CLUSTER_NAME}-calico containerd-logger: enabled csi-proxy: enabled + installCloudProviderAzureChart: "true" metrics-server: enabled name: ${CLUSTER_NAME} namespace: default diff --git a/templates/test/ci/cluster-template-prow-custom-vnet.yaml b/templates/test/ci/cluster-template-prow-custom-vnet.yaml index 877f8c4521e..6a2cc05f1c4 100644 --- a/templates/test/ci/cluster-template-prow-custom-vnet.yaml +++ b/templates/test/ci/cluster-template-prow-custom-vnet.yaml @@ -1,6 +1,8 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: + labels: + installCloudProviderAzureChart: "true" name: ${CLUSTER_NAME} namespace: default spec: diff --git a/templates/test/ci/cluster-template-prow-dual-stack.yaml b/templates/test/ci/cluster-template-prow-dual-stack.yaml index cd735e156ed..9c105e52945 100644 --- a/templates/test/ci/cluster-template-prow-dual-stack.yaml +++ b/templates/test/ci/cluster-template-prow-dual-stack.yaml @@ -3,6 +3,7 @@ kind: Cluster metadata: labels: cni: calico-dual-stack + installCloudProviderAzureChart: "true" name: ${CLUSTER_NAME} namespace: default spec: diff --git a/templates/test/ci/cluster-template-prow-edgezone.yaml b/templates/test/ci/cluster-template-prow-edgezone.yaml index c15c46bab89..d61aa39c228 100644 --- a/templates/test/ci/cluster-template-prow-edgezone.yaml +++ b/templates/test/ci/cluster-template-prow-edgezone.yaml @@ -1,6 +1,8 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: + labels: + installCloudProviderAzureChart: "true" name: ${CLUSTER_NAME} namespace: default spec: diff --git 
a/templates/test/ci/cluster-template-prow-flatcar.yaml b/templates/test/ci/cluster-template-prow-flatcar.yaml index 38a0c5ddf31..00fe8060c9f 100644 --- a/templates/test/ci/cluster-template-prow-flatcar.yaml +++ b/templates/test/ci/cluster-template-prow-flatcar.yaml @@ -1,6 +1,8 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: + labels: + installCloudProviderAzureChart: "true" name: ${CLUSTER_NAME} namespace: default spec: diff --git a/templates/test/ci/cluster-template-prow-intree-cloud-provider-machine-pool.yaml b/templates/test/ci/cluster-template-prow-intree-cloud-provider-machine-pool.yaml index 3a1ee843e3b..0c2cf395e36 100644 --- a/templates/test/ci/cluster-template-prow-intree-cloud-provider-machine-pool.yaml +++ b/templates/test/ci/cluster-template-prow-intree-cloud-provider-machine-pool.yaml @@ -5,6 +5,7 @@ metadata: cni-windows: ${CLUSTER_NAME}-calico containerd-logger: enabled csi-proxy: enabled + installCloudProviderAzureChart: "true" windows: enabled name: ${CLUSTER_NAME} namespace: default diff --git a/templates/test/ci/cluster-template-prow-intree-cloud-provider.yaml b/templates/test/ci/cluster-template-prow-intree-cloud-provider.yaml index f31151ab360..62750cb1423 100644 --- a/templates/test/ci/cluster-template-prow-intree-cloud-provider.yaml +++ b/templates/test/ci/cluster-template-prow-intree-cloud-provider.yaml @@ -5,6 +5,7 @@ metadata: cni-windows: ${CLUSTER_NAME}-calico containerd-logger: enabled csi-proxy: enabled + installCloudProviderAzureChart: "true" name: ${CLUSTER_NAME} namespace: default spec: diff --git a/templates/test/ci/cluster-template-prow-ipv6.yaml b/templates/test/ci/cluster-template-prow-ipv6.yaml index 378c835f9b8..9c2aeedcf43 100644 --- a/templates/test/ci/cluster-template-prow-ipv6.yaml +++ b/templates/test/ci/cluster-template-prow-ipv6.yaml @@ -1,6 +1,9 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: + labels: + cni: calico-ipv6 + installCloudProviderAzureChart: "true" name: ${CLUSTER_NAME} namespace: default spec: diff --git a/templates/test/ci/cluster-template-prow-machine-pool-ci-version.yaml b/templates/test/ci/cluster-template-prow-machine-pool-ci-version.yaml index 7390123c805..32ba39e4b46 100644 --- a/templates/test/ci/cluster-template-prow-machine-pool-ci-version.yaml +++ b/templates/test/ci/cluster-template-prow-machine-pool-ci-version.yaml @@ -5,6 +5,7 @@ metadata: cni-windows: ${CLUSTER_NAME}-calico containerd-logger: enabled csi-proxy: enabled + installCloudProviderAzureChart: "true" windows: enabled name: ${CLUSTER_NAME} namespace: default diff --git a/templates/test/ci/cluster-template-prow-machine-pool-flex.yaml b/templates/test/ci/cluster-template-prow-machine-pool-flex.yaml index d48450b8c5c..0ab26e08e58 100644 --- a/templates/test/ci/cluster-template-prow-machine-pool-flex.yaml +++ b/templates/test/ci/cluster-template-prow-machine-pool-flex.yaml @@ -5,6 +5,7 @@ metadata: cni-windows: ${CLUSTER_NAME}-calico containerd-logger: enabled csi-proxy: enabled + installCloudProviderAzureChart: "true" windows: enabled name: ${CLUSTER_NAME} namespace: default diff --git a/templates/test/ci/cluster-template-prow-machine-pool.yaml b/templates/test/ci/cluster-template-prow-machine-pool.yaml index dfa85452fc4..e6d3c4a8c04 100644 --- a/templates/test/ci/cluster-template-prow-machine-pool.yaml +++ b/templates/test/ci/cluster-template-prow-machine-pool.yaml @@ -5,6 +5,7 @@ metadata: cni-windows: ${CLUSTER_NAME}-calico containerd-logger: enabled csi-proxy: enabled + installCloudProviderAzureChart: "true" windows: 
enabled name: ${CLUSTER_NAME} namespace: default diff --git a/templates/test/ci/cluster-template-prow-nvidia-gpu.yaml b/templates/test/ci/cluster-template-prow-nvidia-gpu.yaml index a79d306a73c..cc989650567 100644 --- a/templates/test/ci/cluster-template-prow-nvidia-gpu.yaml +++ b/templates/test/ci/cluster-template-prow-nvidia-gpu.yaml @@ -1,6 +1,8 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: + labels: + installCloudProviderAzureChart: "true" name: ${CLUSTER_NAME} namespace: default spec: diff --git a/templates/test/ci/cluster-template-prow-private.yaml b/templates/test/ci/cluster-template-prow-private.yaml index ad9f070ff2b..8453a12ba28 100644 --- a/templates/test/ci/cluster-template-prow-private.yaml +++ b/templates/test/ci/cluster-template-prow-private.yaml @@ -3,6 +3,7 @@ kind: Cluster metadata: labels: cni: ${CLUSTER_NAME}-calico + installCloudProviderAzureChart: "true" name: ${CLUSTER_NAME} namespace: default spec: diff --git a/templates/test/ci/cluster-template-prow-workload-identity.yaml b/templates/test/ci/cluster-template-prow-workload-identity.yaml index fe261dd09fb..296a4be38c7 100644 --- a/templates/test/ci/cluster-template-prow-workload-identity.yaml +++ b/templates/test/ci/cluster-template-prow-workload-identity.yaml @@ -1,6 +1,8 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: + labels: + installCloudProviderAzureChart: "true" name: ${CLUSTER_NAME} namespace: default spec: diff --git a/templates/test/ci/cluster-template-prow.yaml b/templates/test/ci/cluster-template-prow.yaml index bf0b7ed61dd..7ae09d205ca 100644 --- a/templates/test/ci/cluster-template-prow.yaml +++ b/templates/test/ci/cluster-template-prow.yaml @@ -5,6 +5,7 @@ metadata: cni-windows: ${CLUSTER_NAME}-calico containerd-logger: enabled csi-proxy: enabled + installCloudProviderAzureChart: "true" name: ${CLUSTER_NAME} namespace: default spec: diff --git a/templates/test/dev/cluster-template-custom-builds-machine-pool.yaml b/templates/test/dev/cluster-template-custom-builds-machine-pool.yaml index f02c9865c56..ce5052e1fb2 100644 --- a/templates/test/dev/cluster-template-custom-builds-machine-pool.yaml +++ b/templates/test/dev/cluster-template-custom-builds-machine-pool.yaml @@ -5,6 +5,7 @@ metadata: cni-windows: ${CLUSTER_NAME}-calico containerd-logger: enabled csi-proxy: enabled + installCloudProviderAzureChart: "true" windows: enabled name: ${CLUSTER_NAME} namespace: default diff --git a/templates/test/dev/cluster-template-custom-builds.yaml b/templates/test/dev/cluster-template-custom-builds.yaml index cc8604c2616..5674bd4af60 100644 --- a/templates/test/dev/cluster-template-custom-builds.yaml +++ b/templates/test/dev/cluster-template-custom-builds.yaml @@ -5,6 +5,7 @@ metadata: cni-windows: ${CLUSTER_NAME}-calico containerd-logger: enabled csi-proxy: enabled + installCloudProviderAzureChart: "true" metrics-server: enabled name: ${CLUSTER_NAME} namespace: default diff --git a/test/e2e/azure_clusterproxy.go b/test/e2e/azure_clusterproxy.go index b34c08c0695..4617d053dba 100644 --- a/test/e2e/azure_clusterproxy.go +++ b/test/e2e/azure_clusterproxy.go @@ -44,6 +44,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/kubectl/pkg/describe" "k8s.io/utils/ptr" + addonsv1alpha1 "sigs.k8s.io/cluster-api-addon-provider-helm/api/v1alpha1" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" azureutil "sigs.k8s.io/cluster-api-provider-azure/util/azure" @@ -73,6 +74,7 @@ func initScheme() *runtime.Scheme 
{ Expect(infrav1.AddToScheme(scheme)).To(Succeed()) Expect(infrav1exp.AddToScheme(scheme)).To(Succeed()) Expect(expv1.AddToScheme(scheme)).To(Succeed()) + Expect(addonsv1alpha1.AddToScheme(scheme)).To(Succeed()) // Add aadpodidentity v1 to the scheme. aadPodIdentityGroupVersion := schema.GroupVersion{Group: aadpodv1.GroupName, Version: "v1"} scheme.AddKnownTypes(aadPodIdentityGroupVersion, diff --git a/test/e2e/azure_privatecluster.go b/test/e2e/azure_privatecluster.go index 4e034352867..bf72ea74f9b 100644 --- a/test/e2e/azure_privatecluster.go +++ b/test/e2e/azure_privatecluster.go @@ -95,6 +95,7 @@ func AzurePrivateClusterSpec(ctx context.Context, inputGetter func() AzurePrivat ClusterProxy: publicClusterProxy, ClusterctlConfigPath: input.ClusterctlConfigPath, InfrastructureProviders: input.E2EConfig.InfrastructureProviders(), + AddonProviders: input.E2EConfig.AddonProviders(), LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.ClusterName), }, input.E2EConfig.GetIntervals(specName, "wait-controllers")...) diff --git a/test/e2e/azure_selfhosted.go b/test/e2e/azure_selfhosted.go index b1bb9c1ca09..2b06c5e5dfb 100644 --- a/test/e2e/azure_selfhosted.go +++ b/test/e2e/azure_selfhosted.go @@ -134,6 +134,7 @@ func SelfHostedSpec(ctx context.Context, inputGetter func() SelfHostedSpecInput) ClusterProxy: selfHostedClusterProxy, ClusterctlConfigPath: input.ClusterctlConfigPath, InfrastructureProviders: input.E2EConfig.InfrastructureProviders(), + AddonProviders: input.E2EConfig.AddonProviders(), LogFolder: filepath.Join(input.ArtifactFolder, "clusters", cluster.Name), }, input.E2EConfig.GetIntervals(specName, "wait-controllers")...) diff --git a/test/e2e/azure_test.go b/test/e2e/azure_test.go index 3a564228220..bc9552c8d39 100644 --- a/test/e2e/azure_test.go +++ b/test/e2e/azure_test.go @@ -25,16 +25,20 @@ import ( "os" "time" + "github.com/drone/envsubst/v2" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/yaml" + addonsv1alpha1 "sigs.k8s.io/cluster-api-addon-provider-helm/api/v1alpha1" clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" capi_e2e "sigs.k8s.io/cluster-api/test/e2e" "sigs.k8s.io/cluster-api/test/framework/clusterctl" "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/controller-runtime/pkg/client" ) var _ = Describe("Workload cluster creation", func() { @@ -76,6 +80,17 @@ var _ = Describe("Workload cluster creation", func() { namespace, cancelWatches, err = setupSpecNamespace(ctx, clusterNamePrefix, bootstrapClusterProxy, artifactFolder) Expect(err).NotTo(HaveOccurred()) + By("Initialize bootstrap client to install add-ons") + bootstrapClient := bootstrapClusterProxy.GetClient() + Expect(bootstrapClient).NotTo(BeNil()) + + By("Creating cloud-provider-azure HelmChartProxy") + path := cloudProviderAzurePath + if useCIArtifacts { + path = cloudProviderAzureCIPath + } + createHelmChartProxyFromFilePath(bootstrapClient, path, namespace.Name) + result = new(clusterctl.ApplyClusterTemplateAndWaitResult) spClientSecret := os.Getenv(AzureClientSecret) @@ -1056,3 +1071,27 @@ var _ = Describe("Workload cluster creation", func() { }) }) }) + +func createHelmChartProxyFromFilePath(client client.Client, path string, namespace string) { + content, err := os.ReadFile(path) + Expect(err).NotTo(HaveOccurred()) + + result, err := envsubst.EvalEnv(string(content)) + Expect(err).NotTo(HaveOccurred()) + content = []byte(result) + + Logf("HelmChartProxy content:\n %s", string(content)) + + var helmChartProxy addonsv1alpha1.HelmChartProxy + err = yaml.Unmarshal(content, &helmChartProxy) + Expect(err).NotTo(HaveOccurred()) + + helmChartProxy.Namespace = namespace + + Logf("HelmChartProxy object is %+v", helmChartProxy) + + if err := client.Create(context.Background(), &helmChartProxy); err != nil { + Logf("Failed to create HelmChartProxy: %v", err) + Expect(err).NotTo(HaveOccurred()) + } +} diff --git a/test/e2e/capi_test.go b/test/e2e/capi_test.go index 9f0eae27124..1996c392f7f 100644 --- a/test/e2e/capi_test.go +++ b/test/e2e/capi_test.go @@ -241,6 +241,7 @@ var _ = Describe("Running the Cluster API E2E tests", func() { InitWithBootstrapProviders: []string{"kubeadm:v1.0.5"}, InitWithControlPlaneProviders: []string{"kubeadm:v1.0.5"}, InitWithInfrastructureProviders: []string{"azure:v1.0.2"}, + // Note: this probably won't work with CAAPH since the Helm install only works with CAPI/clusterctl v1.5.0. } }) }) diff --git a/test/e2e/cloud-provider-azure.go b/test/e2e/cloud-provider-azure.go index 34848f06817..78b10251d55 100644 --- a/test/e2e/cloud-provider-azure.go +++ b/test/e2e/cloud-provider-azure.go @@ -21,62 +21,17 @@ package e2e import ( "context" - "fmt" - "os" - "strings" . 
"github.com/onsi/ginkgo/v2" "sigs.k8s.io/cluster-api/test/framework/clusterctl" ) const ( - cloudProviderAzureHelmRepoURL = "https://raw.githubusercontent.com/kubernetes-sigs/cloud-provider-azure/master/helm/repo" - cloudProviderAzureChartName = "cloud-provider-azure" - cloudProviderAzureHelmReleaseName = "cloud-provider-azure-oot" azureDiskCSIDriverHelmRepoURL = "https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts" azureDiskCSIDriverChartName = "azuredisk-csi-driver" azureDiskCSIDriverHelmReleaseName = "azuredisk-csi-driver-oot" ) -// InstallCalicoAndCloudProviderAzureHelmChart installs the official cloud-provider-azure helm chart -// and validates that expected pods exist and are Ready. -func InstallCalicoAndCloudProviderAzureHelmChart(ctx context.Context, input clusterctl.ApplyCustomClusterTemplateAndWaitInput, cidrBlocks []string, hasWindows bool) { - specName := "cloud-provider-azure-install" - By("Installing cloud-provider-azure components via helm") - options := &HelmOptions{ - Values: []string{ - fmt.Sprintf("infra.clusterName=%s", input.ClusterName), - "cloudControllerManager.logVerbosity=4", - }, - StringValues: []string{fmt.Sprintf("cloudControllerManager.clusterCIDR=%s", strings.Join(cidrBlocks, `\,`))}, - } - // If testing a CI version of Kubernetes, use CCM and CNM images built from source. - if useCIArtifacts || usePRArtifacts { - options.Values = append(options.Values, fmt.Sprintf("cloudControllerManager.imageName=%s", os.Getenv("CCM_IMAGE_NAME"))) - options.Values = append(options.Values, fmt.Sprintf("cloudNodeManager.imageName=%s", os.Getenv("CNM_IMAGE_NAME"))) - options.Values = append(options.Values, fmt.Sprintf("cloudControllerManager.imageRepository=%s", os.Getenv("IMAGE_REGISTRY"))) - options.Values = append(options.Values, fmt.Sprintf("cloudNodeManager.imageRepository=%s", os.Getenv("IMAGE_REGISTRY"))) - options.StringValues = append(options.StringValues, fmt.Sprintf("cloudControllerManager.imageTag=%s", os.Getenv("IMAGE_TAG_CCM"))) - options.StringValues = append(options.StringValues, fmt.Sprintf("cloudNodeManager.imageTag=%s", os.Getenv("IMAGE_TAG_CNM"))) - } - - if strings.Contains(input.ClusterName, "flatcar") { - options.StringValues = append(options.StringValues, "cloudControllerManager.caCertDir=/usr/share/ca-certificates") - } - - clusterProxy := input.ClusterProxy.GetWorkloadCluster(ctx, input.Namespace, input.ClusterName) - InstallHelmChart(ctx, clusterProxy, defaultNamespace, cloudProviderAzureHelmRepoURL, cloudProviderAzureChartName, cloudProviderAzureHelmReleaseName, options, "") - - // We do this before waiting for the pods to be ready because there is a co-dependency between CNI (nodes ready) and cloud-provider being initialized. - InstallCNI(ctx, input, cidrBlocks, hasWindows) - - By("Waiting for Ready cloud-controller-manager deployment pods") - for _, d := range []string{"cloud-controller-manager"} { - waitInput := GetWaitForDeploymentsAvailableInput(ctx, clusterProxy, d, kubesystem, specName) - WaitForDeploymentsAvailable(ctx, waitInput, e2eConfig.GetIntervals(specName, "wait-deployment")...) 
diff --git a/test/e2e/cni.go b/test/e2e/cni.go
index 6c57112e2d7..b5d037a831d 100644
--- a/test/e2e/cni.go
+++ b/test/e2e/cni.go
@@ -67,7 +67,7 @@ func InstallCNIManifest(ctx context.Context, input clusterctl.ApplyCustomCluster
 func InstallCalicoHelmChart(ctx context.Context, input clusterctl.ApplyCustomClusterTemplateAndWaitInput, cidrBlocks []string, hasWindows bool) {
 	specName := "calico-install"

-	By("Installing Calico CNI via helm")
+	By("Installing Calico via Helm")
 	values := getCalicoValues(cidrBlocks)
 	clusterProxy := input.ClusterProxy.GetWorkloadCluster(ctx, input.Namespace, input.ClusterName)
 	InstallHelmChart(ctx, clusterProxy, calicoOperatorNamespace, calicoHelmChartRepoURL, calicoHelmChartName, calicoHelmReleaseName, values, os.Getenv(CalicoVersion))
diff --git a/test/e2e/common.go b/test/e2e/common.go
index 30674d3e79b..ba4c14db128 100644
--- a/test/e2e/common.go
+++ b/test/e2e/common.go
@@ -275,12 +275,8 @@ func EnsureControlPlaneInitialized(ctx context.Context, input clusterctl.ApplyCu
 	}, input.WaitForControlPlaneIntervals...).Should(Succeed(), "API Server was not reachable in time")

 	_, hasWindows := cluster.Labels["cni-windows"]
-	if kubeadmControlPlane.Spec.KubeadmConfigSpec.ClusterConfiguration.ControllerManager.ExtraArgs["cloud-provider"] != "azure" {
-		// There is a co-dependency between cloud-provider and CNI so we install both together if cloud-provider is external.
-		InstallCalicoAndCloudProviderAzureHelmChart(ctx, input, cluster.Spec.ClusterNetwork.Pods.CIDRBlocks, hasWindows)
-	} else {
-		InstallCNI(ctx, input, cluster.Spec.ClusterNetwork.Pods.CIDRBlocks, hasWindows)
-	}
+	InstallCNI(ctx, input, cluster.Spec.ClusterNetwork.Pods.CIDRBlocks, hasWindows)
+
 	controlPlane := discoveryAndWaitForControlPlaneInitialized(ctx, input, result)
 	InstallAzureDiskCSIDriverHelmChart(ctx, input, hasWindows)
 	result.ControlPlane = controlPlane
diff --git a/test/e2e/config/azure-dev.yaml b/test/e2e/config/azure-dev.yaml
index 699749effab..0e163b16dd1 100644
--- a/test/e2e/config/azure-dev.yaml
+++ b/test/e2e/config/azure-dev.yaml
@@ -9,6 +9,8 @@ images:
     loadBehavior: tryLoad
   - name: registry.k8s.io/cluster-api/kubeadm-control-plane-controller:v1.5.1
     loadBehavior: tryLoad
+  - name: registry.k8s.io/cluster-api-helm/cluster-api-helm-controller:v0.1.0-alpha.9
+    loadBehavior: tryLoad

 providers:
   - name: cluster-api
@@ -144,6 +146,21 @@ providers:
       - old: "--v=0"
        new: "--v=2"

+  - name: helm
+    type: AddonProvider
+    versions:
+      - name: v0.1.0-alpha.9
+        value: https://github.com/kubernetes-sigs/cluster-api-addon-provider-helm/releases/download/v0.1.0-alpha.9/addon-components.yaml
+        type: url
+        contract: v1beta1
+        files:
+          - sourcePath: "../data/shared/v1beta1_addon_provider/metadata.yaml"
+        replacements:
+          - old: "imagePullPolicy: Always"
+            new: "imagePullPolicy: IfNotPresent"
+          - old: "image: registry.k8s.io/cluster-api-helm/cluster-api-helm-controller:v0.1.0-alpha.9"
+            new: "image: gcr.io/k8s-staging-cluster-api-helm/cluster-api-helm-controller:v0.1.0-alpha.9"
+
 variables:
   AKS_KUBERNETES_VERSION: "latest"
   AKS_KUBERNETES_VERSION_UPGRADE_FROM: "latest-1"
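The files: entry above points clusterctl at the new metadata.yaml added in the next diff; clusterctl reads it to learn which CAPI API contract each release series of the helm AddonProvider implements before applying addon-components.yaml. A small sketch of what that file conveys follows; the struct types here are local to the sketch and are not clusterctl's own metadata types.

package main

import (
    "fmt"

    "sigs.k8s.io/yaml"
)

// Local illustrative types mirroring the shape of a clusterctl provider metadata file.
type releaseSeries struct {
    Major    int    `json:"major"`
    Minor    int    `json:"minor"`
    Contract string `json:"contract"`
}

type metadata struct {
    APIVersion    string          `json:"apiVersion"`
    ReleaseSeries []releaseSeries `json:"releaseSeries"`
}

func main() {
    raw := `apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3
releaseSeries:
  - major: 0
    minor: 1
    contract: v1beta1`

    var m metadata
    if err := yaml.Unmarshal([]byte(raw), &m); err != nil {
        panic(err)
    }
    // v0.1.x of the addon provider (including v0.1.0-alpha.9) implements the v1beta1 contract.
    fmt.Printf("series v%d.%d implements %s\n", m.ReleaseSeries[0].Major, m.ReleaseSeries[0].Minor, m.ReleaseSeries[0].Contract)
}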
diff --git a/test/e2e/data/shared/v1beta1_addon_provider/metadata.yaml b/test/e2e/data/shared/v1beta1_addon_provider/metadata.yaml
new file mode 100644
index 00000000000..a087e998608
--- /dev/null
+++ b/test/e2e/data/shared/v1beta1_addon_provider/metadata.yaml
@@ -0,0 +1,5 @@
+apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3
+releaseSeries:
+  - major: 0
+    minor: 1
+    contract: v1beta1
diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go
index 07ab0336f5c..5e121e6f69f 100644
--- a/test/e2e/e2e_suite_test.go
+++ b/test/e2e/e2e_suite_test.go
@@ -42,6 +42,8 @@ import (

 func init() {
 	flag.StringVar(&configPath, "e2e.config", "", "path to the e2e config file")
+	flag.StringVar(&cloudProviderAzurePath, "e2e.cloud-provider-azure", "", "path to the cloud-provider-azure HelmChartProxy")
+	flag.StringVar(&cloudProviderAzureCIPath, "e2e.cloud-provider-azure-ci", "", "path to the cloud-provider-azure HelmChartProxy with CI artifacts")
 	flag.StringVar(&artifactFolder, "e2e.artifacts-folder", "", "folder where e2e test artifact should be stored")
 	flag.BoolVar(&useCIArtifacts, "kubetest.use-ci-artifacts", false, "use the latest build from the main branch of the Kubernetes repository. Set KUBERNETES_VERSION environment variable to latest-1.xx to use the build from 1.xx release branch.")
 	flag.BoolVar(&usePRArtifacts, "kubetest.use-pr-artifacts", false, "use the build from a PR of the Kubernetes repository")
@@ -78,6 +80,8 @@ var _ = SynchronizedBeforeSuite(func() []byte {
 	By("Initializing the bootstrap cluster")
 	initBootstrapCluster(bootstrapClusterProxy, e2eConfig, clusterctlConfigPath, artifactFolder)

+	// TODO: set this helm stuff up
+
 	// encode the e2e config into the byte array.
 	var configBuf bytes.Buffer
 	enc := gob.NewEncoder(&configBuf)
@@ -190,6 +194,7 @@ func initBootstrapCluster(bootstrapClusterProxy framework.ClusterProxy, config *
 		ClusterProxy:            bootstrapClusterProxy,
 		ClusterctlConfigPath:    clusterctlConfig,
 		InfrastructureProviders: config.InfrastructureProviders(),
+		AddonProviders:          config.AddonProviders(),
 		LogFolder:               filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()),
 	}, config.GetIntervals(bootstrapClusterProxy.GetName(), "wait-controllers")...)
 }
diff --git a/test/e2e/e2e_suite_vars.go b/test/e2e/e2e_suite_vars.go
index c39ce2376b1..221ff3e513b 100644
--- a/test/e2e/e2e_suite_vars.go
+++ b/test/e2e/e2e_suite_vars.go
@@ -76,4 +76,10 @@ var (

 	// usePRArtifacts specifies whether or not to use the build from a PR of the Kubernetes repository
 	usePRArtifacts bool
+
+	// cloudProviderAzurePath specifies the path to the cloud-provider-azure HelmChartProxy
+	cloudProviderAzurePath string
+
+	// cloudProviderAzureCIPath specifies the path to the cloud-provider-azure HelmChartProxy with CI artifacts enabled
+	cloudProviderAzureCIPath string
 )
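Beyond the Expect on client.Create, the suite leaves it to CAAPH to reconcile the HelmChartProxy onto matching workload clusters later in each spec. If that ever becomes a source of late, hard-to-diagnose failures, a small poll along these lines could be added next to createHelmChartProxyFromFilePath. This is a sketch only: it assumes HelmChartProxy carries CAPI-style conditions with a Ready summary (verify against the CAAPH version in use), the object name and intervals are placeholders for whatever metadata.name the template behind -e2e.cloud-provider-azure declares, and it uses packages the suite already imports (Gomega, controller-runtime client, addonsv1alpha1) plus sigs.k8s.io/cluster-api/api/v1beta1 and sigs.k8s.io/cluster-api/util/conditions.

// waitForHelmChartProxyReady polls the bootstrap cluster until the named HelmChartProxy
// exists and reports a Ready condition, surfacing CAAPH reconciliation problems early.
func waitForHelmChartProxyReady(ctx context.Context, c client.Client, namespace, name string) {
    Eventually(func(g Gomega) {
        hcp := &addonsv1alpha1.HelmChartProxy{}
        g.Expect(c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, hcp)).To(Succeed())
        // Assumes CAAPH sets a CAPI-style Ready summary condition on the object.
        g.Expect(conditions.IsTrue(hcp, clusterv1.ReadyCondition)).To(BeTrue())
    }, "10m", "10s").Should(Succeed())
}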