From 3a432fc23b9a4e32b462bdd4282a66d516c75faf Mon Sep 17 00:00:00 2001 From: Jon Huhn Date: Thu, 2 Nov 2023 11:22:00 -0500 Subject: [PATCH 01/10] workaround cloud provider chart federated token bug --- scripts/ci-entrypoint.sh | 1 + test/e2e/cloud-provider-azure.go | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/scripts/ci-entrypoint.sh b/scripts/ci-entrypoint.sh index 620061c1884..520e875410b 100755 --- a/scripts/ci-entrypoint.sh +++ b/scripts/ci-entrypoint.sh @@ -209,6 +209,7 @@ install_cloud_provider_azure() { --set cloudControllerManager.cloudConfig="${CLOUD_CONFIG}" \ --set cloudControllerManager.cloudConfigSecretName="${CONFIG_SECRET_NAME}" \ --set cloudControllerManager.logVerbosity="${CCM_LOG_VERBOSITY}" \ + --set-string cloudControllerManager.federatedTokenPath= \ --set-string cloudControllerManager.clusterCIDR="${CCM_CLUSTER_CIDR}" "${CCM_IMG_ARGS[@]}" || return 1 } diff --git a/test/e2e/cloud-provider-azure.go b/test/e2e/cloud-provider-azure.go index a976a239d37..af66128865c 100644 --- a/test/e2e/cloud-provider-azure.go +++ b/test/e2e/cloud-provider-azure.go @@ -48,7 +48,10 @@ func InstallCNIAndCloudProviderAzureHelmChart(ctx context.Context, input cluster fmt.Sprintf("infra.clusterName=%s", input.ClusterName), "cloudControllerManager.logVerbosity=4", }, - StringValues: []string{fmt.Sprintf("cloudControllerManager.clusterCIDR=%s", strings.Join(cidrBlocks, `\,`))}, + StringValues: []string{ + fmt.Sprintf("cloudControllerManager.clusterCIDR=%s", strings.Join(cidrBlocks, `\,`)), + "cloudControllerManager.federatedTokenPath=", + }, } // If testing a CI version of Kubernetes, use CCM and CNM images built from source. 
if useCIArtifacts || usePRArtifacts { From cb54e08e43b9b508bb974f0e4fcdf18ee610e79c Mon Sep 17 00:00:00 2001 From: Jont828 Date: Mon, 23 Oct 2023 17:13:15 -0400 Subject: [PATCH 02/10] Install azure disk CSI driver Helm chart with CAAPH --- Makefile | 2 +- hack/create-dev-cluster.sh | 2 +- .../azuredisk-csi-driver.yaml | 18 + .../cluster-template-prow-azure-cni-v1.yaml | 22 + ...r-template-prow-ci-version-dual-stack.yaml | 21 + ...cluster-template-prow-ci-version-ipv6.yaml | 21 + ...ow-ci-version-windows-containerd-2022.yaml | 1179 ----------------- .../ci/cluster-template-prow-ci-version.yaml | 21 + .../ci/cluster-template-prow-custom-vnet.yaml | 21 + .../ci/cluster-template-prow-dual-stack.yaml | 21 + .../ci/cluster-template-prow-edgezone.yaml | 21 + .../ci/cluster-template-prow-flatcar.yaml | 21 + ...ow-intree-cloud-provider-machine-pool.yaml | 21 + ...r-template-prow-intree-cloud-provider.yaml | 21 + .../test/ci/cluster-template-prow-ipv6.yaml | 21 + ...template-prow-machine-pool-ci-version.yaml | 21 + ...uster-template-prow-machine-pool-flex.yaml | 21 + .../cluster-template-prow-machine-pool.yaml | 21 + .../ci/cluster-template-prow-nvidia-gpu.yaml | 21 + .../ci/cluster-template-prow-private.yaml | 21 + .../ci/cluster-template-prow-topology.yaml | 21 + ...uster-template-prow-workload-identity.yaml | 21 + templates/test/ci/cluster-template-prow.yaml | 21 + .../cluster-label-azuredisk-csi-driver.yaml | 6 + .../ci/prow-azure-cni-v1/kustomization.yaml | 2 + .../ci/prow-custom-vnet/kustomization.yaml | 4 +- .../ci/prow-dual-stack/kustomization.yaml | 2 + .../test/ci/prow-edgezone/kustomization.yaml | 4 +- .../test/ci/prow-flatcar/kustomization.yaml | 2 + .../test/ci/prow-ipv6/kustomization.yaml | 3 + .../ci/prow-machine-pool/kustomization.yaml | 2 + .../ci/prow-nvidia-gpu/kustomization.yaml | 2 + .../test/ci/prow-private/kustomization.yaml | 2 + .../test/ci/prow-topology/kustomization.yaml | 2 + .../prow-workload-identity/kustomization.yaml | 2 + 
templates/test/ci/prow/kustomization.yaml | 2 + ...r-template-custom-builds-machine-pool.yaml | 21 + .../dev/cluster-template-custom-builds.yaml | 21 + test/e2e/cloud-provider-azure.go | 30 +- test/e2e/common.go | 2 +- test/e2e/config/azure-dev.yaml | 6 +- .../v1beta1/bases/azuredisk-csi-driver.yaml | 18 + .../cluster-template-kcp-remediation.yaml | 20 + .../kustomization.yaml | 2 + .../cluster-template-kcp-scale-in.yaml | 20 + .../kustomization.yaml | 2 + .../cluster-template-machine-pool.yaml | 20 + .../kustomization.yaml | 2 + .../cluster-template-md-remediation.yaml | 20 + .../kustomization.yaml | 2 + .../v1beta1/cluster-template-node-drain.yaml | 20 + .../kustomization.yaml | 2 + .../v1beta1/cluster-template.yaml | 20 + .../cluster-template/kustomization.yaml | 2 + .../cluster-label-azuredisk-csi-driver.yaml | 6 + 55 files changed, 673 insertions(+), 1199 deletions(-) create mode 100644 templates/addons/cluster-api-helm/azuredisk-csi-driver.yaml delete mode 100644 templates/test/ci/cluster-template-prow-ci-version-windows-containerd-2022.yaml create mode 100644 templates/test/ci/patches/cluster-label-azuredisk-csi-driver.yaml create mode 100644 test/e2e/data/infrastructure-azure/v1beta1/bases/azuredisk-csi-driver.yaml create mode 100644 test/e2e/data/infrastructure-azure/v1beta1/patches/cluster-label-azuredisk-csi-driver.yaml diff --git a/Makefile b/Makefile index 2111d4edeef..97c75c3e6f7 100644 --- a/Makefile +++ b/Makefile @@ -304,7 +304,7 @@ create-management-cluster: $(KUSTOMIZE) $(ENVSUBST) $(KUBECTL) $(KIND) ## Create curl --retry $(CURL_RETRIES) -sSL https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.3/cluster-api-components.yaml | $(ENVSUBST) | $(KUBECTL) apply -f - # Deploy CAAPH - curl --retry $(CURL_RETRIES) -sSL https://github.com/kubernetes-sigs/cluster-api-addon-provider-helm/releases/download/v0.1.0-alpha.9/addon-components.yaml | $(ENVSUBST) | $(KUBECTL) apply -f - + curl --retry $(CURL_RETRIES) -sSL 
https://github.com/kubernetes-sigs/cluster-api-addon-provider-helm/releases/download/v0.1.0-alpha.10/addon-components.yaml | $(ENVSUBST) | $(KUBECTL) apply -f - # Deploy CAPZ $(KIND) load docker-image $(CONTROLLER_IMG)-$(ARCH):$(TAG) --name=$(KIND_CLUSTER_NAME) diff --git a/hack/create-dev-cluster.sh b/hack/create-dev-cluster.sh index eb52a266bc8..328c515e90c 100755 --- a/hack/create-dev-cluster.sh +++ b/hack/create-dev-cluster.sh @@ -48,7 +48,7 @@ export CONTROL_PLANE_MACHINE_COUNT=${CONTROL_PLANE_MACHINE_COUNT:-3} export AZURE_CONTROL_PLANE_MACHINE_TYPE="${CONTROL_PLANE_MACHINE_TYPE:-Standard_B2s}" export AZURE_NODE_MACHINE_TYPE="${NODE_MACHINE_TYPE:-Standard_B2s}" export WORKER_MACHINE_COUNT=${WORKER_MACHINE_COUNT:-2} -export KUBERNETES_VERSION="${KUBERNETES_VERSION:-v1.25.6}" +export KUBERNETES_VERSION="${KUBERNETES_VERSION:-v1.26.6}" export CLUSTER_TEMPLATE="${CLUSTER_TEMPLATE:-cluster-template.yaml}" # identity secret settings. diff --git a/templates/addons/cluster-api-helm/azuredisk-csi-driver.yaml b/templates/addons/cluster-api-helm/azuredisk-csi-driver.yaml new file mode 100644 index 00000000000..07afde0faf6 --- /dev/null +++ b/templates/addons/cluster-api-helm/azuredisk-csi-driver.yaml @@ -0,0 +1,18 @@ +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: azuredisk-csi-driver-chart +spec: + clusterSelector: + matchLabels: + azuredisk-csi: "true" + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts + chartName: azuredisk-csi-driver + releaseName: azuredisk-csi-driver-oot + namespace: kube-system + valuesTemplate: | + controller: + replicas: 1 + runOnControlPlane: true + windows: + useHostProcessContainers: {{ hasKey .Cluster.metadata.labels "cni-windows" }} \ No newline at end of file diff --git a/templates/test/ci/cluster-template-prow-azure-cni-v1.yaml b/templates/test/ci/cluster-template-prow-azure-cni-v1.yaml index b6f02f0153c..a6b97a786c5 100644 --- 
a/templates/test/ci/cluster-template-prow-azure-cni-v1.yaml +++ b/templates/test/ci/cluster-template-prow-azure-cni-v1.yaml @@ -1,6 +1,8 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: + labels: + azuredisk-csi: "true" name: ${CLUSTER_NAME} namespace: default spec: @@ -220,3 +222,23 @@ spec: namespace: ${AZURE_CLUSTER_IDENTITY_SECRET_NAMESPACE} tenantID: ${AZURE_TENANT_ID} type: ServicePrincipal +--- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: azuredisk-csi-driver-chart + namespace: default +spec: + chartName: azuredisk-csi-driver + clusterSelector: + matchLabels: + azuredisk-csi: "true" + namespace: kube-system + releaseName: azuredisk-csi-driver-oot + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts + valuesTemplate: |- + controller: + replicas: 1 + runOnControlPlane: true + windows: + useHostProcessContainers: {{ hasKey .Cluster.metadata.labels "cni-windows" }} diff --git a/templates/test/ci/cluster-template-prow-ci-version-dual-stack.yaml b/templates/test/ci/cluster-template-prow-ci-version-dual-stack.yaml index 16f291632b1..05b6f26632b 100644 --- a/templates/test/ci/cluster-template-prow-ci-version-dual-stack.yaml +++ b/templates/test/ci/cluster-template-prow-ci-version-dual-stack.yaml @@ -2,6 +2,7 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: labels: + azuredisk-csi: "true" cni: calico-dual-stack cni-windows: ${CLUSTER_NAME}-calico containerd-logger: disabled @@ -452,6 +453,26 @@ spec: name: containerd-logger-${CLUSTER_NAME} strategy: ApplyOnce --- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: azuredisk-csi-driver-chart + namespace: default +spec: + chartName: azuredisk-csi-driver + clusterSelector: + matchLabels: + azuredisk-csi: "true" + namespace: kube-system + releaseName: azuredisk-csi-driver-oot + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts + 
valuesTemplate: |- + controller: + replicas: 1 + runOnControlPlane: true + windows: + useHostProcessContainers: {{ hasKey .Cluster.metadata.labels "cni-windows" }} +--- apiVersion: v1 data: csi-proxy: | diff --git a/templates/test/ci/cluster-template-prow-ci-version-ipv6.yaml b/templates/test/ci/cluster-template-prow-ci-version-ipv6.yaml index 244cab5cd89..13a3934bf05 100644 --- a/templates/test/ci/cluster-template-prow-ci-version-ipv6.yaml +++ b/templates/test/ci/cluster-template-prow-ci-version-ipv6.yaml @@ -2,6 +2,7 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: labels: + azuredisk-csi: "true" cni: calico-ipv6 cni-windows: ${CLUSTER_NAME}-calico containerd-logger: disabled @@ -470,6 +471,26 @@ spec: name: containerd-logger-${CLUSTER_NAME} strategy: ApplyOnce --- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: azuredisk-csi-driver-chart + namespace: default +spec: + chartName: azuredisk-csi-driver + clusterSelector: + matchLabels: + azuredisk-csi: "true" + namespace: kube-system + releaseName: azuredisk-csi-driver-oot + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts + valuesTemplate: |- + controller: + replicas: 1 + runOnControlPlane: true + windows: + useHostProcessContainers: {{ hasKey .Cluster.metadata.labels "cni-windows" }} +--- apiVersion: v1 data: csi-proxy: | diff --git a/templates/test/ci/cluster-template-prow-ci-version-windows-containerd-2022.yaml b/templates/test/ci/cluster-template-prow-ci-version-windows-containerd-2022.yaml deleted file mode 100644 index d2698ef88b1..00000000000 --- a/templates/test/ci/cluster-template-prow-ci-version-windows-containerd-2022.yaml +++ /dev/null @@ -1,1179 +0,0 @@ -apiVersion: cluster.x-k8s.io/v1beta1 -kind: Cluster -metadata: - labels: - cni-windows: ${CLUSTER_NAME}-calico - containerd-logger: enabled - csi-proxy: enabled - metrics-server: enabled - name: ${CLUSTER_NAME} - namespace: default -spec: - 
clusterNetwork: - pods: - cidrBlocks: - - 192.168.0.0/16 - controlPlaneRef: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 - kind: KubeadmControlPlane - name: ${CLUSTER_NAME}-control-plane - infrastructureRef: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 - kind: AzureCluster - name: ${CLUSTER_NAME} ---- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 -kind: AzureCluster -metadata: - name: ${CLUSTER_NAME} - namespace: default -spec: - additionalTags: - buildProvenance: ${BUILD_PROVENANCE} - creationTimestamp: ${TIMESTAMP} - jobName: ${JOB_NAME} - identityRef: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 - kind: AzureClusterIdentity - name: ${CLUSTER_IDENTITY_NAME} - location: ${AZURE_LOCATION} - networkSpec: - subnets: - - name: control-plane-subnet - role: control-plane - - name: node-subnet - natGateway: - name: node-natgateway - role: node - vnet: - name: ${AZURE_VNET_NAME:=${CLUSTER_NAME}-vnet} - resourceGroup: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}} - subscriptionID: ${AZURE_SUBSCRIPTION_ID} ---- -apiVersion: controlplane.cluster.x-k8s.io/v1beta1 -kind: KubeadmControlPlane -metadata: - name: ${CLUSTER_NAME}-control-plane - namespace: default -spec: - kubeadmConfigSpec: - clusterConfiguration: - apiServer: - extraArgs: - cloud-provider: external - feature-gates: ${K8S_FEATURE_GATES:-""} - timeoutForControlPlane: 20m - controllerManager: - extraArgs: - allocate-node-cidrs: "false" - cloud-provider: external - cluster-name: ${CLUSTER_NAME} - feature-gates: HPAContainerMetrics=true - v: "4" - etcd: - local: - dataDir: /var/lib/etcddisk/etcd - extraArgs: - quota-backend-bytes: "8589934592" - kubernetesVersion: ci/${CI_VERSION} - diskSetup: - filesystems: - - device: /dev/disk/azure/scsi1/lun0 - extraOpts: - - -E - - lazy_itable_init=1,lazy_journal_init=1 - filesystem: ext4 - label: etcd_disk - - device: ephemeral0.1 - filesystem: ext4 - label: ephemeral0 - replaceFS: ntfs - partitions: - - device: /dev/disk/azure/scsi1/lun0 - layout: 
true - overwrite: false - tableType: gpt - files: - - contentFrom: - secret: - key: control-plane-azure.json - name: ${CLUSTER_NAME}-control-plane-azure-json - owner: root:root - path: /etc/kubernetes/azure.json - permissions: "0644" - - content: | - #!/bin/bash - - set -o nounset - set -o pipefail - set -o errexit - [[ $(id -u) != 0 ]] && SUDO="sudo" || SUDO="" - - # This test installs release packages or binaries that are a result of the CI and release builds. - # It runs '... --version' commands to verify that the binaries are correctly installed - # and finally uninstalls the packages. - # For the release packages it tests all versions in the support skew. - LINE_SEPARATOR="*************************************************" - echo "$$LINE_SEPARATOR" - CI_VERSION=${CI_VERSION} - if [[ "$${CI_VERSION}" != "" ]]; then - CI_DIR=/tmp/k8s-ci - mkdir -p $$CI_DIR - declare -a PACKAGES_TO_TEST=("kubectl" "kubelet" "kubeadm") - declare -a CONTAINERS_TO_TEST=("kube-apiserver" "kube-controller-manager" "kube-proxy" "kube-scheduler") - CONTAINER_EXT="tar" - echo "* testing CI version $$CI_VERSION" - # Check for semver - if [[ "$${CI_VERSION}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then - VERSION_WITHOUT_PREFIX="${CI_VERSION#v}" - DEBIAN_FRONTEND=noninteractive apt-get install -y apt-transport-https curl - curl -fsSL https://pkgs.k8s.io/core:/stable:/${KUBERNETES_VERSION}/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg - echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/${KUBERNETES_VERSION}/deb/ /" | tee /etc/apt/sources.list.d/kubernetes.list - apt-get update - # replace . with \. 
- VERSION_REGEX="${VERSION_WITHOUT_PREFIX//./\\.}" - PACKAGE_VERSION="$(apt-cache madison kubelet|grep $${VERSION_REGEX}- | head -n1 | cut -d '|' -f 2 | tr -d '[:space:]')" - for CI_PACKAGE in "$${PACKAGES_TO_TEST[@]}"; do - echo "* installing package: $$CI_PACKAGE $${PACKAGE_VERSION}" - DEBIAN_FRONTEND=noninteractive apt-get install -y $$CI_PACKAGE=$$PACKAGE_VERSION - done - else - CI_URL="https://storage.googleapis.com/k8s-release-dev/ci/$${CI_VERSION}/bin/linux/amd64" - for CI_PACKAGE in "$${PACKAGES_TO_TEST[@]}"; do - echo "* downloading binary: $$CI_URL/$$CI_PACKAGE" - wget --inet4-only "$$CI_URL/$$CI_PACKAGE" -nv -O "$$CI_DIR/$$CI_PACKAGE" - chmod +x "$$CI_DIR/$$CI_PACKAGE" - mv "$$CI_DIR/$$CI_PACKAGE" "/usr/bin/$$CI_PACKAGE" - done - IMAGE_REGISTRY_PREFIX=registry.k8s.io - for CI_CONTAINER in "$${CONTAINERS_TO_TEST[@]}"; do - echo "* downloading package: $$CI_URL/$$CI_CONTAINER.$$CONTAINER_EXT" - wget --inet4-only "$$CI_URL/$$CI_CONTAINER.$$CONTAINER_EXT" -nv -O "$$CI_DIR/$$CI_CONTAINER.$$CONTAINER_EXT" - $${SUDO} ctr -n k8s.io images import "$$CI_DIR/$$CI_CONTAINER.$$CONTAINER_EXT" || echo "* ignoring expected 'ctr images import' result" - $${SUDO} ctr -n k8s.io images tag $$IMAGE_REGISTRY_PREFIX/$$CI_CONTAINER-amd64:"$${CI_VERSION//+/_}" $$IMAGE_REGISTRY_PREFIX/$$CI_CONTAINER:"$${CI_VERSION//+/_}" - $${SUDO} ctr -n k8s.io images tag $$IMAGE_REGISTRY_PREFIX/$$CI_CONTAINER-amd64:"$${CI_VERSION//+/_}" gcr.io/k8s-staging-ci-images/$$CI_CONTAINER:"$${CI_VERSION//+/_}" - done - fi - systemctl restart kubelet - fi - echo "* checking binary versions" - echo "ctr version: " $(ctr version) - echo "kubeadm version: " $(kubeadm version -o=short) - echo "kubectl version: " $(kubectl version --client=true) - echo "kubelet version: " $(kubelet --version) - echo "$$LINE_SEPARATOR" - owner: root:root - path: /tmp/kubeadm-bootstrap.sh - permissions: "0744" - initConfiguration: - nodeRegistration: - kubeletExtraArgs: - azure-container-registry-config: 
/etc/kubernetes/azure.json - cloud-provider: external - name: '{{ ds.meta_data["local_hostname"] }}' - joinConfiguration: - nodeRegistration: - kubeletExtraArgs: - azure-container-registry-config: /etc/kubernetes/azure.json - cloud-provider: external - name: '{{ ds.meta_data["local_hostname"] }}' - mounts: - - - LABEL=etcd_disk - - /var/lib/etcddisk - postKubeadmCommands: [] - preKubeadmCommands: - - bash -c /tmp/kubeadm-bootstrap.sh - verbosity: 5 - machineTemplate: - infrastructureRef: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 - kind: AzureMachineTemplate - name: ${CLUSTER_NAME}-control-plane - replicas: ${CONTROL_PLANE_MACHINE_COUNT} - version: ${KUBERNETES_VERSION} ---- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 -kind: AzureMachineTemplate -metadata: - name: ${CLUSTER_NAME}-control-plane - namespace: default -spec: - template: - spec: - dataDisks: - - diskSizeGB: 256 - lun: 0 - nameSuffix: etcddisk - image: - marketplace: - offer: capi - publisher: cncf-upstream - sku: ubuntu-1804-gen1 - version: latest - osDisk: - diskSizeGB: 128 - osType: Linux - sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""} - vmSize: ${AZURE_CONTROL_PLANE_MACHINE_TYPE} ---- -apiVersion: cluster.x-k8s.io/v1beta1 -kind: MachineDeployment -metadata: - name: ${CLUSTER_NAME}-md-0 - namespace: default -spec: - clusterName: ${CLUSTER_NAME} - replicas: ${WORKER_MACHINE_COUNT} - selector: {} - template: - metadata: - labels: - nodepool: pool1 - spec: - bootstrap: - configRef: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 - kind: KubeadmConfigTemplate - name: ${CLUSTER_NAME}-md-0 - clusterName: ${CLUSTER_NAME} - infrastructureRef: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 - kind: AzureMachineTemplate - name: ${CLUSTER_NAME}-md-0 - version: ${KUBERNETES_VERSION} ---- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 -kind: AzureMachineTemplate -metadata: - name: ${CLUSTER_NAME}-md-0 - namespace: default -spec: - template: - spec: - image: - marketplace: - offer: 
capi - publisher: cncf-upstream - sku: ubuntu-1804-gen1 - version: latest - osDisk: - diskSizeGB: 128 - osType: Linux - sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""} - vmExtensions: - - name: CustomScript - protectedSettings: - commandToExecute: | - #!/bin/sh - echo "This script is a no-op used for extension testing purposes ..." - touch test_file - publisher: Microsoft.Azure.Extensions - version: "2.1" - vmSize: ${AZURE_NODE_MACHINE_TYPE} ---- -apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 -kind: KubeadmConfigTemplate -metadata: - name: ${CLUSTER_NAME}-md-0 - namespace: default -spec: - template: - spec: - files: - - contentFrom: - secret: - key: worker-node-azure.json - name: ${CLUSTER_NAME}-md-0-azure-json - owner: root:root - path: /etc/kubernetes/azure.json - permissions: "0644" - - content: | - #!/bin/bash - - set -o nounset - set -o pipefail - set -o errexit - [[ $(id -u) != 0 ]] && SUDO="sudo" || SUDO="" - - # This test installs release packages or binaries that are a result of the CI and release builds. - # It runs '... --version' commands to verify that the binaries are correctly installed - # and finally uninstalls the packages. - # For the release packages it tests all versions in the support skew. 
- LINE_SEPARATOR="*************************************************" - echo "$$LINE_SEPARATOR" - CI_VERSION=${CI_VERSION} - if [[ "$${CI_VERSION}" != "" ]]; then - CI_DIR=/tmp/k8s-ci - mkdir -p $$CI_DIR - declare -a PACKAGES_TO_TEST=("kubectl" "kubelet" "kubeadm") - declare -a CONTAINERS_TO_TEST=("kube-apiserver" "kube-controller-manager" "kube-proxy" "kube-scheduler") - CONTAINER_EXT="tar" - echo "* testing CI version $$CI_VERSION" - # Check for semver - if [[ "$${CI_VERSION}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then - VERSION_WITHOUT_PREFIX="${CI_VERSION#v}" - DEBIAN_FRONTEND=noninteractive apt-get install -y apt-transport-https curl - curl -fsSL https://pkgs.k8s.io/core:/stable:/${KUBERNETES_VERSION}/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg - echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/${KUBERNETES_VERSION}/deb/ /" | tee /etc/apt/sources.list.d/kubernetes.list - apt-get update - # replace . with \. 
- VERSION_REGEX="${VERSION_WITHOUT_PREFIX//./\\.}" - PACKAGE_VERSION="$(apt-cache madison kubelet|grep $${VERSION_REGEX}- | head -n1 | cut -d '|' -f 2 | tr -d '[:space:]')" - for CI_PACKAGE in "$${PACKAGES_TO_TEST[@]}"; do - echo "* installing package: $$CI_PACKAGE $${PACKAGE_VERSION}" - DEBIAN_FRONTEND=noninteractive apt-get install -y $$CI_PACKAGE=$$PACKAGE_VERSION - done - else - CI_URL="https://storage.googleapis.com/k8s-release-dev/ci/$${CI_VERSION}/bin/linux/amd64" - for CI_PACKAGE in "$${PACKAGES_TO_TEST[@]}"; do - echo "* downloading binary: $$CI_URL/$$CI_PACKAGE" - wget --inet4-only "$$CI_URL/$$CI_PACKAGE" -nv -O "$$CI_DIR/$$CI_PACKAGE" - chmod +x "$$CI_DIR/$$CI_PACKAGE" - mv "$$CI_DIR/$$CI_PACKAGE" "/usr/bin/$$CI_PACKAGE" - done - IMAGE_REGISTRY_PREFIX=registry.k8s.io - for CI_CONTAINER in "$${CONTAINERS_TO_TEST[@]}"; do - echo "* downloading package: $$CI_URL/$$CI_CONTAINER.$$CONTAINER_EXT" - wget --inet4-only "$$CI_URL/$$CI_CONTAINER.$$CONTAINER_EXT" -nv -O "$$CI_DIR/$$CI_CONTAINER.$$CONTAINER_EXT" - $${SUDO} ctr -n k8s.io images import "$$CI_DIR/$$CI_CONTAINER.$$CONTAINER_EXT" || echo "* ignoring expected 'ctr images import' result" - $${SUDO} ctr -n k8s.io images tag $$IMAGE_REGISTRY_PREFIX/$$CI_CONTAINER-amd64:"$${CI_VERSION//+/_}" $$IMAGE_REGISTRY_PREFIX/$$CI_CONTAINER:"$${CI_VERSION//+/_}" - $${SUDO} ctr -n k8s.io images tag $$IMAGE_REGISTRY_PREFIX/$$CI_CONTAINER-amd64:"$${CI_VERSION//+/_}" gcr.io/k8s-staging-ci-images/$$CI_CONTAINER:"$${CI_VERSION//+/_}" - done - fi - systemctl restart kubelet - fi - echo "* checking binary versions" - echo "ctr version: " $(ctr version) - echo "kubeadm version: " $(kubeadm version -o=short) - echo "kubectl version: " $(kubectl version --client=true) - echo "kubelet version: " $(kubelet --version) - echo "$$LINE_SEPARATOR" - owner: root:root - path: /tmp/kubeadm-bootstrap.sh - permissions: "0744" - joinConfiguration: - nodeRegistration: - kubeletExtraArgs: - azure-container-registry-config: 
/etc/kubernetes/azure.json - cloud-provider: external - name: '{{ ds.meta_data["local_hostname"] }}' - preKubeadmCommands: - - bash -c /tmp/kubeadm-bootstrap.sh - verbosity: 5 ---- -apiVersion: cluster.x-k8s.io/v1beta1 -kind: MachineDeployment -metadata: - name: ${CLUSTER_NAME}-md-win - namespace: default -spec: - clusterName: ${CLUSTER_NAME} - replicas: ${WINDOWS_WORKER_MACHINE_COUNT:-0} - selector: {} - template: - spec: - bootstrap: - configRef: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 - kind: KubeadmConfigTemplate - name: ${CLUSTER_NAME}-md-win - clusterName: ${CLUSTER_NAME} - infrastructureRef: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 - kind: AzureMachineTemplate - name: ${CLUSTER_NAME}-md-win - version: ${KUBERNETES_VERSION} ---- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 -kind: AzureMachineTemplate -metadata: - annotations: - runtime: containerd - name: ${CLUSTER_NAME}-md-win - namespace: default -spec: - template: - metadata: - annotations: - runtime: containerd - windowsServerVersion: ${WINDOWS_SERVER_VERSION:=""} - spec: - image: - marketplace: - offer: capi-windows - publisher: cncf-upstream - sku: windows-2022-containerd-gen1 - version: latest - osDisk: - diskSizeGB: 128 - managedDisk: - storageAccountType: Premium_LRS - osType: Windows - sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""} - vmSize: ${AZURE_NODE_MACHINE_TYPE} ---- -apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 -kind: KubeadmConfigTemplate -metadata: - name: ${CLUSTER_NAME}-md-win - namespace: default -spec: - template: - spec: - files: - - contentFrom: - secret: - key: worker-node-azure.json - name: ${CLUSTER_NAME}-md-win-azure-json - owner: root:root - path: c:/k/azure.json - permissions: "0644" - - content: |- - Add-MpPreference -ExclusionProcess C:/opt/cni/bin/calico.exe - Add-MpPreference -ExclusionProcess C:/opt/cni/bin/calico-ipam.exe - path: C:/defender-exclude-calico.ps1 - permissions: "0744" - - content: | - # /tmp is assumed created and required for 
upstream e2e tests to pass - New-Item -ItemType Directory -Force -Path C:\tmp\ - path: C:/create-temp-folder.ps1 - permissions: "0744" - - content: | - $ErrorActionPreference = 'Stop' - - $$CONTAINERD_URL="${WINDOWS_CONTAINERD_URL}" - if($$CONTAINERD_URL -ne ""){ - # Kubelet service depends on contianerd service so make a best effort attempt to stop it - Stop-Service kubelet -Force -ErrorAction SilentlyContinue - Stop-Service containerd -Force - echo "downloading containerd: $$CONTAINERD_URL" - curl.exe --retry 10 --retry-delay 5 -L "$$CONTAINERD_URL" --output "c:/k/containerd.tar.gz" - tar.exe -zxvf c:/k/containerd.tar.gz -C "c:/Program Files/containerd" --strip-components 1 - - Start-Service containerd - } - - containerd.exe --version - containerd-shim-runhcs-v1.exe --version - path: C:/replace-containerd.ps1 - permissions: "0744" - - content: | - mkdir -Force c:/localdumps - reg.exe add "HKLM\Software\Microsoft\Windows\Windows Error Reporting\LocalDumps" /V DumpCount /t REG_DWORD /d 50 /f - reg.exe add "HKLM\Software\Microsoft\Windows\Windows Error Reporting\LocalDumps" /V DumpType /t REG_DWORD /d 2 /f - reg.exe add "HKLM\Software\Microsoft\Windows\Windows Error Reporting\LocalDumps" /V DumpFolder /t REG_EXPAND_SZ /d "c:/LocalDumps" /f - # Enable sftp so we can copy crash dump files during log collection of stfp - $sshd_config = "$env:ProgramData\ssh\sshd_config" - if (-not (Test-Path $sshd_config)) { mkdir -Force $sshd_config } - Add-Content -Path $sshd_config "Subsystem sftp sftp-server.exe" - sc.exe stop sshd - sc.exe start sshd - path: C:/collect-hns-crashes.ps1 - permissions: "0744" - - content: | - $ErrorActionPreference = 'Stop' - - Stop-Service kubelet -Force - - $$CI_VERSION="${CI_VERSION}" - if($$CI_VERSION -ne "") - { - $$binaries=@("kubeadm", "kubectl", "kubelet", "kube-proxy") - $$ci_url="https://storage.googleapis.com/k8s-release-dev/ci/$$CI_VERSION/bin/windows/amd64" - foreach ( $$binary in $$binaries ) - { - echo "downloading binary: 
$$ci_url/$$binary.exe" - curl.exe --retry 10 --retry-delay 5 "$$ci_url/$$binary.exe" --output "c:/k/$$binary.exe" - } - } - - # Tag it to the ci version. The image knows how to use the copy locally with the configmap - # that is applied at at this stage (windows-kubeproxy-ci.yaml) - ctr.exe -n k8s.io images pull docker.io/sigwindowstools/kube-proxy:v1.23.1-calico-hostprocess - ctr.exe -n k8s.io images tag docker.io/sigwindowstools/kube-proxy:v1.23.1-calico-hostprocess "docker.io/sigwindowstools/kube-proxy:${CI_VERSION/+/_}-calico-hostprocess" - - kubeadm.exe version -o=short - kubectl.exe version --client=true - kubelet.exe --version - kube-proxy.exe --version - path: C:/replace-ci-binaries.ps1 - permissions: "0744" - joinConfiguration: - nodeRegistration: - criSocket: npipe:////./pipe/containerd-containerd - kubeletExtraArgs: - azure-container-registry-config: c:/k/azure.json - cloud-provider: external - feature-gates: ${NODE_FEATURE_GATES:-""} - v: "2" - windows-priorityclass: ABOVE_NORMAL_PRIORITY_CLASS - name: '{{ ds.meta_data["local_hostname"] }}' - postKubeadmCommands: - - nssm set kubelet start SERVICE_AUTO_START - - powershell C:/defender-exclude-calico.ps1 - preKubeadmCommands: - - powershell C:/create-temp-folder.ps1 - - powershell C:/replace-containerd.ps1 - - powershell C:/collect-hns-crashes.ps1 - - powershell C:/replace-ci-binaries.ps1 - users: - - groups: Administrators - name: capi - sshAuthorizedKeys: - - ${AZURE_SSH_PUBLIC_KEY:=""} ---- -apiVersion: cluster.x-k8s.io/v1beta1 -kind: MachineHealthCheck -metadata: - name: ${CLUSTER_NAME}-mhc-0 - namespace: default -spec: - clusterName: ${CLUSTER_NAME} - maxUnhealthy: 100% - selector: - matchLabels: - nodepool: pool1 - unhealthyConditions: - - status: "True" - timeout: 30s - type: E2ENodeUnhealthy ---- -apiVersion: addons.cluster.x-k8s.io/v1beta1 -kind: ClusterResourceSet -metadata: - name: ${CLUSTER_NAME}-calico-windows - namespace: default -spec: - clusterSelector: - matchLabels: - cni-windows: 
${CLUSTER_NAME}-calico - resources: - - kind: ConfigMap - name: cni-${CLUSTER_NAME}-calico-windows - strategy: ApplyOnce ---- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 -kind: AzureClusterIdentity -metadata: - labels: - clusterctl.cluster.x-k8s.io/move-hierarchy: "true" - name: ${CLUSTER_IDENTITY_NAME} - namespace: default -spec: - allowedNamespaces: {} - clientID: ${AZURE_CLIENT_ID} - clientSecret: - name: ${AZURE_CLUSTER_IDENTITY_SECRET_NAME} - namespace: ${AZURE_CLUSTER_IDENTITY_SECRET_NAMESPACE} - tenantID: ${AZURE_TENANT_ID} - type: ServicePrincipal ---- -apiVersion: addons.cluster.x-k8s.io/v1beta1 -kind: ClusterResourceSet -metadata: - name: csi-proxy - namespace: default -spec: - clusterSelector: - matchLabels: - csi-proxy: enabled - resources: - - kind: ConfigMap - name: csi-proxy-addon - strategy: ApplyOnce ---- -apiVersion: addons.cluster.x-k8s.io/v1beta1 -kind: ClusterResourceSet -metadata: - name: containerd-logger-${CLUSTER_NAME} - namespace: default -spec: - clusterSelector: - matchLabels: - containerd-logger: enabled - resources: - - kind: ConfigMap - name: containerd-logger-${CLUSTER_NAME} - strategy: ApplyOnce ---- -apiVersion: v1 -data: - kube-proxy-patch: |- - apiVersion: v1 - kind: ConfigMap - metadata: - name: windows-kubeproxy-ci - namespace: kube-system - data: - KUBEPROXY_PATH: "c:/k/kube-proxy.exe" - proxy: | - apiVersion: apps/v1 - kind: DaemonSet - metadata: - labels: - k8s-app: kube-proxy - name: kube-proxy-windows - namespace: kube-system - spec: - selector: - matchLabels: - k8s-app: kube-proxy-windows - template: - metadata: - labels: - k8s-app: kube-proxy-windows - spec: - serviceAccountName: kube-proxy - securityContext: - windowsOptions: - hostProcess: true - runAsUserName: "NT AUTHORITY\\system" - hostNetwork: true - containers: - - image: sigwindowstools/kube-proxy:${KUBERNETES_VERSION/+/_}-calico-hostprocess - args: ["$env:CONTAINER_SANDBOX_MOUNT_POINT/kube-proxy/start.ps1"] - workingDir: 
"$env:CONTAINER_SANDBOX_MOUNT_POINT/kube-proxy/" - name: kube-proxy - env: - - name: NODE_NAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: spec.nodeName - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: KUBEPROXY_PATH - valueFrom: - configMapKeyRef: - name: windows-kubeproxy-ci - key: KUBEPROXY_PATH - optional: true - volumeMounts: - - mountPath: /var/lib/kube-proxy - name: kube-proxy - nodeSelector: - kubernetes.io/os: windows - tolerations: - - key: CriticalAddonsOnly - operator: Exists - - operator: Exists - volumes: - - configMap: - name: kube-proxy - name: kube-proxy - updateStrategy: - type: RollingUpdate - windows-cni: "# strictAffinity required for windows\napiVersion: crd.projectcalico.org/v1\nkind: - IPAMConfig\nmetadata:\n name: default\nspec:\n autoAllocateBlocks: true\n strictAffinity: - true\n---\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: calico-static-rules\n - \ namespace: calico-system\n labels:\n tier: node\n app: calico\ndata:\n - \ static-rules.json: |\n {\n \"Provider\": \"azure\",\n \"Version\": - \"0.1\",\n \"Rules\": [\n {\n \"Name\": \"EndpointPolicy\",\n - \ \"Rule\": {\n \"Id\": \"wireserver\",\n \"Type\": - \"ACL\",\n \"Protocol\": 6,\n \"Action\": \"Block\",\n - \ \"Direction\": \"Out\",\n \"RemoteAddresses\": \"168.63.129.16/32\",\n - \ \"RemotePorts\": \"80\",\n \"Priority\": 200,\n \"RuleType\": - \"Switch\"\n }\n }\n ]\n } \n---\nkind: ConfigMap\napiVersion: - v1\nmetadata:\n name: calico-config-windows\n namespace: calico-system\n labels:\n - \ tier: node\n app: calico\ndata:\n veth_mtu: \"1350\"\n \n cni_network_config: - |\n {\n \"name\": \"Calico\",\n \"cniVersion\": \"0.3.1\",\n \"plugins\": - [\n {\n \"windows_use_single_network\": true,\n \"type\": - \"calico\",\n \"mode\": \"vxlan\",\n \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n - \ \"nodename_file_optional\": true,\n \"log_file_path\": \"c:/cni.log\",\n - \ \"log_level\": \"debug\",\n\n \"vxlan_mac_prefix\": \"0E-2A\",\n - \ 
\"vxlan_vni\": 4096,\n \"mtu\": __CNI_MTU__,\n \"policy\": - {\n \"type\": \"k8s\"\n },\n\n \"log_level\": \"info\",\n\n - \ \"capabilities\": {\"dns\": true},\n \"DNS\": {\n \"Search\": - \ [\n \"svc.cluster.local\"\n ]\n },\n\n \"datastore_type\": - \"kubernetes\",\n\n \"kubernetes\": {\n \"kubeconfig\": \"__KUBECONFIG_FILEPATH__\"\n - \ },\n\n \"ipam\": {\n \"type\": \"calico-ipam\",\n - \ \"subnet\": \"usePodCidr\"\n },\n\n \"policies\": - \ [\n {\n \"Name\": \"EndpointPolicy\",\n \"Value\": - \ {\n \"Type\": \"OutBoundNAT\",\n \"ExceptionList\": - \ [\n \"__K8S_SERVICE_CIDR__\"\n ]\n }\n - \ },\n {\n \"Name\": \"EndpointPolicy\",\n - \ \"Value\": {\n \"Type\": \"SDNROUTE\",\n \"DestinationPrefix\": - \ \"__K8S_SERVICE_CIDR__\",\n \"NeedEncap\": true\n }\n - \ }\n ]\n }\n ]\n\n }\n---\napiVersion: apps/v1\nkind: - DaemonSet\nmetadata:\n name: calico-node-windows\n labels:\n tier: node\n - \ app: calico\n namespace: calico-system\nspec:\n selector:\n matchLabels:\n - \ app: calico\n template:\n metadata:\n labels:\n tier: node\n - \ app: calico\n spec:\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n - \ nodeSelectorTerms:\n - matchExpressions:\n - - key: kubernetes.io/os\n operator: In\n values:\n - \ - windows\n - key: kubernetes.io/arch\n - \ operator: In\n values:\n - - amd64\n securityContext:\n windowsOptions:\n hostProcess: - true\n runAsUserName: \"NT AUTHORITY\\\\system\"\n hostNetwork: - true\n serviceAccountName: calico-node\n tolerations:\n - operator: - Exists\n effect: NoSchedule\n # Mark the pod as a critical add-on - for rescheduling.\n - key: CriticalAddonsOnly\n operator: Exists\n - \ - effect: NoExecute\n operator: Exists\n initContainers:\n # - This container installs the CNI binaries\n # and CNI network config file - on each node.\n - name: install-cni\n image: sigwindowstools/calico-install:v3.26.1-hostprocess\n - \ args: [\"$env:CONTAINER_SANDBOX_MOUNT_POINT/calico/install.ps1\"]\n - \ imagePullPolicy: 
Always\n env:\n # Name of the CNI - config file to create.\n - name: CNI_CONF_NAME\n value: - \"10-calico.conflist\"\n # The CNI network config to install on each - node.\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n - \ name: calico-config-windows\n key: cni_network_config\n - \ # Set the hostname based on the k8s node name.\n - name: - KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: - spec.nodeName\n # CNI MTU Config variable\n - name: CNI_MTU\n - \ valueFrom:\n configMapKeyRef:\n name: - calico-config-windows\n key: veth_mtu\n # Prevents - the container from sleeping forever.\n - name: SLEEP\n value: - \"false\"\n - name: K8S_SERVICE_CIDR\n value: \"10.96.0.0/12\"\n - \ volumeMounts:\n - mountPath: /host/opt/cni/bin\n name: - cni-bin-dir\n - mountPath: /host/etc/cni/net.d\n name: - cni-net-dir\n - name: kubeadm-config\n mountPath: /etc/kubeadm-config/\n - \ securityContext:\n windowsOptions:\n hostProcess: - true\n runAsUserName: \"NT AUTHORITY\\\\system\"\n containers:\n - \ - name: calico-node-startup\n image: sigwindowstools/calico-node:v3.26.1-hostprocess\n - \ args: [\"$env:CONTAINER_SANDBOX_MOUNT_POINT/calico/node-service.ps1\"]\n - \ workingDir: \"$env:CONTAINER_SANDBOX_MOUNT_POINT/calico/\"\n imagePullPolicy: - Always\n volumeMounts:\n - name: calico-config-windows\n mountPath: - /etc/kube-calico-windows/\n env:\n - name: POD_NAME\n valueFrom:\n - \ fieldRef:\n apiVersion: v1\n fieldPath: - metadata.name\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n - \ apiVersion: v1\n fieldPath: metadata.namespace\n - - name: CNI_IPAM_TYPE\n value: \"calico-ipam\"\n - name: CALICO_NETWORKING_BACKEND\n - \ value: \"vxlan\"\n - name: KUBECONFIG\n value: \"C:/etc/cni/net.d/calico-kubeconfig\"\n - \ - name: VXLAN_VNI\n value: \"4096\"\n - name: calico-node-felix\n - \ image: sigwindowstools/calico-node:v3.26.1-hostprocess\n args: - [\"$env:CONTAINER_SANDBOX_MOUNT_POINT/calico/felix-service.ps1\"]\n imagePullPolicy: - Always\n workingDir: 
\"$env:CONTAINER_SANDBOX_MOUNT_POINT/calico/\"\n volumeMounts:\n - \ - name: calico-config-windows\n mountPath: /etc/kube-calico-windows/\n - \ - name: calico-static-rules\n mountPath: /calico/static-rules.json\n - \ subPath: static-rules.json\n env:\n - name: POD_NAME\n - \ valueFrom:\n fieldRef:\n apiVersion: v1\n fieldPath: - metadata.name\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n - \ apiVersion: v1\n fieldPath: metadata.namespace\n - - name: VXLAN_VNI\n value: \"4096\"\n - name: KUBECONFIG\n value: - \"C:/etc/cni/net.d/calico-kubeconfig\"\n volumes:\n - name: calico-config-windows\n - \ configMap:\n name: calico-config-windows\n - name: calico-static-rules\n - \ configMap:\n name: calico-static-rules\n # Used to install - CNI.\n - name: cni-bin-dir\n hostPath:\n path: /opt/cni/bin\n - \ - name: cni-net-dir\n hostPath:\n path: /etc/cni/net.d\n - \ - name: kubeadm-config\n configMap:\n name: kubeadm-config\n---\napiVersion: - apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n name: ipamconfigs.crd.projectcalico.org\nspec:\n - \ group: crd.projectcalico.org\n names:\n kind: IPAMConfig\n listKind: - IPAMConfigList\n plural: ipamconfigs\n singular: ipamconfig\n preserveUnknownFields: - false\n scope: Cluster\n versions:\n - name: v1\n schema:\n openAPIV3Schema:\n - \ properties:\n apiVersion:\n description: 'APIVersion - defines the versioned schema of this representation\n of an object. - Servers should convert recognized schemas to the latest\n internal - value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'\n - \ type: string\n kind:\n description: 'Kind is a - string value representing the REST resource this\n object represents. - Servers may infer this from the endpoint the client\n submits requests - to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'\n - \ type: string\n metadata:\n type: object\n spec:\n - \ description: IPAMConfigSpec contains the specification for an IPAMConfig\n - \ resource.\n properties:\n autoAllocateBlocks:\n - \ type: boolean\n maxBlocksPerHost:\n description: - MaxBlocksPerHost, if non-zero, is the max number of blocks\n that - can be affine to each host.\n maximum: 2147483647\n minimum: - 0\n type: integer\n strictAffinity:\n type: - boolean\n required:\n - autoAllocateBlocks\n - - strictAffinity\n type: object\n type: object\n served: true\n - \ storage: true\nstatus:\n acceptedNames:\n kind: \"\"\n plural: \"\"\n - \ conditions: []\n storedVersions: []\n" -kind: ConfigMap -metadata: - annotations: - note: generated - labels: - type: generated - name: cni-${CLUSTER_NAME}-calico-windows - namespace: default ---- -apiVersion: v1 -data: - csi-proxy: | - apiVersion: apps/v1 - kind: DaemonSet - metadata: - labels: - k8s-app: csi-proxy - name: csi-proxy - namespace: kube-system - spec: - selector: - matchLabels: - k8s-app: csi-proxy - template: - metadata: - labels: - k8s-app: csi-proxy - spec: - nodeSelector: - "kubernetes.io/os": windows - securityContext: - windowsOptions: - hostProcess: true - runAsUserName: "NT AUTHORITY\\SYSTEM" - hostNetwork: true - containers: - - name: csi-proxy - image: ghcr.io/kubernetes-sigs/sig-windows/csi-proxy:v1.0.2 -kind: ConfigMap -metadata: - annotations: - note: generated - labels: - type: generated - name: csi-proxy-addon - namespace: default ---- -apiVersion: v1 -data: - containerd-windows-logger: | - apiVersion: apps/v1 - kind: DaemonSet - metadata: - labels: - k8s-app: containerd-logger - name: containerd-logger - namespace: kube-system - spec: - selector: - matchLabels: - k8s-app: containerd-logger - template: - metadata: - labels: - k8s-app: containerd-logger - spec: - securityContext: - windowsOptions: - hostProcess: true - runAsUserName: 
"NT AUTHORITY\\system" - hostNetwork: true - containers: - - image: ghcr.io/kubernetes-sigs/sig-windows/eventflow-logger:v0.1.0 - args: [ "config.json" ] - name: containerd-logger - imagePullPolicy: Always - volumeMounts: - - name: containerd-logger-config - mountPath: /config.json - subPath: config.json - nodeSelector: - kubernetes.io/os: windows - tolerations: - - key: CriticalAddonsOnly - operator: Exists - - operator: Exists - volumes: - - configMap: - name: containerd-logger-config - name: containerd-logger-config - updateStrategy: - type: RollingUpdate - --- - kind: ConfigMap - apiVersion: v1 - metadata: - name: containerd-logger-config - namespace: kube-system - data: - config.json: | - { - "inputs": [ - { - "type": "ETW", - "sessionNamePrefix": "containerd", - "cleanupOldSessions": true, - "reuseExistingSession": true, - "providers": [ - { - "providerName": "Microsoft.Virtualization.RunHCS", - "providerGuid": "0B52781F-B24D-5685-DDF6-69830ED40EC3", - "level": "Verbose" - }, - { - "providerName": "ContainerD", - "providerGuid": "2acb92c0-eb9b-571a-69cf-8f3410f383ad", - "level": "Verbose" - } - ] - } - ], - "filters": [ - { - "type": "drop", - "include": "ProviderName == Microsoft.Virtualization.RunHCS && name == Stats && hasnoproperty error" - }, - { - "type": "drop", - "include": "ProviderName == Microsoft.Virtualization.RunHCS && name == hcsshim::LayerID && hasnoproperty error" - }, - { - "type": "drop", - "include": "ProviderName == Microsoft.Virtualization.RunHCS && name == hcsshim::NameToGuid && hasnoproperty error" - }, - { - "type": "drop", - "include": "ProviderName == Microsoft.Virtualization.RunHCS && name == containerd.task.v2.Task.Stats && hasnoproperty error" - }, - { - "type": "drop", - "include": "ProviderName == Microsoft.Virtualization.RunHCS && name == containerd.task.v2.Task.State && hasnoproperty error" - }, - { - "type": "drop", - "include": "ProviderName == Microsoft.Virtualization.RunHCS && name == HcsGetProcessProperties && 
hasnoproperty error" - }, - { - "type": "drop", - "include": "ProviderName == Microsoft.Virtualization.RunHCS && name == HcsGetComputeSystemProperties && hasnoproperty error" - } - ], - "outputs": [ - { - "type": "StdOutput" - } - ], - "schemaVersion": "2016-08-11" - } -kind: ConfigMap -metadata: - annotations: - note: generated - labels: - type: generated - name: containerd-logger-${CLUSTER_NAME} - namespace: default ---- -apiVersion: addons.cluster.x-k8s.io/v1beta1 -kind: ClusterResourceSet -metadata: - name: metrics-server-${CLUSTER_NAME} - namespace: default -spec: - clusterSelector: - matchLabels: - metrics-server: enabled - resources: - - kind: ConfigMap - name: metrics-server-${CLUSTER_NAME} - strategy: ApplyOnce ---- -apiVersion: v1 -data: - metrics-server: | - apiVersion: v1 - kind: ServiceAccount - metadata: - labels: - k8s-app: metrics-server - name: metrics-server - namespace: kube-system - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - labels: - k8s-app: metrics-server - rbac.authorization.k8s.io/aggregate-to-admin: "true" - rbac.authorization.k8s.io/aggregate-to-edit: "true" - rbac.authorization.k8s.io/aggregate-to-view: "true" - name: system:aggregated-metrics-reader - rules: - - apiGroups: - - metrics.k8s.io - resources: - - pods - - nodes - verbs: - - get - - list - - watch - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - labels: - k8s-app: metrics-server - name: system:metrics-server - rules: - - apiGroups: - - "" - resources: - - nodes/metrics - verbs: - - get - - apiGroups: - - "" - resources: - - pods - - nodes - verbs: - - get - - list - - watch - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - labels: - k8s-app: metrics-server - name: metrics-server-auth-reader - namespace: kube-system - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: extension-apiserver-authentication-reader - subjects: - - kind: ServiceAccount - name: 
metrics-server - namespace: kube-system - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - labels: - k8s-app: metrics-server - name: metrics-server:system:auth-delegator - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:auth-delegator - subjects: - - kind: ServiceAccount - name: metrics-server - namespace: kube-system - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - labels: - k8s-app: metrics-server - name: system:metrics-server - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:metrics-server - subjects: - - kind: ServiceAccount - name: metrics-server - namespace: kube-system - --- - apiVersion: v1 - kind: Service - metadata: - labels: - k8s-app: metrics-server - name: metrics-server - namespace: kube-system - spec: - ports: - - name: https - port: 443 - protocol: TCP - targetPort: https - selector: - k8s-app: metrics-server - --- - apiVersion: apps/v1 - kind: Deployment - metadata: - labels: - k8s-app: metrics-server - name: metrics-server - namespace: kube-system - spec: - selector: - matchLabels: - k8s-app: metrics-server - strategy: - rollingUpdate: - maxUnavailable: 0 - template: - metadata: - labels: - k8s-app: metrics-server - spec: - containers: - - args: - - --cert-dir=/tmp - - --secure-port=4443 - - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname - - --kubelet-use-node-status-port - - --metric-resolution=15s - - --kubelet-insecure-tls - image: registry.k8s.io/metrics-server/metrics-server:v0.6.3 - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 3 - httpGet: - path: /livez - port: https - scheme: HTTPS - periodSeconds: 10 - name: metrics-server - ports: - - containerPort: 4443 - name: https - protocol: TCP - readinessProbe: - failureThreshold: 3 - httpGet: - path: /readyz - port: https - scheme: HTTPS - initialDelaySeconds: 20 - periodSeconds: 10 - resources: - requests: - 
cpu: 100m - memory: 200Mi - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsNonRoot: true - runAsUser: 1000 - volumeMounts: - - mountPath: /tmp - name: tmp-dir - nodeSelector: - kubernetes.io/os: linux - priorityClassName: system-cluster-critical - serviceAccountName: metrics-server - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/master - operator: Exists - - effect: NoSchedule - key: node-role.kubernetes.io/control-plane - operator: Exists - volumes: - - emptyDir: {} - name: tmp-dir - --- - apiVersion: apiregistration.k8s.io/v1 - kind: APIService - metadata: - labels: - k8s-app: metrics-server - name: v1beta1.metrics.k8s.io - spec: - group: metrics.k8s.io - groupPriorityMinimum: 100 - insecureSkipTLSVerify: true - service: - name: metrics-server - namespace: kube-system - version: v1beta1 - versionPriority: 100 -kind: ConfigMap -metadata: - annotations: - note: generated - labels: - type: generated - name: metrics-server-${CLUSTER_NAME} - namespace: default diff --git a/templates/test/ci/cluster-template-prow-ci-version.yaml b/templates/test/ci/cluster-template-prow-ci-version.yaml index b457d2fd9f9..ab842110b54 100644 --- a/templates/test/ci/cluster-template-prow-ci-version.yaml +++ b/templates/test/ci/cluster-template-prow-ci-version.yaml @@ -2,6 +2,7 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: labels: + azuredisk-csi: "true" cni: calico cni-windows: ${CLUSTER_NAME}-calico containerd-logger: enabled @@ -628,6 +629,26 @@ spec: image: mcr.microsoft.com/oss/calico/ctl version: ${CALICO_VERSION} --- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: azuredisk-csi-driver-chart + namespace: default +spec: + chartName: azuredisk-csi-driver + clusterSelector: + matchLabels: + azuredisk-csi: "true" + namespace: kube-system + releaseName: azuredisk-csi-driver-oot + repoURL: 
https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts + valuesTemplate: |- + controller: + replicas: 1 + runOnControlPlane: true + windows: + useHostProcessContainers: {{ hasKey .Cluster.metadata.labels "cni-windows" }} +--- apiVersion: v1 data: kube-proxy-patch: |- diff --git a/templates/test/ci/cluster-template-prow-custom-vnet.yaml b/templates/test/ci/cluster-template-prow-custom-vnet.yaml index 55a06a87790..013bdaf70fd 100644 --- a/templates/test/ci/cluster-template-prow-custom-vnet.yaml +++ b/templates/test/ci/cluster-template-prow-custom-vnet.yaml @@ -2,6 +2,7 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: labels: + azuredisk-csi: "true" cni: calico name: ${CLUSTER_NAME} namespace: default @@ -277,3 +278,23 @@ spec: calicoctl: image: mcr.microsoft.com/oss/calico/ctl version: ${CALICO_VERSION} +--- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: azuredisk-csi-driver-chart + namespace: default +spec: + chartName: azuredisk-csi-driver + clusterSelector: + matchLabels: + azuredisk-csi: "true" + namespace: kube-system + releaseName: azuredisk-csi-driver-oot + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts + valuesTemplate: |- + controller: + replicas: 1 + runOnControlPlane: true + windows: + useHostProcessContainers: {{ hasKey .Cluster.metadata.labels "cni-windows" }} diff --git a/templates/test/ci/cluster-template-prow-dual-stack.yaml b/templates/test/ci/cluster-template-prow-dual-stack.yaml index 9d272681783..8782c06ec51 100644 --- a/templates/test/ci/cluster-template-prow-dual-stack.yaml +++ b/templates/test/ci/cluster-template-prow-dual-stack.yaml @@ -2,6 +2,7 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: labels: + azuredisk-csi: "true" cni: calico-dual-stack name: ${CLUSTER_NAME} namespace: default @@ -366,3 +367,23 @@ spec: calicoctl: image: mcr.microsoft.com/oss/calico/ctl version: ${CALICO_VERSION} +--- 
+apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: azuredisk-csi-driver-chart + namespace: default +spec: + chartName: azuredisk-csi-driver + clusterSelector: + matchLabels: + azuredisk-csi: "true" + namespace: kube-system + releaseName: azuredisk-csi-driver-oot + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts + valuesTemplate: |- + controller: + replicas: 1 + runOnControlPlane: true + windows: + useHostProcessContainers: {{ hasKey .Cluster.metadata.labels "cni-windows" }} diff --git a/templates/test/ci/cluster-template-prow-edgezone.yaml b/templates/test/ci/cluster-template-prow-edgezone.yaml index f5a657ce6e4..c5bf852d13e 100644 --- a/templates/test/ci/cluster-template-prow-edgezone.yaml +++ b/templates/test/ci/cluster-template-prow-edgezone.yaml @@ -2,6 +2,7 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: labels: + azuredisk-csi: "true" cni: calico name: ${CLUSTER_NAME} namespace: default @@ -261,3 +262,23 @@ spec: calicoctl: image: mcr.microsoft.com/oss/calico/ctl version: ${CALICO_VERSION} +--- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: azuredisk-csi-driver-chart + namespace: default +spec: + chartName: azuredisk-csi-driver + clusterSelector: + matchLabels: + azuredisk-csi: "true" + namespace: kube-system + releaseName: azuredisk-csi-driver-oot + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts + valuesTemplate: |- + controller: + replicas: 1 + runOnControlPlane: true + windows: + useHostProcessContainers: {{ hasKey .Cluster.metadata.labels "cni-windows" }} diff --git a/templates/test/ci/cluster-template-prow-flatcar.yaml b/templates/test/ci/cluster-template-prow-flatcar.yaml index 2717aaacde9..2254726d569 100644 --- a/templates/test/ci/cluster-template-prow-flatcar.yaml +++ b/templates/test/ci/cluster-template-prow-flatcar.yaml @@ -2,6 +2,7 @@ apiVersion: 
cluster.x-k8s.io/v1beta1 kind: Cluster metadata: labels: + azuredisk-csi: "true" cni: calico name: ${CLUSTER_NAME} namespace: default @@ -288,3 +289,23 @@ spec: calicoctl: image: mcr.microsoft.com/oss/calico/ctl version: ${CALICO_VERSION} +--- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: azuredisk-csi-driver-chart + namespace: default +spec: + chartName: azuredisk-csi-driver + clusterSelector: + matchLabels: + azuredisk-csi: "true" + namespace: kube-system + releaseName: azuredisk-csi-driver-oot + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts + valuesTemplate: |- + controller: + replicas: 1 + runOnControlPlane: true + windows: + useHostProcessContainers: {{ hasKey .Cluster.metadata.labels "cni-windows" }} diff --git a/templates/test/ci/cluster-template-prow-intree-cloud-provider-machine-pool.yaml b/templates/test/ci/cluster-template-prow-intree-cloud-provider-machine-pool.yaml index c3a43b4cdb2..1b2776209aa 100644 --- a/templates/test/ci/cluster-template-prow-intree-cloud-provider-machine-pool.yaml +++ b/templates/test/ci/cluster-template-prow-intree-cloud-provider-machine-pool.yaml @@ -2,6 +2,7 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: labels: + azuredisk-csi: "true" cni: calico cni-windows: ${CLUSTER_NAME}-calico containerd-logger: enabled @@ -398,6 +399,26 @@ spec: image: mcr.microsoft.com/oss/calico/ctl version: ${CALICO_VERSION} --- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: azuredisk-csi-driver-chart + namespace: default +spec: + chartName: azuredisk-csi-driver + clusterSelector: + matchLabels: + azuredisk-csi: "true" + namespace: kube-system + releaseName: azuredisk-csi-driver-oot + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts + valuesTemplate: |- + controller: + replicas: 1 + runOnControlPlane: true + windows: + useHostProcessContainers: {{ hasKey 
.Cluster.metadata.labels "cni-windows" }} +--- apiVersion: v1 data: proxy: | diff --git a/templates/test/ci/cluster-template-prow-intree-cloud-provider.yaml b/templates/test/ci/cluster-template-prow-intree-cloud-provider.yaml index 7b65f75175c..12a90dee52b 100644 --- a/templates/test/ci/cluster-template-prow-intree-cloud-provider.yaml +++ b/templates/test/ci/cluster-template-prow-intree-cloud-provider.yaml @@ -2,6 +2,7 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: labels: + azuredisk-csi: "true" cni: calico cni-windows: ${CLUSTER_NAME}-calico containerd-logger: enabled @@ -462,6 +463,26 @@ spec: image: mcr.microsoft.com/oss/calico/ctl version: ${CALICO_VERSION} --- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: azuredisk-csi-driver-chart + namespace: default +spec: + chartName: azuredisk-csi-driver + clusterSelector: + matchLabels: + azuredisk-csi: "true" + namespace: kube-system + releaseName: azuredisk-csi-driver-oot + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts + valuesTemplate: |- + controller: + replicas: 1 + runOnControlPlane: true + windows: + useHostProcessContainers: {{ hasKey .Cluster.metadata.labels "cni-windows" }} +--- apiVersion: v1 data: proxy: | diff --git a/templates/test/ci/cluster-template-prow-ipv6.yaml b/templates/test/ci/cluster-template-prow-ipv6.yaml index 273e90c4ba5..ac9e94b5c39 100644 --- a/templates/test/ci/cluster-template-prow-ipv6.yaml +++ b/templates/test/ci/cluster-template-prow-ipv6.yaml @@ -2,6 +2,7 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: labels: + azuredisk-csi: "true" cni: calico-ipv6 name: ${CLUSTER_NAME} namespace: default @@ -383,3 +384,23 @@ spec: calicoctl: image: mcr.microsoft.com/oss/calico/ctl version: ${CALICO_VERSION} +--- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: azuredisk-csi-driver-chart + namespace: default +spec: + chartName: 
azuredisk-csi-driver + clusterSelector: + matchLabels: + azuredisk-csi: "true" + namespace: kube-system + releaseName: azuredisk-csi-driver-oot + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts + valuesTemplate: |- + controller: + replicas: 1 + runOnControlPlane: true + windows: + useHostProcessContainers: {{ hasKey .Cluster.metadata.labels "cni-windows" }} diff --git a/templates/test/ci/cluster-template-prow-machine-pool-ci-version.yaml b/templates/test/ci/cluster-template-prow-machine-pool-ci-version.yaml index fdbbc5bed84..b1312f90adc 100644 --- a/templates/test/ci/cluster-template-prow-machine-pool-ci-version.yaml +++ b/templates/test/ci/cluster-template-prow-machine-pool-ci-version.yaml @@ -2,6 +2,7 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: labels: + azuredisk-csi: "true" cni: calico cni-windows: ${CLUSTER_NAME}-calico containerd-logger: enabled @@ -562,6 +563,26 @@ spec: image: mcr.microsoft.com/oss/calico/ctl version: ${CALICO_VERSION} --- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: azuredisk-csi-driver-chart + namespace: default +spec: + chartName: azuredisk-csi-driver + clusterSelector: + matchLabels: + azuredisk-csi: "true" + namespace: kube-system + releaseName: azuredisk-csi-driver-oot + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts + valuesTemplate: |- + controller: + replicas: 1 + runOnControlPlane: true + windows: + useHostProcessContainers: {{ hasKey .Cluster.metadata.labels "cni-windows" }} +--- apiVersion: v1 data: kube-proxy-patch: |- diff --git a/templates/test/ci/cluster-template-prow-machine-pool-flex.yaml b/templates/test/ci/cluster-template-prow-machine-pool-flex.yaml index 3579753e1b7..9105375b4a9 100644 --- a/templates/test/ci/cluster-template-prow-machine-pool-flex.yaml +++ b/templates/test/ci/cluster-template-prow-machine-pool-flex.yaml @@ -2,6 +2,7 @@ apiVersion: 
cluster.x-k8s.io/v1beta1 kind: Cluster metadata: labels: + azuredisk-csi: "true" cni: calico cni-windows: ${CLUSTER_NAME}-calico containerd-logger: enabled @@ -388,6 +389,26 @@ spec: image: mcr.microsoft.com/oss/calico/ctl version: ${CALICO_VERSION} --- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: azuredisk-csi-driver-chart + namespace: default +spec: + chartName: azuredisk-csi-driver + clusterSelector: + matchLabels: + azuredisk-csi: "true" + namespace: kube-system + releaseName: azuredisk-csi-driver-oot + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts + valuesTemplate: |- + controller: + replicas: 1 + runOnControlPlane: true + windows: + useHostProcessContainers: {{ hasKey .Cluster.metadata.labels "cni-windows" }} +--- apiVersion: v1 data: proxy: | diff --git a/templates/test/ci/cluster-template-prow-machine-pool.yaml b/templates/test/ci/cluster-template-prow-machine-pool.yaml index fb55151d07d..0947e9a2c30 100644 --- a/templates/test/ci/cluster-template-prow-machine-pool.yaml +++ b/templates/test/ci/cluster-template-prow-machine-pool.yaml @@ -2,6 +2,7 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: labels: + azuredisk-csi: "true" cni: calico cni-windows: ${CLUSTER_NAME}-calico containerd-logger: enabled @@ -382,6 +383,26 @@ spec: image: mcr.microsoft.com/oss/calico/ctl version: ${CALICO_VERSION} --- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: azuredisk-csi-driver-chart + namespace: default +spec: + chartName: azuredisk-csi-driver + clusterSelector: + matchLabels: + azuredisk-csi: "true" + namespace: kube-system + releaseName: azuredisk-csi-driver-oot + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts + valuesTemplate: |- + controller: + replicas: 1 + runOnControlPlane: true + windows: + useHostProcessContainers: {{ hasKey .Cluster.metadata.labels "cni-windows" }} +--- 
apiVersion: v1 data: proxy: | diff --git a/templates/test/ci/cluster-template-prow-nvidia-gpu.yaml b/templates/test/ci/cluster-template-prow-nvidia-gpu.yaml index 6d863ca8ce0..23062dd4383 100644 --- a/templates/test/ci/cluster-template-prow-nvidia-gpu.yaml +++ b/templates/test/ci/cluster-template-prow-nvidia-gpu.yaml @@ -2,6 +2,7 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: labels: + azuredisk-csi: "true" cni: calico name: ${CLUSTER_NAME} namespace: default @@ -247,3 +248,23 @@ spec: calicoctl: image: mcr.microsoft.com/oss/calico/ctl version: ${CALICO_VERSION} +--- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: azuredisk-csi-driver-chart + namespace: default +spec: + chartName: azuredisk-csi-driver + clusterSelector: + matchLabels: + azuredisk-csi: "true" + namespace: kube-system + releaseName: azuredisk-csi-driver-oot + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts + valuesTemplate: |- + controller: + replicas: 1 + runOnControlPlane: true + windows: + useHostProcessContainers: {{ hasKey .Cluster.metadata.labels "cni-windows" }} diff --git a/templates/test/ci/cluster-template-prow-private.yaml b/templates/test/ci/cluster-template-prow-private.yaml index 9d7e03c2fb7..bfa26ccb463 100644 --- a/templates/test/ci/cluster-template-prow-private.yaml +++ b/templates/test/ci/cluster-template-prow-private.yaml @@ -2,6 +2,7 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: labels: + azuredisk-csi: "true" cni: calico name: ${CLUSTER_NAME} namespace: default @@ -316,6 +317,26 @@ spec: image: mcr.microsoft.com/oss/calico/ctl version: ${CALICO_VERSION} --- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: azuredisk-csi-driver-chart + namespace: default +spec: + chartName: azuredisk-csi-driver + clusterSelector: + matchLabels: + azuredisk-csi: "true" + namespace: kube-system + releaseName: azuredisk-csi-driver-oot + repoURL: 
https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts + valuesTemplate: |- + controller: + replicas: 1 + runOnControlPlane: true + windows: + useHostProcessContainers: {{ hasKey .Cluster.metadata.labels "cni-windows" }} +--- apiVersion: v1 data: resources: | diff --git a/templates/test/ci/cluster-template-prow-topology.yaml b/templates/test/ci/cluster-template-prow-topology.yaml index b0201abf9d9..c6e880ded37 100644 --- a/templates/test/ci/cluster-template-prow-topology.yaml +++ b/templates/test/ci/cluster-template-prow-topology.yaml @@ -2,6 +2,7 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: labels: + azuredisk-csi: "true" cni: calico cni-windows: ${CLUSTER_NAME}-calico containerd-logger: enabled @@ -113,6 +114,26 @@ spec: image: mcr.microsoft.com/oss/calico/ctl version: ${CALICO_VERSION} --- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: azuredisk-csi-driver-chart + namespace: default +spec: + chartName: azuredisk-csi-driver + clusterSelector: + matchLabels: + azuredisk-csi: "true" + namespace: kube-system + releaseName: azuredisk-csi-driver-oot + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts + valuesTemplate: |- + controller: + replicas: 1 + runOnControlPlane: true + windows: + useHostProcessContainers: {{ hasKey .Cluster.metadata.labels "cni-windows" }} +--- apiVersion: v1 data: proxy: | diff --git a/templates/test/ci/cluster-template-prow-workload-identity.yaml b/templates/test/ci/cluster-template-prow-workload-identity.yaml index cbb61315b46..d46601ecb24 100644 --- a/templates/test/ci/cluster-template-prow-workload-identity.yaml +++ b/templates/test/ci/cluster-template-prow-workload-identity.yaml @@ -2,6 +2,7 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: labels: + azuredisk-csi: "true" cni: calico name: ${CLUSTER_NAME} namespace: default @@ -253,3 +254,23 @@ spec: calicoctl: image: 
mcr.microsoft.com/oss/calico/ctl version: ${CALICO_VERSION} +--- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: azuredisk-csi-driver-chart + namespace: default +spec: + chartName: azuredisk-csi-driver + clusterSelector: + matchLabels: + azuredisk-csi: "true" + namespace: kube-system + releaseName: azuredisk-csi-driver-oot + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts + valuesTemplate: |- + controller: + replicas: 1 + runOnControlPlane: true + windows: + useHostProcessContainers: {{ hasKey .Cluster.metadata.labels "cni-windows" }} diff --git a/templates/test/ci/cluster-template-prow.yaml b/templates/test/ci/cluster-template-prow.yaml index f31e475dbaa..e3dcbd88459 100644 --- a/templates/test/ci/cluster-template-prow.yaml +++ b/templates/test/ci/cluster-template-prow.yaml @@ -2,6 +2,7 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: labels: + azuredisk-csi: "true" cni: calico cni-windows: ${CLUSTER_NAME}-calico containerd-logger: enabled @@ -446,6 +447,26 @@ spec: image: mcr.microsoft.com/oss/calico/ctl version: ${CALICO_VERSION} --- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: azuredisk-csi-driver-chart + namespace: default +spec: + chartName: azuredisk-csi-driver + clusterSelector: + matchLabels: + azuredisk-csi: "true" + namespace: kube-system + releaseName: azuredisk-csi-driver-oot + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts + valuesTemplate: |- + controller: + replicas: 1 + runOnControlPlane: true + windows: + useHostProcessContainers: {{ hasKey .Cluster.metadata.labels "cni-windows" }} +--- apiVersion: v1 data: proxy: | diff --git a/templates/test/ci/patches/cluster-label-azuredisk-csi-driver.yaml b/templates/test/ci/patches/cluster-label-azuredisk-csi-driver.yaml new file mode 100644 index 00000000000..080f57937ff --- /dev/null +++ 
b/templates/test/ci/patches/cluster-label-azuredisk-csi-driver.yaml @@ -0,0 +1,6 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: ${CLUSTER_NAME} + labels: + azuredisk-csi: "true" \ No newline at end of file diff --git a/templates/test/ci/prow-azure-cni-v1/kustomization.yaml b/templates/test/ci/prow-azure-cni-v1/kustomization.yaml index 459a03a481b..fe00ee0a141 100644 --- a/templates/test/ci/prow-azure-cni-v1/kustomization.yaml +++ b/templates/test/ci/prow-azure-cni-v1/kustomization.yaml @@ -3,6 +3,8 @@ kind: Kustomization namespace: default resources: - ../../../flavors/azure-cni-v1/ + - ../../../addons/cluster-api-helm/azuredisk-csi-driver.yaml patchesStrategicMerge: - ../patches/tags.yaml - ../patches/controller-manager.yaml + - ../patches/cluster-label-azuredisk-csi-driver.yaml diff --git a/templates/test/ci/prow-custom-vnet/kustomization.yaml b/templates/test/ci/prow-custom-vnet/kustomization.yaml index 1ec37da5fad..0565fdf4edf 100644 --- a/templates/test/ci/prow-custom-vnet/kustomization.yaml +++ b/templates/test/ci/prow-custom-vnet/kustomization.yaml @@ -5,6 +5,7 @@ resources: - ../../../flavors/default - ../prow/mhc.yaml - ../../../addons/cluster-api-helm/calico.yaml + - ../../../addons/cluster-api-helm/azuredisk-csi-driver.yaml patchesStrategicMerge: - ../patches/tags.yaml - ../patches/mhc.yaml @@ -12,4 +13,5 @@ patchesStrategicMerge: - patches/custom-vnet.yaml - ../patches/uami-md-0.yaml - ../patches/uami-control-plane.yaml - - ../patches/cluster-label-calico.yaml \ No newline at end of file + - ../patches/cluster-label-calico.yaml + - ../patches/cluster-label-azuredisk-csi-driver.yaml diff --git a/templates/test/ci/prow-dual-stack/kustomization.yaml b/templates/test/ci/prow-dual-stack/kustomization.yaml index 459b8c4c907..d123be7c0df 100644 --- a/templates/test/ci/prow-dual-stack/kustomization.yaml +++ b/templates/test/ci/prow-dual-stack/kustomization.yaml @@ -5,9 +5,11 @@ resources: - ../../../flavors/dual-stack - 
machine-pool-dualstack.yaml - ../../../addons/cluster-api-helm/calico-dual-stack.yaml + - ../../../addons/cluster-api-helm/azuredisk-csi-driver.yaml patchesStrategicMerge: - ../patches/tags.yaml - ../patches/controller-manager.yaml - patches/azure-machine-template-control-plane.yaml - patches/azure-machine-template.yaml - patches/cluster-label-calico-dual-stack.yaml + - ../patches/cluster-label-azuredisk-csi-driver.yaml diff --git a/templates/test/ci/prow-edgezone/kustomization.yaml b/templates/test/ci/prow-edgezone/kustomization.yaml index 6bb2a8f70df..e8a0aa31b58 100644 --- a/templates/test/ci/prow-edgezone/kustomization.yaml +++ b/templates/test/ci/prow-edgezone/kustomization.yaml @@ -4,6 +4,7 @@ namespace: default resources: - ../../../flavors/edgezone - ../../../addons/cluster-api-helm/calico.yaml + - ../../../addons/cluster-api-helm/azuredisk-csi-driver.yaml patchesStrategicMerge: - ../patches/tags.yaml - ../patches/controller-manager.yaml @@ -14,4 +15,5 @@ patchesStrategicMerge: - patches/standardssd-disk.yaml - patches/machine-type.yaml - patches/kubernetes-version.yaml - - ../patches/cluster-label-calico.yaml \ No newline at end of file + - ../patches/cluster-label-calico.yaml + - ../patches/cluster-label-azuredisk-csi-driver.yaml diff --git a/templates/test/ci/prow-flatcar/kustomization.yaml b/templates/test/ci/prow-flatcar/kustomization.yaml index c341146cab4..60af393772e 100644 --- a/templates/test/ci/prow-flatcar/kustomization.yaml +++ b/templates/test/ci/prow-flatcar/kustomization.yaml @@ -4,7 +4,9 @@ namespace: default resources: - ../../../flavors/flatcar/ - ../../../addons/cluster-api-helm/calico.yaml + - ../../../addons/cluster-api-helm/azuredisk-csi-driver.yaml patchesStrategicMerge: - ../patches/tags.yaml - ../patches/controller-manager.yaml - ../patches/cluster-label-calico.yaml + - ../patches/cluster-label-azuredisk-csi-driver.yaml diff --git a/templates/test/ci/prow-ipv6/kustomization.yaml b/templates/test/ci/prow-ipv6/kustomization.yaml 
index b72de16056c..93cb5cf349b 100644 --- a/templates/test/ci/prow-ipv6/kustomization.yaml +++ b/templates/test/ci/prow-ipv6/kustomization.yaml @@ -5,7 +5,10 @@ resources: - ../../../flavors/ipv6 - machine-pool-ipv6.yaml - ../../../addons/cluster-api-helm/calico-ipv6.yaml + - ../../../addons/cluster-api-helm/azuredisk-csi-driver.yaml patchesStrategicMerge: - ../patches/tags.yaml - ../patches/controller-manager.yaml - patches/cluster-label-calico-ipv6.yaml + - ../patches/cluster-label-azuredisk-csi-driver.yaml + diff --git a/templates/test/ci/prow-machine-pool/kustomization.yaml b/templates/test/ci/prow-machine-pool/kustomization.yaml index c189fde424e..59822b47c4a 100644 --- a/templates/test/ci/prow-machine-pool/kustomization.yaml +++ b/templates/test/ci/prow-machine-pool/kustomization.yaml @@ -7,6 +7,7 @@ resources: - ../../../addons/windows/csi-proxy/csi-proxy-resource-set.yaml - ../../../addons/windows/containerd-logging/containerd-logger-resource-set.yaml - ../../../addons/cluster-api-helm/calico.yaml + - ../../../addons/cluster-api-helm/azuredisk-csi-driver.yaml patchesStrategicMerge: - ../patches/azuremachinepool-vmextension.yaml - ../patches/tags.yaml @@ -14,6 +15,7 @@ patchesStrategicMerge: - ../patches/machine-pool-worker-counts.yaml - ../patches/windows-containerd-labels.yaml - ../patches/cluster-label-calico.yaml + - ../patches/cluster-label-azuredisk-csi-driver.yaml configMapGenerator: - name: cni-${CLUSTER_NAME}-calico-windows files: diff --git a/templates/test/ci/prow-nvidia-gpu/kustomization.yaml b/templates/test/ci/prow-nvidia-gpu/kustomization.yaml index 13c1085fc3c..d96de0180ba 100644 --- a/templates/test/ci/prow-nvidia-gpu/kustomization.yaml +++ b/templates/test/ci/prow-nvidia-gpu/kustomization.yaml @@ -4,11 +4,13 @@ namespace: default resources: - ../../../flavors/nvidia-gpu - ../../../addons/cluster-api-helm/calico.yaml + - ../../../addons/cluster-api-helm/azuredisk-csi-driver.yaml patchesStrategicMerge: - ../patches/tags.yaml - 
../patches/controller-manager.yaml - ../patches/azurecluster-gpu.yaml - ../patches/cluster-label-calico.yaml + - ../patches/cluster-label-azuredisk-csi-driver.yaml patches: - path: patches/node-storage-type.yaml target: diff --git a/templates/test/ci/prow-private/kustomization.yaml b/templates/test/ci/prow-private/kustomization.yaml index a097dc10699..a624b79ed55 100644 --- a/templates/test/ci/prow-private/kustomization.yaml +++ b/templates/test/ci/prow-private/kustomization.yaml @@ -5,6 +5,7 @@ resources: - ../../../flavors/private - cni-resource-set.yaml - ../../../addons/cluster-api-helm/calico.yaml + - ../../../addons/cluster-api-helm/azuredisk-csi-driver.yaml patchesStrategicMerge: - ../patches/tags.yaml - ../patches/controller-manager.yaml @@ -15,6 +16,7 @@ patchesStrategicMerge: - ../prow-intree-cloud-provider/patches/intree-cp.yaml # TODO: remove once CAPI supports Helm addons - ../prow-intree-cloud-provider/patches/intree-md-0.yaml # TODO: remove once CAPI supports Helm addons - ../patches/cluster-label-calico.yaml + - ../patches/cluster-label-azuredisk-csi-driver.yaml patches: - path: patches/user-assigned.yaml target: diff --git a/templates/test/ci/prow-topology/kustomization.yaml b/templates/test/ci/prow-topology/kustomization.yaml index 654099474e2..da93db782a5 100644 --- a/templates/test/ci/prow-topology/kustomization.yaml +++ b/templates/test/ci/prow-topology/kustomization.yaml @@ -6,10 +6,12 @@ resources: - cni-resource-set.yaml - ../../../addons/windows/csi-proxy/csi-proxy-resource-set.yaml - ../../../addons/cluster-api-helm/calico.yaml + - ../../../addons/cluster-api-helm/azuredisk-csi-driver.yaml patchesStrategicMerge: - ../patches/windows-containerd-labels.yaml - cluster.yaml - ../patches/cluster-label-calico.yaml + - ../patches/cluster-label-azuredisk-csi-driver.yaml configMapGenerator: - name: cni-${CLUSTER_NAME}-calico-windows files: diff --git a/templates/test/ci/prow-workload-identity/kustomization.yaml 
b/templates/test/ci/prow-workload-identity/kustomization.yaml index 45491162460..642b67c258a 100644 --- a/templates/test/ci/prow-workload-identity/kustomization.yaml +++ b/templates/test/ci/prow-workload-identity/kustomization.yaml @@ -4,6 +4,7 @@ namespace: default resources: - ../../../flavors/default - ../../../addons/cluster-api-helm/calico.yaml + - ../../../addons/cluster-api-helm/azuredisk-csi-driver.yaml patchesStrategicMerge: - ../patches/azureclusteridentity-azwi.yaml - ../patches/tags.yaml @@ -12,3 +13,4 @@ patchesStrategicMerge: - ../patches/uami-md-0.yaml - ../patches/uami-control-plane.yaml - ../patches/cluster-label-calico.yaml + - ../patches/cluster-label-azuredisk-csi-driver.yaml diff --git a/templates/test/ci/prow/kustomization.yaml b/templates/test/ci/prow/kustomization.yaml index 22bc0993504..9eb60192826 100644 --- a/templates/test/ci/prow/kustomization.yaml +++ b/templates/test/ci/prow/kustomization.yaml @@ -11,6 +11,7 @@ resources: - ../../../addons/windows/csi-proxy/csi-proxy-resource-set.yaml - ../../../addons/windows/containerd-logging/containerd-logger-resource-set.yaml - ../../../addons/cluster-api-helm/calico.yaml + - ../../../addons/cluster-api-helm/azuredisk-csi-driver.yaml patchesStrategicMerge: - ../patches/tags.yaml - ../patches/mhc.yaml @@ -22,6 +23,7 @@ patchesStrategicMerge: - ../patches/windows-containerd-labels.yaml - ../patches/windows-server-version.yaml - ../patches/cluster-label-calico.yaml + - ../patches/cluster-label-azuredisk-csi-driver.yaml patches: - target: group: bootstrap.cluster.x-k8s.io diff --git a/templates/test/dev/cluster-template-custom-builds-machine-pool.yaml b/templates/test/dev/cluster-template-custom-builds-machine-pool.yaml index ba70ad9f0b4..dea8f3a82a8 100644 --- a/templates/test/dev/cluster-template-custom-builds-machine-pool.yaml +++ b/templates/test/dev/cluster-template-custom-builds-machine-pool.yaml @@ -2,6 +2,7 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: labels: + 
azuredisk-csi: "true" cni: calico cni-windows: ${CLUSTER_NAME}-calico containerd-logger: enabled @@ -510,6 +511,26 @@ spec: image: mcr.microsoft.com/oss/calico/ctl version: ${CALICO_VERSION} --- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: azuredisk-csi-driver-chart + namespace: default +spec: + chartName: azuredisk-csi-driver + clusterSelector: + matchLabels: + azuredisk-csi: "true" + namespace: kube-system + releaseName: azuredisk-csi-driver-oot + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts + valuesTemplate: |- + controller: + replicas: 1 + runOnControlPlane: true + windows: + useHostProcessContainers: {{ hasKey .Cluster.metadata.labels "cni-windows" }} +--- apiVersion: v1 data: kube-proxy-patch: |- diff --git a/templates/test/dev/cluster-template-custom-builds.yaml b/templates/test/dev/cluster-template-custom-builds.yaml index 79f0b5375e1..a412aa50fcf 100644 --- a/templates/test/dev/cluster-template-custom-builds.yaml +++ b/templates/test/dev/cluster-template-custom-builds.yaml @@ -2,6 +2,7 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: labels: + azuredisk-csi: "true" cni: calico cni-windows: ${CLUSTER_NAME}-calico containerd-logger: enabled @@ -603,6 +604,26 @@ spec: image: mcr.microsoft.com/oss/calico/ctl version: ${CALICO_VERSION} --- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: azuredisk-csi-driver-chart + namespace: default +spec: + chartName: azuredisk-csi-driver + clusterSelector: + matchLabels: + azuredisk-csi: "true" + namespace: kube-system + releaseName: azuredisk-csi-driver-oot + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts + valuesTemplate: |- + controller: + replicas: 1 + runOnControlPlane: true + windows: + useHostProcessContainers: {{ hasKey .Cluster.metadata.labels "cni-windows" }} +--- apiVersion: v1 data: kube-proxy-patch: |- diff --git 
a/test/e2e/cloud-provider-azure.go b/test/e2e/cloud-provider-azure.go index af66128865c..834b38a666d 100644 --- a/test/e2e/cloud-provider-azure.go +++ b/test/e2e/cloud-provider-azure.go @@ -80,19 +80,25 @@ func InstallCNIAndCloudProviderAzureHelmChart(ctx context.Context, input cluster } } -// InstallAzureDiskCSIDriverHelmChart installs the official azure-disk CSI driver helm chart -func InstallAzureDiskCSIDriverHelmChart(ctx context.Context, input clusterctl.ApplyCustomClusterTemplateAndWaitInput, hasWindows bool) { - specName := "azuredisk-csi-drivers-install" - By("Installing azure-disk CSI driver components via helm") - options := &HelmOptions{ - Values: []string{"controller.replicas=1", "controller.runOnControlPlane=true"}, - } - // TODO: make this always true once HostProcessContainers are on for all supported k8s versions. - if hasWindows { - options.Values = append(options.Values, "windows.useHostProcessContainers=true") - } +// EnsureAzureDiskCSIDriverHelmChart installs the official azure-disk CSI driver helm chart when installHelmChart is true; otherwise it assumes the driver was installed via CAAPH and only waits for its deployment to become Ready +func EnsureAzureDiskCSIDriverHelmChart(ctx context.Context, input clusterctl.ApplyCustomClusterTemplateAndWaitInput, installHelmChart bool, hasWindows bool) { + specName := "ensure-azuredisk-csi-drivers" clusterProxy := input.ClusterProxy.GetWorkloadCluster(ctx, input.Namespace, input.ClusterName) - InstallHelmChart(ctx, clusterProxy, kubesystem, azureDiskCSIDriverHelmRepoURL, azureDiskCSIDriverChartName, azureDiskCSIDriverHelmReleaseName, options, "") + + if installHelmChart { + By("Installing azure-disk CSI driver components via helm") + options := &HelmOptions{ + Values: []string{"controller.replicas=1", "controller.runOnControlPlane=true"}, + } + // TODO: make this always true once HostProcessContainers are on for all supported k8s versions. 
+ if hasWindows { + options.Values = append(options.Values, "windows.useHostProcessContainers=true") + } + InstallHelmChart(ctx, clusterProxy, kubesystem, azureDiskCSIDriverHelmRepoURL, azureDiskCSIDriverChartName, azureDiskCSIDriverHelmReleaseName, options, "") + } else { + By("Ensuring azure-disk CSI driver is installed via CAAPH") + } + By("Waiting for Ready csi-azuredisk-controller deployment pods") for _, d := range []string{"csi-azuredisk-controller"} { waitInput := GetWaitForDeploymentsAvailableInput(ctx, clusterProxy, d, kubesystem, specName) diff --git a/test/e2e/common.go b/test/e2e/common.go index 579d8ed0657..d9e3360b2e4 100644 --- a/test/e2e/common.go +++ b/test/e2e/common.go @@ -295,7 +295,7 @@ func ensureControlPlaneInitialized(ctx context.Context, input clusterctl.ApplyCu EnsureCNI(ctx, input, installHelmCharts, cluster.Spec.ClusterNetwork.Pods.CIDRBlocks, hasWindows) } controlPlane := discoveryAndWaitForControlPlaneInitialized(ctx, input, result) - InstallAzureDiskCSIDriverHelmChart(ctx, input, hasWindows) + EnsureAzureDiskCSIDriverHelmChart(ctx, input, installHelmCharts, hasWindows) result.ControlPlane = controlPlane } diff --git a/test/e2e/config/azure-dev.yaml b/test/e2e/config/azure-dev.yaml index 7da8849de81..16c6b67ac9e 100644 --- a/test/e2e/config/azure-dev.yaml +++ b/test/e2e/config/azure-dev.yaml @@ -9,7 +9,7 @@ images: loadBehavior: tryLoad - name: registry.k8s.io/cluster-api/kubeadm-control-plane-controller:v1.5.3 loadBehavior: tryLoad - - name: registry.k8s.io/cluster-api-helm/cluster-api-helm-controller:v0.1.0-alpha.9 + - name: registry.k8s.io/cluster-api-helm/cluster-api-helm-controller:v0.1.0-alpha.10 loadBehavior: tryLoad providers: @@ -152,8 +152,8 @@ providers: - name: helm type: AddonProvider versions: - - name: v0.1.0-alpha.9 - value: https://github.com/kubernetes-sigs/cluster-api-addon-provider-helm/releases/download/v0.1.0-alpha.9/addon-components.yaml + - name: v0.1.0-alpha.10 + value: 
https://github.com/kubernetes-sigs/cluster-api-addon-provider-helm/releases/download/v0.1.0-alpha.10/addon-components.yaml type: url contract: v1beta1 files: diff --git a/test/e2e/data/infrastructure-azure/v1beta1/bases/azuredisk-csi-driver.yaml b/test/e2e/data/infrastructure-azure/v1beta1/bases/azuredisk-csi-driver.yaml new file mode 100644 index 00000000000..07afde0faf6 --- /dev/null +++ b/test/e2e/data/infrastructure-azure/v1beta1/bases/azuredisk-csi-driver.yaml @@ -0,0 +1,18 @@ +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: azuredisk-csi-driver-chart +spec: + clusterSelector: + matchLabels: + azuredisk-csi: "true" + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts + chartName: azuredisk-csi-driver + releaseName: azuredisk-csi-driver-oot + namespace: kube-system + valuesTemplate: | + controller: + replicas: 1 + runOnControlPlane: true + windows: + useHostProcessContainers: {{ hasKey .Cluster.metadata.labels "cni-windows" }} \ No newline at end of file diff --git a/test/e2e/data/infrastructure-azure/v1beta1/cluster-template-kcp-remediation.yaml b/test/e2e/data/infrastructure-azure/v1beta1/cluster-template-kcp-remediation.yaml index 0dc99e10b74..3675868068c 100644 --- a/test/e2e/data/infrastructure-azure/v1beta1/cluster-template-kcp-remediation.yaml +++ b/test/e2e/data/infrastructure-azure/v1beta1/cluster-template-kcp-remediation.yaml @@ -1,5 +1,24 @@ apiVersion: addons.cluster.x-k8s.io/v1alpha1 kind: HelmChartProxy +metadata: + name: azuredisk-csi-driver-chart +spec: + chartName: azuredisk-csi-driver + clusterSelector: + matchLabels: + azuredisk-csi: "true" + namespace: kube-system + releaseName: azuredisk-csi-driver-oot + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts + valuesTemplate: |- + controller: + replicas: 1 + runOnControlPlane: true + windows: + useHostProcessContainers: {{ hasKey .Cluster.metadata.labels "cni-windows" }} 
+--- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy metadata: name: calico spec: @@ -56,6 +75,7 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: labels: + azuredisk-csi: "true" cni: calico name: ${CLUSTER_NAME} namespace: default diff --git a/test/e2e/data/infrastructure-azure/v1beta1/cluster-template-kcp-remediation/kustomization.yaml b/test/e2e/data/infrastructure-azure/v1beta1/cluster-template-kcp-remediation/kustomization.yaml index 52e45be1f13..9e1f19137fe 100644 --- a/test/e2e/data/infrastructure-azure/v1beta1/cluster-template-kcp-remediation/kustomization.yaml +++ b/test/e2e/data/infrastructure-azure/v1beta1/cluster-template-kcp-remediation/kustomization.yaml @@ -4,7 +4,9 @@ bases: - mhc.yaml - ../bases/azure-cluster-identity.yaml - ../bases/calico.yaml + - ../bases/azuredisk-csi-driver.yaml patchesStrategicMerge: - ../patches/azurecluster-identity-ref.yaml - ../patches/cluster-label-calico.yaml + - ../patches/cluster-label-azuredisk-csi-driver.yaml diff --git a/test/e2e/data/infrastructure-azure/v1beta1/cluster-template-kcp-scale-in.yaml b/test/e2e/data/infrastructure-azure/v1beta1/cluster-template-kcp-scale-in.yaml index 30eda5aae7c..2634076849f 100644 --- a/test/e2e/data/infrastructure-azure/v1beta1/cluster-template-kcp-scale-in.yaml +++ b/test/e2e/data/infrastructure-azure/v1beta1/cluster-template-kcp-scale-in.yaml @@ -1,5 +1,24 @@ apiVersion: addons.cluster.x-k8s.io/v1alpha1 kind: HelmChartProxy +metadata: + name: azuredisk-csi-driver-chart +spec: + chartName: azuredisk-csi-driver + clusterSelector: + matchLabels: + azuredisk-csi: "true" + namespace: kube-system + releaseName: azuredisk-csi-driver-oot + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts + valuesTemplate: |- + controller: + replicas: 1 + runOnControlPlane: true + windows: + useHostProcessContainers: {{ hasKey .Cluster.metadata.labels "cni-windows" }} +--- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: 
HelmChartProxy metadata: name: calico spec: @@ -56,6 +75,7 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: labels: + azuredisk-csi: "true" cni: calico name: ${CLUSTER_NAME} namespace: default diff --git a/test/e2e/data/infrastructure-azure/v1beta1/cluster-template-kcp-scale-in/kustomization.yaml b/test/e2e/data/infrastructure-azure/v1beta1/cluster-template-kcp-scale-in/kustomization.yaml index 54662455683..52dd66fa093 100644 --- a/test/e2e/data/infrastructure-azure/v1beta1/cluster-template-kcp-scale-in/kustomization.yaml +++ b/test/e2e/data/infrastructure-azure/v1beta1/cluster-template-kcp-scale-in/kustomization.yaml @@ -3,8 +3,10 @@ bases: - ../bases/md.yaml - ../bases/azure-cluster-identity.yaml - ../bases/calico.yaml +- ../bases/azuredisk-csi-driver.yaml patchesStrategicMerge: - ./cluster-with-kcp.yaml - ../patches/azurecluster-identity-ref.yaml - ../patches/cluster-label-calico.yaml +- ../patches/cluster-label-azuredisk-csi-driver.yaml diff --git a/test/e2e/data/infrastructure-azure/v1beta1/cluster-template-machine-pool.yaml b/test/e2e/data/infrastructure-azure/v1beta1/cluster-template-machine-pool.yaml index 594948ca941..44ad8083fca 100644 --- a/test/e2e/data/infrastructure-azure/v1beta1/cluster-template-machine-pool.yaml +++ b/test/e2e/data/infrastructure-azure/v1beta1/cluster-template-machine-pool.yaml @@ -1,5 +1,24 @@ apiVersion: addons.cluster.x-k8s.io/v1alpha1 kind: HelmChartProxy +metadata: + name: azuredisk-csi-driver-chart +spec: + chartName: azuredisk-csi-driver + clusterSelector: + matchLabels: + azuredisk-csi: "true" + namespace: kube-system + releaseName: azuredisk-csi-driver-oot + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts + valuesTemplate: |- + controller: + replicas: 1 + runOnControlPlane: true + windows: + useHostProcessContainers: {{ hasKey .Cluster.metadata.labels "cni-windows" }} +--- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy metadata: name: calico 
spec: @@ -55,6 +74,7 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: labels: + azuredisk-csi: "true" cni: calico name: ${CLUSTER_NAME} namespace: default diff --git a/test/e2e/data/infrastructure-azure/v1beta1/cluster-template-machine-pool/kustomization.yaml b/test/e2e/data/infrastructure-azure/v1beta1/cluster-template-machine-pool/kustomization.yaml index 566770ccf02..dce55fb18ad 100644 --- a/test/e2e/data/infrastructure-azure/v1beta1/cluster-template-machine-pool/kustomization.yaml +++ b/test/e2e/data/infrastructure-azure/v1beta1/cluster-template-machine-pool/kustomization.yaml @@ -3,7 +3,9 @@ resources: - ../bases/mp.yaml - ../bases/azure-cluster-identity.yaml - ../bases/calico.yaml + - ../bases/azuredisk-csi-driver.yaml patchesStrategicMerge: - ../patches/azurecluster-identity-ref.yaml - ../patches/cluster-label-calico.yaml + - ../patches/cluster-label-azuredisk-csi-driver.yaml diff --git a/test/e2e/data/infrastructure-azure/v1beta1/cluster-template-md-remediation.yaml b/test/e2e/data/infrastructure-azure/v1beta1/cluster-template-md-remediation.yaml index 1c29569aa7d..bc22c3e24ab 100644 --- a/test/e2e/data/infrastructure-azure/v1beta1/cluster-template-md-remediation.yaml +++ b/test/e2e/data/infrastructure-azure/v1beta1/cluster-template-md-remediation.yaml @@ -1,5 +1,24 @@ apiVersion: addons.cluster.x-k8s.io/v1alpha1 kind: HelmChartProxy +metadata: + name: azuredisk-csi-driver-chart +spec: + chartName: azuredisk-csi-driver + clusterSelector: + matchLabels: + azuredisk-csi: "true" + namespace: kube-system + releaseName: azuredisk-csi-driver-oot + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts + valuesTemplate: |- + controller: + replicas: 1 + runOnControlPlane: true + windows: + useHostProcessContainers: {{ hasKey .Cluster.metadata.labels "cni-windows" }} +--- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy metadata: name: calico spec: @@ -56,6 +75,7 @@ apiVersion: 
cluster.x-k8s.io/v1beta1 kind: Cluster metadata: labels: + azuredisk-csi: "true" cni: calico name: ${CLUSTER_NAME} namespace: default diff --git a/test/e2e/data/infrastructure-azure/v1beta1/cluster-template-md-remediation/kustomization.yaml b/test/e2e/data/infrastructure-azure/v1beta1/cluster-template-md-remediation/kustomization.yaml index 52219917bf3..ed195fc0bd6 100644 --- a/test/e2e/data/infrastructure-azure/v1beta1/cluster-template-md-remediation/kustomization.yaml +++ b/test/e2e/data/infrastructure-azure/v1beta1/cluster-template-md-remediation/kustomization.yaml @@ -4,8 +4,10 @@ bases: - mhc.yaml - ../bases/azure-cluster-identity.yaml - ../bases/calico.yaml + - ../bases/azuredisk-csi-driver.yaml patchesStrategicMerge: - ./md.yaml - ../patches/azurecluster-identity-ref.yaml - ../patches/cluster-label-calico.yaml +- ../patches/cluster-label-azuredisk-csi-driver.yaml diff --git a/test/e2e/data/infrastructure-azure/v1beta1/cluster-template-node-drain.yaml b/test/e2e/data/infrastructure-azure/v1beta1/cluster-template-node-drain.yaml index 63e173f160b..621d053fa6a 100644 --- a/test/e2e/data/infrastructure-azure/v1beta1/cluster-template-node-drain.yaml +++ b/test/e2e/data/infrastructure-azure/v1beta1/cluster-template-node-drain.yaml @@ -1,5 +1,24 @@ apiVersion: addons.cluster.x-k8s.io/v1alpha1 kind: HelmChartProxy +metadata: + name: azuredisk-csi-driver-chart +spec: + chartName: azuredisk-csi-driver + clusterSelector: + matchLabels: + azuredisk-csi: "true" + namespace: kube-system + releaseName: azuredisk-csi-driver-oot + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts + valuesTemplate: |- + controller: + replicas: 1 + runOnControlPlane: true + windows: + useHostProcessContainers: {{ hasKey .Cluster.metadata.labels "cni-windows" }} +--- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy metadata: name: calico spec: @@ -56,6 +75,7 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: labels: + 
azuredisk-csi: "true" cni: calico name: ${CLUSTER_NAME} namespace: default diff --git a/test/e2e/data/infrastructure-azure/v1beta1/cluster-template-node-drain/kustomization.yaml b/test/e2e/data/infrastructure-azure/v1beta1/cluster-template-node-drain/kustomization.yaml index 23f1031e120..c292b89295f 100644 --- a/test/e2e/data/infrastructure-azure/v1beta1/cluster-template-node-drain/kustomization.yaml +++ b/test/e2e/data/infrastructure-azure/v1beta1/cluster-template-node-drain/kustomization.yaml @@ -3,9 +3,11 @@ bases: - ../bases/md.yaml - ../bases/azure-cluster-identity.yaml - ../bases/calico.yaml +- ../bases/azuredisk-csi-driver.yaml patchesStrategicMerge: - ./md.yaml - ./cluster-with-kcp.yaml - ../patches/azurecluster-identity-ref.yaml - ../patches/cluster-label-calico.yaml +- ../patches/cluster-label-azuredisk-csi-driver.yaml diff --git a/test/e2e/data/infrastructure-azure/v1beta1/cluster-template.yaml b/test/e2e/data/infrastructure-azure/v1beta1/cluster-template.yaml index 6e25b06c1b0..1d3a8a0703f 100644 --- a/test/e2e/data/infrastructure-azure/v1beta1/cluster-template.yaml +++ b/test/e2e/data/infrastructure-azure/v1beta1/cluster-template.yaml @@ -1,5 +1,24 @@ apiVersion: addons.cluster.x-k8s.io/v1alpha1 kind: HelmChartProxy +metadata: + name: azuredisk-csi-driver-chart +spec: + chartName: azuredisk-csi-driver + clusterSelector: + matchLabels: + azuredisk-csi: "true" + namespace: kube-system + releaseName: azuredisk-csi-driver-oot + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts + valuesTemplate: |- + controller: + replicas: 1 + runOnControlPlane: true + windows: + useHostProcessContainers: {{ hasKey .Cluster.metadata.labels "cni-windows" }} +--- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy metadata: name: calico spec: @@ -56,6 +75,7 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: labels: + azuredisk-csi: "true" cni: calico name: ${CLUSTER_NAME} namespace: default diff --git 
a/test/e2e/data/infrastructure-azure/v1beta1/cluster-template/kustomization.yaml b/test/e2e/data/infrastructure-azure/v1beta1/cluster-template/kustomization.yaml index a5808c06bf3..31346933340 100644 --- a/test/e2e/data/infrastructure-azure/v1beta1/cluster-template/kustomization.yaml +++ b/test/e2e/data/infrastructure-azure/v1beta1/cluster-template/kustomization.yaml @@ -3,7 +3,9 @@ bases: - ../bases/md.yaml - ../bases/azure-cluster-identity.yaml - ../bases/calico.yaml +- ../bases/azuredisk-csi-driver.yaml patchesStrategicMerge: - ../patches/azurecluster-identity-ref.yaml - ../patches/cluster-label-calico.yaml +- ../patches/cluster-label-azuredisk-csi-driver.yaml diff --git a/test/e2e/data/infrastructure-azure/v1beta1/patches/cluster-label-azuredisk-csi-driver.yaml b/test/e2e/data/infrastructure-azure/v1beta1/patches/cluster-label-azuredisk-csi-driver.yaml new file mode 100644 index 00000000000..080f57937ff --- /dev/null +++ b/test/e2e/data/infrastructure-azure/v1beta1/patches/cluster-label-azuredisk-csi-driver.yaml @@ -0,0 +1,6 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: ${CLUSTER_NAME} + labels: + azuredisk-csi: "true" \ No newline at end of file From 0c70e543387274a6b2af2fda26d71a8e993d8f3b Mon Sep 17 00:00:00 2001 From: Cecile Robert-Michon Date: Tue, 24 Oct 2023 20:41:33 +0000 Subject: [PATCH 03/10] Don't use job name as container name for CI binaries --- docs/book/src/developers/development.md | 1 - .../src/developers/kubernetes-developers.md | 1 - scripts/ci-build-azure-ccm.sh | 19 ++++++++------- scripts/ci-build-kubernetes.sh | 23 +++++++++---------- ...r-template-custom-builds-machine-pool.yaml | 6 ++--- .../dev/cluster-template-custom-builds.yaml | 6 ++--- .../patches/custom-builds.yaml | 2 +- ...-machine-pool-windows-k8s-pr-binaries.yaml | 2 +- ...adm-bootstrap-windows-k8s-pr-binaries.yaml | 2 +- .../patches/kubeadm-bootstrap.yaml | 2 +- .../kubeadm-controlplane-bootstrap.yaml | 2 +- 
.../patches/control-plane-custom-builds.yaml | 2 +- 12 files changed, 32 insertions(+), 36 deletions(-) diff --git a/docs/book/src/developers/development.md b/docs/book/src/developers/development.md index 32edfb52d80..c1c91f47507 100644 --- a/docs/book/src/developers/development.md +++ b/docs/book/src/developers/development.md @@ -549,7 +549,6 @@ With the following environment variables defined, CAPZ runs `./scripts/ci-build- | ------------------------- | ------------------------------------------------------------------------ | | `AZURE_STORAGE_ACCOUNT` | Your Azure storage account name | | `AZURE_STORAGE_KEY` | Your Azure storage key | -| `JOB_NAME` | `test` (an environment variable used by CI, can be any non-empty string) | | `USE_LOCAL_KIND_REGISTRY` | `false` | | `REGISTRY` | Your Registry | | `TEST_K8S` | `true` | diff --git a/docs/book/src/developers/kubernetes-developers.md b/docs/book/src/developers/kubernetes-developers.md index 88934348300..00c7099c1ae 100644 --- a/docs/book/src/developers/kubernetes-developers.md +++ b/docs/book/src/developers/kubernetes-developers.md @@ -21,7 +21,6 @@ export AZURE_STORAGE_ACCOUNT= export AZURE_STORAGE_KEY= export REGISTRY= export TEST_K8S="true" -export JOB_NAME="test" # an environment variable used by CI, can be any non-empty string source ./scripts/ci-build-kubernetes.sh ``` diff --git a/scripts/ci-build-azure-ccm.sh b/scripts/ci-build-azure-ccm.sh index 7bb37caa608..9a2b9e9dcba 100755 --- a/scripts/ci-build-azure-ccm.sh +++ b/scripts/ci-build-azure-ccm.sh @@ -30,9 +30,6 @@ source "${REPO_ROOT}/hack/parse-prow-creds.sh" : "${AZURE_STORAGE_ACCOUNT:?Environment variable empty or not defined.}" : "${AZURE_STORAGE_KEY:?Environment variable empty or not defined.}" -# JOB_NAME is an environment variable set by a prow job - -# https://github.com/kubernetes/test-infra/blob/master/prow/jobs.md#job-environment-variables -: "${JOB_NAME:?Environment variable empty or not defined.}" : "${REGISTRY:?Environment variable empty or 
not defined.}" # cloud controller manager image @@ -41,6 +38,8 @@ export CCM_IMAGE_NAME=azure-cloud-controller-manager export CNM_IMAGE_NAME=azure-cloud-node-manager # cloud node manager windows image version export WINDOWS_IMAGE_VERSION=1809 +# container name +export AZURE_BLOB_CONTAINER_NAME="${AZURE_BLOB_CONTAINER_NAME:-"kubernetes-ci"}" setup() { AZURE_CLOUD_PROVIDER_ROOT="${AZURE_CLOUD_PROVIDER_ROOT:-""}" @@ -85,14 +84,14 @@ main() { echo "Building and pushing Linux and Windows amd64 Azure ACR credential provider" make -C "${AZURE_CLOUD_PROVIDER_ROOT}" bin/azure-acr-credential-provider bin/azure-acr-credential-provider.exe - if [[ "$(az storage container exists --name "${JOB_NAME}" --query exists --output tsv)" == "false" ]]; then - echo "Creating ${JOB_NAME} storage container" - az storage container create --name "${JOB_NAME}" > /dev/null - az storage container set-permission --name "${JOB_NAME}" --public-access container > /dev/null + if [[ "$(az storage container exists --name "${AZURE_BLOB_CONTAINER_NAME}" --query exists --output tsv)" == "false" ]]; then + echo "Creating ${AZURE_BLOB_CONTAINER_NAME} storage container" + az storage container create --name "${AZURE_BLOB_CONTAINER_NAME}" > /dev/null + az storage container set-permission --name "${AZURE_BLOB_CONTAINER_NAME}" --public-access container > /dev/null fi - az storage blob upload --overwrite --container-name "${JOB_NAME}" --file "${AZURE_CLOUD_PROVIDER_ROOT}/bin/azure-acr-credential-provider" --name "${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/azure-acr-credential-provider" - az storage blob upload --overwrite --container-name "${JOB_NAME}" --file "${AZURE_CLOUD_PROVIDER_ROOT}/bin/azure-acr-credential-provider.exe" --name "${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/azure-acr-credential-provider.exe" + az storage blob upload --overwrite --container-name "${AZURE_BLOB_CONTAINER_NAME}" --file "${AZURE_CLOUD_PROVIDER_ROOT}/bin/azure-acr-credential-provider" --name 
"${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/azure-acr-credential-provider" + az storage blob upload --overwrite --container-name "${AZURE_BLOB_CONTAINER_NAME}" --file "${AZURE_CLOUD_PROVIDER_ROOT}/bin/azure-acr-credential-provider.exe" --name "${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/azure-acr-credential-provider.exe" fi fi } @@ -113,7 +112,7 @@ can_reuse_artifacts() { if [[ "${TEST_ACR_CREDENTIAL_PROVIDER:-}" =~ "true" ]]; then for BINARY in azure-acr-credential-provider azure-acr-credential-provider.exe; do - if [[ "$(az storage blob exists --container-name "${JOB_NAME}" --name "${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/${BINARY}" --query exists --output tsv)" == "false" ]]; then + if [[ "$(az storage blob exists --container-name "${AZURE_BLOB_CONTAINER_NAME}" --name "${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/${BINARY}" --query exists --output tsv)" == "false" ]]; then echo "false" && return fi done diff --git a/scripts/ci-build-kubernetes.sh b/scripts/ci-build-kubernetes.sh index 13f19ebe33e..3a81b761ac5 100755 --- a/scripts/ci-build-kubernetes.sh +++ b/scripts/ci-build-kubernetes.sh @@ -35,9 +35,6 @@ source "${REPO_ROOT}/hack/util.sh" : "${AZURE_STORAGE_ACCOUNT:?Environment variable empty or not defined.}" : "${AZURE_STORAGE_KEY:?Environment variable empty or not defined.}" : "${REGISTRY:?Environment variable empty or not defined.}" -# JOB_NAME is an environment variable set by a prow job - -# https://github.com/kubernetes/test-infra/blob/master/prow/jobs.md#job-environment-variables -: "${JOB_NAME:?Environment variable empty or not defined.}" declare -a BINARIES=("kubeadm" "kubectl" "kubelet" "e2e.test") declare -a WINDOWS_BINARIES=("kubeadm" "kubectl" "kubelet" "kube-proxy") @@ -78,13 +75,15 @@ setup() { # ref: https://github.com/kubernetes/kubernetes/blob/5491484aa91fd09a01a68042e7674bc24d42687a/build/lib/release.sh#L345-L346 export KUBE_IMAGE_TAG="${KUBE_GIT_VERSION/+/_}" echo "using K8s KUBE_IMAGE_TAG=${KUBE_IMAGE_TAG}" + + export 
AZURE_BLOB_CONTAINER_NAME="${AZURE_BLOB_CONTAINER_NAME:-"kubernetes-ci"}" } main() { - if [[ "$(az storage container exists --name "${JOB_NAME}" --query exists --output tsv)" == "false" ]]; then - echo "Creating ${JOB_NAME} storage container" - az storage container create --name "${JOB_NAME}" > /dev/null - az storage container set-permission --name "${JOB_NAME}" --public-access container > /dev/null + if [[ "$(az storage container exists --name "${AZURE_BLOB_CONTAINER_NAME}" --query exists --output tsv)" == "false" ]]; then + echo "Creating ${AZURE_BLOB_CONTAINER_NAME} storage container" + az storage container create --name "${AZURE_BLOB_CONTAINER_NAME}" > /dev/null + az storage container set-permission --name "${AZURE_BLOB_CONTAINER_NAME}" --public-access container > /dev/null fi if [[ "${KUBE_BUILD_CONFORMANCE:-}" =~ [yY] ]]; then @@ -112,12 +111,12 @@ main() { docker tag "${OLD_IMAGE_URL}" "${NEW_IMAGE_URL}" && docker push "${NEW_IMAGE_URL}" done - echo "Uploading binaries to Azure storage container ${JOB_NAME}" + echo "Uploading binaries to Azure storage container ${AZURE_BLOB_CONTAINER_NAME}" for BINARY in "${BINARIES[@]}"; do BIN_PATH="${KUBE_GIT_VERSION}/bin/linux/amd64/${BINARY}" echo "uploading ${BIN_PATH}" - az storage blob upload --overwrite --container-name "${JOB_NAME}" --file "${KUBE_ROOT}/_output/dockerized/bin/linux/amd64/${BINARY}" --name "${BIN_PATH}" + az storage blob upload --overwrite --container-name "${AZURE_BLOB_CONTAINER_NAME}" --file "${KUBE_ROOT}/_output/dockerized/bin/linux/amd64/${BINARY}" --name "${BIN_PATH}" done if [[ "${TEST_WINDOWS:-}" == "true" ]]; then @@ -130,7 +129,7 @@ main() { for BINARY in "${WINDOWS_BINARIES[@]}"; do BIN_PATH="${KUBE_GIT_VERSION}/bin/windows/amd64/${BINARY}.exe" echo "uploading ${BIN_PATH}" - az storage blob upload --overwrite --container-name "${JOB_NAME}" --file "${KUBE_ROOT}/_output/dockerized/bin/windows/amd64/${BINARY}.exe" --name "${BIN_PATH}" + az storage blob upload --overwrite --container-name 
"${AZURE_BLOB_CONTAINER_NAME}" --file "${KUBE_ROOT}/_output/dockerized/bin/windows/amd64/${BINARY}.exe" --name "${BIN_PATH}" done fi fi @@ -145,14 +144,14 @@ can_reuse_artifacts() { done for BINARY in "${BINARIES[@]}"; do - if [[ "$(az storage blob exists --container-name "${JOB_NAME}" --name "${KUBE_GIT_VERSION}/bin/linux/amd64/${BINARY}" --query exists --output tsv)" == "false" ]]; then + if [[ "$(az storage blob exists --container-name "${AZURE_BLOB_CONTAINER_NAME}" --name "${KUBE_GIT_VERSION}/bin/linux/amd64/${BINARY}" --query exists --output tsv)" == "false" ]]; then echo "false" && return fi done if [[ "${TEST_WINDOWS:-}" == "true" ]]; then for BINARY in "${WINDOWS_BINARIES[@]}"; do - if [[ "$(az storage blob exists --container-name "${JOB_NAME}" --name "${KUBE_GIT_VERSION}/bin/windows/amd64/${BINARY}.exe" --query exists --output tsv)" == "false" ]]; then + if [[ "$(az storage blob exists --container-name "${AZURE_BLOB_CONTAINER_NAME}" --name "${KUBE_GIT_VERSION}/bin/windows/amd64/${BINARY}.exe" --query exists --output tsv)" == "false" ]]; then echo "false" && return fi done diff --git a/templates/test/dev/cluster-template-custom-builds-machine-pool.yaml b/templates/test/dev/cluster-template-custom-builds-machine-pool.yaml index dea8f3a82a8..c72a102b7c9 100644 --- a/templates/test/dev/cluster-template-custom-builds-machine-pool.yaml +++ b/templates/test/dev/cluster-template-custom-builds-machine-pool.yaml @@ -105,7 +105,7 @@ spec: declare -a BINARIES=("kubeadm" "kubectl" "kubelet") for BINARY in "$${BINARIES[@]}"; do echo "* installing package: $${BINARY} ${KUBE_GIT_VERSION}" - curl --retry 10 --retry-delay 5 "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${JOB_NAME}/${KUBE_GIT_VERSION}/bin/linux/amd64/$${BINARY}" --output "/usr/bin/$${BINARY}" + curl --retry 10 --retry-delay 5 "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${KUBE_GIT_VERSION}/bin/linux/amd64/$${BINARY}" --output "/usr/bin/$${BINARY}" done 
systemctl restart kubelet @@ -282,7 +282,7 @@ spec: declare -a BINARIES=("kubeadm" "kubectl" "kubelet") for BINARY in "$${BINARIES[@]}"; do echo "* installing package: $${BINARY} ${KUBE_GIT_VERSION}" - curl --retry 10 --retry-delay 5 "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${JOB_NAME}/${KUBE_GIT_VERSION}/bin/linux/amd64/$${BINARY}" --output "/usr/bin/$${BINARY}" + curl --retry 10 --retry-delay 5 "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${KUBE_GIT_VERSION}/bin/linux/amd64/$${BINARY}" --output "/usr/bin/$${BINARY}" done systemctl restart kubelet @@ -397,7 +397,7 @@ spec: if($$KUBE_GIT_VERSION -ne "") { $$binaries=@("kubeadm", "kubectl", "kubelet", "kube-proxy") - $$ci_url="https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${JOB_NAME}/${KUBE_GIT_VERSION}/bin/windows/amd64" + $$ci_url="https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${KUBE_GIT_VERSION}/bin/windows/amd64" foreach ( $$binary in $$binaries ) { echo "downloading binary: $$ci_url/$$binary.exe" diff --git a/templates/test/dev/cluster-template-custom-builds.yaml b/templates/test/dev/cluster-template-custom-builds.yaml index a412aa50fcf..042c973f8dd 100644 --- a/templates/test/dev/cluster-template-custom-builds.yaml +++ b/templates/test/dev/cluster-template-custom-builds.yaml @@ -115,7 +115,7 @@ spec: az login --identity for BINARY in "$${BINARIES[@]}"; do echo "* installing package: $${BINARY} ${KUBE_GIT_VERSION}" - az storage blob download --blob-url "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${JOB_NAME}/${KUBE_GIT_VERSION}/bin/linux/amd64/$${BINARY}" -f "/usr/bin/$${BINARY}" --auth-mode login + az storage blob download --blob-url "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${KUBE_GIT_VERSION}/bin/linux/amd64/$${BINARY}" -f "/usr/bin/$${BINARY}" --auth-mode login done systemctl restart kubelet @@ -297,7 +297,7 @@ spec: az login --identity for BINARY in 
"$${BINARIES[@]}"; do echo "* installing package: $${BINARY} ${KUBE_GIT_VERSION}" - az storage blob download --blob-url "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${JOB_NAME}/${KUBE_GIT_VERSION}/bin/linux/amd64/$${BINARY}" -f "/usr/bin/$${BINARY}" --auth-mode login + az storage blob download --blob-url "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${KUBE_GIT_VERSION}/bin/linux/amd64/$${BINARY}" -f "/usr/bin/$${BINARY}" --auth-mode login done systemctl restart kubelet @@ -451,7 +451,7 @@ spec: $env:PATH +=";C:\Program Files (x86)\Microsoft SDKs\Azure\CLI2\wbin" # Install Binaries $$binaries=@("kubeadm", "kubectl", "kubelet", "kube-proxy") - $$ci_url="https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${JOB_NAME}/${KUBE_GIT_VERSION}/bin/windows/amd64" + $$ci_url="https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${KUBE_GIT_VERSION}/bin/windows/amd64" echo "running az login via identity" az login --identity foreach ( $$binary in $$binaries ) diff --git a/templates/test/dev/custom-builds-machine-pool/patches/custom-builds.yaml b/templates/test/dev/custom-builds-machine-pool/patches/custom-builds.yaml index 9bd7d8b8e14..dd7b0cc1536 100644 --- a/templates/test/dev/custom-builds-machine-pool/patches/custom-builds.yaml +++ b/templates/test/dev/custom-builds-machine-pool/patches/custom-builds.yaml @@ -20,7 +20,7 @@ spec: declare -a BINARIES=("kubeadm" "kubectl" "kubelet") for BINARY in "$${BINARIES[@]}"; do echo "* installing package: $${BINARY} ${KUBE_GIT_VERSION}" - curl --retry 10 --retry-delay 5 "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${JOB_NAME}/${KUBE_GIT_VERSION}/bin/linux/amd64/$${BINARY}" --output "/usr/bin/$${BINARY}" + curl --retry 10 --retry-delay 5 "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${KUBE_GIT_VERSION}/bin/linux/amd64/$${BINARY}" --output "/usr/bin/$${BINARY}" done systemctl restart kubelet diff --git 
a/templates/test/dev/custom-builds-machine-pool/patches/kubeadm-bootstrap-machine-pool-windows-k8s-pr-binaries.yaml b/templates/test/dev/custom-builds-machine-pool/patches/kubeadm-bootstrap-machine-pool-windows-k8s-pr-binaries.yaml index b10ac07c400..e52570e7103 100644 --- a/templates/test/dev/custom-builds-machine-pool/patches/kubeadm-bootstrap-machine-pool-windows-k8s-pr-binaries.yaml +++ b/templates/test/dev/custom-builds-machine-pool/patches/kubeadm-bootstrap-machine-pool-windows-k8s-pr-binaries.yaml @@ -10,7 +10,7 @@ if($$KUBE_GIT_VERSION -ne "") { $$binaries=@("kubeadm", "kubectl", "kubelet", "kube-proxy") - $$ci_url="https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${JOB_NAME}/${KUBE_GIT_VERSION}/bin/windows/amd64" + $$ci_url="https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${KUBE_GIT_VERSION}/bin/windows/amd64" foreach ( $$binary in $$binaries ) { echo "downloading binary: $$ci_url/$$binary.exe" diff --git a/templates/test/dev/custom-builds/patches/kubeadm-bootstrap-windows-k8s-pr-binaries.yaml b/templates/test/dev/custom-builds/patches/kubeadm-bootstrap-windows-k8s-pr-binaries.yaml index fab1f9e6723..731a5ab3a45 100644 --- a/templates/test/dev/custom-builds/patches/kubeadm-bootstrap-windows-k8s-pr-binaries.yaml +++ b/templates/test/dev/custom-builds/patches/kubeadm-bootstrap-windows-k8s-pr-binaries.yaml @@ -27,7 +27,7 @@ $env:PATH +=";C:\Program Files (x86)\Microsoft SDKs\Azure\CLI2\wbin" # Install Binaries $$binaries=@("kubeadm", "kubectl", "kubelet", "kube-proxy") - $$ci_url="https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${JOB_NAME}/${KUBE_GIT_VERSION}/bin/windows/amd64" + $$ci_url="https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${KUBE_GIT_VERSION}/bin/windows/amd64" echo "running az login via identity" az login --identity foreach ( $$binary in $$binaries ) diff --git a/templates/test/dev/custom-builds/patches/kubeadm-bootstrap.yaml 
b/templates/test/dev/custom-builds/patches/kubeadm-bootstrap.yaml index 423b18a4b9a..5f66b63b3fd 100644 --- a/templates/test/dev/custom-builds/patches/kubeadm-bootstrap.yaml +++ b/templates/test/dev/custom-builds/patches/kubeadm-bootstrap.yaml @@ -13,7 +13,7 @@ az login --identity for BINARY in "$${BINARIES[@]}"; do echo "* installing package: $${BINARY} ${KUBE_GIT_VERSION}" - az storage blob download --blob-url "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${JOB_NAME}/${KUBE_GIT_VERSION}/bin/linux/amd64/$${BINARY}" -f "/usr/bin/$${BINARY}" --auth-mode login + az storage blob download --blob-url "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${KUBE_GIT_VERSION}/bin/linux/amd64/$${BINARY}" -f "/usr/bin/$${BINARY}" --auth-mode login done systemctl restart kubelet diff --git a/templates/test/dev/custom-builds/patches/kubeadm-controlplane-bootstrap.yaml b/templates/test/dev/custom-builds/patches/kubeadm-controlplane-bootstrap.yaml index 16d55e3574d..d876f4446ee 100644 --- a/templates/test/dev/custom-builds/patches/kubeadm-controlplane-bootstrap.yaml +++ b/templates/test/dev/custom-builds/patches/kubeadm-controlplane-bootstrap.yaml @@ -13,7 +13,7 @@ az login --identity for BINARY in "$${BINARIES[@]}"; do echo "* installing package: $${BINARY} ${KUBE_GIT_VERSION}" - az storage blob download --blob-url "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${JOB_NAME}/${KUBE_GIT_VERSION}/bin/linux/amd64/$${BINARY}" -f "/usr/bin/$${BINARY}" --auth-mode login + az storage blob download --blob-url "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${KUBE_GIT_VERSION}/bin/linux/amd64/$${BINARY}" -f "/usr/bin/$${BINARY}" --auth-mode login done systemctl restart kubelet diff --git a/templates/test/dev/patches/control-plane-custom-builds.yaml b/templates/test/dev/patches/control-plane-custom-builds.yaml index e1daef7f875..e7ab9895227 100644 --- 
a/templates/test/dev/patches/control-plane-custom-builds.yaml +++ b/templates/test/dev/patches/control-plane-custom-builds.yaml @@ -28,7 +28,7 @@ spec: declare -a BINARIES=("kubeadm" "kubectl" "kubelet") for BINARY in "$${BINARIES[@]}"; do echo "* installing package: $${BINARY} ${KUBE_GIT_VERSION}" - curl --retry 10 --retry-delay 5 "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${JOB_NAME}/${KUBE_GIT_VERSION}/bin/linux/amd64/$${BINARY}" --output "/usr/bin/$${BINARY}" + curl --retry 10 --retry-delay 5 "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${KUBE_GIT_VERSION}/bin/linux/amd64/$${BINARY}" --output "/usr/bin/$${BINARY}" done systemctl restart kubelet From c13f66bd15e52839b38c579eab899c59397ec1d8 Mon Sep 17 00:00:00 2001 From: Jon Huhn Date: Thu, 2 Nov 2023 22:07:51 -0500 Subject: [PATCH 04/10] Revert "workaround cloud provider chart federated token bug" This reverts commit d6420d060ef9769c0a8931742e9e02f1cb891bb2. --- scripts/ci-entrypoint.sh | 1 - test/e2e/cloud-provider-azure.go | 5 +---- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/scripts/ci-entrypoint.sh b/scripts/ci-entrypoint.sh index 520e875410b..620061c1884 100755 --- a/scripts/ci-entrypoint.sh +++ b/scripts/ci-entrypoint.sh @@ -209,7 +209,6 @@ install_cloud_provider_azure() { --set cloudControllerManager.cloudConfig="${CLOUD_CONFIG}" \ --set cloudControllerManager.cloudConfigSecretName="${CONFIG_SECRET_NAME}" \ --set cloudControllerManager.logVerbosity="${CCM_LOG_VERBOSITY}" \ - --set-string cloudControllerManager.federatedTokenPath= \ --set-string cloudControllerManager.clusterCIDR="${CCM_CLUSTER_CIDR}" "${CCM_IMG_ARGS[@]}" || return 1 } diff --git a/test/e2e/cloud-provider-azure.go b/test/e2e/cloud-provider-azure.go index 834b38a666d..eee1ddc4b85 100644 --- a/test/e2e/cloud-provider-azure.go +++ b/test/e2e/cloud-provider-azure.go @@ -48,10 +48,7 @@ func InstallCNIAndCloudProviderAzureHelmChart(ctx context.Context, input cluster 
fmt.Sprintf("infra.clusterName=%s", input.ClusterName), "cloudControllerManager.logVerbosity=4", }, - StringValues: []string{ - fmt.Sprintf("cloudControllerManager.clusterCIDR=%s", strings.Join(cidrBlocks, `\,`)), - "cloudControllerManager.federatedTokenPath=", - }, + StringValues: []string{fmt.Sprintf("cloudControllerManager.clusterCIDR=%s", strings.Join(cidrBlocks, `\,`))}, } // If testing a CI version of Kubernetes, use CCM and CNM images built from source. if useCIArtifacts || usePRArtifacts { From cbad9563cc3d065e2ef42194adca7242a4b7ab24 Mon Sep 17 00:00:00 2001 From: Sujay Date: Sun, 5 Nov 2023 00:09:03 +0530 Subject: [PATCH 05/10] const for AzureCluster Signed-off-by: Sujay --- api/v1beta1/azurecluster_validation.go | 2 +- api/v1beta1/azurecluster_webhook.go | 3 ++- api/v1beta1/azuremachine_default_test.go | 2 +- api/v1beta1/azuremachine_webhook_test.go | 2 +- api/v1beta1/consts.go | 13 +++++++++++++ azure/scope/cluster.go | 2 +- controllers/asosecret_controller.go | 4 ++-- controllers/azurecluster_controller.go | 4 ++-- controllers/azureidentity_controller.go | 4 ++-- controllers/azurejson_machine_controller.go | 2 +- controllers/azurejson_machine_controller_test.go | 2 +- controllers/azurejson_machinepool_controller.go | 2 +- .../azurejson_machinepool_controller_test.go | 2 +- controllers/azurejson_machinetemplate_controller.go | 2 +- .../azurejson_machinetemplate_controller_test.go | 2 +- controllers/azuremachine_controller_test.go | 2 +- exp/api/v1beta1/azuremachinepool_webhook_test.go | 2 +- util/tele/span_logger.go | 2 +- 18 files changed, 34 insertions(+), 20 deletions(-) diff --git a/api/v1beta1/azurecluster_validation.go b/api/v1beta1/azurecluster_validation.go index e70ca0ff95f..c92bab794a3 100644 --- a/api/v1beta1/azurecluster_validation.go +++ b/api/v1beta1/azurecluster_validation.go @@ -79,7 +79,7 @@ func (c *AzureCluster) validateCluster(old *AzureCluster) (admission.Warnings, e } return nil, apierrors.NewInvalid( - schema.GroupKind{Group: 
"infrastructure.cluster.x-k8s.io", Kind: "AzureCluster"}, + schema.GroupKind{Group: "infrastructure.cluster.x-k8s.io", Kind: AzureClusterKind}, c.Name, allErrs) } diff --git a/api/v1beta1/azurecluster_webhook.go b/api/v1beta1/azurecluster_webhook.go index 8c25f4bf371..f4b102362b4 100644 --- a/api/v1beta1/azurecluster_webhook.go +++ b/api/v1beta1/azurecluster_webhook.go @@ -136,7 +136,8 @@ func (c *AzureCluster) ValidateUpdate(oldRaw runtime.Object) (admission.Warnings return c.validateCluster(old) } - return nil, apierrors.NewInvalid(GroupVersion.WithKind("AzureCluster").GroupKind(), c.Name, allErrs) + // replace with constant + return nil, apierrors.NewInvalid(GroupVersion.WithKind(AzureClusterKind).GroupKind(), c.Name, allErrs) } // validateSubnetUpdate validates a ClusterSpec.NetworkSpec.Subnets for immutability. diff --git a/api/v1beta1/azuremachine_default_test.go b/api/v1beta1/azuremachine_default_test.go index 7db6d2c5ba1..37f60be2864 100644 --- a/api/v1beta1/azuremachine_default_test.go +++ b/api/v1beta1/azuremachine_default_test.go @@ -567,7 +567,7 @@ func (m mockClient) Get(ctx context.Context, key client.ObjectKey, obj client.Ob case *clusterv1.Cluster: obj.Spec = clusterv1.ClusterSpec{ InfrastructureRef: &corev1.ObjectReference{ - Kind: "AzureCluster", + Kind: AzureClusterKind, Name: "test-cluster", Namespace: "default", }, diff --git a/api/v1beta1/azuremachine_webhook_test.go b/api/v1beta1/azuremachine_webhook_test.go index 4f02f370de4..1f17e3bd355 100644 --- a/api/v1beta1/azuremachine_webhook_test.go +++ b/api/v1beta1/azuremachine_webhook_test.go @@ -832,7 +832,7 @@ func (m mockDefaultClient) Get(ctx context.Context, key client.ObjectKey, obj cl obj.Spec.SubscriptionID = m.SubscriptionID case *clusterv1.Cluster: obj.Spec.InfrastructureRef = &corev1.ObjectReference{ - Kind: "AzureCluster", + Kind: AzureClusterKind, Name: "test-cluster", } default: diff --git a/api/v1beta1/consts.go b/api/v1beta1/consts.go index 419cd3d8c63..63ad064a51b 100644 --- 
a/api/v1beta1/consts.go +++ b/api/v1beta1/consts.go @@ -166,3 +166,16 @@ const ( // value for the label is the CAPI Cluster Name. OwnedByClusterLabelKey = NameAzureProviderPrefix + string(ResourceLifecycleOwned) ) + +const ( + // AzureClusterKind is kind value for the AzureCluster kind name. + AzureClusterKind = "AzureCluster" + // AzureMachinePoolKind is kind value for the AzureMachinePool kind name. + AzureMachinePoolKind = "AzureMachinePool" + // AzureManagedClusterKind is kind value for the AzureManagedCluster kind name. + AzureManagedClusterKind = "AzureManagedCluster" + // AzureManagedControlPlaneKind is kind value for the AzureManagedControlPlane kind name. + AzureManagedControlPlaneKind = "AzureManagedControlPlane" + // AzureClusterIdentityKind is kind value for the AzureClusterIdentity kind name. + AzureClusterIdentityKind = "AzureClusterIdentity" +) diff --git a/azure/scope/cluster.go b/azure/scope/cluster.go index e6602d6fd2e..84c501b3293 100644 --- a/azure/scope/cluster.go +++ b/azure/scope/cluster.go @@ -425,7 +425,7 @@ func (s *ClusterScope) GroupSpecs() []azure.ASOResourceSpecGetter[*asoresourcesv Location: s.Location(), ClusterName: s.ClusterName(), AdditionalTags: s.AdditionalTags(), - Owner: *metav1.NewControllerRef(s.AzureCluster, infrav1.GroupVersion.WithKind("AzureCluster")), + Owner: *metav1.NewControllerRef(s.AzureCluster, infrav1.GroupVersion.WithKind(infrav1.AzureClusterKind)), }, } } diff --git a/controllers/asosecret_controller.go b/controllers/asosecret_controller.go index e0db61de581..2765633bfa7 100644 --- a/controllers/asosecret_controller.go +++ b/controllers/asosecret_controller.go @@ -94,7 +94,7 @@ func (asos *ASOSecretReconciler) SetupWithManager(ctx context.Context, mgr ctrl. // Add a watch on clusterv1.Cluster object for unpause notifications. 
if err = c.Watch( source.Kind(mgr.GetCache(), &clusterv1.Cluster{}), - handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(ctx, infrav1.GroupVersion.WithKind("AzureCluster"), mgr.GetClient(), &infrav1.AzureCluster{})), + handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(ctx, infrav1.GroupVersion.WithKind(infrav1.AzureClusterKind), mgr.GetClient(), &infrav1.AzureCluster{})), predicates.ClusterUnpaused(log), predicates.ResourceNotPausedAndHasFilterLabel(log, asos.WatchFilterValue), ); err != nil { @@ -112,7 +112,7 @@ func (asos *ASOSecretReconciler) Reconcile(ctx context.Context, req ctrl.Request ctx, log, done := tele.StartSpanWithLogger(ctx, "controllers.ASOSecret.Reconcile", tele.KVP("namespace", req.Namespace), tele.KVP("name", req.Name), - tele.KVP("kind", "AzureCluster"), + tele.KVP("kind", infrav1.AzureClusterKind), ) defer done() diff --git a/controllers/azurecluster_controller.go b/controllers/azurecluster_controller.go index a4c894a6127..02fb3e39f36 100644 --- a/controllers/azurecluster_controller.go +++ b/controllers/azurecluster_controller.go @@ -95,7 +95,7 @@ func (acr *AzureClusterReconciler) SetupWithManager(ctx context.Context, mgr ctr // Add a watch on clusterv1.Cluster object for pause/unpause notifications. 
if err = c.Watch( source.Kind(mgr.GetCache(), &clusterv1.Cluster{}), - handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(ctx, infrav1.GroupVersion.WithKind("AzureCluster"), mgr.GetClient(), &infrav1.AzureCluster{})), + handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(ctx, infrav1.GroupVersion.WithKind(infrav1.AzureClusterKind), mgr.GetClient(), &infrav1.AzureCluster{})), ClusterUpdatePauseChange(log), predicates.ResourceHasFilterLabel(log, acr.WatchFilterValue), ); err != nil { @@ -126,7 +126,7 @@ func (acr *AzureClusterReconciler) Reconcile(ctx context.Context, req ctrl.Reque "controllers.AzureClusterReconciler.Reconcile", tele.KVP("namespace", req.Namespace), tele.KVP("name", req.Name), - tele.KVP("kind", "AzureCluster"), + tele.KVP("kind", infrav1.AzureClusterKind), ) defer done() diff --git a/controllers/azureidentity_controller.go b/controllers/azureidentity_controller.go index b813c5ed22f..b1175b820ab 100644 --- a/controllers/azureidentity_controller.go +++ b/controllers/azureidentity_controller.go @@ -84,7 +84,7 @@ func (r *AzureIdentityReconciler) SetupWithManager(ctx context.Context, mgr ctrl // Add a watch on clusterv1.Cluster object for unpause notifications. 
if err = c.Watch( source.Kind(mgr.GetCache(), &clusterv1.Cluster{}), - handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(ctx, infrav1.GroupVersion.WithKind("AzureCluster"), mgr.GetClient(), &infrav1.AzureCluster{})), + handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(ctx, infrav1.GroupVersion.WithKind(infrav1.AzureClusterKind), mgr.GetClient(), &infrav1.AzureCluster{})), predicates.ClusterUnpaused(log), predicates.ResourceNotPausedAndHasFilterLabel(log, r.WatchFilterValue), ); err != nil { @@ -107,7 +107,7 @@ func (r *AzureIdentityReconciler) Reconcile(ctx context.Context, req ctrl.Reques ctx, log, done := tele.StartSpanWithLogger(ctx, "controllers.AzureIdentityReconciler.Reconcile", tele.KVP("namespace", req.Namespace), tele.KVP("name", req.Name), - tele.KVP("kind", "AzureCluster"), + tele.KVP("kind", infrav1.AzureClusterKind), ) defer done() diff --git a/controllers/azurejson_machine_controller.go b/controllers/azurejson_machine_controller.go index 77c7b2cd1cd..bbe67fc4a30 100644 --- a/controllers/azurejson_machine_controller.go +++ b/controllers/azurejson_machine_controller.go @@ -169,7 +169,7 @@ func (r *AzureJSONMachineReconciler) Reconcile(ctx context.Context, req ctrl.Req return ctrl.Result{}, nil } - _, kind := infrav1.GroupVersion.WithKind("AzureCluster").ToAPIVersionAndKind() + _, kind := infrav1.GroupVersion.WithKind(infrav1.AzureClusterKind).ToAPIVersionAndKind() // only look at azure clusters if cluster.Spec.InfrastructureRef == nil { diff --git a/controllers/azurejson_machine_controller_test.go b/controllers/azurejson_machine_controller_test.go index 27754196f05..e6920b84ce3 100644 --- a/controllers/azurejson_machine_controller_test.go +++ b/controllers/azurejson_machine_controller_test.go @@ -99,7 +99,7 @@ func TestAzureJSONMachineReconciler(t *testing.T) { Spec: clusterv1.ClusterSpec{ InfrastructureRef: &corev1.ObjectReference{ APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", - Kind: "AzureCluster", + 
Kind: infrav1.AzureClusterKind, Name: "my-azure-cluster", }, }, diff --git a/controllers/azurejson_machinepool_controller.go b/controllers/azurejson_machinepool_controller.go index 7019785e405..166b53a2238 100644 --- a/controllers/azurejson_machinepool_controller.go +++ b/controllers/azurejson_machinepool_controller.go @@ -138,7 +138,7 @@ func (r *AzureJSONMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl log = log.WithValues("cluster", cluster.Name) - _, kind := infrav1.GroupVersion.WithKind("AzureCluster").ToAPIVersionAndKind() + _, kind := infrav1.GroupVersion.WithKind(infrav1.AzureClusterKind).ToAPIVersionAndKind() // only look at azure clusters if cluster.Spec.InfrastructureRef == nil { diff --git a/controllers/azurejson_machinepool_controller_test.go b/controllers/azurejson_machinepool_controller_test.go index 59dcbd71479..f9a3a16bab1 100644 --- a/controllers/azurejson_machinepool_controller_test.go +++ b/controllers/azurejson_machinepool_controller_test.go @@ -49,7 +49,7 @@ func TestAzureJSONPoolReconciler(t *testing.T) { Spec: clusterv1.ClusterSpec{ InfrastructureRef: &corev1.ObjectReference{ APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", - Kind: "AzureCluster", + Kind: infrav1.AzureClusterKind, Name: "my-azure-cluster", }, }, diff --git a/controllers/azurejson_machinetemplate_controller.go b/controllers/azurejson_machinetemplate_controller.go index c9f3675000d..c7a408d5e59 100644 --- a/controllers/azurejson_machinetemplate_controller.go +++ b/controllers/azurejson_machinetemplate_controller.go @@ -136,7 +136,7 @@ func (r *AzureJSONTemplateReconciler) Reconcile(ctx context.Context, req ctrl.Re log.Info("infra ref is nil") return ctrl.Result{}, nil } - if cluster.Spec.InfrastructureRef.Kind != "AzureCluster" { + if cluster.Spec.InfrastructureRef.Kind != infrav1.AzureClusterKind { log.WithValues("kind", cluster.Spec.InfrastructureRef.Kind).Info("infra ref was not an AzureCluster") return ctrl.Result{}, nil } diff --git 
a/controllers/azurejson_machinetemplate_controller_test.go b/controllers/azurejson_machinetemplate_controller_test.go index 80af7a44387..ea53e7f6b46 100644 --- a/controllers/azurejson_machinetemplate_controller_test.go +++ b/controllers/azurejson_machinetemplate_controller_test.go @@ -47,7 +47,7 @@ func TestAzureJSONTemplateReconciler(t *testing.T) { Spec: clusterv1.ClusterSpec{ InfrastructureRef: &corev1.ObjectReference{ APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", - Kind: "AzureCluster", + Kind: infrav1.AzureClusterKind, Name: "my-azure-cluster", }, }, diff --git a/controllers/azuremachine_controller_test.go b/controllers/azuremachine_controller_test.go index dc9f07d4900..65aa4355ec4 100644 --- a/controllers/azuremachine_controller_test.go +++ b/controllers/azuremachine_controller_test.go @@ -517,7 +517,7 @@ func getFakeCluster(changes ...func(*clusterv1.Cluster)) *clusterv1.Cluster { Spec: clusterv1.ClusterSpec{ InfrastructureRef: &corev1.ObjectReference{ APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", - Kind: "AzureCluster", + Kind: infrav1.AzureClusterKind, Name: "my-azure-cluster", }, }, diff --git a/exp/api/v1beta1/azuremachinepool_webhook_test.go b/exp/api/v1beta1/azuremachinepool_webhook_test.go index e15c75ba7b4..afbd8b1e5a6 100644 --- a/exp/api/v1beta1/azuremachinepool_webhook_test.go +++ b/exp/api/v1beta1/azuremachinepool_webhook_test.go @@ -271,7 +271,7 @@ func (m mockDefaultClient) Get(ctx context.Context, key client.ObjectKey, obj cl obj.Spec.SubscriptionID = m.SubscriptionID case *clusterv1.Cluster: obj.Spec.InfrastructureRef = &corev1.ObjectReference{ - Kind: "AzureCluster", + Kind: infrav1.AzureClusterKind, Name: "test-cluster", } default: diff --git a/util/tele/span_logger.go b/util/tele/span_logger.go index be84793ae23..b65af5abffc 100644 --- a/util/tele/span_logger.go +++ b/util/tele/span_logger.go @@ -128,7 +128,7 @@ func (c Config) teleKeyValues() []attribute.KeyValue { // built-in functions (like KVP) that create them. 
type Option func(*Config) -// KVP returns a new Option function that adds a the given +// KVP returns a new Option function that adds the given // key-value pair. func KVP(key, value string) Option { return func(cfg *Config) { From d4f0d65e4aae170f439bb7662ddaafa20a31163e Mon Sep 17 00:00:00 2001 From: Sujay Date: Sun, 5 Nov 2023 00:15:56 +0530 Subject: [PATCH 06/10] const for AzureMachinePoolKind Signed-off-by: Sujay --- azure/scope/machinepool.go | 2 +- azure/scope/machinepool_test.go | 2 +- controllers/azurejson_machinepool_controller.go | 4 ++-- controllers/helpers.go | 2 +- controllers/helpers_test.go | 2 +- exp/controllers/azuremachinepool_controller.go | 4 ++-- exp/controllers/azuremachinepool_controller_unit_test.go | 2 +- exp/controllers/azuremachinepoolmachine_controller_test.go | 2 +- exp/controllers/helpers_test.go | 4 ++-- 9 files changed, 12 insertions(+), 12 deletions(-) diff --git a/azure/scope/machinepool.go b/azure/scope/machinepool.go index 0a2a0929f16..1d29be38701 100644 --- a/azure/scope/machinepool.go +++ b/azure/scope/machinepool.go @@ -471,7 +471,7 @@ func (m *MachinePoolScope) createMachine(ctx context.Context, machine azure.VMSS OwnerReferences: []metav1.OwnerReference{ { APIVersion: infrav1exp.GroupVersion.String(), - Kind: "AzureMachinePool", + Kind: infrav1.AzureMachinePoolKind, Name: m.AzureMachinePool.Name, BlockOwnerDeletion: ptr.To(true), UID: m.AzureMachinePool.UID, diff --git a/azure/scope/machinepool_test.go b/azure/scope/machinepool_test.go index 86e33696350..6e6a44eaad2 100644 --- a/azure/scope/machinepool_test.go +++ b/azure/scope/machinepool_test.go @@ -1203,7 +1203,7 @@ func getReadyAzureMachinePoolMachines(count int32) []infrav1exp.AzureMachinePool OwnerReferences: []metav1.OwnerReference{ { Name: "amp", - Kind: "AzureMachinePool", + Kind: infrav1.AzureMachinePoolKind, APIVersion: infrav1exp.GroupVersion.String(), }, }, diff --git a/controllers/azurejson_machinepool_controller.go 
b/controllers/azurejson_machinepool_controller.go index 166b53a2238..9ab962a34b0 100644 --- a/controllers/azurejson_machinepool_controller.go +++ b/controllers/azurejson_machinepool_controller.go @@ -100,7 +100,7 @@ func (r *AzureJSONMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl "controllers.AzureJSONMachinePoolReconciler.Reconcile", tele.KVP("namespace", req.Namespace), tele.KVP("name", req.Name), - tele.KVP("kind", "AzureMachinePool"), + tele.KVP("kind", infrav1.AzureMachinePoolKind), ) defer done() @@ -186,7 +186,7 @@ func (r *AzureJSONMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl } } - apiVersion, kind := infrav1.GroupVersion.WithKind("AzureMachinePool").ToAPIVersionAndKind() + apiVersion, kind := infrav1.GroupVersion.WithKind(infrav1.AzureMachinePoolKind).ToAPIVersionAndKind() owner := metav1.OwnerReference{ APIVersion: apiVersion, Kind: kind, diff --git a/controllers/helpers.go b/controllers/helpers.go index f0399a84091..03738263c8d 100644 --- a/controllers/helpers.go +++ b/controllers/helpers.go @@ -555,7 +555,7 @@ func GetOwnerAzureMachinePool(ctx context.Context, c client.Client, obj metav1.O defer done() for _, ref := range obj.OwnerReferences { - if ref.Kind != "AzureMachinePool" { + if ref.Kind != infrav1.AzureMachinePoolKind { continue } diff --git a/controllers/helpers_test.go b/controllers/helpers_test.go index 0f238940c34..7280b3487b2 100644 --- a/controllers/helpers_test.go +++ b/controllers/helpers_test.go @@ -221,7 +221,7 @@ func TestReconcileAzureSecret(t *testing.T) { ownerName: "azureMachineName", }, "azuremachinepool should reconcile secret successfully": { - kind: "AzureMachinePool", + kind: infrav1.AzureMachinePoolKind, apiVersion: "infrastructure.cluster.x-k8s.io/v1beta1", ownerName: "azureMachinePoolName", }, diff --git a/exp/controllers/azuremachinepool_controller.go b/exp/controllers/azuremachinepool_controller.go index e565fb1c305..c152cfa88ee 100644 --- a/exp/controllers/azuremachinepool_controller.go 
+++ b/exp/controllers/azuremachinepool_controller.go @@ -110,7 +110,7 @@ func (ampr *AzureMachinePoolReconciler) SetupWithManager(ctx context.Context, mg // watch for changes in CAPI MachinePool resources Watches( &expv1.MachinePool{}, - handler.EnqueueRequestsFromMapFunc(MachinePoolToInfrastructureMapFunc(infrav1exp.GroupVersion.WithKind("AzureMachinePool"), log)), + handler.EnqueueRequestsFromMapFunc(MachinePoolToInfrastructureMapFunc(infrav1exp.GroupVersion.WithKind(infrav1.AzureMachinePoolKind), log)), ). // watch for changes in AzureCluster resources Watches( @@ -172,7 +172,7 @@ func (ampr *AzureMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl. "controllers.AzureMachinePoolReconciler.Reconcile", tele.KVP("namespace", req.Namespace), tele.KVP("name", req.Name), - tele.KVP("kind", "AzureMachinePool"), + tele.KVP("kind", infrav1.AzureMachinePoolKind), ) defer done() ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultedLoopTimeout(ampr.ReconcileTimeout)) diff --git a/exp/controllers/azuremachinepool_controller_unit_test.go b/exp/controllers/azuremachinepool_controller_unit_test.go index 222d90725c6..8dc410e5b9b 100644 --- a/exp/controllers/azuremachinepool_controller_unit_test.go +++ b/exp/controllers/azuremachinepool_controller_unit_test.go @@ -117,7 +117,7 @@ func newAzureMachinePool(clusterName, poolName string) *infrav1exp.AzureMachineP func newMachinePoolWithInfrastructureRef(clusterName, poolName string) *expv1.MachinePool { m := newMachinePool(clusterName, poolName) m.Spec.Template.Spec.InfrastructureRef = corev1.ObjectReference{ - Kind: "AzureMachinePool", + Kind: infrav1.AzureMachinePoolKind, Namespace: m.Namespace, Name: "azure" + poolName, APIVersion: infrav1exp.GroupVersion.String(), diff --git a/exp/controllers/azuremachinepoolmachine_controller_test.go b/exp/controllers/azuremachinepoolmachine_controller_test.go index 1bd1dabca30..4c4eef2f562 100644 --- a/exp/controllers/azuremachinepoolmachine_controller_test.go +++ 
b/exp/controllers/azuremachinepoolmachine_controller_test.go @@ -178,7 +178,7 @@ func getAReadyMachinePoolMachineCluster() (*clusterv1.Cluster, *infrav1.AzureClu OwnerReferences: []metav1.OwnerReference{ { Name: amp.Name, - Kind: "AzureMachinePool", + Kind: infrav1.AzureMachinePoolKind, APIVersion: infrav1exp.GroupVersion.String(), }, }, diff --git a/exp/controllers/helpers_test.go b/exp/controllers/helpers_test.go index 5cf50dbf7c9..e448dd0e076 100644 --- a/exp/controllers/helpers_test.go +++ b/exp/controllers/helpers_test.go @@ -107,7 +107,7 @@ func Test_MachinePoolToInfrastructureMapFunc(t *testing.T) { return newMachinePool("azureCluster", "machinePool") }, Setup: func(logMock *mock_log.MockLogSink) { - ampGK := infrav1exp.GroupVersion.WithKind("AzureMachinePool").GroupKind() + ampGK := infrav1exp.GroupVersion.WithKind(infrav1.AzureMachinePoolKind).GroupKind() logMock.EXPECT().Init(logr.RuntimeInfo{CallDepth: 1}) logMock.EXPECT().Enabled(4).Return(true) logMock.EXPECT().Info(4, "gk does not match", "gk", ampGK, "infraGK", gomock.Any()) @@ -144,7 +144,7 @@ func Test_MachinePoolToInfrastructureMapFunc(t *testing.T) { if c.Setup != nil { c.Setup(sink) } - f := MachinePoolToInfrastructureMapFunc(infrav1exp.GroupVersion.WithKind("AzureMachinePool"), logr.New(sink)) + f := MachinePoolToInfrastructureMapFunc(infrav1exp.GroupVersion.WithKind(infrav1.AzureMachinePoolKind), logr.New(sink)) reqs := f(context.TODO(), c.MapObjectFactory(g)) c.Expect(g, reqs) }) From e743f3087bd0483733e1a7d289e30c56ffd5701f Mon Sep 17 00:00:00 2001 From: Sujay Date: Sun, 5 Nov 2023 00:19:48 +0530 Subject: [PATCH 07/10] const for AzureManagedCluster Signed-off-by: Sujay --- api/v1beta1/azuremanagedcluster_webhook.go | 2 +- controllers/azuremanagedcluster_controller.go | 4 ++-- controllers/azuremanagedmachinepool_controller_test.go | 2 +- controllers/helpers_test.go | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/api/v1beta1/azuremanagedcluster_webhook.go 
b/api/v1beta1/azuremanagedcluster_webhook.go index bad6c24d4cd..0f360200254 100644 --- a/api/v1beta1/azuremanagedcluster_webhook.go +++ b/api/v1beta1/azuremanagedcluster_webhook.go @@ -72,7 +72,7 @@ func (r *AzureManagedCluster) ValidateUpdate(oldRaw runtime.Object) (admission.W } if len(allErrs) != 0 { - return nil, apierrors.NewInvalid(GroupVersion.WithKind("AzureManagedCluster").GroupKind(), r.Name, allErrs) + return nil, apierrors.NewInvalid(GroupVersion.WithKind(AzureManagedClusterKind).GroupKind(), r.Name, allErrs) } return nil, nil diff --git a/controllers/azuremanagedcluster_controller.go b/controllers/azuremanagedcluster_controller.go index f3c2a5c304a..25cd3a51bc9 100644 --- a/controllers/azuremanagedcluster_controller.go +++ b/controllers/azuremanagedcluster_controller.go @@ -85,7 +85,7 @@ func (amcr *AzureManagedClusterReconciler) SetupWithManager(ctx context.Context, // Add a watch on clusterv1.Cluster object for unpause notifications. if err = c.Watch( source.Kind(mgr.GetCache(), &clusterv1.Cluster{}), - handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(ctx, infrav1.GroupVersion.WithKind("AzureManagedCluster"), mgr.GetClient(), &infrav1.AzureManagedCluster{})), + handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(ctx, infrav1.GroupVersion.WithKind(infrav1.AzureManagedClusterKind), mgr.GetClient(), &infrav1.AzureManagedCluster{})), predicates.ClusterUnpaused(log), predicates.ResourceNotPausedAndHasFilterLabel(log, amcr.WatchFilterValue), ); err != nil { @@ -109,7 +109,7 @@ func (amcr *AzureManagedClusterReconciler) Reconcile(ctx context.Context, req ct "controllers.AzureManagedClusterReconciler.Reconcile", tele.KVP("namespace", req.Namespace), tele.KVP("name", req.Name), - tele.KVP("kind", "AzureManagedCluster"), + tele.KVP("kind", infrav1.AzureManagedClusterKind), ) defer done() diff --git a/controllers/azuremanagedmachinepool_controller_test.go b/controllers/azuremanagedmachinepool_controller_test.go index 
7ea250db317..81f60907d6f 100644 --- a/controllers/azuremanagedmachinepool_controller_test.go +++ b/controllers/azuremanagedmachinepool_controller_test.go @@ -247,7 +247,7 @@ func newReadyAzureManagedMachinePoolCluster() (*clusterv1.Cluster, *infrav1.Azur }, InfrastructureRef: &corev1.ObjectReference{ APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", - Kind: "AzureManagedCluster", + Kind: infrav1.AzureManagedClusterKind, Name: azManagedCluster.Name, Namespace: azManagedCluster.Namespace, }, diff --git a/controllers/helpers_test.go b/controllers/helpers_test.go index 7280b3487b2..1a77acf1b89 100644 --- a/controllers/helpers_test.go +++ b/controllers/helpers_test.go @@ -1119,7 +1119,7 @@ func TestAzureManagedControlPlaneToAzureManagedClusterMapper(t *testing.T) { } cluster.Spec.InfrastructureRef = &corev1.ObjectReference{ APIVersion: infrav1.GroupVersion.String(), - Kind: "AzureManagedCluster", + Kind: infrav1.AzureManagedClusterKind, Name: azManagedCluster.Name, Namespace: azManagedCluster.Namespace, } From f2fa7b9a5ac90488283861ec07feac95d182acb4 Mon Sep 17 00:00:00 2001 From: Sujay Date: Sun, 5 Nov 2023 00:25:50 +0530 Subject: [PATCH 08/10] const for AzureManagedControlPlane Signed-off-by: Sujay --- api/v1beta1/azuremanagedcontrolplane_webhook.go | 2 +- azure/scope/managedcontrolplane.go | 4 ++-- controllers/azuremanagedcontrolplane_controller.go | 6 +++--- .../azuremanagedcontrolplane_controller_test.go | 2 +- .../azuremanagedmachinepool_controller_test.go | 2 +- controllers/helpers_test.go | 14 +++++++------- 6 files changed, 15 insertions(+), 15 deletions(-) diff --git a/api/v1beta1/azuremanagedcontrolplane_webhook.go b/api/v1beta1/azuremanagedcontrolplane_webhook.go index ae145b4b0c0..e59b9775e3a 100644 --- a/api/v1beta1/azuremanagedcontrolplane_webhook.go +++ b/api/v1beta1/azuremanagedcontrolplane_webhook.go @@ -268,7 +268,7 @@ func (mw *azureManagedControlPlaneWebhook) ValidateUpdate(ctx context.Context, o return nil, m.Validate(mw.Client) } - return 
nil, apierrors.NewInvalid(GroupVersion.WithKind("AzureManagedControlPlane").GroupKind(), m.Name, allErrs) + return nil, apierrors.NewInvalid(GroupVersion.WithKind(AzureManagedControlPlaneKind).GroupKind(), m.Name, allErrs) } // ValidateDelete implements webhook.Validator so a webhook will be registered for the type. diff --git a/azure/scope/managedcontrolplane.go b/azure/scope/managedcontrolplane.go index c52a4c5c9e9..38e858151ea 100644 --- a/azure/scope/managedcontrolplane.go +++ b/azure/scope/managedcontrolplane.go @@ -256,7 +256,7 @@ func (s *ManagedControlPlaneScope) GroupSpecs() []azure.ASOResourceSpecGetter[*a Location: s.Location(), ClusterName: s.ClusterName(), AdditionalTags: s.AdditionalTags(), - Owner: *metav1.NewControllerRef(s.ControlPlane, infrav1.GroupVersion.WithKind("AzureManagedControlPlane")), + Owner: *metav1.NewControllerRef(s.ControlPlane, infrav1.GroupVersion.WithKind(infrav1.AzureManagedControlPlaneKind)), }, } } @@ -655,7 +655,7 @@ func (s *ManagedControlPlaneScope) MakeEmptyKubeConfigSecret() corev1.Secret { Name: secret.Name(s.Cluster.Name, secret.Kubeconfig), Namespace: s.Cluster.Namespace, OwnerReferences: []metav1.OwnerReference{ - *metav1.NewControllerRef(s.ControlPlane, infrav1.GroupVersion.WithKind("AzureManagedControlPlane")), + *metav1.NewControllerRef(s.ControlPlane, infrav1.GroupVersion.WithKind(infrav1.AzureManagedControlPlaneKind)), }, Labels: map[string]string{clusterv1.ClusterNameLabel: s.Cluster.Name}, }, diff --git a/controllers/azuremanagedcontrolplane_controller.go b/controllers/azuremanagedcontrolplane_controller.go index a161fa62d52..dd50ad8da7d 100644 --- a/controllers/azuremanagedcontrolplane_controller.go +++ b/controllers/azuremanagedcontrolplane_controller.go @@ -74,7 +74,7 @@ func (amcpr *AzureManagedControlPlaneReconciler) SetupWithManager(ctx context.Co } // map requests for machine pools corresponding to AzureManagedControlPlane's defaultPool back to the corresponding AzureManagedControlPlane. 
- azureManagedMachinePoolMapper := MachinePoolToAzureManagedControlPlaneMapFunc(ctx, amcpr.Client, infrav1.GroupVersion.WithKind("AzureManagedControlPlane"), log) + azureManagedMachinePoolMapper := MachinePoolToAzureManagedControlPlaneMapFunc(ctx, amcpr.Client, infrav1.GroupVersion.WithKind(infrav1.AzureManagedControlPlaneKind), log) c, err := ctrl.NewControllerManagedBy(mgr). WithOptions(options.Options). @@ -122,7 +122,7 @@ func (amcpr *AzureManagedControlPlaneReconciler) Reconcile(ctx context.Context, ctx, log, done := tele.StartSpanWithLogger(ctx, "controllers.AzureManagedControlPlaneReconciler.Reconcile", tele.KVP("namespace", req.Namespace), tele.KVP("name", req.Name), - tele.KVP("kind", "AzureManagedControlPlane"), + tele.KVP("kind", infrav1.AzureManagedControlPlaneKind), ) defer done() @@ -333,7 +333,7 @@ func (amcpr *AzureManagedControlPlaneReconciler) ClusterToAzureManagedControlPla } controlPlaneRef := c.Spec.ControlPlaneRef - if controlPlaneRef != nil && controlPlaneRef.Kind == "AzureManagedControlPlane" { + if controlPlaneRef != nil && controlPlaneRef.Kind == infrav1.AzureManagedControlPlaneKind { return []ctrl.Request{{NamespacedName: client.ObjectKey{Namespace: controlPlaneRef.Namespace, Name: controlPlaneRef.Name}}} } diff --git a/controllers/azuremanagedcontrolplane_controller_test.go b/controllers/azuremanagedcontrolplane_controller_test.go index 783fa3b17c8..7eac049831b 100644 --- a/controllers/azuremanagedcontrolplane_controller_test.go +++ b/controllers/azuremanagedcontrolplane_controller_test.go @@ -57,7 +57,7 @@ func TestClusterToAzureManagedControlPlane(t *testing.T) { { name: "ok", controlPlaneRef: &corev1.ObjectReference{ - Kind: "AzureManagedControlPlane", + Kind: infrav1.AzureManagedControlPlaneKind, Name: "name", Namespace: "namespace", }, diff --git a/controllers/azuremanagedmachinepool_controller_test.go b/controllers/azuremanagedmachinepool_controller_test.go index 81f60907d6f..ac0133010af 100644 --- 
a/controllers/azuremanagedmachinepool_controller_test.go +++ b/controllers/azuremanagedmachinepool_controller_test.go @@ -241,7 +241,7 @@ func newReadyAzureManagedMachinePoolCluster() (*clusterv1.Cluster, *infrav1.Azur Spec: clusterv1.ClusterSpec{ ControlPlaneRef: &corev1.ObjectReference{ APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", - Kind: "AzureManagedControlPlane", + Kind: infrav1.AzureManagedControlPlaneKind, Name: azManagedControlPlane.Name, Namespace: azManagedControlPlane.Namespace, }, diff --git a/controllers/helpers_test.go b/controllers/helpers_test.go index 1a77acf1b89..bbd377756ea 100644 --- a/controllers/helpers_test.go +++ b/controllers/helpers_test.go @@ -883,7 +883,7 @@ func TestAzureManagedControlPlaneToAzureManagedMachinePoolsMapper(t *testing.T) cluster := newCluster("my-cluster") cluster.Spec.ControlPlaneRef = &corev1.ObjectReference{ APIVersion: infrav1.GroupVersion.String(), - Kind: "AzureManagedControlPlane", + Kind: infrav1.AzureManagedControlPlaneKind, Name: cpName, Namespace: cluster.Namespace, } @@ -949,7 +949,7 @@ func TestMachinePoolToAzureManagedControlPlaneMapFuncSuccess(t *testing.T) { controlPlane := newAzureManagedControlPlane(cpName) cluster.Spec.ControlPlaneRef = &corev1.ObjectReference{ APIVersion: infrav1.GroupVersion.String(), - Kind: "AzureManagedControlPlane", + Kind: infrav1.AzureManagedControlPlaneKind, Name: cpName, Namespace: cluster.Namespace, } @@ -975,7 +975,7 @@ func TestMachinePoolToAzureManagedControlPlaneMapFuncSuccess(t *testing.T) { sink := mock_log.NewMockLogSink(gomock.NewController(t)) sink.EXPECT().Init(logr.RuntimeInfo{CallDepth: 1}) - mapper := MachinePoolToAzureManagedControlPlaneMapFunc(context.Background(), fakeClient, infrav1.GroupVersion.WithKind("AzureManagedControlPlane"), logr.New(sink)) + mapper := MachinePoolToAzureManagedControlPlaneMapFunc(context.Background(), fakeClient, infrav1.GroupVersion.WithKind(infrav1.AzureManagedControlPlaneKind), logr.New(sink)) // system pool should 
trigger requests := mapper(context.TODO(), newManagedMachinePoolInfraReference(clusterName, "my-mmp-0")) @@ -1000,7 +1000,7 @@ func TestMachinePoolToAzureManagedControlPlaneMapFuncFailure(t *testing.T) { cluster := newCluster(clusterName) cluster.Spec.ControlPlaneRef = &corev1.ObjectReference{ APIVersion: infrav1.GroupVersion.String(), - Kind: "AzureManagedControlPlane", + Kind: infrav1.AzureManagedControlPlaneKind, Name: cpName, Namespace: cluster.Namespace, } @@ -1019,7 +1019,7 @@ func TestMachinePoolToAzureManagedControlPlaneMapFuncFailure(t *testing.T) { sink.EXPECT().Error(gomock.Any(), "failed to fetch default pool reference") sink.EXPECT().Error(gomock.Any(), "failed to fetch default pool reference") // twice because we are testing two calls - mapper := MachinePoolToAzureManagedControlPlaneMapFunc(context.Background(), fakeClient, infrav1.GroupVersion.WithKind("AzureManagedControlPlane"), logr.New(sink)) + mapper := MachinePoolToAzureManagedControlPlaneMapFunc(context.Background(), fakeClient, infrav1.GroupVersion.WithKind(infrav1.AzureManagedControlPlaneKind), logr.New(sink)) // default pool should trigger if owned cluster could not be fetched requests := mapper(context.TODO(), newManagedMachinePoolInfraReference(clusterName, "my-mmp-0")) @@ -1051,7 +1051,7 @@ func TestAzureManagedClusterToAzureManagedControlPlaneMapper(t *testing.T) { cluster := newCluster("my-cluster") cluster.Spec.ControlPlaneRef = &corev1.ObjectReference{ APIVersion: infrav1.GroupVersion.String(), - Kind: "AzureManagedControlPlane", + Kind: infrav1.AzureManagedControlPlaneKind, Name: cpName, Namespace: cluster.Namespace, } @@ -1113,7 +1113,7 @@ func TestAzureManagedControlPlaneToAzureManagedClusterMapper(t *testing.T) { cluster.Spec.ControlPlaneRef = &corev1.ObjectReference{ APIVersion: infrav1.GroupVersion.String(), - Kind: "AzureManagedControlPlane", + Kind: infrav1.AzureManagedControlPlaneKind, Name: cpName, Namespace: cluster.Namespace, } From 
ec6e4c0385112d119b7b944b1c9d33d484bdf424 Mon Sep 17 00:00:00 2001 From: Sujay Date: Sun, 5 Nov 2023 00:30:04 +0530 Subject: [PATCH 09/10] const for AzureClusterIdentity Signed-off-by: Sujay --- api/v1beta1/azurecluster_default_test.go | 14 +++++++------- api/v1beta1/azurecluster_validation.go | 2 +- api/v1beta1/azurecluster_validation_test.go | 2 +- api/v1beta1/azureclusteridentity_validation.go | 2 +- api/v1beta1/azureclusteridentity_webhook.go | 2 +- 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/api/v1beta1/azurecluster_default_test.go b/api/v1beta1/azurecluster_default_test.go index d15072002ff..8084c54be0d 100644 --- a/api/v1beta1/azurecluster_default_test.go +++ b/api/v1beta1/azurecluster_default_test.go @@ -145,7 +145,7 @@ func TestVnetDefaults(t *testing.T) { }, AzureClusterClassSpec: AzureClusterClassSpec{ IdentityRef: &corev1.ObjectReference{ - Kind: "AzureClusterIdentity", + Kind: AzureClusterIdentityKind, }, }, }, @@ -161,7 +161,7 @@ func TestVnetDefaults(t *testing.T) { ResourceGroup: "cluster-test", AzureClusterClassSpec: AzureClusterClassSpec{ IdentityRef: &corev1.ObjectReference{ - Kind: "AzureClusterIdentity", + Kind: AzureClusterIdentityKind, }, }, }, @@ -183,7 +183,7 @@ func TestVnetDefaults(t *testing.T) { }, AzureClusterClassSpec: AzureClusterClassSpec{ IdentityRef: &corev1.ObjectReference{ - Kind: "AzureClusterIdentity", + Kind: AzureClusterIdentityKind, }, }, }, @@ -206,7 +206,7 @@ func TestVnetDefaults(t *testing.T) { }, AzureClusterClassSpec: AzureClusterClassSpec{ IdentityRef: &corev1.ObjectReference{ - Kind: "AzureClusterIdentity", + Kind: AzureClusterIdentityKind, }, }, }, @@ -228,7 +228,7 @@ func TestVnetDefaults(t *testing.T) { }, AzureClusterClassSpec: AzureClusterClassSpec{ IdentityRef: &corev1.ObjectReference{ - Kind: "AzureClusterIdentity", + Kind: AzureClusterIdentityKind, }, }, }, @@ -251,7 +251,7 @@ func TestVnetDefaults(t *testing.T) { }, AzureClusterClassSpec: AzureClusterClassSpec{ IdentityRef: 
&corev1.ObjectReference{ - Kind: "AzureClusterIdentity", + Kind: AzureClusterIdentityKind, }, }, }, @@ -273,7 +273,7 @@ func TestVnetDefaults(t *testing.T) { }, AzureClusterClassSpec: AzureClusterClassSpec{ IdentityRef: &corev1.ObjectReference{ - Kind: "AzureClusterIdentity", + Kind: AzureClusterIdentityKind, }, }, }, diff --git a/api/v1beta1/azurecluster_validation.go b/api/v1beta1/azurecluster_validation.go index c92bab794a3..93b91f90f36 100644 --- a/api/v1beta1/azurecluster_validation.go +++ b/api/v1beta1/azurecluster_validation.go @@ -147,7 +147,7 @@ func validateIdentityRef(identityRef *corev1.ObjectReference, fldPath *field.Pat if identityRef == nil { return field.Required(fldPath, "identityRef is required") } - if identityRef.Kind != "AzureClusterIdentity" { + if identityRef.Kind != AzureClusterIdentityKind { return field.NotSupported(fldPath.Child("name"), identityRef.Name, []string{"AzureClusterIdentity"}) } return nil diff --git a/api/v1beta1/azurecluster_validation_test.go b/api/v1beta1/azurecluster_validation_test.go index 94cef232bf6..29a1dd86e90 100644 --- a/api/v1beta1/azurecluster_validation_test.go +++ b/api/v1beta1/azurecluster_validation_test.go @@ -1312,7 +1312,7 @@ func createValidCluster() *AzureCluster { NetworkSpec: createValidNetworkSpec(), AzureClusterClassSpec: AzureClusterClassSpec{ IdentityRef: &corev1.ObjectReference{ - Kind: "AzureClusterIdentity", + Kind: AzureClusterIdentityKind, }, }, }, diff --git a/api/v1beta1/azureclusteridentity_validation.go b/api/v1beta1/azureclusteridentity_validation.go index 508be3ae852..ed4b2085db9 100644 --- a/api/v1beta1/azureclusteridentity_validation.go +++ b/api/v1beta1/azureclusteridentity_validation.go @@ -32,5 +32,5 @@ func (c *AzureClusterIdentity) validateClusterIdentity() (admission.Warnings, er if len(allErrs) == 0 { return nil, nil } - return nil, apierrors.NewInvalid(GroupVersion.WithKind("AzureClusterIdentity").GroupKind(), c.Name, allErrs) + return nil, 
apierrors.NewInvalid(GroupVersion.WithKind(AzureClusterIdentityKind).GroupKind(), c.Name, allErrs) } diff --git a/api/v1beta1/azureclusteridentity_webhook.go b/api/v1beta1/azureclusteridentity_webhook.go index 45f204b0e6c..3d961dab1d0 100644 --- a/api/v1beta1/azureclusteridentity_webhook.go +++ b/api/v1beta1/azureclusteridentity_webhook.go @@ -55,7 +55,7 @@ func (c *AzureClusterIdentity) ValidateUpdate(oldRaw runtime.Object) (admission. if len(allErrs) == 0 { return c.validateClusterIdentity() } - return nil, apierrors.NewInvalid(GroupVersion.WithKind("AzureClusterIdentity").GroupKind(), c.Name, allErrs) + return nil, apierrors.NewInvalid(GroupVersion.WithKind(AzureClusterIdentityKind).GroupKind(), c.Name, allErrs) } // ValidateDelete implements webhook.Validator so a webhook will be registered for the type. From ec0e40e414df15a49221b85b3280f942860c82e0 Mon Sep 17 00:00:00 2001 From: Sujay Date: Sun, 5 Nov 2023 00:33:18 +0530 Subject: [PATCH 10/10] update comment Signed-off-by: Sujay --- api/v1beta1/consts.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/api/v1beta1/consts.go b/api/v1beta1/consts.go index 63ad064a51b..88095b3eecd 100644 --- a/api/v1beta1/consts.go +++ b/api/v1beta1/consts.go @@ -168,14 +168,14 @@ const ( ) const ( - // AzureClusterKind is kind value for the AzureCluster kind name. + // AzureClusterKind indicates the kind of an AzureCluster. AzureClusterKind = "AzureCluster" - // AzureMachinePoolKind is kind value for the AzureMachinePool kind name. + // AzureMachinePoolKind indicates the kind of an AzureMachinePool. AzureMachinePoolKind = "AzureMachinePool" - // AzureManagedClusterKind is kind value for the AzureManagedCluster kind name. + // AzureManagedClusterKind indicates the kind of an AzureManagedCluster. AzureManagedClusterKind = "AzureManagedCluster" - // AzureManagedControlPlaneKind is kind value for the AzureManagedControlPlane kind name. 
+ // AzureManagedControlPlaneKind indicates the kind of an AzureManagedControlPlane. AzureManagedControlPlaneKind = "AzureManagedControlPlane" - // AzureClusterIdentityKind is kind value for the AzureClusterIdentity kind name. + // AzureClusterIdentityKind indicates the kind of an AzureClusterIdentity. AzureClusterIdentityKind = "AzureClusterIdentity" )