From 91a5c736646d597f3d39797ec8393ea9588f42f7 Mon Sep 17 00:00:00 2001
From: James Sturtevant
Date: Thu, 26 May 2022 08:16:40 -0700
Subject: [PATCH] First pass at custom script and template for E2E

---
 .gitignore                        |  10 +-
 azure/run-capz-e2e.sh             | 189 +++++++++++++++
 azure/templates/windows-base.yaml | 378 ++++++++++++++++++++++++++++++
 3 files changed, 576 insertions(+), 1 deletion(-)
 create mode 100755 azure/run-capz-e2e.sh
 create mode 100644 azure/templates/windows-base.yaml

diff --git a/.gitignore b/.gitignore
index adb36c82..7c8cce46 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,9 @@
-*.exe
\ No newline at end of file
+*.exe
+**/_artifacts
+**/kubernetes
+**/.sshkey
+**/.sshkey.pub
+**/*.kubeconfig
+**/kubernetes-test-linux-amd64.tar.gz
+**/kube-proxy-windows.yaml
+**/capz-conf-*.yaml
\ No newline at end of file
diff --git a/azure/run-capz-e2e.sh b/azure/run-capz-e2e.sh
new file mode 100755
index 00000000..740f4903
--- /dev/null
+++ b/azure/run-capz-e2e.sh
@@ -0,0 +1,189 @@
+#!/bin/bash
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")
+export CAPZ_DIR="${CAPZ_DIR:-"${GOPATH}/src/sigs.k8s.io/cluster-api-provider-azure"}"
+: "${CAPZ_DIR:?Environment variable empty or not defined.}"
+if [[ ! -d "$CAPZ_DIR" ]]; then
+    echo "Must have the capz repo present at $CAPZ_DIR"; exit 1
+fi
+
+main() {
+    # defaults
+    export KUBERNETES_VERSION="${KUBERNETES_VERSION:-"latest"}"
+    export CONTROL_PLANE_MACHINE_COUNT="${AZURE_CONTROL_PLANE_MACHINE_COUNT:-"1"}"
+    export WINDOWS_WORKER_MACHINE_COUNT="${WINDOWS_WORKER_MACHINE_COUNT:-"2"}"
+    export WINDOWS_SERVER_VERSION="${WINDOWS_SERVER_VERSION:-"windows-2019"}"
+    export WINDOWS_CONTAINERD_URL="${WINDOWS_CONTAINERD_URL:-"https://github.com/containerd/containerd/releases/download/v1.6.4/containerd-1.6.4-windows-amd64.tar.gz"}"
+
+    # other config
+    export ARTIFACTS="${ARTIFACTS:-${PWD}/_artifacts}"
+    export CLUSTER_NAME="${CLUSTER_NAME:-capz-conf-$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 6 ; echo '')}"
+    export CAPI_EXTENSION_SOURCE="${CAPI_EXTENSION_SOURCE:-"https://github.com/Azure/azure-capi-cli-extension/releases/download/v0.0.5/capi-0.0.5-py2.py3-none-any.whl"}"
+    export IMAGE_SKU="${IMAGE_SKU:-"k8s-1dot23dot3-${WINDOWS_SERVER_VERSION}-containerd"}"
+
+    # TODO: if GMSA, do additional set up
+
+    set_azure_envs
+    set_ci_version
+    create_cluster
+    apply_workload_configuration
+    wait_for_nodes
+    run_e2e_test
+}
+
+cleanup() {
+    # The currently set KUBECONFIG points at the workload cluster, so unset it to get back to the management cluster.
+    # Also make sure the log collector can find the ssh key file.
+    unset KUBECONFIG
+    export AZURE_SSH_PUBLIC_KEY_FILE="${AZURE_SSH_PUBLIC_KEY_FILE:-"${PWD}/.sshkey.pub"}"
+    echo "Key file $AZURE_SSH_PUBLIC_KEY_FILE"
+    ls
+
+    pushd "${CAPZ_DIR}"
+    # don't stop on errors here, so we always clean up
+    go run -tags e2e "${CAPZ_DIR}/test/logger.go" --clustername "${CLUSTER_NAME}" --namespace default --artifacts-folder "${ARTIFACTS}" || true
+    popd
+
+    "${CAPZ_DIR}/hack/log/redact.sh" || true
+    if [[ -z "${SKIP_CLEANUP:-}" ]]; then
+        timeout 1800 kubectl delete cluster "${CLUSTER_NAME}" --wait=false || true
+        az group delete --name "$CLUSTER_NAME" --no-wait -y --force-deletion-types=Microsoft.Compute/virtualMachines,Microsoft.Compute/virtualMachineScaleSets
+    else
+        echo "skipping clean up"
+    fi
+}
+
+create_cluster(){
+    export SKIP_CREATE="${SKIP_CREATE:-"false"}"
+    if [[ ! "$SKIP_CREATE" == "true" ]]; then
+        ## create cluster
+        az extension add -y --upgrade --source "$CAPI_EXTENSION_SOURCE" || true
+        az capi create -mg "${CLUSTER_NAME}" -y -w -n "${CLUSTER_NAME}" -l "$AZURE_LOCATION" --template "$SCRIPT_ROOT"/templates/windows-base.yaml
+
+        # put a creation timestamp tag on the resource group so it still gets garbage collected if cleanup fails
+        az group update --resource-group "${CLUSTER_NAME}" --tags creationTimestamp="$(date -u '+%Y-%m-%dT%H:%M:%SZ')"
+    fi
+    export KUBECONFIG="$PWD"/"${CLUSTER_NAME}".kubeconfig
+}
+
+apply_workload_configuration(){
+    # A patch is needed to tell kube-proxy to use CI binaries. This could go away once we have build scripts for the kube-proxy HostProcess image.
+    kubectl apply -f "${CAPZ_DIR}"/templates/test/ci/patches/windows-kubeproxy-ci.yaml
+    kubectl rollout restart ds -n kube-system kube-proxy-windows
+
+    # apply additional helper manifests (logger etc)
+    kubectl apply -f "${CAPZ_DIR}"/templates/addons/windows/containerd-logging/containerd-logger.yaml
+    kubectl apply -f "${CAPZ_DIR}"/templates/addons/windows/csi-proxy/csi-proxy.yaml
+    kubectl apply -f "${CAPZ_DIR}"/templates/addons/metrics-server/metrics-server.yaml
+}
+
+run_e2e_test() {
+    export SKIP_TEST="${SKIP_TEST:-"false"}"
+    if [[ ! "$SKIP_TEST" == "true" ]]; then
+        ## get and run e2e test
+        ## https://github.com/kubernetes/sig-release/blob/master/release-engineering/artifacts.md#content-of-kubernetes-test-system-archtargz-on-example-of-kubernetes-test-linux-amd64targz-directories-removed-from-list
+        curl -L -o /tmp/kubernetes-test-linux-amd64.tar.gz "https://storage.googleapis.com/k8s-release-dev/ci/${CI_VERSION}/kubernetes-test-linux-amd64.tar.gz"
+        tar -xzvf /tmp/kubernetes-test-linux-amd64.tar.gz
+
+        export GINKGO_FOCUS=${GINKGO_FOCUS:-"\[Conformance\]|\[NodeConformance\]|\[sig-windows\]|\[sig-apps\].CronJob|\[sig-api-machinery\].ResourceQuota|\[sig-scheduling\].SchedulerPreemption"}
+        export GINKGO_SKIP=${GINKGO_SKIP:-"\[LinuxOnly\]|\[Serial\]|\[Slow\]|\[Excluded:WindowsDocker\]|Networking.Granular.Checks(.*)node-pod.communication|Guestbook.application.should.create.and.stop.a.working.application|device.plugin.for.Windows|Container.Lifecycle.Hook.when.create.a.pod.with.lifecycle.hook.should.execute(.*)http.hook.properly|\[sig-api-machinery\].Garbage.collector"}
+
+        export GINKGO_NODES="${GINKGO_NODES:-"4"}"
+
+        # CI is an environment variable set by a prow job: https://github.com/kubernetes/test-infra/blob/master/prow/jobs.md#job-environment-variables
+        ADDITIONAL_E2E_ARGS=()
+        CI="${CI:-""}"
+        if [[ "$CI" == "true" ]]; then
+            # The private image repository doesn't have a way to promote images: https://github.com/kubernetes/k8s.io/pull/1929
+            # So we are using a custom repository for the test "Container Runtime blackbox test when running a container with a new image should be able to pull from private registry with secret [NodeConformance]"
+            # Must also set the label preset-windows-private-registry-cred: "true" on the job
+            ADDITIONAL_E2E_ARGS+=("--docker-config-file=${DOCKER_CONFIG_FILE}")
+        fi
+
+        set -x
+        "$PWD"/kubernetes/test/bin/ginkgo --nodes="${GINKGO_NODES}" "$PWD"/kubernetes/test/bin/e2e.test -- \
+            --provider=skeleton \
+            --ginkgo.noColor \
+            --ginkgo.focus="$GINKGO_FOCUS" \
+            --ginkgo.skip="$GINKGO_SKIP" \
+            --node-os-distro="windows" \
+            --disable-log-dump \
+            --ginkgo.progress=true \
+            --ginkgo.slowSpecThreshold=120.0 \
+            --ginkgo.flakeAttempts=0 \
+            --ginkgo.trace=true \
+            --num-nodes="$WINDOWS_WORKER_MACHINE_COUNT" \
+            --ginkgo.v=true \
+            --dump-logs-on-failure=true \
+            --report-dir="${ARTIFACTS}" \
+            --prepull-images=true \
+            --v=5 "${ADDITIONAL_E2E_ARGS[@]}"
+        set +x
+    fi
+}
+
+wait_for_nodes() {
+    echo "Waiting for ${CONTROL_PLANE_MACHINE_COUNT} control plane machine(s) and ${WINDOWS_WORKER_MACHINE_COUNT} Windows machine(s) to become Ready"
+
+    # Ensure that all nodes are registered with the API server before checking for readiness
+    local total_nodes="$((CONTROL_PLANE_MACHINE_COUNT + WINDOWS_WORKER_MACHINE_COUNT))"
+    while [[ $(kubectl get nodes -ojson | jq '.items | length') -ne "${total_nodes}" ]]; do
+        kubectl get nodes -o wide
+        kubectl get pods -A -o wide
+        sleep 10
+    done
+
+    kubectl wait --for=condition=Ready node --all --timeout=5m
+    kubectl get nodes -o wide
+}
+
+set_azure_envs() {
+    # shellcheck source=hack/ensure-tags.sh
+    source "${CAPZ_DIR}/hack/ensure-tags.sh"
+    # shellcheck source=hack/parse-prow-creds.sh
+    source "${CAPZ_DIR}/hack/parse-prow-creds.sh"
+    # shellcheck source=hack/util.sh
+    source "${CAPZ_DIR}/hack/util.sh"
+    # shellcheck source=hack/ensure-azcli.sh
+    source "${CAPZ_DIR}/hack/ensure-azcli.sh"
+
+    # Verify the required Environment Variables are present.
+    capz::util::ensure_azure_envs
+
+    # Generate SSH key.
+    capz::util::generate_ssh_key
+
+    export AZURE_LOCATION="${AZURE_LOCATION:-$(capz::util::get_random_region)}"
+}
+
+set_ci_version() {
+    # select the correct Kubernetes CI version for the tests
+    if [[ "$(capz::util::should_build_kubernetes)" == "true" ]]; then
+        # TODO: test this
+        : "${REGISTRY:?Environment variable empty or not defined.}"
+        "${CAPZ_DIR}"/hack/ensure-acr-login.sh
+
+        export E2E_ARGS="-kubetest.use-pr-artifacts"
+        export KUBE_BUILD_CONFORMANCE="y"
+        source "${CAPZ_DIR}/scripts/ci-build-kubernetes.sh"
+    else
+        if [[ "${KUBERNETES_VERSION:-}" =~ "latest" ]]; then
+            CI_VERSION_URL="https://dl.k8s.io/ci/${KUBERNETES_VERSION}.txt"
+        else
+            CI_VERSION_URL="https://dl.k8s.io/ci/latest.txt"
+        fi
+        export CI_VERSION="${CI_VERSION:-$(curl -sSL "${CI_VERSION_URL}")}"
+        export KUBERNETES_VERSION="${CI_VERSION}"
+
+        echo "Selected Kubernetes version:"
+        echo "$CI_VERSION"
+        echo "$KUBERNETES_VERSION"
+    fi
+}
+
+trap cleanup EXIT
+main
diff --git a/azure/templates/windows-base.yaml b/azure/templates/windows-base.yaml
new file mode 100644
index 00000000..b2c5a732
--- /dev/null
+++ b/azure/templates/windows-base.yaml
@@ -0,0 +1,378 @@
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+  labels:
+    cni: ${CLUSTER_NAME}-calico
+    containerd-logger: enabled
+    csi-proxy: enabled
+    metrics-server: enabled
+  name: ${CLUSTER_NAME}
+  namespace: default
+spec:
+  clusterNetwork:
+    pods:
+      cidrBlocks:
+      - 192.168.0.0/16
+  controlPlaneRef:
+    apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+    kind: KubeadmControlPlane
+    name: ${CLUSTER_NAME}-control-plane
+  infrastructureRef:
+    apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+    kind: AzureCluster
+    name: ${CLUSTER_NAME}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: AzureCluster
+metadata:
+  name: ${CLUSTER_NAME}
+  namespace: default
+spec:
+  additionalTags:
+    buildProvenance: ${BUILD_PROVENANCE}
+    creationTimestamp: ${TIMESTAMP}
+    jobName: ${JOB_NAME}
+  identityRef:
+    apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+    kind: AzureClusterIdentity
+    name: ${CLUSTER_IDENTITY_NAME}
+  location: ${AZURE_LOCATION}
+  networkSpec:
+    subnets:
+    - name: control-plane-subnet
+      role: control-plane
+    - name: node-subnet
+      natGateway:
+        name: node-natgateway
+      role: node
+    vnet:
+      name: ${AZURE_VNET_NAME:=${CLUSTER_NAME}-vnet}
+  resourceGroup: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}}
+  subscriptionID: ${AZURE_SUBSCRIPTION_ID}
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+kind: KubeadmControlPlane
+metadata:
+  name: ${CLUSTER_NAME}-control-plane
+  namespace: default
+spec:
+  kubeadmConfigSpec:
+    clusterConfiguration:
+      apiServer:
+        extraArgs:
+          cloud-config: /etc/kubernetes/azure.json
+          cloud-provider: azure
+          feature-gates: ${K8S_FEATURE_GATES:-""}
+        extraVolumes:
+        - hostPath: /etc/kubernetes/azure.json
+          mountPath: /etc/kubernetes/azure.json
+          name: cloud-config
+          readOnly: true
+        timeoutForControlPlane: 20m
+      controllerManager:
+        extraArgs:
+          allocate-node-cidrs: "false"
+          cloud-config: /etc/kubernetes/azure.json
+          cloud-provider: azure
+          cluster-name: ${CLUSTER_NAME}
+          feature-gates: HPAContainerMetrics=true
+          v: "4"
+        extraVolumes:
+        - hostPath: /etc/kubernetes/azure.json
+          mountPath: /etc/kubernetes/azure.json
+          name: cloud-config
+          readOnly: true
+      etcd:
+        local:
+          dataDir: /var/lib/etcddisk/etcd
+          extraArgs:
+            quota-backend-bytes: "8589934592"
+      kubernetesVersion: ci/${CI_VERSION}
+    diskSetup:
+      filesystems:
+      - device: /dev/disk/azure/scsi1/lun0
+        extraOpts:
+        - -E
+        - lazy_itable_init=1,lazy_journal_init=1
+        filesystem: ext4
+        label: etcd_disk
+      - device: ephemeral0.1
+        filesystem: ext4
+        label: ephemeral0
+        replaceFS: ntfs
+      partitions:
+      - device: /dev/disk/azure/scsi1/lun0
+        layout: true
+        overwrite: false
+        tableType: gpt
+    files:
+    - contentFrom:
+        secret:
+          key: control-plane-azure.json
+          name: ${CLUSTER_NAME}-control-plane-azure-json
+      owner: root:root
+      path: /etc/kubernetes/azure.json
+      permissions: "0644"
+    - content: |
+        #!/bin/bash
+
+        set -o nounset
+        set -o pipefail
+        set -o errexit
+
+        # This script replaces the preinstalled kubeadm, kubelet and kubectl binaries with
+        # the binaries produced by the CI build for the selected CI version, restarts the
+        # kubelet, and then runs '... --version' commands to verify that the binaries are
+        # correctly installed.
+
+        LINE_SEPARATOR="*************************************************"
+        echo "$$LINE_SEPARATOR"
+        CI_VERSION=${CI_VERSION}
+        if [[ "$${CI_VERSION}" != "" ]]; then
+          CI_DIR=/tmp/k8s-ci
+          mkdir -p $$CI_DIR
+          declare -a PACKAGES_TO_TEST=("kubectl" "kubelet" "kubeadm")
+
+          echo "* testing CI version $$CI_VERSION"
+
+          CI_URL="https://storage.googleapis.com/k8s-release-dev/ci/$${CI_VERSION}/bin/linux/amd64"
+          for CI_PACKAGE in "$${PACKAGES_TO_TEST[@]}"; do
+            echo "* downloading binary: $$CI_URL/$$CI_PACKAGE"
+            wget "$$CI_URL/$$CI_PACKAGE" -O "$$CI_DIR/$$CI_PACKAGE"
+            chmod +x "$$CI_DIR/$$CI_PACKAGE"
+            mv "$$CI_DIR/$$CI_PACKAGE" "/usr/bin/$$CI_PACKAGE"
+          done
+          systemctl restart kubelet
+        fi
+        echo "* checking binary versions"
+        echo "ctr version: " $(ctr version)
+        echo "kubeadm version: " $(kubeadm version -o=short)
+        echo "kubectl version: " $(kubectl version --client=true --short=true)
+        echo "kubelet version: " $(kubelet --version)
+        echo "$$LINE_SEPARATOR"
+      owner: root:root
+      path: /tmp/kubeadm-bootstrap.sh
+      permissions: "0744"
+    initConfiguration:
+      nodeRegistration:
+        kubeletExtraArgs:
+          azure-container-registry-config: /etc/kubernetes/azure.json
+          cloud-config: /etc/kubernetes/azure.json
+          cloud-provider: azure
+        name: '{{ ds.meta_data["local_hostname"] }}'
+    joinConfiguration:
+      nodeRegistration:
+        kubeletExtraArgs:
+          azure-container-registry-config: /etc/kubernetes/azure.json
+          cloud-config: /etc/kubernetes/azure.json
+          cloud-provider: azure
+        name: '{{ ds.meta_data["local_hostname"] }}'
+    mounts:
+    - - LABEL=etcd_disk
+      - /var/lib/etcddisk
+    postKubeadmCommands: []
+    preKubeadmCommands:
+    - bash -c /tmp/kubeadm-bootstrap.sh
+    useExperimentalRetryJoin: true
+  machineTemplate:
+    infrastructureRef:
+      apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+      kind: AzureMachineTemplate
+      name: ${CLUSTER_NAME}-control-plane
+  replicas: ${CONTROL_PLANE_MACHINE_COUNT:-1}
+  version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: AzureMachineTemplate
+metadata:
+  name: ${CLUSTER_NAME}-control-plane
+  namespace: default
+spec:
+  template:
+    spec:
+      dataDisks:
+      - diskSizeGB: 256
+        lun: 0
+        nameSuffix: etcddisk
+      image:
+        marketplace:
+          offer: capi
+          publisher: cncf-upstream
+          sku: k8s-1dot23dot5-ubuntu-1804
+          version: latest
+      osDisk:
+        diskSizeGB: 128
+        osType: Linux
+      sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""}
+      vmSize: Standard_D2s_v3
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachineDeployment
+metadata:
+  name: ${CLUSTER_NAME}-md-win
+  namespace: default
+spec:
+  clusterName: ${CLUSTER_NAME}
+  replicas: ${WINDOWS_WORKER_MACHINE_COUNT:-2}
+  selector: {}
+  template:
+    metadata:
+      labels:
+        windows-healthcheck: "true"
+    spec:
+      bootstrap:
+        configRef:
+          apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+          kind: KubeadmConfigTemplate
+          name: ${CLUSTER_NAME}-md-win
+      clusterName: ${CLUSTER_NAME}
+      infrastructureRef:
+        apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+        kind: AzureMachineTemplate
+        name: ${CLUSTER_NAME}-md-win
+      version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: AzureMachineTemplate
+metadata:
+  annotations:
+    runtime: containerd
+  name: ${CLUSTER_NAME}-md-win
+  namespace: default
+spec:
+  template:
+    metadata:
+      annotations:
+        runtime: containerd
+    spec:
+      image:
+        marketplace:
+          offer: capi-windows
+          publisher: cncf-upstream
+          sku: ${IMAGE_SKU:="k8s-1dot23dot5-windows-2019-containerd"}
+          version: ${IMAGE_VERSION:="latest"}
+      osDisk:
+        diskSizeGB: 128
+        managedDisk:
+          storageAccountType: Premium_LRS
+        osType: Windows
+      sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""}
+      vmSize: ${AZURE_NODE_MACHINE_TYPE:-"Standard_D4s_v3"}
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+kind: KubeadmConfigTemplate
+metadata:
+  name: ${CLUSTER_NAME}-md-win
+  namespace: default
+spec:
+  template:
+    spec:
+      files:
+      - contentFrom:
+          secret:
+            key: worker-node-azure.json
+            name: ${CLUSTER_NAME}-md-win-azure-json
+        owner: root:root
+        path: c:/k/azure.json
+        permissions: "0644"
+      - content: |-
+          Add-MpPreference -ExclusionProcess C:/opt/cni/bin/calico.exe
+          Add-MpPreference -ExclusionProcess C:/opt/cni/bin/calico-ipam.exe
+        path: C:/defender-exclude-calico.ps1
+        permissions: "0744"
+      - content: |
+          # /tmp is assumed created and required for upstream e2e tests to pass
+          New-Item -ItemType Directory -Force -Path C:\tmp\
+        path: C:/create-temp-folder.ps1
+        permissions: "0744"
+      - content: |
+          $ErrorActionPreference = 'Stop'
+          Stop-Service kubelet -Force
+          $$CI_VERSION="${CI_VERSION}"
+          if($$CI_VERSION -ne "")
+          {
+            $$binaries=@("kubeadm", "kubectl", "kubelet", "kube-proxy")
+            $$ci_url="https://storage.googleapis.com/k8s-release-dev/ci/$$CI_VERSION/bin/windows/amd64"
+            foreach ( $$binary in $$binaries )
+            {
+              echo "downloading binary: $$ci_url/$$binary.exe"
+              curl.exe --retry 10 --retry-delay 5 "$$ci_url/$$binary.exe" --output "c:/k/$$binary.exe"
+            }
+          }
+          $$CONTAINERD_URL="${WINDOWS_CONTAINERD_URL}"
+          if($$CONTAINERD_URL -ne ""){
+            Stop-Service containerd -Force
+            echo "downloading containerd: $$CONTAINERD_URL"
+            curl.exe --retry 10 --retry-delay 5 -L "$$CONTAINERD_URL" --output "c:/k/containerd.tar.gz"
+            tar.exe -zxvf c:/k/containerd.tar.gz -C "c:/Program Files/containerd" --strip-components 1
+            Start-Service containerd
+          }
+          # Tag the image with the CI version. The image knows how to use the local copy via the
+          # configmap that is applied at this stage (windows-kubeproxy-ci.yaml).
+          ctr.exe -n k8s.io images pull docker.io/sigwindowstools/kube-proxy:v1.23.1-calico-hostprocess
+          ctr.exe -n k8s.io images tag docker.io/sigwindowstools/kube-proxy:v1.23.1-calico-hostprocess "docker.io/sigwindowstools/kube-proxy:${CI_VERSION/+/_}-calico-hostprocess"
+          kubeadm.exe version -o=short
+          kubectl.exe version --client=true --short=true
+          kubelet.exe --version
+          kube-proxy.exe --version
+          containerd.exe --version
+          containerd-shim-runhcs-v1.exe --version
+        path: C:/replace-ci-binaries.ps1
+        permissions: "0744"
+      joinConfiguration:
+        nodeRegistration:
+          criSocket: npipe:////./pipe/containerd-containerd
+          kubeletExtraArgs:
+            azure-container-registry-config: c:/k/azure.json
+            cloud-config: c:/k/azure.json
+            cloud-provider: azure
+            feature-gates: WindowsHostProcessContainers=true
+            v: "2"
+            windows-priorityclass: ABOVE_NORMAL_PRIORITY_CLASS
+          name: '{{ ds.meta_data["local_hostname"] }}'
+      postKubeadmCommands:
+      - nssm set kubelet start SERVICE_AUTO_START
+      - powershell C:/defender-exclude-calico.ps1
+      preKubeadmCommands:
+      - powershell C:/create-temp-folder.ps1
+      - powershell C:/replace-ci-binaries.ps1
+      users:
+      - groups: Administrators
        name: capi
+        sshAuthorizedKeys:
+        - ${AZURE_SSH_PUBLIC_KEY:=""}
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachineHealthCheck
+metadata:
+  name: ${CLUSTER_NAME}-mhc-windows
+  namespace: default
+spec:
+  clusterName: ${CLUSTER_NAME}
+  maxUnhealthy: 100%
+  nodeStartupTimeout: 10m
+  selector:
+    matchLabels:
+      windows-healthcheck: "true"
+  unhealthyConditions:
+  - status: "False"
+    timeout: 300s
+    type: Ready
+  - status: Unknown
+    timeout: 300s
+    type: Ready
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: AzureClusterIdentity
+metadata:
+  labels:
+    clusterctl.cluster.x-k8s.io/move-hierarchy: "true"
+  name: ${CLUSTER_IDENTITY_NAME}
+  namespace: default
+spec:
+  allowedNamespaces: {}
+  clientID: ${AZURE_CLIENT_ID}
+  clientSecret:
+    name: ${AZURE_CLUSTER_IDENTITY_SECRET_NAME}
+    namespace: ${AZURE_CLUSTER_IDENTITY_SECRET_NAMESPACE}
+  tenantID: ${AZURE_TENANT_ID}
+  type: ServicePrincipal
+---
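A minimal usage sketch for the new script. The exported values below are illustrative assumptions only: the service principal variables are the ones the sourced capz helpers and the AzureClusterIdentity in the template expect, and anything left unset falls back to the defaults in run-capz-e2e.sh.

    export CAPZ_DIR="${GOPATH}/src/sigs.k8s.io/cluster-api-provider-azure"   # capz checkout required by the script
    export AZURE_SUBSCRIPTION_ID="<subscription-id>"
    export AZURE_TENANT_ID="<tenant-id>"
    export AZURE_CLIENT_ID="<client-id>"
    export AZURE_CLIENT_SECRET="<client-secret>"
    export WINDOWS_SERVER_VERSION="windows-2019"   # script default; change only if a matching IMAGE_SKU exists
    export SKIP_CLEANUP="true"                     # optional: keep the cluster and resource group after the run
    ./azure/run-capz-e2e.sh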