CAPZ-demo: add automation
This change adds automation to prepare and to run the CAPZ demo. The
automation currently works around limitations caused by the upstream
CAPZ PR not yet being merged; TODOs in capz-demo.env mark what to
change once the PR is merged upstream.

Signed-off-by: Thilo Fromm <[email protected]>
t-lo committed Oct 31, 2024
1 parent 3369c78 commit 84bf668
Showing 4 changed files with 571 additions and 0 deletions.
3 changes: 3 additions & 0 deletions CAPZ-sysext/.gitignore
@@ -0,0 +1,3 @@
*.swp
demo
azure.env
20 changes: 20 additions & 0 deletions CAPZ-sysext/azure.env.template
@@ -0,0 +1,20 @@
# Template for Azure CAPZ settings

export AZURE_SUBSCRIPTION_ID="TODO add subscription ID"

# From https://capz.sigs.k8s.io/getting-started:
# az ad sp create-for-rbac --role contributor --scopes="/subscriptions/${AZURE_SUBSCRIPTION_ID}"
export AZURE_TENANT_ID="TODO add 'tenant' from output of az command"
export AZURE_CLIENT_ID="TODO add 'appId from output of az command'"
export AZURE_CLIENT_SECRET="TODO add 'password' from output of az command"

#
# These usually do not need to be touched
#

export AZURE_CLIENT_ID_USER_ASSIGNED_IDENTITY="${AZURE_CLIENT_ID}" # for compatibility with CAPZ v1.16 templates

# AZURE_RESOURCE_GROUP is set in capz-demo.env
export AZURE_CLUSTER_IDENTITY_SECRET_NAME="${AZURE_RESOURCE_GROUP}-cluster-identity-secret"
export CLUSTER_IDENTITY_NAME="${AZURE_RESOURCE_GROUP}-cluster-identity"
export AZURE_CLUSTER_IDENTITY_SECRET_NAMESPACE="default"
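
# Example (illustrative, not part of the template): with jq installed, the
# values above can be pulled straight out of the JSON that
# 'az ad sp create-for-rbac' prints:
#
#   az ad sp create-for-rbac --role contributor \
#     --scopes="/subscriptions/${AZURE_SUBSCRIPTION_ID}" > sp.json
#   jq -r .tenant   sp.json   # -> AZURE_TENANT_ID
#   jq -r .appId    sp.json   # -> AZURE_CLIENT_ID
#   jq -r .password sp.json   # -> AZURE_CLIENT_SECRET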
213 changes: 213 additions & 0 deletions CAPZ-sysext/capz-demo.env
@@ -0,0 +1,213 @@
#!/bin/bash

KUBERNETES_VERSION="v1.30.0"

AZURE_RESOURCE_GROUP="flatcar-capi-demo-azure"
export AZURE_RESOURCE_GROUP

WORKER_CONTROLPLANE_NODES=1
WORKER_NODES=2

p() {
echo
echo "#####################################"
echo "${@}"
echo "-------------------------------------"
}
# --

check_command() {
local cmd="$1"

if ! command -v "$cmd" &> /dev/null ; then
echo "'$cmd' could not be found. Please install your distro's '$cmd'."
return 1
fi
echo " - '$cmd'"
}
# --

check_file() {
local f="$1"

if [ ! -f "./$f" ] ; then
echo "prerequisite '$f' could not be found."
return 1
fi

echo " - '$f'"
}
# --

get_prerequisites() {
p "Prerequisites: Checking for required host commands."
check_command "kubectl" || return
check_command "yq" || return
check_command "wget" || return

p "Prerequisites: Checking for prerequisite files."
check_file "azure.env" || return
check_file "cluster-template-flatcar-sysext.yaml" || return

if [ ! -f ./clusterctl ] ; then
p "Prerequisites: fetching clusterctl"
wget https://github.com/kubernetes-sigs/cluster-api/releases/latest/download/clusterctl-linux-amd64
mv clusterctl-linux-amd64 clusterctl
chmod 755 clusterctl
fi

if [ ! -f ./kind ] ; then
p "Prerequisites: fetching kind"
wget https://github.com/kubernetes-sigs/kind/releases/latest/download/kind-linux-amd64
mv kind-linux-amd64 kind
chmod 755 kind
fi

if [ ! -f ./helm ] ; then
p "Prerequisites: fetching helm"
curl https://get.helm.sh/helm-v3.16.2-linux-amd64.tar.gz \
| tar xz linux-amd64/helm -O >helm
chmod 755 helm
mkdir -p helm-cache
fi
}
# --
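
# Illustrative sanity check, not part of the demo flow: print the versions of
# the tools fetched above to confirm the downloaded binaries actually run.
verify_prerequisites() {
./clusterctl version
./kind version
./helm version
}
# --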

setup_kind_cluster() {
p "Bootsstrapping cluster"
./kind create cluster --kubeconfig=./kind-mgmt.kubeconfig
export EXP_KUBEADM_BOOTSTRAP_FORMAT_IGNITION=true
}
# --
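
# Illustrative check, not part of the demo flow: confirm the kind management
# cluster exists and its API server responds.
verify_mgmt_cluster() {
./kind get clusters
kc_mgmt cluster-info
}
# --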

kc_mgmt() {
kubectl --kubeconfig=./kind-mgmt.kubeconfig "${@}"
}
# --

kc_worker() {
kubectl --kubeconfig=./${AZURE_RESOURCE_GROUP}.kubeconfig "${@}"
}
# --

generate_capz_yaml() {
source ./azure.env

p "Initialising ClusterAPI Azure provider."
./clusterctl init --infrastructure azure --kubeconfig=./kind-mgmt.kubeconfig

# FIXME: add
# --infrastructure azure \
# --flavor flatcar-sysext \
# and remove
# --from cluster-template-flatcar-sysext.yaml \
# when https://github.com/kubernetes-sigs/cluster-api-provider-azure/pull/4575 is merged.

# TODO: remove when PR is merged
export AZURE_CONTROL_PLANE_MACHINE_TYPE="Standard_D2s_v3"
export AZURE_LOCATION="northeurope"
export AZURE_NODE_MACHINE_TYPE="Standard_D2s_v3"
export FLATCAR_VERSION="latest"
export CI_RG="${AZURE_RESOURCE_GROUP}"

p "Generating ${AZURE_RESOURCE_GROUP}.yaml."
./clusterctl generate cluster ${AZURE_RESOURCE_GROUP} \
--from cluster-template-flatcar-sysext.yaml \
--kubeconfig=./kind-mgmt.kubeconfig \
--kubernetes-version "${KUBERNETES_VERSION}" \
--control-plane-machine-count=${WORKER_CONTROLPLANE_NODES} \
--worker-machine-count=${WORKER_NODES} \
> ${AZURE_RESOURCE_GROUP}.yaml

yq -i "with(. | select(.kind == \"AzureClusterIdentity\"); .spec.type |= \"ServicePrincipal\" | .spec.clientSecret.name |= \"${AZURE_CLUSTER_IDENTITY_SECRET_NAME}\" | .spec.clientSecret.namespace |= \"${AZURE_CLUSTER_IDENTITY_SECRET_NAMESPACE}\")" "${AZURE_RESOURCE_GROUP}.yaml"
}
# --
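
# Illustrative check, not part of the demo flow: print the AzureClusterIdentity
# document from the generated manifest to verify the yq edit in
# generate_capz_yaml took effect (yq v4 syntax, as above).
show_cluster_identity() {
yq 'select(.kind == "AzureClusterIdentity")' "${AZURE_RESOURCE_GROUP}.yaml"
}
# --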

deploy_capz_cluster() {

p "Creating client secrets and workload cluster"

kc_mgmt create secret generic "${AZURE_CLUSTER_IDENTITY_SECRET_NAME}" \
--from-literal=clientSecret="${AZURE_CLIENT_SECRET}" \
--namespace "${AZURE_CLUSTER_IDENTITY_SECRET_NAMESPACE}"
kc_mgmt apply -f ./${AZURE_RESOURCE_GROUP}.yaml

p "Waiting for cluster to be provisioned."
while ! kc_mgmt get cluster | grep "${AZURE_RESOURCE_GROUP}" | grep -i provisioned ; do
sleep 1
done

# Hack alert: the command below sometimes produced an empty kubeconfig, so
# we add a defensive sleep.
sleep 2

./clusterctl get kubeconfig ${AZURE_RESOURCE_GROUP} \
--kubeconfig=./kind-mgmt.kubeconfig \
--namespace default \
> ./${AZURE_RESOURCE_GROUP}.kubeconfig

p "Waiting for all nodes to come up."
local count=0
local target_count=$((WORKER_CONTROLPLANE_NODES + WORKER_NODES))
while [ "$count" -lt "$target_count" ] ; do
count="$(kc_worker --request-timeout=5s get nodes \
| grep "${AZURE_RESOURCE_GROUP}" \
| wc -l)"
echo "$count of $target_count nodes are up."
done

local worker_cidr="$(kc_mgmt get cluster "${AZURE_RESOURCE_GROUP}" \
-o=jsonpath='{.spec.clusterNetwork.pods.cidrBlocks[0]}')"

p "Deploying Calico to cluster CIDR '$worker_cidr' so worker nodes can talk to each other"

./helm --kubeconfig=./${AZURE_RESOURCE_GROUP}.kubeconfig \
--registry-config=./${AZURE_RESOURCE_GROUP}.helm-registry.json \
--repository-cache=./helm-cache \
--repository-config=./${AZURE_RESOURCE_GROUP}.helm-registry.yaml \
repo add projectcalico https://docs.tigera.io/calico/charts

./helm --kubeconfig=./${AZURE_RESOURCE_GROUP}.kubeconfig \
--registry-config=./${AZURE_RESOURCE_GROUP}.helm-registry.json \
--repository-cache=./helm-cache \
--repository-config=./${AZURE_RESOURCE_GROUP}.helm-registry.yaml \
install calico projectcalico/tigera-operator \
--version v3.26.1 \
-f https://raw.githubusercontent.com/kubernetes-sigs/cluster-api-provider-azure/main/templates/addons/calico/values.yaml \
--set-string "installation.calicoNetwork.ipPools[0].cidr=${worker_cidr}" \
--namespace tigera-operator \
--create-namespace

p "Your nodes should be ready soon"
kc_worker get nodes

p "Cluster is deployed and can now be used ('kc_worker' kubectl wrapper)."
p "'az serial-console connect -g ${AZURE_RESOURCE_GROUP} -n <nodename>' connects via serial console."
}
# --
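
# Illustrative helper, not part of the demo flow: summarise the provisioning
# state of the workload cluster and its machines via the standard
# 'clusterctl describe cluster' subcommand.
describe_cluster() {
./clusterctl describe cluster "${AZURE_RESOURCE_GROUP}" \
--kubeconfig=./kind-mgmt.kubeconfig \
--namespace default
}
# --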

cleanup() {
kc_mgmt delete cluster ${AZURE_RESOURCE_GROUP}
./kind delete cluster --kubeconfig=./kind-mgmt.kubeconfig
}
# --
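
# Illustrative check, not part of the demo flow: 'cleanup' removes the local
# management cluster, so verification has to go through Azure directly.
# 'az group exists' prints true/false; deletion may still be in progress.
verify_cleanup() {
az group exists --name "${AZURE_RESOURCE_GROUP}"
}
# --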

help() {
cat <<EOF
Demo functions:
- get_prerequisites: Check for and download a number of prerequisites.
- setup_kind_cluster: Create a local Kind (Kubernetes-in-Docker) cluster we'll use as the management cluster.
- generate_capz_yaml: Generate Azure worker cluster configuration.
- deploy_capz_cluster: Apply worker cluster config to management cluster. This will create the worker cluster.
You can use kubectl wrappers:
- kc_worker: interact with the worker cluster
- kc_mgmt: interact with the management cluster.
- 'cleanup' will tear down the worker cluster and remove the local management cluster.
EOF
}

help
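
# Typical session (illustrative): source this file in an interactive shell,
# then run the steps in the order 'help' lists them:
#
#   source ./capz-demo.env
#   get_prerequisites
#   setup_kind_cluster
#   generate_capz_yaml
#   deploy_capz_cluster
#   # ...interact via kc_worker / kc_mgmt...
#   cleanup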