diff --git a/hack/util.sh b/hack/util.sh
index 82b54fce29e..8a515cfe87f 100755
--- a/hack/util.sh
+++ b/hack/util.sh
@@ -82,10 +82,12 @@ capz::util::generate_ssh_key() {
     AZURE_SSH_PUBLIC_KEY_FILE=${AZURE_SSH_PUBLIC_KEY_FILE:-""}
     if [ -z "${AZURE_SSH_PUBLIC_KEY_FILE}" ]; then
         echo "generating sshkey for e2e"
-        SSH_KEY_FILE=.sshkey
-        rm -f "${SSH_KEY_FILE}" 2>/dev/null
-        ssh-keygen -t rsa -b 2048 -f "${SSH_KEY_FILE}" -N '' 1>/dev/null
-        AZURE_SSH_PUBLIC_KEY_FILE="${SSH_KEY_FILE}.pub"
+        AZURE_SSH_KEY=.sshkey
+        rm -f "${AZURE_SSH_KEY}" 2>/dev/null
+        ssh-keygen -t rsa -b 2048 -f "${AZURE_SSH_KEY}" -N '' 1>/dev/null
+        AZURE_SSH_PUBLIC_KEY_FILE="${AZURE_SSH_KEY}.pub"
+        # This is needed to run tests that require SSH access to nodes
+        export AZURE_SSH_KEY
     fi
     AZURE_SSH_PUBLIC_KEY_B64=$(base64 < "${AZURE_SSH_PUBLIC_KEY_FILE}" | tr -d '\r\n')
     export AZURE_SSH_PUBLIC_KEY_B64
diff --git a/scripts/ci-entrypoint.sh b/scripts/ci-entrypoint.sh
index e706ddb5a96..900dd6b726a 100755
--- a/scripts/ci-entrypoint.sh
+++ b/scripts/ci-entrypoint.sh
@@ -134,6 +134,12 @@ create_cluster() {
         echo "Unable to find kubeconfig for kind mgmt cluster ${KIND_CLUSTER_NAME}"
         exit 1
     fi
+
+    # set the SSH bastion and user that can be used to SSH into nodes
+    # (jq -r emits raw output, so no quote-stripping via tr is needed)
+    KUBE_SSH_BASTION=$(${KUBECTL} get azurecluster -o json | jq -r '.items[0].spec.networkSpec.apiServerLB.frontendIPs[0].publicIP.dnsName'):22
+    export KUBE_SSH_BASTION
+    KUBE_SSH_USER=capi
+    export KUBE_SSH_USER
 }
 
 # get_cidrs derives the CIDR from the Cluster's '.spec.clusterNetwork.pods.cidrBlocks' metadata