diff --git a/templates/cluster-template-private.yaml b/templates/cluster-template-private.yaml
index 8f994ee39c1..df9817fe915 100644
--- a/templates/cluster-template-private.yaml
+++ b/templates/cluster-template-private.yaml
@@ -28,22 +28,10 @@ spec:
   location: ${AZURE_LOCATION}
   networkSpec:
     apiServerLB:
-      frontendIPs:
-      - name: ${CLUSTER_NAME}-internal-lb-frontend
-        privateIP: 10.128.0.100
       name: ${CLUSTER_NAME}-internal-lb
       type: Internal
-    subnets:
-    - cidrBlocks:
-      - ${AZURE_CP_SUBNET_CIDR}
-      name: private-cp-subnet
-      role: control-plane
-    - cidrBlocks:
-      - ${AZURE_NODE_SUBNET_CIDR}
-      name: private-node-subnet
-      role: node
     vnet:
-      name: ${AZURE_VNET_NAME}
+      name: ${AZURE_VNET_NAME:=${CLUSTER_NAME}-vnet}
   resourceGroup: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}}
   subscriptionID: ${AZURE_SUBSCRIPTION_ID}
 ---
@@ -123,6 +111,9 @@ spec:
     mounts:
     - - LABEL=etcd_disk
       - /var/lib/etcddisk
+    preKubeadmCommands:
+    - if [ -f /tmp/kubeadm.yaml ]; then iptables -t nat -A OUTPUT -p all -d ${AZURE_INTERNAL_LB_IP}
+      -j DNAT --to-destination 127.0.0.1; fi
     useExperimentalRetryJoin: true
   replicas: ${CONTROL_PLANE_MACHINE_COUNT}
   version: ${KUBERNETES_VERSION}
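A note on the preKubeadmCommands addition above: Azure internal load balancers do not hairpin, meaning a backend VM cannot reach itself through the ILB frontend IP, and that is exactly what the first control plane node attempts while `kubeadm init` waits for the API server to answer on the load-balanced endpoint. The DNAT rule short-circuits that path to loopback, and the guard on /tmp/kubeadm.yaml (the init configuration written by the kubeadm bootstrap provider) restricts the rule to the node running `kubeadm init`. A minimal sketch of the same rule outside the template, assuming the e2e default of 10.128.0.100 for the frontend IP:

    #!/usr/bin/env bash
    # Hairpin workaround sketch, run as root on the first control plane node.
    AZURE_INTERNAL_LB_IP="10.128.0.100"  # example; the template substitutes this variable

    # Rewrite locally generated traffic aimed at the ILB frontend to loopback,
    # where kube-apiserver is listening during kubeadm init.
    iptables -t nat -A OUTPUT -p all -d "${AZURE_INTERNAL_LB_IP}" \
      -j DNAT --to-destination 127.0.0.1

    # Confirm the rule landed in the nat OUTPUT chain.
    iptables -t nat -L OUTPUT -n --line-numbers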
diff --git a/templates/cluster-template.yaml b/templates/cluster-template.yaml
new file mode 100644
index 00000000000..ab2bf7c478e
--- /dev/null
+++ b/templates/cluster-template.yaml
@@ -0,0 +1,199 @@
+apiVersion: cluster.x-k8s.io/v1alpha3
+kind: Cluster
+metadata:
+  labels:
+    cni: calico
+  name: ${CLUSTER_NAME}
+  namespace: default
+spec:
+  clusterNetwork:
+    pods:
+      cidrBlocks:
+      - 192.168.0.0/16
+  controlPlaneRef:
+    apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
+    kind: KubeadmControlPlane
+    name: ${CLUSTER_NAME}-control-plane
+  infrastructureRef:
+    apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+    kind: AzureCluster
+    name: ${CLUSTER_NAME}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+kind: AzureCluster
+metadata:
+  name: ${CLUSTER_NAME}
+  namespace: default
+spec:
+  location: ${AZURE_LOCATION}
+  networkSpec:
+    vnet:
+      name: ${AZURE_VNET_NAME:=${CLUSTER_NAME}-vnet}
+  resourceGroup: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}}
+  subscriptionID: ${AZURE_SUBSCRIPTION_ID}
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
+kind: KubeadmControlPlane
+metadata:
+  name: ${CLUSTER_NAME}-control-plane
+  namespace: default
+spec:
+  infrastructureTemplate:
+    apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+    kind: AzureMachineTemplate
+    name: ${CLUSTER_NAME}-control-plane
+  kubeadmConfigSpec:
+    clusterConfiguration:
+      apiServer:
+        extraArgs:
+          cloud-config: /etc/kubernetes/azure.json
+          cloud-provider: azure
+        extraVolumes:
+        - hostPath: /etc/kubernetes/azure.json
+          mountPath: /etc/kubernetes/azure.json
+          name: cloud-config
+          readOnly: true
+        timeoutForControlPlane: 20m
+      controllerManager:
+        extraArgs:
+          allocate-node-cidrs: "false"
+          cloud-config: /etc/kubernetes/azure.json
+          cloud-provider: azure
+          cluster-name: ${CLUSTER_NAME}
+        extraVolumes:
+        - hostPath: /etc/kubernetes/azure.json
+          mountPath: /etc/kubernetes/azure.json
+          name: cloud-config
+          readOnly: true
+      etcd:
+        local:
+          dataDir: /var/lib/etcddisk/etcd
+    diskSetup:
+      filesystems:
+      - device: /dev/disk/azure/scsi1/lun0
+        extraOpts:
+        - -E
+        - lazy_itable_init=1,lazy_journal_init=1
+        filesystem: ext4
+        label: etcd_disk
+      - device: ephemeral0.1
+        filesystem: ext4
+        label: ephemeral0
+        replaceFS: ntfs
+      partitions:
+      - device: /dev/disk/azure/scsi1/lun0
+        layout: true
+        overwrite: false
+        tableType: gpt
+    files:
+    - contentFrom:
+        secret:
+          key: control-plane-azure.json
+          name: ${CLUSTER_NAME}-control-plane-azure-json
+      owner: root:root
+      path: /etc/kubernetes/azure.json
+      permissions: "0644"
+    initConfiguration:
+      nodeRegistration:
+        kubeletExtraArgs:
+          cloud-config: /etc/kubernetes/azure.json
+          cloud-provider: azure
+        name: '{{ ds.meta_data["local_hostname"] }}'
+    joinConfiguration:
+      nodeRegistration:
+        kubeletExtraArgs:
+          cloud-config: /etc/kubernetes/azure.json
+          cloud-provider: azure
+        name: '{{ ds.meta_data["local_hostname"] }}'
+    mounts:
+    - - LABEL=etcd_disk
+      - /var/lib/etcddisk
+    useExperimentalRetryJoin: true
+  replicas: ${CONTROL_PLANE_MACHINE_COUNT}
+  version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+kind: AzureMachineTemplate
+metadata:
+  name: ${CLUSTER_NAME}-control-plane
+  namespace: default
+spec:
+  template:
+    spec:
+      dataDisks:
+      - diskSizeGB: 256
+        lun: 0
+        nameSuffix: etcddisk
+      location: ${AZURE_LOCATION}
+      osDisk:
+        diskSizeGB: 128
+        managedDisk:
+          storageAccountType: Premium_LRS
+        osType: Linux
+      sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""}
+      vmSize: ${AZURE_CONTROL_PLANE_MACHINE_TYPE}
+---
+apiVersion: cluster.x-k8s.io/v1alpha3
+kind: MachineDeployment
+metadata:
+  name: ${CLUSTER_NAME}-md-0
+  namespace: default
+spec:
+  clusterName: ${CLUSTER_NAME}
+  replicas: ${WORKER_MACHINE_COUNT}
+  selector:
+    matchLabels: null
+  template:
+    spec:
+      bootstrap:
+        configRef:
+          apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
+          kind: KubeadmConfigTemplate
+          name: ${CLUSTER_NAME}-md-0
+      clusterName: ${CLUSTER_NAME}
+      infrastructureRef:
+        apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+        kind: AzureMachineTemplate
+        name: ${CLUSTER_NAME}-md-0
+      version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+kind: AzureMachineTemplate
+metadata:
+  name: ${CLUSTER_NAME}-md-0
+  namespace: default
+spec:
+  template:
+    spec:
+      location: ${AZURE_LOCATION}
+      osDisk:
+        diskSizeGB: 128
+        managedDisk:
+          storageAccountType: Premium_LRS
+        osType: Linux
+      sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""}
+      vmSize: ${AZURE_NODE_MACHINE_TYPE}
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
+kind: KubeadmConfigTemplate
+metadata:
+  name: ${CLUSTER_NAME}-md-0
+  namespace: default
+spec:
+  template:
+    spec:
+      files:
+      - contentFrom:
+          secret:
+            key: worker-node-azure.json
+            name: ${CLUSTER_NAME}-md-0-azure-json
+        owner: root:root
+        path: /etc/kubernetes/azure.json
+        permissions: "0644"
+      joinConfiguration:
+        nodeRegistration:
+          kubeletExtraArgs:
+            cloud-config: /etc/kubernetes/azure.json
+            cloud-provider: azure
+          name: '{{ ds.meta_data["local_hostname"] }}'
+      useExperimentalRetryJoin: true
diff --git a/templates/flavors/private/kustomization.yaml b/templates/flavors/private/kustomization.yaml
index f154854590a..b96f9b2e7b3 100644
--- a/templates/flavors/private/kustomization.yaml
+++ b/templates/flavors/private/kustomization.yaml
@@ -3,4 +3,4 @@ resources:
   - ../default
 patchesStrategicMerge:
   - patches/private-lb.yaml
-
+  - patches/ilb-iptables-cmd.yaml
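For orientation: the flat files under templates/ are generated from the kustomize flavors, which is why the same change appears both in templates/flavors/private/ and in the rendered cluster-template-private.yaml. Note also that `${AZURE_VNET_NAME:=${CLUSTER_NAME}-vnet}` uses clusterctl's envsubst defaulting, so the variable becomes optional. A hedged sketch of the regeneration step; the repository drives this through a make target, so the exact paths and target name here are assumptions:

    #!/usr/bin/env bash
    # Rebuild the rendered templates from their kustomize flavor sources.
    # CAPZ wraps this in a generate-flavors style target; invocation may differ.
    kustomize build templates/flavors/default > templates/cluster-template.yaml
    kustomize build templates/flavors/private > templates/cluster-template-private.yaml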
diff --git a/templates/flavors/private/patches/ilb-iptables-cmd.yaml b/templates/flavors/private/patches/ilb-iptables-cmd.yaml
new file mode 100644
index 00000000000..882d5c70218
--- /dev/null
+++ b/templates/flavors/private/patches/ilb-iptables-cmd.yaml
@@ -0,0 +1,10 @@
+apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
+kind: KubeadmControlPlane
+metadata:
+  name: "${CLUSTER_NAME}-control-plane"
+spec:
+  kubeadmConfigSpec:
+    preKubeadmCommands:
+      # this is a workaround for the hairpin routing problem with internal load balancers
+      # during kubeadm init, where ${AZURE_INTERNAL_LB_IP} is the internal LB IP
+      - if [ -f /tmp/kubeadm.yaml ]; then iptables -t nat -A OUTPUT -p all -d ${AZURE_INTERNAL_LB_IP} -j DNAT --to-destination 127.0.0.1; fi
diff --git a/templates/flavors/private/patches/private-lb.yaml b/templates/flavors/private/patches/private-lb.yaml
index ad558c8d8da..cb1e27c1b70 100644
--- a/templates/flavors/private/patches/private-lb.yaml
+++ b/templates/flavors/private/patches/private-lb.yaml
@@ -4,20 +4,7 @@ metadata:
   name: ${CLUSTER_NAME}
 spec:
   networkSpec:
-    vnet:
-      name: ${AZURE_VNET_NAME}
-    subnets:
-      - name: private-cp-subnet
-        role: control-plane
-        cidrBlocks:
-          - ${AZURE_CP_SUBNET_CIDR}
-      - name: private-node-subnet
-        role: node
-        cidrBlocks:
-          - ${AZURE_NODE_SUBNET_CIDR}
     apiServerLB:
       name: ${CLUSTER_NAME}-internal-lb
       type: Internal
-      frontendIPs:
-        - name: ${CLUSTER_NAME}-internal-lb-frontend
-          privateIP: 10.128.0.100
\ No newline at end of file
+
diff --git a/templates/test/cluster-template-prow-private.yaml b/templates/test/cluster-template-prow-private.yaml
index 2dc26d00665..8600300a685 100644
--- a/templates/test/cluster-template-prow-private.yaml
+++ b/templates/test/cluster-template-prow-private.yaml
@@ -33,7 +33,7 @@ spec:
     apiServerLB:
       frontendIPs:
       - name: ${CLUSTER_NAME}-internal-lb-frontend
-        privateIP: 10.128.0.100
+        privateIP: ${AZURE_INTERNAL_LB_IP}
       name: ${CLUSTER_NAME}-internal-lb
       type: Internal
     subnets:
@@ -126,6 +126,9 @@ spec:
     mounts:
     - - LABEL=etcd_disk
       - /var/lib/etcddisk
+    preKubeadmCommands:
+    - if [ -f /tmp/kubeadm.yaml ]; then iptables -t nat -A OUTPUT -p all -d ${AZURE_INTERNAL_LB_IP}
+      -j DNAT --to-destination 127.0.0.1; fi
     useExperimentalRetryJoin: true
   replicas: ${CONTROL_PLANE_MACHINE_COUNT}
   version: ${KUBERNETES_VERSION}
diff --git a/test/e2e/azure_privatecluster.go b/test/e2e/azure_privatecluster.go
index 715363024c1..6046cbfc62f 100644
--- a/test/e2e/azure_privatecluster.go
+++ b/test/e2e/azure_privatecluster.go
@@ -94,8 +94,7 @@ func AzurePrivateClusterSpec(ctx context.Context, inputGetter func() AzurePrivat
 
 	By("Creating a private workload cluster")
 	clusterName = fmt.Sprintf("capz-e2e-%s", util.RandomString(6))
-	Expect(os.Setenv(AzureResourceGroup, input.ClusterName)).NotTo(HaveOccurred())
-	Expect(os.Setenv(AzureVNetName, fmt.Sprintf("%s-vnet", input.ClusterName))).NotTo(HaveOccurred())
+	Expect(os.Setenv(AzureInternalLBIP, "10.128.0.100")).NotTo(HaveOccurred())
 	result := clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
 		ClusterProxy: publicClusterProxy,
 		ConfigCluster: clusterctl.ConfigClusterInput{
diff --git a/test/e2e/azure_test.go b/test/e2e/azure_test.go
index 9218fc83b85..69957ac00ae 100644
--- a/test/e2e/azure_test.go
+++ b/test/e2e/azure_test.go
@@ -123,6 +123,7 @@ var _ = Describe("Workload cluster creation", func() {
 	Context("Creating a private cluster from the management cluster", func() {
 		AzurePrivateClusterSpec(ctx, func() AzurePrivateClusterSpecInput {
 			return AzurePrivateClusterSpecInput{
+				BootstrapClusterProxy: bootstrapClusterProxy,
 				Namespace:             namespace,
 				ClusterName:           clusterName,
 				ClusterctlConfigPath:  clusterctlConfigPath,
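Taken together, the private flavor now reads its API server frontend IP from AZURE_INTERNAL_LB_IP instead of hardcoding 10.128.0.100, and the e2e spec pins that variable explicitly before applying the template. A hedged usage sketch for rendering the flavor by hand; the flag spelling follows clusterctl of the v1alpha3 era, and the IP is only an example that must be a free address inside the control plane subnet:

    #!/usr/bin/env bash
    # Assumed manual flow; other required variables (subscription ID, credentials,
    # machine types) are omitted for brevity.
    export CLUSTER_NAME="my-private-cluster"
    export AZURE_LOCATION="westus2"             # example value
    export AZURE_INTERNAL_LB_IP="10.128.0.100"  # must sit inside the control plane subnet

    # clusterctl resolves ${VAR:=default} expressions while rendering the manifest.
    clusterctl config cluster "${CLUSTER_NAME}" \
      --flavor private \
      --kubernetes-version v1.18.8 \
      > private-cluster.yaml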