diff --git a/scripts/ci-e2e.sh b/scripts/ci-e2e.sh
index ad3ef8d5243b..988b8df3c697 100755
--- a/scripts/ci-e2e.sh
+++ b/scripts/ci-e2e.sh
@@ -31,7 +31,7 @@ source "${REPO_ROOT}/hack/ensure-kustomize.sh"
 # Configure provider images generation;
 # please ensure the generated image name matches image names used in the E2E_CONF_FILE
 export REGISTRY=gcr.io/k8s-staging-cluster-api
-export TAG=ci
+export TAG=dev
 export ARCH=amd64
 export PULL_POLICY=IfNotPresent
@@ -54,7 +54,7 @@ docker pull kindest/node:v1.17.2
 export GINKGO_NODES=3
 export GINKGO_NOCOLOR=true
 export GINKGO_ARGS="--failFast" # Other ginkgo args that need to be appended to the command.
-export E2E_CONF_FILE="${REPO_ROOT}/test/e2e/config/docker-ci.yaml"
+export E2E_CONF_FILE="${REPO_ROOT}/test/e2e/config/docker.yaml"
 export ARTIFACTS="${ARTIFACTS:-${REPO_ROOT}/_artifacts}"
 export SKIP_RESOURCE_CLEANUP=false
 export USE_EXISTING_CLUSTER=false
diff --git a/test/e2e/Makefile b/test/e2e/Makefile
index b14832073601..1c90e604d8b1 100644
--- a/test/e2e/Makefile
+++ b/test/e2e/Makefile
@@ -28,7 +28,7 @@ export GOPROXY
 REPO_ROOT := $(shell git rev-parse --show-toplevel)

 help:  ## Display this help
-	@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n  make \033[36m<target>\033[0m\n"} /^[a-zA-Z_-]+:.*?##/ { printf "  \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
+	@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n  make \033[36m<target>\033[0m\n"} /^[a-zA-Z_-]+:.*?##/ { printf "  \033[36m%-25s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)

 ## --------------------------------------
 ## Binaries
@@ -38,28 +38,43 @@ TOOLS_DIR := $(REPO_ROOT)/hack/tools
 BIN_DIR := bin
 TOOLS_BIN_DIR := $(TOOLS_DIR)/$(BIN_DIR)
 GINKGO := $(TOOLS_BIN_DIR)/ginkgo
+KUSTOMIZE := $(TOOLS_BIN_DIR)/kustomize

-.PHONY: ginkgo
-ginkgo:
+$(GINKGO): # Build ginkgo from tools folder.
 	cd $(TOOLS_DIR) && go build -tags=tools -o $(BIN_DIR)/ginkgo github.com/onsi/ginkgo/ginkgo

+$(KUSTOMIZE): # Build kustomize from tools folder.
+	cd $(TOOLS_DIR) && go build -tags=tools -o $(BIN_DIR)/kustomize sigs.k8s.io/kustomize/kustomize/v3
+
 ## --------------------------------------
-## Testing
+## Templates
 ## --------------------------------------

-TEST_E2E_DIR := $(REPO_ROOT)/test/e2e
+DOCKER_TEMPLATES := $(REPO_ROOT)/test/e2e/data/infrastructure-docker
+
+.PHONY: cluster-templates
+cluster-templates: $(KUSTOMIZE) ## Generate cluster templates
+	$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/cluster-template --load_restrictor none > $(DOCKER_TEMPLATES)/cluster-template.yaml
+	$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/cluster-template-mhc --load_restrictor none > $(DOCKER_TEMPLATES)/cluster-template-mhc.yaml
+	$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/cluster-template-kcp-adoption/step1 --load_restrictor none > $(DOCKER_TEMPLATES)/cluster-template-kcp-adoption.yaml
+	echo "---" >> $(DOCKER_TEMPLATES)/cluster-template-kcp-adoption.yaml
+	$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/cluster-template-kcp-adoption/step2 --load_restrictor none >> $(DOCKER_TEMPLATES)/cluster-template-kcp-adoption.yaml
+
+## --------------------------------------
+## Testing
+## --------------------------------------

 GINKGO_FOCUS ?=
 GINKGO_NODES ?= 1
-E2E_CONF_FILE ?= ${REPO_ROOT}/test/e2e/config/docker-dev.yaml
+E2E_CONF_FILE ?= ${REPO_ROOT}/test/e2e/config/docker.yaml
 ARTIFACTS ?= ${REPO_ROOT}/_artifacts
 SKIP_RESOURCE_CLEANUP ?= false
 USE_EXISTING_CLUSTER ?= false
 GINKGO_NOCOLOR ?= false

 .PHONY: run
-run: ginkgo ## Run the end-to-end tests
-	cd $(TEST_E2E_DIR); $(GINKGO) -v -trace -tags=e2e -focus="$(GINKGO_FOCUS)" -nodes=$(GINKGO_NODES) --noColor=$(GINKGO_NOCOLOR) $(GINKGO_ARGS) . -- \
+run: $(GINKGO) cluster-templates ## Run the end-to-end tests
+	$(GINKGO) -v -trace -tags=e2e -focus="$(GINKGO_FOCUS)" -nodes=$(GINKGO_NODES) --noColor=$(GINKGO_NOCOLOR) $(GINKGO_ARGS) . -- \
 	    -e2e.artifacts-folder="$(ARTIFACTS)" \
 	    -e2e.config="$(E2E_CONF_FILE)" \
 	    -e2e.skip-resource-cleanup=$(SKIP_RESOURCE_CLEANUP) -e2e.use-existing-cluster=$(USE_EXISTING_CLUSTER)
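Note: the new `cluster-templates` target regenerates the flattened template YAMLs from the kustomize sources, and `run` now lists it as a prerequisite, so locally edited bases are picked up automatically. A minimal sketch of the resulting workflow (the GINKGO_FOCUS value is illustrative, not a spec name defined in this diff):

```sh
# Rebuild the flattened templates from the kustomize bases; this also runs
# implicitly because `run` depends on cluster-templates.
make -C test/e2e cluster-templates

# Run a focused subset of the suite against the shared docker.yaml config,
# keeping the resources around afterwards for inspection.
make -C test/e2e run GINKGO_FOCUS="KCP adoption" SKIP_RESOURCE_CLEANUP=true
```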
diff --git a/test/e2e/config/docker-dev.yaml b/test/e2e/config/docker-dev.yaml
deleted file mode 100644
index bd9775a2c9d7..000000000000
--- a/test/e2e/config/docker-dev.yaml
+++ /dev/null
@@ -1,123 +0,0 @@
----
-# E2E test scenario using local dev images and manifests built from the source tree for following providers:
-# - cluster-api
-# - bootstrap kubeadm
-# - control-plane kubeadm
-# - docker
-
-# For creating local dev images built from the source tree;
-# - `make docker-build REGISTRY=gcr.io/k8s-staging-cluster-api` to build the cluster-api, bootstrap kubeadm, control-plane kubeadm provider images.
-# - `make -C test/infrastructure/docker docker-build REGISTRY=gcr.io/k8s-staging-cluster-api` to build the docker provider images.
-
-images:
-# Use local dev images built source tree;
-- name: gcr.io/k8s-staging-cluster-api/cluster-api-controller-amd64:dev
-  loadBehavior: mustLoad
-- name: gcr.io/k8s-staging-cluster-api/kubeadm-bootstrap-controller-amd64:dev
-  loadBehavior: mustLoad
-- name: gcr.io/k8s-staging-cluster-api/kubeadm-control-plane-controller-amd64:dev
-  loadBehavior: mustLoad
-- name: gcr.io/k8s-staging-cluster-api/capd-manager-amd64:dev
-  loadBehavior: mustLoad
-- name: quay.io/jetstack/cert-manager-cainjector:v0.16.1
-  loadBehavior: tryLoad
-- name: quay.io/jetstack/cert-manager-webhook:v0.16.1
-  loadBehavior: tryLoad
-- name: quay.io/jetstack/cert-manager-controller:v0.16.1
-  loadBehavior: tryLoad
-# If using Calico uncomment following lines to speed up test by pre-loading required images on nodes
-# - name: calico/kube-controllers:v3.13.1
-#   loadBehavior: tryLoad
-# - name: calico/cni:v3.13.1
-#   loadBehavior: tryLoad
-# - name: calico/pod2daemon-flexvol:v3.13.1
-#   loadBehavior: tryLoad
-# - name: calico/node:v3.13.1
-#   loadBehavior: tryLoad
-
-providers:
-
-- name: cluster-api
-  type: CoreProvider
-  versions:
-  - name: v0.3.0
-    # Use manifest from source files
-    value: ../../../config
-    replacements:
-    - old: "imagePullPolicy: Always"
-      new: "imagePullPolicy: IfNotPresent"
-    - old: "--enable-leader-election"
-      new: "--enable-leader-election=false"
-    - old: --metrics-addr=127.0.0.1:8080
-      new: --metrics-addr=:8080
-
-- name: kubeadm
-  type: BootstrapProvider
-  versions:
-  - name: v0.3.0
-    # Use manifest from source files
-    value: ../../../bootstrap/kubeadm/config
-    replacements:
-    - old: "imagePullPolicy: Always"
-      new: "imagePullPolicy: IfNotPresent"
-    - old: "--enable-leader-election"
-      new: "--enable-leader-election=false"
-    - old: --metrics-addr=127.0.0.1:8080
-      new: --metrics-addr=:8080
-
-- name: kubeadm
-  type: ControlPlaneProvider
-  versions:
-  - name: v0.3.0
-    # Use manifest from source files
-    value: ../../../controlplane/kubeadm/config
-    replacements:
-    - old: "imagePullPolicy: Always"
-      new: "imagePullPolicy: IfNotPresent"
-    - old: "--enable-leader-election"
-      new: "--enable-leader-election=false"
-    - old: --metrics-addr=127.0.0.1:8080
-      new: --metrics-addr=:8080
-
-- name: docker
-  type: InfrastructureProvider
-  versions:
-  - name: v0.3.0
-    # Use manifest from source files
-    value: ../../../test/infrastructure/docker/config
-    replacements:
-    - old: "imagePullPolicy: Always"
-      new: "imagePullPolicy: IfNotPresent"
-    - old: "--enable-leader-election"
-      new: "--enable-leader-election=false"
-    - old: --metrics-addr=127.0.0.1:8080
-      new: --metrics-addr=:8080
-  files:
-  # Add a metadata for docker provider
-  - sourcePath: "../data/infrastructure-docker/metadata.yaml"
-  # Add cluster templates
-  - sourcePath: "../data/infrastructure-docker/cluster-template.yaml"
-  - sourcePath: "../data/infrastructure-docker/cluster-template-kcp-adoption.yaml"
-
-variables:
-  KUBERNETES_VERSION: "v1.18.2"
-  ETCD_VERSION_UPGRADE_TO: "3.4.3-0"
-  COREDNS_VERSION_UPGRADE_TO: "1.6.7"
-  KUBERNETES_VERSION_UPGRADE_TO: "v1.18.2"
-  KUBERNETES_VERSION_UPGRADE_FROM: "v1.17.2"
-  DOCKER_SERVICE_DOMAIN: "cluster.local"
-  DOCKER_SERVICE_CIDRS: "10.128.0.0/12"
-  # IMPORTANT! This values should match the one used by the CNI provider
-  DOCKER_POD_CIDRS: "192.168.0.0/16"
-  #CNI: "./data/cni/calico/calico.yaml"
-  CNI: "./data/cni/kindnet/kindnet.yaml"
-  EXP_CLUSTER_RESOURCE_SET: "true"
-
-intervals:
-  default/wait-controllers: ["3m", "10s"]
-  default/wait-cluster: ["3m", "10s"]
-  default/wait-control-plane: ["10m", "10s"]
-  default/wait-worker-nodes: ["5m", "10s"]
-  default/wait-delete-cluster: ["3m", "10s"]
-  default/wait-machine-upgrade: ["20m", "10s"]
-  default/wait-machine-remediation: ["5m", "10s"]
diff --git a/test/e2e/config/docker-ci.yaml b/test/e2e/config/docker.yaml
similarity index 89%
rename from test/e2e/config/docker-ci.yaml
rename to test/e2e/config/docker.yaml
index 54f469ef52ff..a9020ec3e6c8 100644
--- a/test/e2e/config/docker-ci.yaml
+++ b/test/e2e/config/docker.yaml
@@ -5,17 +5,17 @@
 # - control-plane kubeadm
 # - docker

-# For creating local dev images run ./scripts/ci-e2e.sh
+# For creating local dev images run make docker-build-e2e from the main CAPI repository

 images:
 # Use local dev images built source tree;
-- name: gcr.io/k8s-staging-cluster-api/cluster-api-controller-amd64:ci
+- name: gcr.io/k8s-staging-cluster-api/cluster-api-controller-amd64:dev
   loadBehavior: mustLoad
-- name: gcr.io/k8s-staging-cluster-api/kubeadm-bootstrap-controller-amd64:ci
+- name: gcr.io/k8s-staging-cluster-api/kubeadm-bootstrap-controller-amd64:dev
   loadBehavior: mustLoad
-- name: gcr.io/k8s-staging-cluster-api/kubeadm-control-plane-controller-amd64:ci
+- name: gcr.io/k8s-staging-cluster-api/kubeadm-control-plane-controller-amd64:dev
   loadBehavior: mustLoad
-- name: gcr.io/k8s-staging-cluster-api/capd-manager-amd64:ci
+- name: gcr.io/k8s-staging-cluster-api/capd-manager-amd64:dev
   loadBehavior: mustLoad
 - name: quay.io/jetstack/cert-manager-cainjector:v0.16.1
   loadBehavior: tryLoad
@@ -66,11 +66,9 @@ providers:
     - old: --metrics-addr=127.0.0.1:8080
       new: --metrics-addr=:8080
   files:
-  # Add a metadata for docker provider
-  - sourcePath: "../data/infrastructure-docker/metadata.yaml"
   # Add cluster templates
-  - sourcePath: "../data/infrastructure-docker/cluster-template-ci.yaml"
-    targetName: "cluster-template.yaml"
+  - sourcePath: "../data/infrastructure-docker/cluster-template.yaml"
+  - sourcePath: "../data/infrastructure-docker/cluster-template-mhc.yaml"
   - sourcePath: "../data/infrastructure-docker/cluster-template-kcp-adoption.yaml"

 variables:
diff --git a/test/e2e/data/cni/kindnet/kindnet.yaml b/test/e2e/data/cni/kindnet/kindnet.yaml
index 8995ca640348..8b8589a59eaa 100644
--- a/test/e2e/data/cni/kindnet/kindnet.yaml
+++ b/test/e2e/data/cni/kindnet/kindnet.yaml
@@ -110,4 +110,3 @@ spec:
         - name: lib-modules
           hostPath:
             path: /lib/modules
----
\ No newline at end of file
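Note: with docker-dev.yaml deleted, CI and local development now share the single docker.yaml config, so provider images must carry the `dev` tag it expects. A sketch of the image build step, combining the `make docker-build-e2e` entry point named in the config header with the explicit per-provider builds from the deleted file's comments (assuming the target exists on your branch):

```sh
# Build all provider images with the dev tag expected by docker.yaml:
make docker-build-e2e

# Roughly equivalent explicit builds (per the deleted docker-dev.yaml notes
# plus the TAG/REGISTRY values exported by ci-e2e.sh above):
make docker-build REGISTRY=gcr.io/k8s-staging-cluster-api TAG=dev
make -C test/infrastructure/docker docker-build REGISTRY=gcr.io/k8s-staging-cluster-api TAG=dev
```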
diff --git a/test/e2e/data/infrastructure-docker/bases/cluster-with-kcp.yaml b/test/e2e/data/infrastructure-docker/bases/cluster-with-kcp.yaml
new file mode 100644
index 000000000000..ea86e904bed9
--- /dev/null
+++ b/test/e2e/data/infrastructure-docker/bases/cluster-with-kcp.yaml
@@ -0,0 +1,70 @@
+---
+# DockerCluster object referenced by the Cluster object
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+kind: DockerCluster
+metadata:
+  name: '${CLUSTER_NAME}'
+---
+# Cluster object with
+# - Reference to the KubeadmControlPlane object
+# - the label cni=${CLUSTER_NAME}-crs-0, so the cluster can be selected by the ClusterResourceSet.
+apiVersion: cluster.x-k8s.io/v1alpha3
+kind: Cluster
+metadata:
+  name: '${CLUSTER_NAME}'
+  labels:
+    cni: "${CLUSTER_NAME}-crs-0"
+spec:
+  clusterNetwork:
+    services:
+      cidrBlocks: ['${DOCKER_SERVICE_CIDRS}']
+    pods:
+      cidrBlocks: ['${DOCKER_POD_CIDRS}']
+    serviceDomain: '${DOCKER_SERVICE_DOMAIN}'
+  infrastructureRef:
+    apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+    kind: DockerCluster
+    name: '${CLUSTER_NAME}'
+  controlPlaneRef:
+    kind: KubeadmControlPlane
+    apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
+    name: "${CLUSTER_NAME}-control-plane"
+---
+# DockerMachineTemplate object referenced by the KubeadmControlPlane object
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+kind: DockerMachineTemplate
+metadata:
+  name: "${CLUSTER_NAME}-control-plane"
+spec:
+  template:
+    spec: {}
+---
+# KubeadmControlPlane referenced by the Cluster object with
+# - the label kcp-adoption.step2, because it should be created in the second step of the kcp-adoption test.
+kind: KubeadmControlPlane
+apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
+metadata:
+  name: "${CLUSTER_NAME}-control-plane"
+  labels:
+    kcp-adoption.step2: ""
+spec:
+  replicas: ${CONTROL_PLANE_MACHINE_COUNT}
+  infrastructureTemplate:
+    kind: DockerMachineTemplate
+    apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+    name: "${CLUSTER_NAME}-control-plane"
+  kubeadmConfigSpec:
+    clusterConfiguration:
+      controllerManager:
+        extraArgs: {enable-hostpath-provisioner: 'true'}
+      apiServer:
+        certSANs: [localhost, 127.0.0.1]
+    initConfiguration:
+      nodeRegistration:
+        criSocket: /var/run/containerd/containerd.sock
+        kubeletExtraArgs: {eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'}
+    joinConfiguration:
+      nodeRegistration:
+        criSocket: /var/run/containerd/containerd.sock
+        kubeletExtraArgs: {eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'}
+  version: "${KUBERNETES_VERSION}"
diff --git a/test/e2e/data/infrastructure-docker/bases/crs.yaml b/test/e2e/data/infrastructure-docker/bases/crs.yaml
new file mode 100644
index 000000000000..d88867d1ddb1
--- /dev/null
+++ b/test/e2e/data/infrastructure-docker/bases/crs.yaml
@@ -0,0 +1,24 @@
+---
+# ConfigMap object referenced by the ClusterResourceSet object and with
+# the CNI resource defined in the test config file
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: "cni-${CLUSTER_NAME}-crs-0"
+data: ${CNI_RESOURCES}
+binaryData:
+---
+# ClusterResourceSet object with
+# a selector that targets all the Cluster with label cni=${CLUSTER_NAME}-crs-0
+apiVersion: addons.cluster.x-k8s.io/v1alpha3
+kind: ClusterResourceSet
+metadata:
+  name: "${CLUSTER_NAME}-crs-0"
+spec:
+  strategy: ApplyOnce
+  clusterSelector:
+    matchLabels:
+      cni: "${CLUSTER_NAME}-crs-0"
+  resources:
+  - name: "cni-${CLUSTER_NAME}-crs-0"
+    kind: ConfigMap
diff --git a/test/e2e/data/infrastructure-docker/bases/md.yaml b/test/e2e/data/infrastructure-docker/bases/md.yaml
new file mode 100644
index 000000000000..5cef10112881
--- /dev/null
+++ b/test/e2e/data/infrastructure-docker/bases/md.yaml
@@ -0,0 +1,54 @@
+---
+# DockerMachineTemplate referenced by the MachineDeployment and with
+# - extraMounts for the docker sock, thus allowing self-hosting tests
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+kind: DockerMachineTemplate
+metadata:
+  name: "${CLUSTER_NAME}-md-0"
+spec:
+  template:
+    spec:
+      extraMounts:
+        - containerPath: "/var/run/docker.sock"
+          hostPath: "/var/run/docker.sock"
+---
+# KubeadmConfigTemplate referenced by the MachineDeployment
+apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
+kind: KubeadmConfigTemplate
+metadata:
+  name: "${CLUSTER_NAME}-md-0"
+spec:
+  template:
+    spec:
+      joinConfiguration:
+        nodeRegistration:
+          criSocket: /var/run/containerd/containerd.sock
+          kubeletExtraArgs: {eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'}
+---
+# MachineDeployment object with
+# - the label nodepool=pool1 that applies to all the machines, so those machines can be targeted by the MachineHealthCheck object
+apiVersion: cluster.x-k8s.io/v1alpha3
+kind: MachineDeployment
+metadata:
+  name: "${CLUSTER_NAME}-md-0"
+spec:
+  clusterName: "${CLUSTER_NAME}"
+  replicas: ${WORKER_MACHINE_COUNT}
+  selector:
+    matchLabels:
+  template:
+    metadata:
+      labels:
+        "nodepool": "pool1"
+    spec:
+      clusterName: "${CLUSTER_NAME}"
+      version: "${KUBERNETES_VERSION}"
+      bootstrap:
+        configRef:
+          name: "${CLUSTER_NAME}-md-0"
+          apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
+          kind: KubeadmConfigTemplate
+      infrastructureRef:
+        name: "${CLUSTER_NAME}-md-0"
+        apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+        kind: DockerMachineTemplate
diff --git a/test/e2e/data/infrastructure-docker/bases/mhc.yaml b/test/e2e/data/infrastructure-docker/bases/mhc.yaml
new file mode 100644
index 000000000000..34dd68ec7afe
--- /dev/null
+++ b/test/e2e/data/infrastructure-docker/bases/mhc.yaml
@@ -0,0 +1,18 @@
+---
+# MachineHealthCheck object with
+# - a selector that targets all the machines with label nodepool=pool1
+# - unhealthyConditions triggering remediation after the node has been up for 30s (because it tests a condition that does not exist)
+apiVersion: cluster.x-k8s.io/v1alpha3
+kind: MachineHealthCheck
+metadata:
+  name: "${CLUSTER_NAME}-mhc-0"
+spec:
+  clusterName: "${CLUSTER_NAME}"
+  maxUnhealthy: 100%
+  selector:
+    matchLabels:
+      nodepool: "pool1"
+  unhealthyConditions:
+    - type: E2ENodeUnhealthy
+      status: "True"
+      timeout: 30s
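Note: each flavor directory composes these bases with a kustomization.yaml (shown further down); because the bases live outside the flavor directories, the build needs `--load_restrictor none`. A sketch of flattening a single flavor by hand, mirroring the Makefile target above (binary path taken from TOOLS_BIN_DIR):

```sh
# Flatten the plain flavor into the file the e2e config consumes.
TEMPLATES=test/e2e/data/infrastructure-docker
hack/tools/bin/kustomize build "$TEMPLATES/cluster-template" --load_restrictor none \
  > "$TEMPLATES/cluster-template.yaml"
```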
diff --git a/test/e2e/data/infrastructure-docker/cluster-template-kcp-adoption.yaml b/test/e2e/data/infrastructure-docker/cluster-template-kcp-adoption.yaml
index 0ed2d0f33c4d..8f5092534d0b 100644
--- a/test/e2e/data/infrastructure-docker/cluster-template-kcp-adoption.yaml
+++ b/test/e2e/data/infrastructure-docker/cluster-template-kcp-adoption.yaml
@@ -1,179 +1,180 @@
-##
-# these resources are sequenced by label:
-# 1. initial: just the resources to bootstrap an initial controlplane
-# 2. kcp: the KCP resources (note the duplicated Cluster to associate the control plane)
-##
-###
-# 1. initial
-###
-apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
-kind: DockerCluster
+apiVersion: v1
+binaryData: null
+data: ${CNI_RESOURCES}
+kind: ConfigMap
 metadata:
-  name: '${ CLUSTER_NAME }'
   labels:
-    initial: ''
+    kcp-adoption.step1: ""
+  name: cni-${CLUSTER_NAME}-crs-0
 ---
-apiVersion: cluster.x-k8s.io/v1alpha3
-kind: Cluster
-metadata:
-  name: '${ CLUSTER_NAME }'
-  labels:
-    initial: ''
-    cni: "${CLUSTER_NAME}-crs-0"
-spec:
-  clusterNetwork:
-    services:
-      cidrBlocks: ['${ DOCKER_SERVICE_CIDRS }']
-    pods:
-      cidrBlocks: ['${ DOCKER_POD_CIDRS }']
-    serviceDomain: '${ DOCKER_SERVICE_DOMAIN }'
-  infrastructureRef:
-    apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
-    kind: DockerCluster
-    name: '${ CLUSTER_NAME }'
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
-kind: DockerMachine
+apiVersion: addons.cluster.x-k8s.io/v1alpha3
+kind: ClusterResourceSet
 metadata:
-  name: "${CLUSTER_NAME}-control-plane-0"
   labels:
-    initial: ''
+    kcp-adoption.step1: ""
+  name: ${CLUSTER_NAME}-crs-0
 spec:
-  extraMounts:
-    - containerPath: "/var/run/docker.sock"
-      hostPath: "/var/run/docker.sock"
+  clusterSelector:
+    matchLabels:
+      cni: ${CLUSTER_NAME}-crs-0
+  resources:
+  - kind: ConfigMap
+    name: cni-${CLUSTER_NAME}-crs-0
+  strategy: ApplyOnce
 ---
 apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
 kind: KubeadmConfig
 metadata:
-  name: "${CLUSTER_NAME}-control-plane-0"
   labels:
-    initial: ''
+    kcp-adoption.step1: ""
+  name: ${CLUSTER_NAME}-control-plane-0
 spec:
   clusterConfiguration:
-    controllerManager:
-      extraArgs: {enable-hostpath-provisioner: 'true'}
     apiServer:
-      certSANs: [localhost, 127.0.0.1]
+      certSANs:
+      - localhost
+      - 127.0.0.1
+    controllerManager:
+      extraArgs:
+        enable-hostpath-provisioner: "true"
   initConfiguration:
     nodeRegistration:
      criSocket: /var/run/containerd/containerd.sock
-      kubeletExtraArgs: {eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'}
+      kubeletExtraArgs:
+        eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
   joinConfiguration:
     nodeRegistration:
       criSocket: /var/run/containerd/containerd.sock
-      kubeletExtraArgs: {eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'}
+      kubeletExtraArgs:
+        eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
+---
+apiVersion: cluster.x-k8s.io/v1alpha3
+kind: Cluster
+metadata:
+  labels:
+    cni: ${CLUSTER_NAME}-crs-0
+    kcp-adoption.step1: ""
+  name: ${CLUSTER_NAME}
+spec:
+  clusterNetwork:
+    pods:
+      cidrBlocks:
+      - ${DOCKER_POD_CIDRS}
+    serviceDomain: ${DOCKER_SERVICE_DOMAIN}
+    services:
+      cidrBlocks:
+      - ${DOCKER_SERVICE_CIDRS}
+  infrastructureRef:
+    apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+    kind: DockerCluster
+    name: ${CLUSTER_NAME}
 ---
 apiVersion: cluster.x-k8s.io/v1alpha3
 kind: Machine
 metadata:
-  name: "${CLUSTER_NAME}-control-plane-0"
   labels:
-    initial: ''
-    cluster.x-k8s.io/control-plane: ''
+    cluster.x-k8s.io/control-plane: ""
+    kcp-adoption.step1: ""
+  name: ${CLUSTER_NAME}-control-plane-0
 spec:
-  clusterName: "${ CLUSTER_NAME }"
-  version: "${ KUBERNETES_VERSION }"
   bootstrap:
     configRef:
-      name: "${ CLUSTER_NAME }-control-plane-0"
       apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
       kind: KubeadmConfig
+      name: ${ CLUSTER_NAME }-control-plane-0
+  clusterName: ${ CLUSTER_NAME }
   infrastructureRef:
-    name: "${ CLUSTER_NAME }-control-plane-0"
     apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
     kind: DockerMachine
+    name: ${ CLUSTER_NAME }-control-plane-0
+  version: ${ KUBERNETES_VERSION }
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+kind: DockerCluster
+metadata:
+  labels:
+    kcp-adoption.step1: ""
+  name: ${CLUSTER_NAME}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+kind: DockerMachine
+metadata:
+  labels:
+    kcp-adoption.step1: ""
+  name: ${CLUSTER_NAME}-control-plane-0
+spec: {}
 ---
-###
-# 2. kcp
-###
 apiVersion: cluster.x-k8s.io/v1alpha3
 kind: Cluster
 metadata:
-  name: '${ CLUSTER_NAME }'
   labels:
-    kcp: ''
-    cni: "${CLUSTER_NAME}-crs-0"
+    cni: ${CLUSTER_NAME}-crs-0
+    kcp-adoption.step2: ""
+  name: ${CLUSTER_NAME}
 spec:
   clusterNetwork:
-    services:
-      cidrBlocks: ['${ DOCKER_SERVICE_CIDRS }']
     pods:
-      cidrBlocks: ['${ DOCKER_POD_CIDRS }']
-    serviceDomain: '${ DOCKER_SERVICE_DOMAIN }'
+      cidrBlocks:
+      - ${DOCKER_POD_CIDRS}
+    serviceDomain: ${DOCKER_SERVICE_DOMAIN}
+    services:
+      cidrBlocks:
+      - ${DOCKER_SERVICE_CIDRS}
   controlPlaneRef:
-    kind: KubeadmControlPlane
     apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
-    name: "${CLUSTER_NAME}-control-plane"
+    kind: KubeadmControlPlane
+    name: ${CLUSTER_NAME}-control-plane
   infrastructureRef:
     apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
     kind: DockerCluster
-    name: '${ CLUSTER_NAME }'
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
-kind: DockerMachineTemplate
-metadata:
-  name: "${CLUSTER_NAME}-control-plane"
-  labels:
-    kcp: ''
-spec:
-  template:
-    spec:
-      extraMounts:
-        - containerPath: "/var/run/docker.sock"
-          hostPath: "/var/run/docker.sock"
+    name: ${CLUSTER_NAME}
 ---
-kind: KubeadmControlPlane
 apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
+kind: KubeadmControlPlane
 metadata:
-  name: "${ CLUSTER_NAME }-control-plane"
   labels:
-    cluster.x-k8s.io/cluster-name: "${ CLUSTER_NAME }"
-    kcp: ''
+    kcp-adoption.step2: ""
+  name: ${CLUSTER_NAME}-control-plane
 spec:
-  replicas: ${ CONTROL_PLANE_MACHINE_COUNT }
   infrastructureTemplate:
-    kind: DockerMachineTemplate
     apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
-    name: "${CLUSTER_NAME}-control-plane"
+    kind: DockerMachineTemplate
+    name: ${CLUSTER_NAME}-control-plane
   kubeadmConfigSpec:
     clusterConfiguration:
-      controllerManager:
-        extraArgs: {enable-hostpath-provisioner: 'true'}
       apiServer:
-        certSANs: [localhost, 127.0.0.1]
+        certSANs:
+        - localhost
+        - 127.0.0.1
+      controllerManager:
+        extraArgs:
+          enable-hostpath-provisioner: "true"
     initConfiguration:
       nodeRegistration:
         criSocket: /var/run/containerd/containerd.sock
-        kubeletExtraArgs: {eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'}
+        kubeletExtraArgs:
+          eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
     joinConfiguration:
       nodeRegistration:
        criSocket: /var/run/containerd/containerd.sock
-        kubeletExtraArgs: {eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'}
-  version: "${KUBERNETES_VERSION}"
+        kubeletExtraArgs:
+          eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
+  replicas: ${CONTROL_PLANE_MACHINE_COUNT}
+  version: ${KUBERNETES_VERSION}
 ---
-apiVersion: v1
-kind: ConfigMap
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+kind: DockerCluster
 metadata:
-  name: "cni-${CLUSTER_NAME}-crs-0"
   labels:
-    initial: ''
-    kcp: ''
-data: ${CNI_RESOURCES}
+    kcp-adoption.step2: ""
+  name: ${CLUSTER_NAME}
 ---
-apiVersion: addons.cluster.x-k8s.io/v1alpha3
-kind: ClusterResourceSet
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+kind: DockerMachineTemplate
 metadata:
-  name: "${CLUSTER_NAME}-crs-0"
   labels:
-    initial: ''
-    kcp: ''
+    kcp-adoption.step2: ""
+  name: ${CLUSTER_NAME}-control-plane
 spec:
-  strategy: ApplyOnce
-  clusterSelector:
-    matchLabels:
-      cni: "${CLUSTER_NAME}-crs-0"
-  resources:
-  - name: "cni-${CLUSTER_NAME}-crs-0"
-    kind: ConfigMap
----
+  template:
+    spec: {}
diff --git a/test/e2e/data/infrastructure-docker/cluster-template-kcp-adoption/step1/cluster-with-cp0.yaml b/test/e2e/data/infrastructure-docker/cluster-template-kcp-adoption/step1/cluster-with-cp0.yaml
new file mode 100644
index 000000000000..beaf11f91398
--- /dev/null
+++ b/test/e2e/data/infrastructure-docker/cluster-template-kcp-adoption/step1/cluster-with-cp0.yaml
@@ -0,0 +1,74 @@
+---
+# DockerCluster object referenced by the Cluster object
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+kind: DockerCluster
+metadata:
+  name: '${CLUSTER_NAME}'
+---
+# Cluster object with
+# - No reference to the KubeadmControlPlane object
+# - the label cni=${CLUSTER_NAME}-crs-0, so the cluster can be selected by the ClusterResourceSet.
+apiVersion: cluster.x-k8s.io/v1alpha3
+kind: Cluster
+metadata:
+  name: '${CLUSTER_NAME}'
+  labels:
+    cni: "${CLUSTER_NAME}-crs-0"
+spec:
+  clusterNetwork:
+    services:
+      cidrBlocks: ['${DOCKER_SERVICE_CIDRS}']
+    pods:
+      cidrBlocks: ['${DOCKER_POD_CIDRS}']
+    serviceDomain: '${DOCKER_SERVICE_DOMAIN}'
+  infrastructureRef:
+    apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+    kind: DockerCluster
+    name: '${CLUSTER_NAME}'
+---
+# DockerMachine referenced by the Machine cp0
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+kind: DockerMachine
+metadata:
+  name: "${CLUSTER_NAME}-control-plane-0"
+spec: {}
+---
+# KubeadmConfig referenced by the Machine cp0
+apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
+kind: KubeadmConfig
+metadata:
+  name: "${CLUSTER_NAME}-control-plane-0"
+spec:
+  clusterConfiguration:
+    controllerManager:
+      extraArgs: {enable-hostpath-provisioner: 'true'}
+    apiServer:
+      certSANs: [localhost, 127.0.0.1]
+  initConfiguration:
+    nodeRegistration:
+      criSocket: /var/run/containerd/containerd.sock
+      kubeletExtraArgs: {eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'}
+  joinConfiguration:
+    nodeRegistration:
+      criSocket: /var/run/containerd/containerd.sock
+      kubeletExtraArgs: {eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'}
+---
+# cp0 Machine
+apiVersion: cluster.x-k8s.io/v1alpha3
+kind: Machine
+metadata:
+  name: "${CLUSTER_NAME}-control-plane-0"
+  labels:
+    cluster.x-k8s.io/control-plane: ''
+spec:
+  clusterName: "${ CLUSTER_NAME }"
+  version: "${ KUBERNETES_VERSION }"
+  bootstrap:
+    configRef:
+      name: "${ CLUSTER_NAME }-control-plane-0"
+      apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
+      kind: KubeadmConfig
+  infrastructureRef:
+    name: "${ CLUSTER_NAME }-control-plane-0"
+    apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+    kind: DockerMachine
diff --git a/test/e2e/data/infrastructure-docker/cluster-template-kcp-adoption/step1/kustomization.yaml b/test/e2e/data/infrastructure-docker/cluster-template-kcp-adoption/step1/kustomization.yaml
new file mode 100644
index 000000000000..fa5f1edcc2f5
--- /dev/null
+++ b/test/e2e/data/infrastructure-docker/cluster-template-kcp-adoption/step1/kustomization.yaml
@@ -0,0 +1,8 @@
+commonLabels:
+  kcp-adoption.step1: ""
+
+bases:
+  - cluster-with-cp0.yaml
+  - ../../bases/crs.yaml
+
+
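Note: the two step kustomizations stamp every object with kcp-adoption.step1 or kcp-adoption.step2 via commonLabels, which is what lets the adoption spec apply one generated file in two passes. Roughly what the framework's ApplyWithArgs calls (shown at the end of this diff) do, sketched with plain kubectl; the wait step in between is the test's own logic:

```sh
# Pass 1: create the cluster with a standalone control-plane Machine.
kubectl apply -f cluster-template-kcp-adoption.yaml --selector kcp-adoption.step1
# ...wait for the initial control plane to come up, then
# Pass 2: create the KubeadmControlPlane that adopts the existing Machine.
kubectl apply -f cluster-template-kcp-adoption.yaml --selector kcp-adoption.step2
```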
diff --git a/test/e2e/data/infrastructure-docker/cluster-template-kcp-adoption/step2/kustomization.yaml b/test/e2e/data/infrastructure-docker/cluster-template-kcp-adoption/step2/kustomization.yaml
new file mode 100644
index 000000000000..f7b1382cfa0e
--- /dev/null
+++ b/test/e2e/data/infrastructure-docker/cluster-template-kcp-adoption/step2/kustomization.yaml
@@ -0,0 +1,6 @@
+commonLabels:
+  kcp-adoption.step2: ""
+
+bases:
+  - ../../bases/cluster-with-kcp.yaml
+
diff --git a/test/e2e/data/infrastructure-docker/cluster-template-ci.yaml b/test/e2e/data/infrastructure-docker/cluster-template-mhc.yaml
similarity index 54%
rename from test/e2e/data/infrastructure-docker/cluster-template-ci.yaml
rename to test/e2e/data/infrastructure-docker/cluster-template-mhc.yaml
index ce66062a76e1..6ec0c94c5911 100644
--- a/test/e2e/data/infrastructure-docker/cluster-template-ci.yaml
+++ b/test/e2e/data/infrastructure-docker/cluster-template-mhc.yaml
@@ -1,147 +1,154 @@
-apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
-kind: DockerCluster
-metadata:
-  name: '${ CLUSTER_NAME }'
----
-apiVersion: cluster.x-k8s.io/v1alpha3
-kind: Cluster
-metadata:
-  name: '${ CLUSTER_NAME }'
-  labels:
-    cni: "${CLUSTER_NAME}-crs-0"
-spec:
-  clusterNetwork:
-    services:
-      cidrBlocks: ['${ DOCKER_SERVICE_CIDRS }']
-    pods:
-      cidrBlocks: ['${ DOCKER_POD_CIDRS }']
-    serviceDomain: '${ DOCKER_SERVICE_DOMAIN }'
-  infrastructureRef:
-    apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
-    kind: DockerCluster
-    name: '${ CLUSTER_NAME }'
-  controlPlaneRef:
-    kind: KubeadmControlPlane
-    apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
-    name: "${CLUSTER_NAME}-control-plane"
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
-kind: DockerMachineTemplate
-metadata:
-  name: "${CLUSTER_NAME}-control-plane"
-spec:
-  template:
-    spec:
-      extraMounts:
-        - containerPath: "/var/run/docker.sock"
-          hostPath: "/var/run/docker.sock"
----
-kind: KubeadmControlPlane
-apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
+apiVersion: v1
+binaryData: null
+data: ${CNI_RESOURCES}
+kind: ConfigMap
 metadata:
-  name: "${ CLUSTER_NAME }-control-plane"
-spec:
-  replicas: ${ CONTROL_PLANE_MACHINE_COUNT }
-  infrastructureTemplate:
-    kind: DockerMachineTemplate
-    apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
-    name: "${CLUSTER_NAME}-control-plane"
-  kubeadmConfigSpec:
-    clusterConfiguration:
-      controllerManager:
-        extraArgs: {enable-hostpath-provisioner: 'true'}
-      apiServer:
-        certSANs: [localhost, 127.0.0.1]
-    initConfiguration:
-      nodeRegistration:
-        criSocket: /var/run/containerd/containerd.sock
-        kubeletExtraArgs: {eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'}
-    joinConfiguration:
-      nodeRegistration:
-        criSocket: /var/run/containerd/containerd.sock
-        kubeletExtraArgs: {eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'}
-  version: "${KUBERNETES_VERSION}"
+  name: cni-${CLUSTER_NAME}-crs-0
 ---
-apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
-kind: DockerMachineTemplate
+apiVersion: addons.cluster.x-k8s.io/v1alpha3
+kind: ClusterResourceSet
 metadata:
-  name: "${CLUSTER_NAME}-md-0"
+  name: ${CLUSTER_NAME}-crs-0
 spec:
-  template:
-    spec:
-      extraMounts:
-        - containerPath: "/var/run/docker.sock"
-          hostPath: "/var/run/docker.sock"
+  clusterSelector:
+    matchLabels:
+      cni: ${CLUSTER_NAME}-crs-0
+  resources:
+  - kind: ConfigMap
+    name: cni-${CLUSTER_NAME}-crs-0
+  strategy: ApplyOnce
 ---
 apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
 kind: KubeadmConfigTemplate
 metadata:
-  name: "${ CLUSTER_NAME }-md-0"
+  name: ${CLUSTER_NAME}-md-0
 spec:
   template:
     spec:
       joinConfiguration:
         nodeRegistration:
           criSocket: /var/run/containerd/containerd.sock
-          kubeletExtraArgs: {eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'}
+          kubeletExtraArgs:
+            eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
+---
+apiVersion: cluster.x-k8s.io/v1alpha3
+kind: Cluster
+metadata:
+  labels:
+    cni: ${CLUSTER_NAME}-crs-0
+  name: ${CLUSTER_NAME}
+spec:
+  clusterNetwork:
+    pods:
+      cidrBlocks:
+      - ${DOCKER_POD_CIDRS}
+    serviceDomain: ${DOCKER_SERVICE_DOMAIN}
+    services:
+      cidrBlocks:
+      - ${DOCKER_SERVICE_CIDRS}
+  controlPlaneRef:
+    apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
+    kind: KubeadmControlPlane
+    name: ${CLUSTER_NAME}-control-plane
+  infrastructureRef:
+    apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+    kind: DockerCluster
+    name: ${CLUSTER_NAME}
 ---
 apiVersion: cluster.x-k8s.io/v1alpha3
 kind: MachineDeployment
 metadata:
-  name: "${CLUSTER_NAME}-md-0"
+  name: ${CLUSTER_NAME}-md-0
 spec:
-  clusterName: "${CLUSTER_NAME}"
-  replicas: ${ WORKER_MACHINE_COUNT }
+  clusterName: ${CLUSTER_NAME}
+  replicas: ${WORKER_MACHINE_COUNT}
   selector:
-    matchLabels:
+    matchLabels: null
   template:
     metadata:
       labels:
-        "nodepool": "pool1"
+        nodepool: pool1
     spec:
-      clusterName: "${ CLUSTER_NAME }"
-      version: "${ KUBERNETES_VERSION }"
       bootstrap:
        configRef:
-          name: "${ CLUSTER_NAME }-md-0"
           apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
           kind: KubeadmConfigTemplate
+          name: ${CLUSTER_NAME}-md-0
+      clusterName: ${CLUSTER_NAME}
       infrastructureRef:
-        name: "${ CLUSTER_NAME }-md-0"
         apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
         kind: DockerMachineTemplate
+        name: ${CLUSTER_NAME}-md-0
+      version: ${KUBERNETES_VERSION}
 ---
 apiVersion: cluster.x-k8s.io/v1alpha3
 kind: MachineHealthCheck
 metadata:
-  name: "${CLUSTER_NAME}-mhc-0"
+  name: ${CLUSTER_NAME}-mhc-0
 spec:
-  clusterName: "${ CLUSTER_NAME }"
+  clusterName: ${CLUSTER_NAME}
   maxUnhealthy: 100%
   selector:
     matchLabels:
-      nodepool: "pool1"
+      nodepool: pool1
   unhealthyConditions:
-    - type: E2ENodeUnhealthy
-      status: "True"
-      timeout: 30s
+  - status: "True"
+    timeout: 30s
+    type: E2ENodeUnhealthy
 ---
-apiVersion: v1
-kind: ConfigMap
+apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
+kind: KubeadmControlPlane
 metadata:
-  name: "cni-${CLUSTER_NAME}-crs-0"
-data: ${CNI_RESOURCES}
+  labels:
+    kcp-adoption.step2: ""
+  name: ${CLUSTER_NAME}-control-plane
+spec:
+  infrastructureTemplate:
+    apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+    kind: DockerMachineTemplate
+    name: ${CLUSTER_NAME}-control-plane
+  kubeadmConfigSpec:
+    clusterConfiguration:
+      apiServer:
+        certSANs:
+        - localhost
+        - 127.0.0.1
+      controllerManager:
+        extraArgs:
+          enable-hostpath-provisioner: "true"
+    initConfiguration:
+      nodeRegistration:
+        criSocket: /var/run/containerd/containerd.sock
+        kubeletExtraArgs:
+          eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
+    joinConfiguration:
+      nodeRegistration:
+        criSocket: /var/run/containerd/containerd.sock
+        kubeletExtraArgs:
+          eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
+  replicas: ${CONTROL_PLANE_MACHINE_COUNT}
+  version: ${KUBERNETES_VERSION}
 ---
-apiVersion: addons.cluster.x-k8s.io/v1alpha3
-kind: ClusterResourceSet
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+kind: DockerCluster
+metadata:
+  name: ${CLUSTER_NAME}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+kind: DockerMachineTemplate
 metadata:
-  name: "${CLUSTER_NAME}-crs-0"
+  name: ${CLUSTER_NAME}-control-plane
 spec:
-  strategy: ApplyOnce
-  clusterSelector:
-    matchLabels:
-      cni: "${CLUSTER_NAME}-crs-0"
-  resources:
-  - name: "cni-${CLUSTER_NAME}-crs-0"
-    kind: ConfigMap
+  template:
+    spec: {}
 ---
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+kind: DockerMachineTemplate
+metadata:
+  name: ${CLUSTER_NAME}-md-0
+spec:
+  template:
+    spec:
+      extraMounts:
+      - containerPath: /var/run/docker.sock
+        hostPath: /var/run/docker.sock
diff --git a/test/e2e/data/infrastructure-docker/cluster-template-mhc/kustomization.yaml b/test/e2e/data/infrastructure-docker/cluster-template-mhc/kustomization.yaml
new file mode 100644
index 000000000000..21314c852607
--- /dev/null
+++ b/test/e2e/data/infrastructure-docker/cluster-template-mhc/kustomization.yaml
@@ -0,0 +1,7 @@
+bases:
+  - ../bases/cluster-with-kcp.yaml
+  - ../bases/md.yaml
+  - ../bases/crs.yaml
+  - ../bases/mhc.yaml
+
+
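Note: the MachineHealthCheck now exists only in this dedicated mhc flavor, so ordinary specs no longer create machines with remediation enabled; the remediation spec opts in by requesting Flavor: "mhc" (see mhc_remediations.go at the end of this diff). Flavors map to template file names, cluster-template-mhc.yaml here; a hedged sketch with v0.3-era clusterctl, flag set assumed:

```sh
# "mhc" selects cluster-template-mhc.yaml from the provider's template files.
clusterctl config cluster remediation-test --flavor mhc \
  --kubernetes-version v1.18.2 \
  --control-plane-machine-count 1 \
  --worker-machine-count 1 > remediation-cluster.yaml
```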
diff --git a/test/e2e/data/infrastructure-docker/cluster-template.yaml b/test/e2e/data/infrastructure-docker/cluster-template.yaml
index ce66062a76e1..31d6f36a61a0 100644
--- a/test/e2e/data/infrastructure-docker/cluster-template.yaml
+++ b/test/e2e/data/infrastructure-docker/cluster-template.yaml
@@ -1,147 +1,139 @@
-apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
-kind: DockerCluster
+apiVersion: v1
+binaryData: null
+data: ${CNI_RESOURCES}
+kind: ConfigMap
+metadata:
+  name: cni-${CLUSTER_NAME}-crs-0
+---
+apiVersion: addons.cluster.x-k8s.io/v1alpha3
+kind: ClusterResourceSet
 metadata:
-  name: '${ CLUSTER_NAME }'
+  name: ${CLUSTER_NAME}-crs-0
+spec:
+  clusterSelector:
+    matchLabels:
+      cni: ${CLUSTER_NAME}-crs-0
+  resources:
+  - kind: ConfigMap
+    name: cni-${CLUSTER_NAME}-crs-0
+  strategy: ApplyOnce
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
+kind: KubeadmConfigTemplate
+metadata:
+  name: ${CLUSTER_NAME}-md-0
+spec:
+  template:
+    spec:
+      joinConfiguration:
+        nodeRegistration:
+          criSocket: /var/run/containerd/containerd.sock
+          kubeletExtraArgs:
+            eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
 ---
 apiVersion: cluster.x-k8s.io/v1alpha3
 kind: Cluster
 metadata:
-  name: '${ CLUSTER_NAME }'
   labels:
-    cni: "${CLUSTER_NAME}-crs-0"
+    cni: ${CLUSTER_NAME}-crs-0
+  name: ${CLUSTER_NAME}
 spec:
   clusterNetwork:
-    services:
-      cidrBlocks: ['${ DOCKER_SERVICE_CIDRS }']
     pods:
-      cidrBlocks: ['${ DOCKER_POD_CIDRS }']
-    serviceDomain: '${ DOCKER_SERVICE_DOMAIN }'
+      cidrBlocks:
+      - ${DOCKER_POD_CIDRS}
+    serviceDomain: ${DOCKER_SERVICE_DOMAIN}
+    services:
+      cidrBlocks:
+      - ${DOCKER_SERVICE_CIDRS}
+  controlPlaneRef:
+    apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
+    kind: KubeadmControlPlane
+    name: ${CLUSTER_NAME}-control-plane
   infrastructureRef:
     apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
     kind: DockerCluster
-    name: '${ CLUSTER_NAME }'
-  controlPlaneRef:
-    kind: KubeadmControlPlane
-    apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
-    name: "${CLUSTER_NAME}-control-plane"
+    name: ${CLUSTER_NAME}
 ---
-apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
-kind: DockerMachineTemplate
+apiVersion: cluster.x-k8s.io/v1alpha3
+kind: MachineDeployment
 metadata:
-  name: "${CLUSTER_NAME}-control-plane"
+  name: ${CLUSTER_NAME}-md-0
 spec:
+  clusterName: ${CLUSTER_NAME}
+  replicas: ${WORKER_MACHINE_COUNT}
+  selector:
+    matchLabels: null
   template:
+    metadata:
+      labels:
+        nodepool: pool1
     spec:
-      extraMounts:
-        - containerPath: "/var/run/docker.sock"
-          hostPath: "/var/run/docker.sock"
+      bootstrap:
+        configRef:
+          apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
+          kind: KubeadmConfigTemplate
+          name: ${CLUSTER_NAME}-md-0
+      clusterName: ${CLUSTER_NAME}
+      infrastructureRef:
+        apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+        kind: DockerMachineTemplate
+        name: ${CLUSTER_NAME}-md-0
+      version: ${KUBERNETES_VERSION}
 ---
-kind: KubeadmControlPlane
 apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
+kind: KubeadmControlPlane
 metadata:
-  name: "${ CLUSTER_NAME }-control-plane"
+  labels:
+    kcp-adoption.step2: ""
+  name: ${CLUSTER_NAME}-control-plane
 spec:
-  replicas: ${ CONTROL_PLANE_MACHINE_COUNT }
   infrastructureTemplate:
-    kind: DockerMachineTemplate
     apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
-    name: "${CLUSTER_NAME}-control-plane"
+    kind: DockerMachineTemplate
+    name: ${CLUSTER_NAME}-control-plane
   kubeadmConfigSpec:
     clusterConfiguration:
-      controllerManager:
-        extraArgs: {enable-hostpath-provisioner: 'true'}
       apiServer:
-        certSANs: [localhost, 127.0.0.1]
+        certSANs:
+        - localhost
+        - 127.0.0.1
+      controllerManager:
+        extraArgs:
+          enable-hostpath-provisioner: "true"
     initConfiguration:
       nodeRegistration:
         criSocket: /var/run/containerd/containerd.sock
-        kubeletExtraArgs: {eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'}
+        kubeletExtraArgs:
+          eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
     joinConfiguration:
       nodeRegistration:
        criSocket: /var/run/containerd/containerd.sock
-        kubeletExtraArgs: {eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'}
-  version: "${KUBERNETES_VERSION}"
+        kubeletExtraArgs:
+          eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
+  replicas: ${CONTROL_PLANE_MACHINE_COUNT}
+  version: ${KUBERNETES_VERSION}
 ---
 apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
-kind: DockerMachineTemplate
+kind: DockerCluster
 metadata:
-  name: "${CLUSTER_NAME}-md-0"
-spec:
-  template:
-    spec:
-      extraMounts:
-        - containerPath: "/var/run/docker.sock"
-          hostPath: "/var/run/docker.sock"
+  name: ${CLUSTER_NAME}
 ---
-apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
-kind: KubeadmConfigTemplate
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+kind: DockerMachineTemplate
 metadata:
-  name: "${ CLUSTER_NAME }-md-0"
+  name: ${CLUSTER_NAME}-control-plane
 spec:
   template:
-    spec:
-      joinConfiguration:
-        nodeRegistration:
-          criSocket: /var/run/containerd/containerd.sock
-          kubeletExtraArgs: {eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'}
+    spec: {}
 ---
-apiVersion: cluster.x-k8s.io/v1alpha3
-kind: MachineDeployment
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+kind: DockerMachineTemplate
 metadata:
-  name: "${CLUSTER_NAME}-md-0"
+  name: ${CLUSTER_NAME}-md-0
 spec:
-  clusterName: "${CLUSTER_NAME}"
-  replicas: ${ WORKER_MACHINE_COUNT }
-  selector:
-    matchLabels:
   template:
-    metadata:
-      labels:
-        "nodepool": "pool1"
     spec:
-      clusterName: "${ CLUSTER_NAME }"
-      version: "${ KUBERNETES_VERSION }"
-      bootstrap:
-        configRef:
-          name: "${ CLUSTER_NAME }-md-0"
-          apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
-          kind: KubeadmConfigTemplate
-      infrastructureRef:
-        name: "${ CLUSTER_NAME }-md-0"
-        apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
-        kind: DockerMachineTemplate
----
-apiVersion: cluster.x-k8s.io/v1alpha3
-kind: MachineHealthCheck
-metadata:
-  name: "${CLUSTER_NAME}-mhc-0"
-spec:
-  clusterName: "${ CLUSTER_NAME }"
-  maxUnhealthy: 100%
-  selector:
-    matchLabels:
-      nodepool: "pool1"
-  unhealthyConditions:
-    - type: E2ENodeUnhealthy
-      status: "True"
-      timeout: 30s
----
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: "cni-${CLUSTER_NAME}-crs-0"
-data: ${CNI_RESOURCES}
----
-apiVersion: addons.cluster.x-k8s.io/v1alpha3
-kind: ClusterResourceSet
-metadata:
-  name: "${CLUSTER_NAME}-crs-0"
-spec:
-  strategy: ApplyOnce
-  clusterSelector:
-    matchLabels:
-      cni: "${CLUSTER_NAME}-crs-0"
-  resources:
-  - name: "cni-${CLUSTER_NAME}-crs-0"
-    kind: ConfigMap
----
+      extraMounts:
+      - containerPath: /var/run/docker.sock
+        hostPath: /var/run/docker.sock
diff --git a/test/e2e/data/infrastructure-docker/cluster-template/kustomization.yaml b/test/e2e/data/infrastructure-docker/cluster-template/kustomization.yaml
new file mode 100644
index 000000000000..adb5919cec6f
--- /dev/null
+++ b/test/e2e/data/infrastructure-docker/cluster-template/kustomization.yaml
@@ -0,0 +1,5 @@
+bases:
+- ../bases/cluster-with-kcp.yaml
+- ../bases/md.yaml
+- ../bases/crs.yaml
+
diff --git a/test/e2e/data/infrastructure-docker/metadata.yaml b/test/e2e/data/infrastructure-docker/metadata.yaml
deleted file mode 100644
index 264b00b51f34..000000000000
--- a/test/e2e/data/infrastructure-docker/metadata.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3
-kind: Metadata
-releaseSeries:
-  - major: 0
-    minor: 2
-    contract: v1alpha2
-  - major: 0
-    minor: 3
-    contract: v1alpha3
\ No newline at end of file
diff --git a/test/e2e/kcp_adoption.go b/test/e2e/kcp_adoption.go
index 628446efafe6..d2ffbed5e4e0 100644
--- a/test/e2e/kcp_adoption.go
+++ b/test/e2e/kcp_adoption.go
@@ -110,7 +110,7 @@ func KCPAdoptionSpec(ctx context.Context, inputGetter func() KCPAdoptionSpecInpu
 		Expect(workloadClusterTemplate).ToNot(BeNil(), "Failed to get the cluster template")

 		By("Applying the cluster template yaml to the cluster with the 'initial' selector")
-		Expect(input.BootstrapClusterProxy.ApplyWithArgs(ctx, workloadClusterTemplate, "--selector", "initial")).ShouldNot(HaveOccurred())
+		Expect(input.BootstrapClusterProxy.ApplyWithArgs(ctx, workloadClusterTemplate, "--selector", "kcp-adoption.step1")).ShouldNot(HaveOccurred())

 		cluster = framework.DiscoveryAndWaitForCluster(ctx, framework.DiscoveryAndWaitForClusterInput{
 			Getter:  client,
@@ -131,7 +131,7 @@ func KCPAdoptionSpec(ctx context.Context, inputGetter func() KCPAdoptionSpecInpu
 		}, WaitForControlPlaneIntervals...)

 		By("Applying the cluster template yaml to the cluster with the 'kcp' selector")
-		Expect(input.BootstrapClusterProxy.ApplyWithArgs(ctx, workloadClusterTemplate, "--selector", "kcp")).ShouldNot(HaveOccurred())
+		Expect(input.BootstrapClusterProxy.ApplyWithArgs(ctx, workloadClusterTemplate, "--selector", "kcp-adoption.step2")).ShouldNot(HaveOccurred())

 		controlPlane := framework.GetKubeadmControlPlaneByCluster(ctx, framework.GetKubeadmControlPlaneByClusterInput{
 			Lister:      client,
diff --git a/test/e2e/mhc_remediations.go b/test/e2e/mhc_remediations.go
index 0d5b76dc72fd..79caa529be59 100644
--- a/test/e2e/mhc_remediations.go
+++ b/test/e2e/mhc_remediations.go
@@ -76,7 +76,7 @@ func MachineRemediationSpec(ctx context.Context, inputGetter func() MachineRemed
 			ClusterctlConfigPath:     input.ClusterctlConfigPath,
 			KubeconfigPath:           input.BootstrapClusterProxy.GetKubeconfigPath(),
 			InfrastructureProvider:   clusterctl.DefaultInfrastructureProvider,
-			Flavor:                   clusterctl.DefaultFlavor,
+			Flavor:                   "mhc",
 			Namespace:                namespace.Name,
 			ClusterName:              fmt.Sprintf("%s-%s", specName, util.RandomString(6)),
 			KubernetesVersion:        input.E2EConfig.GetVariable(KubernetesVersion),
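Note: end to end, the CI entry point now exercises the same artifacts as a local run. A condensed, hedged sketch of what ./scripts/ci-e2e.sh amounts to after this change (environment values taken from the script above; the final invocation of the test/e2e Makefile is assumed from the exported variables rather than shown in this diff):

```sh
# Same config, images, and templates as a developer run, just pinned settings:
export REGISTRY=gcr.io/k8s-staging-cluster-api TAG=dev ARCH=amd64
export E2E_CONF_FILE="$(pwd)/test/e2e/config/docker.yaml"
make -C test/e2e run GINKGO_NODES=3 GINKGO_NOCOLOR=true GINKGO_ARGS="--failFast"
```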