From 5596ab2a8d3f7dc14ca63e7f425df7c8f76fab3d Mon Sep 17 00:00:00 2001 From: fabriziopandini Date: Fri, 17 Apr 2020 21:41:36 +0200 Subject: [PATCH] add test e2e folder --- cmd/clusterctl/test/e2e/go.sum | 3 + hack/tools/go.mod | 1 + hack/tools/tools.go | 1 + scripts/ci-e2e.sh | 60 +++++ test/e2e/Makefile | 65 ++++++ test/e2e/common.go | 71 ++++++ test/e2e/config/docker-ci.yaml | 77 +++++++ test/e2e/config/docker-dev.yaml | 108 +++++++++ test/e2e/data/cni/kindnet/kindnet.yaml | 113 ++++++++++ .../infrastructure-aws/cluster-template.yaml | 113 ++++++++++ .../cluster-template-ci.yaml | 107 +++++++++ .../cluster-template.yaml | 107 +++++++++ .../data/infrastructure-docker/metadata.yaml | 9 + test/e2e/e2e_suite_test.go | 210 ++++++++++++++++++ test/e2e/kcp_upgrade.go | 116 ++++++++++ test/e2e/kcp_upgrade_test.go | 39 ++++ test/e2e/quick_start.go | 102 +++++++++ test/e2e/quick_start_test.go | 39 ++++ test/e2e/self_hosted.go | 192 ++++++++++++++++ test/e2e/self_hosted_test.go | 39 ++++ test/framework/cluster_helpers.go | 61 +++++ test/framework/clusterctl/client.go | 23 +- .../clusterctl/clusterctl_helpers.go | 172 ++++++++++++++ test/framework/clusterctl/e2e_config.go | 35 ++- test/framework/controlpane_helpers.go | 140 ++++++++++++ test/framework/deployment_helpers.go | 3 +- test/framework/machinedeployment_helpers.go | 27 +++ test/framework/namespace_helpers.go | 32 +++ test/infrastructure/docker/go.sum | 2 + 29 files changed, 2045 insertions(+), 22 deletions(-) create mode 100755 scripts/ci-e2e.sh create mode 100644 test/e2e/Makefile create mode 100644 test/e2e/common.go create mode 100644 test/e2e/config/docker-ci.yaml create mode 100644 test/e2e/config/docker-dev.yaml create mode 100644 test/e2e/data/cni/kindnet/kindnet.yaml create mode 100644 test/e2e/data/infrastructure-aws/cluster-template.yaml create mode 100644 test/e2e/data/infrastructure-docker/cluster-template-ci.yaml create mode 100644 test/e2e/data/infrastructure-docker/cluster-template.yaml create mode 
100644 test/e2e/data/infrastructure-docker/metadata.yaml create mode 100644 test/e2e/e2e_suite_test.go create mode 100644 test/e2e/kcp_upgrade.go create mode 100644 test/e2e/kcp_upgrade_test.go create mode 100644 test/e2e/quick_start.go create mode 100644 test/e2e/quick_start_test.go create mode 100644 test/e2e/self_hosted.go create mode 100644 test/e2e/self_hosted_test.go create mode 100644 test/framework/clusterctl/clusterctl_helpers.go diff --git a/cmd/clusterctl/test/e2e/go.sum b/cmd/clusterctl/test/e2e/go.sum index 90e895f33dad..7cf1b4633284 100644 --- a/cmd/clusterctl/test/e2e/go.sum +++ b/cmd/clusterctl/test/e2e/go.sum @@ -214,6 +214,7 @@ github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.8 h1:CGgOkSJeqMRmt0D9XLWExdT4m4F1vd3FV3VPt+0VxkQ= github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= @@ -487,6 +488,7 @@ golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0 
h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -571,6 +573,7 @@ k8s.io/apimachinery v0.0.0-20190913080033-27d36303b655/go.mod h1:nL6pwRT8NgfF8TT k8s.io/apimachinery v0.17.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= k8s.io/apimachinery v0.17.2 h1:hwDQQFbdRlpnnsR64Asdi55GyCaIP/3WQpMmbNBeWr4= k8s.io/apimachinery v0.17.2/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= +k8s.io/apiserver v0.17.2 h1:NssVvPALll6SSeNgo1Wk1h2myU1UHNwmhxV0Oxbcl8Y= k8s.io/apiserver v0.17.2/go.mod h1:lBmw/TtQdtxvrTk0e2cgtOxHizXI+d0mmGQURIHQZlo= k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90 h1:mLmhKUm1X+pXu0zXMEzNsOF5E2kKFGe5o6BZBIIqA6A= k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90/go.mod h1:J69/JveO6XESwVgG53q3Uz5OSfgsv4uxpScmmyYOOlk= diff --git a/hack/tools/go.mod b/hack/tools/go.mod index 77b6dea4f607..253263c854a7 100644 --- a/hack/tools/go.mod +++ b/hack/tools/go.mod @@ -7,6 +7,7 @@ require ( github.com/go-bindata/go-bindata v3.1.2+incompatible github.com/golangci/golangci-lint v1.23.8 github.com/joelanford/go-apidiff v0.0.0-20191206194835-106bcff5f060 + github.com/onsi/ginkgo v1.11.0 github.com/raviqqe/liche v0.0.0-20200229003944-f57a5d1c5be4 golang.org/x/tools v0.0.0-20200204192400-7124308813f3 k8s.io/code-generator v0.18.0-alpha.2.0.20200130061103-7dfd5e9157ef diff --git a/hack/tools/tools.go b/hack/tools/tools.go index 3281cd0bc47d..4029a5fdd5d2 100644 --- a/hack/tools/tools.go +++ b/hack/tools/tools.go @@ -23,6 +23,7 @@ import ( _ "github.com/go-bindata/go-bindata" _ "github.com/golangci/golangci-lint/cmd/golangci-lint" _ "github.com/joelanford/go-apidiff" + _ "github.com/onsi/ginkgo/ginkgo" _ "github.com/raviqqe/liche" _ "k8s.io/code-generator/cmd/conversion-gen" _ "sigs.k8s.io/controller-tools/cmd/controller-gen" diff --git a/scripts/ci-e2e.sh b/scripts/ci-e2e.sh new file mode 100755 index 000000000000..60396881d7bc --- /dev/null +++ 
b/scripts/ci-e2e.sh @@ -0,0 +1,60 @@ +#!/bin/bash + +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +REPO_ROOT=$(git rev-parse --show-toplevel) +cd "${REPO_ROOT}" || exit 1 + +# shellcheck source=./hack/ensure-go.sh +source "${REPO_ROOT}/hack/ensure-go.sh" +# shellcheck source=./hack/ensure-kubectl.sh +source "${REPO_ROOT}/hack/ensure-kubectl.sh" +# shellcheck source=./hack/ensure-kustomize.sh +source "${REPO_ROOT}/hack/ensure-kustomize.sh" + +# Configure provider images generation; +# please ensure the generated image name matches image names used in the E2E_CONF_FILE +export REGISTRY=gcr.io/k8s-staging-cluster-api +export TAG=ci +export ARCH=amd64 +export PULL_POLICY=IfNotPresent + +## Rebuild all Cluster API provider images +make docker-build + +## Rebuild CAPD provider images +make -C test/infrastructure/docker docker-build + +## Pulling cert manager images so we can pre-load in kind nodes +docker pull quay.io/jetstack/cert-manager-cainjector:v0.11.0 +docker pull quay.io/jetstack/cert-manager-webhook:v0.11.0 +docker pull quay.io/jetstack/cert-manager-controller:v0.11.0 + +# Configure e2e tests +export GINKGO_FOCUS= +export GINKGO_NODES=2 +export GINKGO_NOCOLOR=true +export E2E_CONF_FILE="${REPO_ROOT}/test/e2e/config/docker-ci.yaml" +export ARTIFACTS="${ARTIFACTS:-${REPO_ROOT}/_artifacts}" +export SKIP_RESOURCE_CLEANUP=false +export USE_EXISTING_CLUSTER=false + +# Run e2e 
tests +mkdir -p "$ARTIFACTS" +make -C test/e2e/ run diff --git a/test/e2e/Makefile b/test/e2e/Makefile new file mode 100644 index 000000000000..c5383e70a1bb --- /dev/null +++ b/test/e2e/Makefile @@ -0,0 +1,65 @@ +# Copyright 2020 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# If you update this file, please follow: +# https://suva.sh/posts/well-documented-makefiles/ + +# Use GOPROXY environment variable if set + +.DEFAULT_GOAL:=help + +GOPROXY := $(shell go env GOPROXY) +ifeq ($(GOPROXY),) +GOPROXY := https://proxy.golang.org +endif +export GOPROXY + +REPO_ROOT := $(shell git rev-parse --show-toplevel) + +help: ## Display this help + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + +## -------------------------------------- +## Binaries +## -------------------------------------- + +TOOLS_DIR := $(REPO_ROOT)/hack/tools +BIN_DIR := bin +TOOLS_BIN_DIR := $(TOOLS_DIR)/$(BIN_DIR) +GINKGO := $(TOOLS_BIN_DIR)/ginkgo + +.PHONY: ginkgo +ginkgo: + cd $(TOOLS_DIR) && go build -tags=tools -o $(BIN_DIR)/ginkgo github.com/onsi/ginkgo/ginkgo + +## -------------------------------------- +## Testing +## -------------------------------------- + +TEST_E2E_DIR := $(REPO_ROOT)/test/e2e + +GINKGO_FOCUS ?= +GINKGO_NODES ?= 1 +E2E_CONF_FILE ?= ${REPO_ROOT}/test/e2e/config/docker-dev.yaml 
+ARTIFACTS ?= ${REPO_ROOT}/_artifacts +SKIP_RESOURCE_CLEANUP ?= false +USE_EXISTING_CLUSTER ?= false +GINKGO_NOCOLOR ?= false + +.PHONY: run +run: ginkgo ## Run the end-to-end tests + cd $(TEST_E2E_DIR); $(GINKGO) -v -trace -tags=e2e -focus=$(GINKGO_FOCUS) -nodes=$(GINKGO_NODES) --noColor=$(GINKGO_NOCOLOR) . -- \ + -e2e.artifacts-folder="$(ARTIFACTS)" \ + -e2e.config="$(E2E_CONF_FILE)" \ + -e2e.skip-resource-cleanup=$(SKIP_RESOURCE_CLEANUP) -e2e.use-existing-cluster=$(USE_EXISTING_CLUSTER) diff --git a/test/e2e/common.go b/test/e2e/common.go new file mode 100644 index 000000000000..861f0f6a1bda --- /dev/null +++ b/test/e2e/common.go @@ -0,0 +1,71 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "context" + "fmt" + "path/filepath" + + . 
"github.com/onsi/ginkgo" + + corev1 "k8s.io/api/core/v1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" + "sigs.k8s.io/cluster-api/test/framework" + "sigs.k8s.io/cluster-api/util" +) + +func Byf(format string, a ...interface{}) { + By(fmt.Sprintf(format, a...)) +} + +func setupSpecNamespace(ctx context.Context, specName string, clusterProxy framework.ClusterProxy, artifactFolder string) (*corev1.Namespace, context.CancelFunc) { + Byf("Creating a namespace for hosting the %q test spec", specName) + namespace, cancelWatches := framework.CreateNamespaceAndWatchEvents(ctx, framework.CreateNamespaceAndWatchEventsInput{ + Creator: clusterProxy.GetClient(), + ClientSet: clusterProxy.GetClientSet(), + Name: fmt.Sprintf("%s-%s", specName, util.RandomString(6)), + LogFolder: filepath.Join(artifactFolder, "clusters", clusterProxy.GetName()), + }) + + return namespace, cancelWatches +} + +func dumpSpecResourcesAndCleanup(ctx context.Context, specName string, clusterProxy framework.ClusterProxy, artifactFolder string, namespace *corev1.Namespace, cancelWatches context.CancelFunc, cluster *clusterv1.Cluster, intervalsGetter func(spec, key string) []interface{}, skipCleanup bool) { + Byf("Dumping all the Cluster API resources in the %q namespace", namespace.Name) + // Dump all Cluster API related resources to artifacts before deleting them. + framework.DumpAllResources(ctx, framework.DumpAllResourcesInput{ + Lister: clusterProxy.GetClient(), + Namespace: namespace.Name, + LogPath: filepath.Join(artifactFolder, "clusters", clusterProxy.GetName(), "resources"), + }) + + if !skipCleanup { + Byf("Deleting cluster %s/%s", cluster.Namespace, cluster.Name) + framework.DeleteClusterAndWait(ctx, framework.DeleteClusterAndWaitInput{ + Client: clusterProxy.GetClient(), + Cluster: cluster, + }, intervalsGetter(specName, "wait-delete-cluster")...) 
+ + Byf("Deleting namespace used for hosting the %q test spec", specName) + framework.DeleteNamespace(ctx, framework.DeleteNamespaceInput{ + Deleter: clusterProxy.GetClient(), + Name: namespace.Name, + }) + } + cancelWatches() +} diff --git a/test/e2e/config/docker-ci.yaml b/test/e2e/config/docker-ci.yaml new file mode 100644 index 000000000000..d9ddcda89c92 --- /dev/null +++ b/test/e2e/config/docker-ci.yaml @@ -0,0 +1,77 @@ +--- +# CI E2E test test configuration scenario using locally build images and manifests for: +# - cluster-api +# - bootstrap kubeadm +# - control-plane kubeadm +# - docker + +# For creating local dev images run ./scripts/ci-e2e.sh + +images: +# Use local dev images built source tree; +- name: gcr.io/k8s-staging-cluster-api/cluster-api-controller-amd64:ci + loadBehavior: mustLoad +- name: gcr.io/k8s-staging-cluster-api/kubeadm-bootstrap-controller-amd64:ci + loadBehavior: mustLoad +- name: gcr.io/k8s-staging-cluster-api/kubeadm-control-plane-controller-amd64:ci + loadBehavior: mustLoad +- name: gcr.io/k8s-staging-cluster-api/capd-manager-amd64:ci + loadBehavior: mustLoad +- name: quay.io/jetstack/cert-manager-cainjector:v0.11.0 + loadBehavior: tryLoad +- name: quay.io/jetstack/cert-manager-webhook:v0.11.0 + loadBehavior: tryLoad +- name: quay.io/jetstack/cert-manager-controller:v0.11.0 + loadBehavior: tryLoad + +providers: + +- name: cluster-api + type: CoreProvider + versions: + - name: v0.3.0 + # Use manifest from source files + value: ../../../config + +- name: kubeadm + type: BootstrapProvider + versions: + - name: v0.3.0 + # Use manifest from source files + value: ../../../bootstrap/kubeadm/config + +- name: kubeadm + type: ControlPlaneProvider + versions: + - name: v0.3.0 + # Use manifest from source files + value: ../../../controlplane/kubeadm/config + +- name: docker + type: InfrastructureProvider + versions: + - name: v0.3.0 + # Use manifest from source files + value: ../../../test/infrastructure/docker/config + files: + # Add a 
metadata for docker provider + - sourcePath: "../data/infrastructure-docker/metadata.yaml" + # Add a cluster template + - sourcePath: "../data/infrastructure-docker/cluster-template-ci.yaml" + targetName: "cluster-template.yaml" + +variables: + KUBERNETES_VERSION: "v1.17.0" + DOCKER_SERVICE_DOMAIN: "cluster.local" + DOCKER_SERVICE_CIDRS: "10.128.0.0/12" + # IMPORTANT! This values should match the one used by the CNI provider + DOCKER_POD_CIDRS: "192.168.0.0/16" + CNI: "./data/cni/kindnet/kindnet.yaml" + +intervals: + default/wait-controllers: ["3m", "10s"] + default/wait-cluster: ["3m", "10s"] + default/wait-control-plane: ["3m", "10s"] + default/wait-worker-nodes: ["3m", "10s"] + default/wait-delete-cluster: ["3m", "10s"] + default/wait-machine-upgrade: ["15m", "1m"] \ No newline at end of file diff --git a/test/e2e/config/docker-dev.yaml b/test/e2e/config/docker-dev.yaml new file mode 100644 index 000000000000..95398206d0ee --- /dev/null +++ b/test/e2e/config/docker-dev.yaml @@ -0,0 +1,108 @@ +--- +# E2E test scenario using local dev images and manifests built from the source tree for following providers: +# - cluster-api +# - bootstrap kubeadm +# - control-plane kubeadm +# - docker + +# For creating local dev images built from the source tree; +# - `make docker-build REGISTRY=gcr.io/k8s-staging-cluster-api` to build the cluster-api, bootstrap kubeadm, control-plane kubeadm provider images. +# - `make -C test/infrastructure/docker docker-build REGISTRY=gcr.io/k8s-staging-cluster-api` to build the docker provider images. 
+ +images: +# Use local dev images built source tree; +- name: gcr.io/k8s-staging-cluster-api/cluster-api-controller-amd64:dev + loadBehavior: mustLoad +- name: gcr.io/k8s-staging-cluster-api/kubeadm-bootstrap-controller-amd64:dev + loadBehavior: mustLoad +- name: gcr.io/k8s-staging-cluster-api/kubeadm-control-plane-controller-amd64:dev + loadBehavior: mustLoad +- name: gcr.io/k8s-staging-cluster-api/capd-manager-amd64:dev + loadBehavior: mustLoad +- name: quay.io/jetstack/cert-manager-cainjector:v0.11.0 + loadBehavior: tryLoad +- name: quay.io/jetstack/cert-manager-webhook:v0.11.0 + loadBehavior: tryLoad +- name: quay.io/jetstack/cert-manager-controller:v0.11.0 + loadBehavior: tryLoad +# If using Calico uncomment following lines to speed up test by pre-loading required images on nodes +# - name: calico/kube-controllers:v3.13.1 +# loadBehavior: tryLoad +# - name: calico/cni:v3.13.1 +# loadBehavior: tryLoad +# - name: calico/pod2daemon-flexvol:v3.13.1 +# loadBehavior: tryLoad +# - name: calico/node:v3.13.1 +# loadBehavior: tryLoad + +providers: + +- name: cluster-api + type: CoreProvider + versions: + - name: v0.3.0 + # Use manifest from source files + value: ../../../config + replacements: + - old: "imagePullPolicy: Always" + new: "imagePullPolicy: IfNotPresent" + - old: "--enable-leader-election" + new: "--enable-leader-election=false" + +- name: kubeadm + type: BootstrapProvider + versions: + - name: v0.3.0 + # Use manifest from source files + value: ../../../bootstrap/kubeadm/config + replacements: + - old: "imagePullPolicy: Always" + new: "imagePullPolicy: IfNotPresent" + - old: "--enable-leader-election" + new: "--enable-leader-election=false" + +- name: kubeadm + type: ControlPlaneProvider + versions: + - name: v0.3.0 + # Use manifest from source files + value: ../../../controlplane/kubeadm/config + replacements: + - old: "imagePullPolicy: Always" + new: "imagePullPolicy: IfNotPresent" + - old: "--enable-leader-election" + new: 
"--enable-leader-election=false" + +- name: docker + type: InfrastructureProvider + versions: + - name: v0.3.0 + # Use manifest from source files + value: ../../../test/infrastructure/docker/config + replacements: + - old: "imagePullPolicy: Always" + new: "imagePullPolicy: IfNotPresent" + - old: "--enable-leader-election" + new: "--enable-leader-election=false" + files: + # Add a metadata for docker provider + - sourcePath: "../data/infrastructure-docker/metadata.yaml" + # Add a cluster template + - sourcePath: "../data/infrastructure-docker/cluster-template.yaml" + +variables: + KUBERNETES_VERSION: "v1.17.0" + DOCKER_SERVICE_DOMAIN: "cluster.local" + DOCKER_SERVICE_CIDRS: "10.128.0.0/12" + # IMPORTANT! This values should match the one used by the CNI provider + DOCKER_POD_CIDRS: "192.168.0.0/16" + #CNI: "./data/cni/calico/calico.yaml" + CNI: "./data/cni/kindnet/kindnet.yaml" + +intervals: + default/wait-controllers: ["3m", "10s"] + default/wait-cluster: ["3m", "10s"] + default/wait-control-plane: ["3m", "10s"] + default/wait-worker-nodes: ["3m", "10s"] + default/wait-delete-cluster: ["3m", "10s"] + default/wait-machine-upgrade: ["15m", "1m"] \ No newline at end of file diff --git a/test/e2e/data/cni/kindnet/kindnet.yaml b/test/e2e/data/cni/kindnet/kindnet.yaml new file mode 100644 index 000000000000..8995ca640348 --- /dev/null +++ b/test/e2e/data/cni/kindnet/kindnet.yaml @@ -0,0 +1,113 @@ +# kindnetd networking manifest +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kindnet +rules: + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - kindnet + - apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kindnet +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kindnet +subjects: + - kind: ServiceAccount + name: kindnet + namespace: kube-system +--- 
+apiVersion: v1 +kind: ServiceAccount +metadata: + name: kindnet + namespace: kube-system +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kindnet + namespace: kube-system + labels: + tier: node + app: kindnet + k8s-app: kindnet +spec: + selector: + matchLabels: + app: kindnet + template: + metadata: + labels: + tier: node + app: kindnet + k8s-app: kindnet + spec: + hostNetwork: true + tolerations: + - operator: Exists + effect: NoSchedule + serviceAccountName: kindnet + containers: + - name: kindnet-cni + image: kindest/kindnetd:0.5.4 + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_SUBNET + value: "192.168.0.0/16" + volumeMounts: + - name: cni-cfg + mountPath: /etc/cni/net.d + - name: xtables-lock + mountPath: /run/xtables.lock + readOnly: false + - name: lib-modules + mountPath: /lib/modules + readOnly: true + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: false + capabilities: + add: ["NET_RAW", "NET_ADMIN"] + volumes: + - name: cni-cfg + hostPath: + path: /etc/cni/net.d + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + - name: lib-modules + hostPath: + path: /lib/modules +--- \ No newline at end of file diff --git a/test/e2e/data/infrastructure-aws/cluster-template.yaml b/test/e2e/data/infrastructure-aws/cluster-template.yaml new file mode 100644 index 000000000000..0bafd4746ebc --- /dev/null +++ b/test/e2e/data/infrastructure-aws/cluster-template.yaml @@ -0,0 +1,113 @@ +--- +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: Cluster +metadata: + name: "${CLUSTER_NAME}" +spec: + clusterNetwork: + pods: + cidrBlocks: ["192.168.0.0/16"] + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: AWSCluster + name: "${CLUSTER_NAME}" + controlPlaneRef: + kind: KubeadmControlPlane + apiVersion: 
controlplane.cluster.x-k8s.io/v1alpha3 + name: "${CLUSTER_NAME}-control-plane" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: AWSCluster +metadata: + name: "${CLUSTER_NAME}" +spec: + region: "${AWS_REGION}" + sshKeyName: "${AWS_SSH_KEY_NAME}" +--- +kind: KubeadmControlPlane +apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + infrastructureTemplate: + kind: AWSMachineTemplate + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + name: "${CLUSTER_NAME}-control-plane" + kubeadmConfigSpec: + initConfiguration: + nodeRegistration: + name: '{{ ds.meta_data.local_hostname }}' + kubeletExtraArgs: + cloud-provider: aws + clusterConfiguration: + apiServer: + extraArgs: + cloud-provider: aws + controllerManager: + extraArgs: + cloud-provider: aws + joinConfiguration: + nodeRegistration: + name: '{{ ds.meta_data.local_hostname }}' + kubeletExtraArgs: + cloud-provider: aws + version: "${KUBERNETES_VERSION}" +--- +kind: AWSMachineTemplate +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + template: + spec: + instanceType: "${AWS_CONTROL_PLANE_MACHINE_TYPE}" + iamInstanceProfile: "control-plane.cluster-api-provider-aws.sigs.k8s.io" + sshKeyName: "${AWS_SSH_KEY_NAME}" +--- +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: MachineDeployment +metadata: + name: "${CLUSTER_NAME}-md-0" +spec: + clusterName: "${CLUSTER_NAME}" + replicas: ${WORKER_MACHINE_COUNT} + selector: + matchLabels: + template: + spec: + clusterName: "${CLUSTER_NAME}" + version: "${KUBERNETES_VERSION}" + bootstrap: + configRef: + name: "${CLUSTER_NAME}-md-0" + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 + kind: KubeadmConfigTemplate + infrastructureRef: + name: "${CLUSTER_NAME}-md-0" + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: AWSMachineTemplate +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: 
AWSMachineTemplate +metadata: + name: "${CLUSTER_NAME}-md-0" +spec: + template: + spec: + instanceType: "${AWS_NODE_MACHINE_TYPE}" + iamInstanceProfile: "nodes.cluster-api-provider-aws.sigs.k8s.io" + sshKeyName: "${AWS_SSH_KEY_NAME}" +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 +kind: KubeadmConfigTemplate +metadata: + name: "${CLUSTER_NAME}-md-0" +spec: + template: + spec: + joinConfiguration: + nodeRegistration: + name: '{{ ds.meta_data.local_hostname }}' + kubeletExtraArgs: + cloud-provider: aws diff --git a/test/e2e/data/infrastructure-docker/cluster-template-ci.yaml b/test/e2e/data/infrastructure-docker/cluster-template-ci.yaml new file mode 100644 index 000000000000..fbc0f5a2acea --- /dev/null +++ b/test/e2e/data/infrastructure-docker/cluster-template-ci.yaml @@ -0,0 +1,107 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: DockerCluster +metadata: + name: '${ CLUSTER_NAME }' +--- +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: Cluster +metadata: + name: '${ CLUSTER_NAME }' +spec: + clusterNetwork: + services: + cidrBlocks: ['${ DOCKER_SERVICE_CIDRS }'] + pods: + cidrBlocks: ['${ DOCKER_POD_CIDRS }'] + serviceDomain: '${ DOCKER_SERVICE_DOMAIN }' + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: DockerCluster + name: '${ CLUSTER_NAME }' + controlPlaneRef: + kind: KubeadmControlPlane + apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 + name: "${CLUSTER_NAME}-control-plane" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: DockerMachineTemplate +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + template: + spec: + extraMounts: + - containerPath: "/var/run/docker.sock" + hostPath: "/var/run/docker.sock" +--- +kind: KubeadmControlPlane +apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 +metadata: + name: "${ CLUSTER_NAME }-control-plane" +spec: + replicas: ${ CONTROL_PLANE_MACHINE_COUNT } + infrastructureTemplate: + kind: DockerMachineTemplate + apiVersion: 
infrastructure.cluster.x-k8s.io/v1alpha3 + name: "${CLUSTER_NAME}-control-plane" + kubeadmConfigSpec: + clusterConfiguration: + controllerManager: + extraArgs: {enable-hostpath-provisioner: 'true'} + apiServer: + certSANs: [localhost, 127.0.0.1] + initConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: {eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'} + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: {eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'} + version: "${KUBERNETES_VERSION}" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: DockerMachineTemplate +metadata: + name: "${CLUSTER_NAME}-md-0" +spec: + template: + spec: + extraMounts: + - containerPath: "/var/run/docker.sock" + hostPath: "/var/run/docker.sock" +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 +kind: KubeadmConfigTemplate +metadata: + name: "${ CLUSTER_NAME }-md-0" +spec: + template: + spec: + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: {eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'} +--- +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: MachineDeployment +metadata: + name: "${CLUSTER_NAME}-md-0" +spec: + clusterName: "${CLUSTER_NAME}" + replicas: ${ WORKER_MACHINE_COUNT } + selector: + matchLabels: + template: + spec: + clusterName: "${ CLUSTER_NAME }" + version: "${ KUBERNETES_VERSION }" + bootstrap: + configRef: + name: "${ CLUSTER_NAME }-md-0" + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 + kind: KubeadmConfigTemplate + infrastructureRef: + name: "${ CLUSTER_NAME }-md-0" + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: DockerMachineTemplate diff --git a/test/e2e/data/infrastructure-docker/cluster-template.yaml b/test/e2e/data/infrastructure-docker/cluster-template.yaml new file mode 100644 
index 000000000000..fbc0f5a2acea --- /dev/null +++ b/test/e2e/data/infrastructure-docker/cluster-template.yaml @@ -0,0 +1,107 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: DockerCluster +metadata: + name: '${ CLUSTER_NAME }' +--- +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: Cluster +metadata: + name: '${ CLUSTER_NAME }' +spec: + clusterNetwork: + services: + cidrBlocks: ['${ DOCKER_SERVICE_CIDRS }'] + pods: + cidrBlocks: ['${ DOCKER_POD_CIDRS }'] + serviceDomain: '${ DOCKER_SERVICE_DOMAIN }' + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: DockerCluster + name: '${ CLUSTER_NAME }' + controlPlaneRef: + kind: KubeadmControlPlane + apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 + name: "${CLUSTER_NAME}-control-plane" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: DockerMachineTemplate +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + template: + spec: + extraMounts: + - containerPath: "/var/run/docker.sock" + hostPath: "/var/run/docker.sock" +--- +kind: KubeadmControlPlane +apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 +metadata: + name: "${ CLUSTER_NAME }-control-plane" +spec: + replicas: ${ CONTROL_PLANE_MACHINE_COUNT } + infrastructureTemplate: + kind: DockerMachineTemplate + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + name: "${CLUSTER_NAME}-control-plane" + kubeadmConfigSpec: + clusterConfiguration: + controllerManager: + extraArgs: {enable-hostpath-provisioner: 'true'} + apiServer: + certSANs: [localhost, 127.0.0.1] + initConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: {eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'} + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: {eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'} + version: "${KUBERNETES_VERSION}" +--- +apiVersion: 
infrastructure.cluster.x-k8s.io/v1alpha3 +kind: DockerMachineTemplate +metadata: + name: "${CLUSTER_NAME}-md-0" +spec: + template: + spec: + extraMounts: + - containerPath: "/var/run/docker.sock" + hostPath: "/var/run/docker.sock" +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 +kind: KubeadmConfigTemplate +metadata: + name: "${ CLUSTER_NAME }-md-0" +spec: + template: + spec: + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: {eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'} +--- +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: MachineDeployment +metadata: + name: "${CLUSTER_NAME}-md-0" +spec: + clusterName: "${CLUSTER_NAME}" + replicas: ${ WORKER_MACHINE_COUNT } + selector: + matchLabels: + template: + spec: + clusterName: "${ CLUSTER_NAME }" + version: "${ KUBERNETES_VERSION }" + bootstrap: + configRef: + name: "${ CLUSTER_NAME }-md-0" + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 + kind: KubeadmConfigTemplate + infrastructureRef: + name: "${ CLUSTER_NAME }-md-0" + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: DockerMachineTemplate diff --git a/test/e2e/data/infrastructure-docker/metadata.yaml b/test/e2e/data/infrastructure-docker/metadata.yaml new file mode 100644 index 000000000000..264b00b51f34 --- /dev/null +++ b/test/e2e/data/infrastructure-docker/metadata.yaml @@ -0,0 +1,9 @@ +apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3 +kind: Metadata +releaseSeries: + - major: 0 + minor: 2 + contract: v1alpha2 + - major: 0 + minor: 3 + contract: v1alpha3 \ No newline at end of file diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go new file mode 100644 index 000000000000..19ac7c6744b7 --- /dev/null +++ b/test/e2e/e2e_suite_test.go @@ -0,0 +1,210 @@ +// +build e2e + +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "context" + "flag" + "fmt" + "os" + "path/filepath" + "strings" + "testing" + + . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/reporters" + . "github.com/onsi/gomega" + + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/cluster-api/test/framework" + "sigs.k8s.io/cluster-api/test/framework/bootstrap" + "sigs.k8s.io/cluster-api/test/framework/clusterctl" +) + +// Test suite flags +var ( + // configPath is the path to the e2e config file. + configPath string + + // useExistingCluster instructs the test to use the current cluster instead of creating a new one (default discovery rules apply). + useExistingCluster bool + + // artifactFolder is the folder to store e2e test artifacts. + artifactFolder string + + // skipCleanup prevents cleanup of test resources e.g. for debug purposes. + skipCleanup bool +) + +// Test suite global vars +var ( + // e2eConfig to be used for this test, read from configPath. + e2eConfig *clusterctl.E2EConfig + + // clusterctlConfigPath to be used for this test, created by generating a clusterctl local repository + // with the providers specified in the configPath. + clusterctlConfigPath string + + // bootstrapClusterProvider manages provisioning of the bootstrap cluster to be used for the e2e tests. + // Please note that provisioning will be skipped if e2e.use-existing-cluster is provided. + bootstrapClusterProvider bootstrap.ClusterProvider + + // bootstrapClusterProxy allows to interact with the bootstrap cluster to be used for the e2e tests. 
+ bootstrapClusterProxy framework.ClusterProxy +) + +func init() { + flag.StringVar(&configPath, "e2e.config", "", "path to the e2e config file") + flag.StringVar(&artifactFolder, "e2e.artifacts-folder", "", "folder where e2e test artifact should be stored") + flag.BoolVar(&skipCleanup, "e2e.skip-resource-cleanup", false, "if true, the resource cleanup after tests will be skipped") + flag.BoolVar(&useExistingCluster, "e2e.use-existing-cluster", false, "if true, the test uses the current cluster instead of creating a new one (default discovery rules apply)") +} + +func TestE2E(t *testing.T) { + // If running in prow, make sure to use the artifacts folder that will be reported in test grid (ignoring the value provided by flag). + if prowArtifactFolder, exists := os.LookupEnv("ARTIFACTS"); exists { + artifactFolder = prowArtifactFolder + } + + RegisterFailHandler(Fail) + junitPath := filepath.Join(artifactFolder, fmt.Sprintf("junit.e2e_suite.%d.xml", config.GinkgoConfig.ParallelNode)) + junitReporter := reporters.NewJUnitReporter(junitPath) + RunSpecsWithDefaultAndCustomReporters(t, "capi-e2e", []Reporter{junitReporter}) +} + +// Using a SynchronizedBeforeSuite for controlling how to create resources shared across ParallelNodes (~ginkgo threads). +// The local clusterctl repository & the bootstrap cluster are created once and shared across all the tests. +var _ = SynchronizedBeforeSuite(func() []byte { + // Before all ParallelNodes. + + Expect(configPath).To(BeAnExistingFile(), "Invalid test suite argument. e2e.config should be an existing file.") + Expect(os.MkdirAll(artifactFolder, 0755)).To(Succeed(), "Invalid test suite argument. 
Can't create e2e.artifacts-folder %q", artifactFolder) + + By("Initializing a runtime.Scheme with all the GVK relevant for this test") + scheme := initScheme() + + Byf("Loading the e2e test configuration from %q", configPath) + e2eConfig = loadE2EConfig(configPath) + + Byf("Creating a clusterctl local repository into %q", artifactFolder) + clusterctlConfigPath = createClusterctlLocalRepository(e2eConfig, filepath.Join(artifactFolder, "repository")) + + By("Setting up the bootstrap cluster") + bootstrapClusterProvider, bootstrapClusterProxy = setupBootstrapCluster(e2eConfig, scheme, useExistingCluster) + + By("Initializing the bootstrap cluster") + initBootstrapCluster(bootstrapClusterProxy, e2eConfig, clusterctlConfigPath, artifactFolder) + + return []byte( + strings.Join([]string{ + artifactFolder, + configPath, + clusterctlConfigPath, + bootstrapClusterProxy.GetKubeconfigPath(), + }, ","), + ) +}, func(data []byte) { + // Before each ParallelNode. + + parts := strings.Split(string(data), ",") + Expect(parts).To(HaveLen(4)) + + artifactFolder = parts[0] + configPath = parts[1] + clusterctlConfigPath = parts[2] + kubeconfigPath := parts[3] + + e2eConfig = loadE2EConfig(configPath) + bootstrapClusterProxy = framework.NewClusterProxy("bootstrap", kubeconfigPath, initScheme()) +}) + +// Using a SynchronizedAfterSuite for controlling how to delete resources shared across ParallelNodes (~ginkgo threads). +// The bootstrap cluster is shared across all the tests, so it should be deleted only after all ParallelNodes completes. +// The local clusterctl repository is preserved like everything else created into the artifact folder. +var _ = SynchronizedAfterSuite(func() { + // After each ParallelNode. +}, func() { + // After all ParallelNodes. 
+ + By("Tearing down the management cluster") + if !skipCleanup { + tearDown(bootstrapClusterProvider, bootstrapClusterProxy) + } +}) + +func initScheme() *runtime.Scheme { + sc := runtime.NewScheme() + framework.TryAddDefaultSchemes(sc) + return sc +} + +func loadE2EConfig(configPath string) *clusterctl.E2EConfig { + config := clusterctl.LoadE2EConfig(context.TODO(), clusterctl.LoadE2EConfigInput{ConfigPath: configPath}) + Expect(config).ToNot(BeNil(), "Failed to load E2E config from %s", configPath) + return config +} + +func createClusterctlLocalRepository(config *clusterctl.E2EConfig, repositoryFolder string) string { + clusterctlConfig := clusterctl.CreateRepository(context.TODO(), clusterctl.CreateRepositoryInput{ + E2EConfig: config, + RepositoryFolder: repositoryFolder, + }) + Expect(clusterctlConfig).To(BeAnExistingFile(), "The clusterctl config file does not exists in the local repository %s", repositoryFolder) + return clusterctlConfig +} + +func setupBootstrapCluster(config *clusterctl.E2EConfig, scheme *runtime.Scheme, useExistingCluster bool) (bootstrap.ClusterProvider, framework.ClusterProxy) { + var clusterProvider bootstrap.ClusterProvider + kubeconfigPath := "" + if !useExistingCluster { + clusterProvider = bootstrap.CreateKindBootstrapClusterAndLoadImages(context.TODO(), bootstrap.CreateKindBootstrapClusterAndLoadImagesInput{ + Name: config.ManagementClusterName, + RequiresDockerSock: config.HasDockerProvider(), + Images: config.Images, + }) + Expect(clusterProvider).ToNot(BeNil(), "Failed to create a bootstrap cluster") + + kubeconfigPath = clusterProvider.GetKubeconfigPath() + Expect(kubeconfigPath).To(BeAnExistingFile(), "Failed to get the kubeconfig file for the bootstrap cluster") + } + + clusterProxy := framework.NewClusterProxy("bootstrap", kubeconfigPath, scheme) + Expect(clusterProxy).ToNot(BeNil(), "Failed to get a bootstrap cluster proxy") + + return clusterProvider, clusterProxy +} + +func initBootstrapCluster(bootstrapClusterProxy 
framework.ClusterProxy, config *clusterctl.E2EConfig, clusterctlConfig, artifactFolder string) { + clusterctl.InitManagementClusterAndWatchControllerLogs(context.TODO(), clusterctl.InitManagementClusterAndWatchControllerLogsInput{ + ClusterProxy: bootstrapClusterProxy, + ClusterctlConfigPath: clusterctlConfig, + InfrastructureProviders: config.InfrastructureProviders(), + LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()), + }, config.GetIntervals(bootstrapClusterProxy.GetName(), "wait-controllers")...) +} + +func tearDown(bootstrapClusterProvider bootstrap.ClusterProvider, bootstrapClusterProxy framework.ClusterProxy) { + if bootstrapClusterProxy != nil { + bootstrapClusterProxy.Dispose(context.TODO()) + } + if bootstrapClusterProvider != nil { + bootstrapClusterProvider.Dispose(context.TODO()) + } +} diff --git a/test/e2e/kcp_upgrade.go b/test/e2e/kcp_upgrade.go new file mode 100644 index 000000000000..9fb80b235371 --- /dev/null +++ b/test/e2e/kcp_upgrade.go @@ -0,0 +1,116 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "context" + "fmt" + "os" + "path/filepath" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + "k8s.io/utils/pointer" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" + controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha3" + "sigs.k8s.io/cluster-api/test/framework" + "sigs.k8s.io/cluster-api/test/framework/clusterctl" + "sigs.k8s.io/cluster-api/util" +) + +// KCPUpgradeSpecInput is the input for KCPUpgradeSpec. +type KCPUpgradeSpecInput struct { + E2EConfig *clusterctl.E2EConfig + ClusterctlConfigPath string + BootstrapClusterProxy framework.ClusterProxy + ArtifactFolder string + SkipCleanup bool +} + +// KCPUpgradeSpec implements a test that verifies KCP to properly upgrade a control plane with 3 machines. +func KCPUpgradeSpec(ctx context.Context, inputGetter func() KCPUpgradeSpecInput) { + var ( + specName = "kcp-upgrade" + input KCPUpgradeSpecInput + namespace *corev1.Namespace + cancelWatches context.CancelFunc + cluster *clusterv1.Cluster + controlPlane *controlplanev1.KubeadmControlPlane + ) + + BeforeEach(func() { + Expect(ctx).NotTo(BeNil(), "ctx is required for %s spec", specName) + input = inputGetter() + Expect(input.E2EConfig).ToNot(BeNil(), "Invalid argument. input.E2EConfig can't be nil when calling %s spec", specName) + Expect(input.ClusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. input.ClusterctlConfigPath must be an existing file when calling %s spec", specName) + Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. input.BootstrapClusterProxy can't be nil when calling %s spec", specName) + Expect(os.MkdirAll(input.ArtifactFolder, 0755)).To(Succeed(), "Invalid argument. input.ArtifactFolder can't be created for %s spec", specName) + + // Setup a Namespace where to host objects for this spec and create a watcher for the namespace events. 
+ namespace, cancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder) + }) + + It("Should successfully upgrade Kubernetes, DNS, kube-proxy, and etcd", func() { + + By("Creating a workload cluster") + Expect(input.E2EConfig.Variables).To(HaveKey(clusterctl.KubernetesVersion)) + Expect(input.E2EConfig.Variables).To(HaveKey(clusterctl.CNIPath)) + + cluster, controlPlane, _ = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ + ClusterProxy: input.BootstrapClusterProxy, + ConfigCluster: clusterctl.ConfigClusterInput{ + LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()), + ClusterctlConfigPath: input.ClusterctlConfigPath, + KubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(), + InfrastructureProvider: clusterctl.DefaultInfrastructureProvider, + Flavor: clusterctl.DefaultFlavor, + Namespace: namespace.Name, + ClusterName: fmt.Sprintf("cluster-%s", util.RandomString(6)), + KubernetesVersion: input.E2EConfig.GetKubernetesVersion(), + ControlPlaneMachineCount: pointer.Int64Ptr(3), + WorkerMachineCount: pointer.Int64Ptr(1), + }, + CNIManifestPath: input.E2EConfig.GetCNIPath(), + WaitForClusterIntervals: input.E2EConfig.GetIntervals(specName, "wait-cluster"), + WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, "wait-control-plane"), + WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"), + }) + + By("Upgrading Kubernetes, DNS, kube-proxy, and etcd versions") + framework.UpgradeControlPlaneAndWaitForUpgrade(ctx, framework.UpgradeControlPlaneAndWaitForUpgradeInput{ + ClusterProxy: input.BootstrapClusterProxy, + Cluster: cluster, + ControlPlane: controlPlane, + //Valid image tags for v1.17.2 + EtcdImageTag: "3.4.3-0", + DNSImageTag: "1.6.6", + KubernetesUpgradeVersion: "v1.17.2", + WaitForMachinesToBeUpgraded: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"), + 
WaitForDNSUpgrade: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"), + WaitForEtcdUpgrade: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"), + }) + + By("PASSED!") + }) + + AfterEach(func() { + // Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself. + dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, cluster, input.E2EConfig.GetIntervals, input.SkipCleanup) + }) +} diff --git a/test/e2e/kcp_upgrade_test.go b/test/e2e/kcp_upgrade_test.go new file mode 100644 index 000000000000..e0ca5b2dd884 --- /dev/null +++ b/test/e2e/kcp_upgrade_test.go @@ -0,0 +1,39 @@ +// +build e2e + +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "context" + + . "github.com/onsi/ginkgo" +) + +var _ = Describe("When testing KCP upgrade", func() { + + KCPUpgradeSpec(context.TODO(), func() KCPUpgradeSpecInput { + return KCPUpgradeSpecInput{ + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + } + }) + +}) diff --git a/test/e2e/quick_start.go b/test/e2e/quick_start.go new file mode 100644 index 000000000000..75cc48828e81 --- /dev/null +++ b/test/e2e/quick_start.go @@ -0,0 +1,102 @@ +/* +Copyright 2020 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "context" + "fmt" + "os" + "path/filepath" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + "k8s.io/utils/pointer" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" + "sigs.k8s.io/cluster-api/test/framework" + "sigs.k8s.io/cluster-api/test/framework/clusterctl" + "sigs.k8s.io/cluster-api/util" +) + +// QuickStartSpecInput is the input for QuickStartSpec. +type QuickStartSpecInput struct { + E2EConfig *clusterctl.E2EConfig + ClusterctlConfigPath string + BootstrapClusterProxy framework.ClusterProxy + ArtifactFolder string + SkipCleanup bool +} + +// QuickStartSpec implements a spec that mimics the operation described in the Cluster API quick start, that is +// creating a workload cluster. +// This test is meant to provide a first, fast signal to detect regression; it is recommended to use it as a PR blocker test. +func QuickStartSpec(ctx context.Context, inputGetter func() QuickStartSpecInput) { + var ( + specName = "quick-start" + input QuickStartSpecInput + namespace *corev1.Namespace + cancelWatches context.CancelFunc + cluster *clusterv1.Cluster + ) + + BeforeEach(func() { + Expect(ctx).NotTo(BeNil(), "ctx is required for %s spec", specName) + input = inputGetter() + Expect(input.E2EConfig).ToNot(BeNil(), "Invalid argument. 
input.E2EConfig can't be nil when calling %s spec", specName) + Expect(input.ClusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. input.ClusterctlConfigPath must be an existing file when calling %s spec", specName) + Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. input.BootstrapClusterProxy can't be nil when calling %s spec", specName) + Expect(os.MkdirAll(input.ArtifactFolder, 0755)).To(Succeed(), "Invalid argument. input.ArtifactFolder can't be created for %s spec", specName) + + // Setup a Namespace where to host objects for this spec and create a watcher for the namespace events. + namespace, cancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder) + }) + + It("Should create a workload cluster", func() { + + By("Creating a workload cluster") + Expect(input.E2EConfig.Variables).To(HaveKey(clusterctl.KubernetesVersion)) + Expect(input.E2EConfig.Variables).To(HaveKey(clusterctl.CNIPath)) + + cluster, _, _ = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ + ClusterProxy: input.BootstrapClusterProxy, + ConfigCluster: clusterctl.ConfigClusterInput{ + LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()), + ClusterctlConfigPath: input.ClusterctlConfigPath, + KubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(), + InfrastructureProvider: clusterctl.DefaultInfrastructureProvider, + Flavor: clusterctl.DefaultFlavor, + Namespace: namespace.Name, + ClusterName: fmt.Sprintf("cluster-%s", util.RandomString(6)), + KubernetesVersion: input.E2EConfig.GetKubernetesVersion(), + ControlPlaneMachineCount: pointer.Int64Ptr(1), + WorkerMachineCount: pointer.Int64Ptr(1), + }, + CNIManifestPath: input.E2EConfig.GetCNIPath(), + WaitForClusterIntervals: input.E2EConfig.GetIntervals(specName, "wait-cluster"), + WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, "wait-control-plane"), + 
WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"), + }) + + By("PASSED!") + }) + + AfterEach(func() { + // Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself. + dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, cluster, input.E2EConfig.GetIntervals, input.SkipCleanup) + }) +} diff --git a/test/e2e/quick_start_test.go b/test/e2e/quick_start_test.go new file mode 100644 index 000000000000..4e38f8303ab9 --- /dev/null +++ b/test/e2e/quick_start_test.go @@ -0,0 +1,39 @@ +// +build e2e + +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "context" + + . "github.com/onsi/ginkgo" +) + +var _ = Describe("When following the Cluster API quick-start", func() { + + QuickStartSpec(context.TODO(), func() QuickStartSpecInput { + return QuickStartSpecInput{ + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + } + }) + +}) diff --git a/test/e2e/self_hosted.go b/test/e2e/self_hosted.go new file mode 100644 index 000000000000..ee5ca557089a --- /dev/null +++ b/test/e2e/self_hosted.go @@ -0,0 +1,192 @@ +/* +Copyright 2020 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "context" + "fmt" + "os" + "path/filepath" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + "k8s.io/utils/pointer" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" + "sigs.k8s.io/cluster-api/test/framework" + "sigs.k8s.io/cluster-api/test/framework/bootstrap" + "sigs.k8s.io/cluster-api/test/framework/clusterctl" + "sigs.k8s.io/cluster-api/util" +) + +// SelfHostedSpecInput is the input for SelfHostedSpec. +type SelfHostedSpecInput struct { + E2EConfig *clusterctl.E2EConfig + ClusterctlConfigPath string + BootstrapClusterProxy framework.ClusterProxy + ArtifactFolder string + SkipCleanup bool +} + +// SelfHostedSpec implements a test that verifies Cluster API creating a cluster, pivoting to a self-hosted cluster. +func SelfHostedSpec(ctx context.Context, inputGetter func() SelfHostedSpecInput) { + var ( + specName = "self-hosted" + input SelfHostedSpecInput + namespace *corev1.Namespace + cancelWatches context.CancelFunc + cluster *clusterv1.Cluster + + selfHostedClusterProxy framework.ClusterProxy + selfHostedNamespace *corev1.Namespace + selfHostedCancelWatches context.CancelFunc + selfHostedCluster *clusterv1.Cluster + ) + + BeforeEach(func() { + Expect(ctx).NotTo(BeNil(), "ctx is required for %s spec", specName) + input = inputGetter() + Expect(input.E2EConfig).ToNot(BeNil(), "Invalid argument. 
input.E2EConfig can't be nil when calling %s spec", specName) + Expect(input.ClusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. input.ClusterctlConfigPath must be an existing file when calling %s spec", specName) + Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. input.BootstrapClusterProxy can't be nil when calling %s spec", specName) + Expect(os.MkdirAll(input.ArtifactFolder, 0755)).To(Succeed(), "Invalid argument. input.ArtifactFolder can't be created for %s spec", specName) + + // Setup a Namespace where to host objects for this spec and create a watcher for the namespace events. + namespace, cancelWatches = setupSpecNamespace(context.TODO(), specName, input.BootstrapClusterProxy, input.ArtifactFolder) + }) + + It("Should create a workload cluster", func() { + + By("Creating a workload cluster") + Expect(input.E2EConfig.Variables).To(HaveKey(clusterctl.KubernetesVersion)) + Expect(input.E2EConfig.Variables).To(HaveKey(clusterctl.CNIPath)) + + cluster, _, _ = clusterctl.ApplyClusterTemplateAndWait(context.TODO(), clusterctl.ApplyClusterTemplateAndWaitInput{ + ClusterProxy: input.BootstrapClusterProxy, + ConfigCluster: clusterctl.ConfigClusterInput{ + LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()), + ClusterctlConfigPath: input.ClusterctlConfigPath, + KubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(), + InfrastructureProvider: clusterctl.DefaultInfrastructureProvider, + Flavor: clusterctl.DefaultFlavor, + Namespace: namespace.Name, + ClusterName: fmt.Sprintf("cluster-%s", util.RandomString(6)), + KubernetesVersion: input.E2EConfig.GetKubernetesVersion(), + ControlPlaneMachineCount: pointer.Int64Ptr(1), + WorkerMachineCount: pointer.Int64Ptr(1), + }, + CNIManifestPath: input.E2EConfig.GetCNIPath(), + WaitForClusterIntervals: input.E2EConfig.GetIntervals(specName, "wait-cluster"), + WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, 
"wait-control-plane"), + WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"), + }) + + By("Turning the workload cluster into a management cluster") + //TODO: refactor into an helper func e.g. "UpgradeToManagementCluster" + + // In case of the cluster id a DockerCluster, we should load controller images into the nodes. + // Nb. this can be achieved also by changing the DockerMachine spec, but for the time being we are using + // this approach because this allows to have a single source of truth for images, the e2e config + if cluster.Spec.InfrastructureRef.Kind == "DockerCluster" { + bootstrap.LoadImagesToKindCluster(context.TODO(), bootstrap.LoadImagesToKindClusterInput{ + Name: cluster.Name, + Images: input.E2EConfig.Images, + }) + } + + // Get a ClusterBroker so we can interact with the workload cluster + selfHostedClusterProxy = input.BootstrapClusterProxy.GetWorkloadCluster(context.TODO(), cluster.Namespace, cluster.Name) + + Byf("Creating a namespace for hosting the %s test spec", specName) + selfHostedNamespace, selfHostedCancelWatches = framework.CreateNamespaceAndWatchEvents(context.TODO(), framework.CreateNamespaceAndWatchEventsInput{ + Creator: selfHostedClusterProxy.GetClient(), + ClientSet: selfHostedClusterProxy.GetClientSet(), + Name: namespace.Name, + LogFolder: filepath.Join(input.ArtifactFolder, "clusters", "bootstrap"), + }) + + By("Initializing the workload cluster") + clusterctl.InitManagementClusterAndWatchControllerLogs(context.TODO(), clusterctl.InitManagementClusterAndWatchControllerLogsInput{ + ClusterProxy: selfHostedClusterProxy, + ClusterctlConfigPath: input.ClusterctlConfigPath, + InfrastructureProviders: input.E2EConfig.InfrastructureProviders(), + LogFolder: filepath.Join(input.ArtifactFolder, "clusters", "self-hosted"), + }, input.E2EConfig.GetIntervals(specName, "wait-controllers")...) + + //TODO: refactor in to an helper func e.g. 
"MoveToSelfHostedAndWait" + By("Moving the cluster to self hosted") + clusterctl.Move(context.TODO(), clusterctl.MoveInput{ + LogFolder: filepath.Join(input.ArtifactFolder, "clusters", "bootstrap"), + ClusterctlConfigPath: input.ClusterctlConfigPath, + FromKubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(), + ToKubeconfigPath: selfHostedClusterProxy.GetKubeconfigPath(), + Namespace: namespace.Name, + }) + + fmt.Fprintf(GinkgoWriter, "Waiting for the cluster infrastructure to be provisioned\n") + selfHostedCluster = framework.DiscoveryAndWaitForCluster(ctx, framework.DiscoveryAndWaitForClusterInput{ + Getter: selfHostedClusterProxy.GetClient(), + Namespace: selfHostedNamespace.Name, + Name: cluster.Name, + }, input.E2EConfig.GetIntervals(specName, "wait-cluster")...) + + controlPlane := framework.GetKubeadmControlPlaneByCluster(ctx, framework.GetKubeadmControlPlaneByClusterInput{ + Lister: selfHostedClusterProxy.GetClient(), + ClusterName: selfHostedCluster.Name, + Namespace: selfHostedCluster.Namespace, + }) + Expect(controlPlane).ToNot(BeNil()) + + By("PASSED!") + }) + + AfterEach(func() { + //TODO: refactor in to an helper func e.g. "MoveToBootstrapAndWait" + if selfHostedNamespace != nil { + // Dump all Cluster API related resources to artifacts before pivoting back. 
+ framework.DumpAllResources(ctx, framework.DumpAllResourcesInput{ + Lister: selfHostedClusterProxy.GetClient(), + Namespace: namespace.Name, + LogPath: filepath.Join(input.ArtifactFolder, "clusters", "self-hosted", "resources"), + }) + } + if selfHostedCluster != nil { + By("Moving the cluster back to bootstrap") + clusterctl.Move(ctx, clusterctl.MoveInput{ + LogFolder: filepath.Join(input.ArtifactFolder, "clusters", "self-hosted"), + ClusterctlConfigPath: input.ClusterctlConfigPath, + FromKubeconfigPath: selfHostedClusterProxy.GetKubeconfigPath(), + ToKubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(), + Namespace: selfHostedNamespace.Name, + }) + + fmt.Fprintf(GinkgoWriter, "Waiting for the cluster infrastructure to be provisioned\n") + cluster = framework.DiscoveryAndWaitForCluster(ctx, framework.DiscoveryAndWaitForClusterInput{ + Getter: input.BootstrapClusterProxy.GetClient(), + Namespace: namespace.Name, + Name: cluster.Name, + }, input.E2EConfig.GetIntervals(specName, "wait-cluster")...) + } + if selfHostedCancelWatches != nil { + selfHostedCancelWatches() + } + + // Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself. + dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, cluster, input.E2EConfig.GetIntervals, input.SkipCleanup) + }) +} diff --git a/test/e2e/self_hosted_test.go b/test/e2e/self_hosted_test.go new file mode 100644 index 000000000000..a964e7b8bc9a --- /dev/null +++ b/test/e2e/self_hosted_test.go @@ -0,0 +1,39 @@ +// +build e2e + +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "context" + + . "github.com/onsi/ginkgo" +) + +var _ = Describe("When testing Cluster API working on self-hosted clusters", func() { + + SelfHostedSpec(context.TODO(), func() SelfHostedSpecInput { + return SelfHostedSpecInput{ + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + } + }) + +}) diff --git a/test/framework/cluster_helpers.go b/test/framework/cluster_helpers.go index f02b28c9d783..4ef80f2cc136 100644 --- a/test/framework/cluster_helpers.go +++ b/test/framework/cluster_helpers.go @@ -133,6 +133,67 @@ func WaitForClusterDeleted(ctx context.Context, input WaitForClusterDeletedInput }, intervals...).Should(BeTrue()) } +// DiscoveryAndWaitForClusterInput is the input type for DiscoveryAndWaitForCluster. +type DiscoveryAndWaitForClusterInput struct { + Getter Getter + Namespace string + Name string +} + +// DiscoveryAndWaitForCluster discovers a cluster object in a namespace and waits for the cluster infrastructure to be provisioned. +func DiscoveryAndWaitForCluster(ctx context.Context, input DiscoveryAndWaitForClusterInput, intervals ...interface{}) *clusterv1.Cluster { + Expect(ctx).NotTo(BeNil(), "ctx is required for DiscoveryAndWaitForCluster") + Expect(input.Getter).ToNot(BeNil(), "Invalid argument. input.Getter can't be nil when calling DiscoveryAndWaitForCluster") + Expect(input.Namespace).ToNot(BeNil(), "Invalid argument. 
input.Namespace can't be empty when calling DiscoveryAndWaitForCluster") + Expect(input.Name).ToNot(BeNil(), "Invalid argument. input.Name can't be empty when calling DiscoveryAndWaitForCluster") + + cluster := GetClusterByName(ctx, GetClusterByNameInput{ + Getter: input.Getter, + Name: input.Name, + Namespace: input.Namespace, + }) + Expect(cluster).ToNot(BeNil(), "Failed to get the Cluster object") + + WaitForClusterToProvision(ctx, WaitForClusterToProvisionInput{ + Getter: input.Getter, + Cluster: cluster, + }, intervals...) + + return cluster +} + +// DeleteClusterAndWaitInput is the input type for DeleteClusterAndWait. +type DeleteClusterAndWaitInput struct { + Client client.Client + Cluster *clusterv1.Cluster +} + +// DeleteClusterAndWait deletes a cluster object and waits for it to be gone. +func DeleteClusterAndWait(ctx context.Context, input DeleteClusterAndWaitInput, intervals ...interface{}) { + Expect(ctx).NotTo(BeNil(), "ctx is required for DeleteClusterAndWait") + Expect(input.Client).ToNot(BeNil(), "Invalid argument. input.Client can't be nil when calling DeleteClusterAndWait") + Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling DeleteClusterAndWait") + + DeleteCluster(ctx, DeleteClusterInput{ + Deleter: input.Client, + Cluster: input.Cluster, + }) + + fmt.Fprintf(GinkgoWriter, "Waiting for the Cluster object to be deleted\n") + WaitForClusterDeleted(ctx, WaitForClusterDeletedInput{ + Getter: input.Client, + Cluster: input.Cluster, + }, intervals...) + + //TODO: consider if to move in another func (what if there are more than one cluster?) 
+ fmt.Fprintf(GinkgoWriter, "Check for all the Cluster API resources being deleted\n") + resources := GetCAPIResources(ctx, GetCAPIResourcesInput{ + Lister: input.Client, + Namespace: input.Cluster.Namespace, + }) + Expect(resources).To(BeEmpty(), "There are still Cluster API resources in the %q namespace", input.Cluster.Namespace) +} + // byClusterOptions returns a set of ListOptions that allows to identify all the objects belonging to a Cluster. func byClusterOptions(name, namespace string) []client.ListOption { return []client.ListOption{ diff --git a/test/framework/clusterctl/client.go b/test/framework/clusterctl/client.go index 70973f27ee40..9b4eb216f726 100644 --- a/test/framework/clusterctl/client.go +++ b/test/framework/clusterctl/client.go @@ -45,7 +45,7 @@ const ( type InitInput struct { LogFolder string ClusterctlConfigPath string - Kubeconfig clusterctlclient.Kubeconfig + KubeconfigPath string CoreProvider string BootstrapProviders []string ControlPlaneProviders []string @@ -62,7 +62,10 @@ func Init(ctx context.Context, input InitInput) { ) initOpt := clusterctlclient.InitOptions{ - Kubeconfig: input.Kubeconfig, + Kubeconfig: clusterctlclient.Kubeconfig{ + Path: input.KubeconfigPath, + Context: "", + }, CoreProvider: input.CoreProvider, BootstrapProviders: input.BootstrapProviders, ControlPlaneProviders: input.ControlPlaneProviders, @@ -81,7 +84,7 @@ func Init(ctx context.Context, input InitInput) { type ConfigClusterInput struct { LogFolder string ClusterctlConfigPath string - Kubeconfig clusterctlclient.Kubeconfig + KubeconfigPath string InfrastructureProvider string Namespace string ClusterName string @@ -103,7 +106,10 @@ func ConfigCluster(ctx context.Context, input ConfigClusterInput) []byte { ) templateOptions := clusterctlclient.GetClusterTemplateOptions{ - Kubeconfig: input.Kubeconfig, + Kubeconfig: clusterctlclient.Kubeconfig{ + Path: input.KubeconfigPath, + Context: "", + }, ProviderRepositorySource: 
&clusterctlclient.ProviderRepositorySourceOptions{ InfrastructureProvider: input.InfrastructureProvider, Flavor: input.Flavor, @@ -133,8 +139,8 @@ func ConfigCluster(ctx context.Context, input ConfigClusterInput) []byte { type MoveInput struct { LogFolder string ClusterctlConfigPath string - FromKubeconfig clusterctlclient.Kubeconfig - ToKubeconfig clusterctlclient.Kubeconfig + FromKubeconfigPath string + ToKubeconfigPath string Namespace string } @@ -144,10 +150,9 @@ func Move(ctx context.Context, input MoveInput) { clusterctlClient, log := getClusterctlClientWithLogger(input.ClusterctlConfigPath, "clusterctl-move.log", input.LogFolder) defer log.Close() - options := clusterctlclient.MoveOptions{ - FromKubeconfig: input.FromKubeconfig, - ToKubeconfig: input.ToKubeconfig, + FromKubeconfig: clusterctlclient.Kubeconfig{Path: input.FromKubeconfigPath, Context: ""}, + ToKubeconfig: clusterctlclient.Kubeconfig{Path: input.ToKubeconfigPath, Context: ""}, Namespace: input.Namespace, } diff --git a/test/framework/clusterctl/clusterctl_helpers.go b/test/framework/clusterctl/clusterctl_helpers.go new file mode 100644 index 000000000000..2664e059e289 --- /dev/null +++ b/test/framework/clusterctl/clusterctl_helpers.go @@ -0,0 +1,172 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clusterctl + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "path/filepath" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" + controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha3" + "sigs.k8s.io/cluster-api/test/framework" + + "sigs.k8s.io/cluster-api/cmd/clusterctl/client/config" +) + +// InitManagementClusterAndWatchControllerLogsInput is the input type for InitManagementClusterAndWatchControllerLogs. +type InitManagementClusterAndWatchControllerLogsInput struct { + ClusterProxy framework.ClusterProxy + ClusterctlConfigPath string + InfrastructureProviders []string + LogFolder string +} + +// InitManagementClusterAndWatchControllerLogs initializes a management using clusterctl and setup watches for controller logs. +// Important: Considering we want to support test suites using existing clusters, clusterctl init is executed only in case +// there are no provider controllers in the cluster; but controller logs watchers are created regardless of the pre-existing providers. +func InitManagementClusterAndWatchControllerLogs(ctx context.Context, input InitManagementClusterAndWatchControllerLogsInput, intervals ...interface{}) { + Expect(ctx).NotTo(BeNil(), "ctx is required for InitManagementClusterAndWatchControllerLogs") + Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling InitManagementClusterAndWatchControllerLogs") + Expect(input.ClusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. input.ClusterctlConfigPath must be an existing file when calling InitManagementClusterAndWatchControllerLogs") + Expect(input.InfrastructureProviders).ToNot(BeEmpty(), "Invalid argument. input.InfrastructureProviders can't be empty when calling InitManagementClusterAndWatchControllerLogs") + Expect(os.MkdirAll(input.LogFolder, 0755)).To(Succeed(), "Invalid argument. 
input.LogFolder can't be created for InitManagementClusterAndWatchControllerLogs") + + client := input.ClusterProxy.GetClient() + controllersDeployments := framework.GetControllerDeployments(context.TODO(), framework.GetControllerDeploymentsInput{ + Lister: client, + }) + if len(controllersDeployments) == 0 { + Init(context.TODO(), InitInput{ + // pass reference to the management cluster hosting this test + KubeconfigPath: input.ClusterProxy.GetKubeconfigPath(), + // pass the clusterctl config file that points to the local provider repository created for this test + ClusterctlConfigPath: input.ClusterctlConfigPath, + // setup the desired list of providers for a single-tenant management cluster + CoreProvider: config.ClusterAPIProviderName, + BootstrapProviders: []string{config.KubeadmBootstrapProviderName}, + ControlPlaneProviders: []string{config.KubeadmControlPlaneProviderName}, + InfrastructureProviders: input.InfrastructureProviders, + // setup clusterctl logs folder + LogFolder: input.LogFolder, + }) + } + + fmt.Fprintf(GinkgoWriter, "Waiting for provider controllers to be running\n") + controllersDeployments = framework.GetControllerDeployments(context.TODO(), framework.GetControllerDeploymentsInput{ + Lister: client, + }) + Expect(controllersDeployments).ToNot(BeEmpty(), "The list of controller deployments should not be empty") + for _, deployment := range controllersDeployments { + framework.WaitForDeploymentsAvailable(context.TODO(), framework.WaitForDeploymentsAvailableInput{ + Getter: client, + Deployment: deployment, + }, intervals...) + + // Start streaming logs from all controller providers + framework.WatchDeploymentLogs(context.TODO(), framework.WatchDeploymentLogsInput{ + GetLister: client, + ClientSet: input.ClusterProxy.GetClientSet(), + Deployment: deployment, + LogPath: filepath.Join(input.LogFolder, "controllers"), + }) + } +} + +// ApplyClusterTemplateAndWaitInput is the input type for ApplyClusterTemplateAndWait. 
+type ApplyClusterTemplateAndWaitInput struct { + ClusterProxy framework.ClusterProxy + ConfigCluster ConfigClusterInput + CNIManifestPath string + WaitForClusterIntervals []interface{} + WaitForControlPlaneIntervals []interface{} + WaitForMachineDeployments []interface{} +} + +// ApplyClusterTemplateAndWait gets a cluster template using clusterctl, and waits for the cluster to be ready. +// Important! this method assumes the cluster uses a KubeadmControlPlane and MachineDeployments. +func ApplyClusterTemplateAndWait(ctx context.Context, input ApplyClusterTemplateAndWaitInput) (*clusterv1.Cluster, *controlplanev1.KubeadmControlPlane, []*clusterv1.MachineDeployment) { + Expect(ctx).NotTo(BeNil(), "ctx is required for ApplyClusterTemplateAndWait") + + Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling ApplyClusterTemplateAndWait") + + fmt.Fprintf(GinkgoWriter, "Creating the workload cluster with name %q using the %q template (Kubernetes %s, %d control-plane machines, %d worker machines)\n", + input.ConfigCluster.ClusterName, valueOrDefault(input.ConfigCluster.Flavor), input.ConfigCluster.KubernetesVersion, input.ConfigCluster.ControlPlaneMachineCount, input.ConfigCluster.WorkerMachineCount) + + fmt.Fprintf(GinkgoWriter, "Getting the cluster template yaml\n") + workloadClusterTemplate := ConfigCluster(ctx, ConfigClusterInput{ + // pass reference to the management cluster hosting this test + KubeconfigPath: input.ConfigCluster.KubeconfigPath, + // pass the clusterctl config file that points to the local provider repository created for this test, + ClusterctlConfigPath: input.ConfigCluster.ClusterctlConfigPath, + // select template + Flavor: input.ConfigCluster.Flavor, + // define template variables + Namespace: input.ConfigCluster.Namespace, + ClusterName: input.ConfigCluster.ClusterName, + KubernetesVersion: input.ConfigCluster.KubernetesVersion, + ControlPlaneMachineCount: 
input.ConfigCluster.ControlPlaneMachineCount, + WorkerMachineCount: input.ConfigCluster.WorkerMachineCount, + InfrastructureProvider: input.ConfigCluster.InfrastructureProvider, + // setup clusterctl logs folder + LogFolder: input.ConfigCluster.LogFolder, + }) + Expect(workloadClusterTemplate).ToNot(BeNil(), "Failed to get the cluster template") + + fmt.Fprintf(GinkgoWriter, "Applying the cluster template yaml to the cluster\n") + Expect(input.ClusterProxy.Apply(ctx, workloadClusterTemplate)).ShouldNot(HaveOccurred()) + + fmt.Fprintf(GinkgoWriter, "Waiting for the cluster infrastructure to be provisioned\n") + cluster := framework.DiscoveryAndWaitForCluster(ctx, framework.DiscoveryAndWaitForClusterInput{ + Getter: input.ClusterProxy.GetClient(), + Namespace: input.ConfigCluster.Namespace, + Name: input.ConfigCluster.ClusterName, + }, input.WaitForClusterIntervals...) + + fmt.Fprintf(GinkgoWriter, "Waiting for control plane to be initialized\n") + controlPlane := framework.DiscoveryAndWaitForControlPlaneInitialized(ctx, framework.DiscoveryAndWaitForControlPlaneInitializedInput{ + Lister: input.ClusterProxy.GetClient(), + Cluster: cluster, + }, input.WaitForControlPlaneIntervals...) + + fmt.Fprintf(GinkgoWriter, "Installing a CNI plugin to the workload cluster\n") + workloadCluster := input.ClusterProxy.GetWorkloadCluster(context.TODO(), cluster.Namespace, cluster.Name) + + cniYaml, err := ioutil.ReadFile(input.CNIManifestPath) + Expect(err).ShouldNot(HaveOccurred()) + + Expect(workloadCluster.Apply(context.TODO(), cniYaml)).ShouldNot(HaveOccurred()) + + fmt.Fprintf(GinkgoWriter, "Waiting for control plane to be ready\n") + framework.WaitForControlPlaneAndMachinesReady(ctx, framework.WaitForControlPlaneAndMachinesReadyInput{ + GetLister: input.ClusterProxy.GetClient(), + Cluster: cluster, + ControlPlane: controlPlane, + }, input.WaitForControlPlaneIntervals...) 
+ + fmt.Fprintf(GinkgoWriter, "Waiting for the worker machines to be provisioned\n") + machineDeployments := framework.DiscoveryAndWaitForMachineDeployments(ctx, framework.DiscoveryAndWaitForMachineDeploymentsInput{ + Lister: input.ClusterProxy.GetClient(), + Cluster: cluster, + }, input.WaitForMachineDeployments...) + + return cluster, controlPlane, machineDeployments +} diff --git a/test/framework/clusterctl/e2e_config.go b/test/framework/clusterctl/e2e_config.go index 0afda6ca76d5..dd93b919f131 100644 --- a/test/framework/clusterctl/e2e_config.go +++ b/test/framework/clusterctl/e2e_config.go @@ -30,6 +30,7 @@ import ( "github.com/pkg/errors" "k8s.io/apimachinery/pkg/util/version" + "k8s.io/utils/pointer" clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" clusterctlconfig "sigs.k8s.io/cluster-api/cmd/clusterctl/client/config" "sigs.k8s.io/cluster-api/test/framework" @@ -39,9 +40,10 @@ import ( // Provides access to the configuration for an e2e test. -// Define constants for well known clusterctl config variables +// Define constants for e2e config variables const ( - kubernetesVersion = "KUBERNETES_VERSION" + KubernetesVersion = "KUBERNETES_VERSION" + CNIPath = "CNI" ) // LoadE2EConfigInput is the input for LoadE2EConfig. @@ -191,6 +193,7 @@ func errEmptyArg(argName string) error { // - Image should have name and loadBehavior be one of [mustload, tryload]. // - Intervals should be valid ginkgo intervals. // - KubernetesVersion is not nil and valid. +// - CNIPath is not nil. func (c *E2EConfig) Validate() error { // ManagementClusterName should not be empty. if c.ManagementClusterName == "" { @@ -230,14 +233,19 @@ func (c *E2EConfig) Validate() error { } } - // If kubernetesVersion is nil or not valid, return error. + // If KubernetesVersion is nil or not valid, return error. 
k8sVersion := c.GetKubernetesVersion() if k8sVersion == "" { - return errEmptyArg(fmt.Sprintf("Variables[%s]", kubernetesVersion)) + return errEmptyArg(fmt.Sprintf("Variables[%s]", KubernetesVersion)) } else if _, err := version.ParseSemantic(k8sVersion); err != nil { - return errInvalidArg("Variables[%s]=%q", kubernetesVersion, k8sVersion) + return errInvalidArg("Variables[%s]=%q", KubernetesVersion, k8sVersion) } + // If CniPath is nil, return error. + cniPath := c.GetCNIPath() + if cniPath == "" { + return errEmptyArg(fmt.Sprintf("Variables[%s]", CNIPath)) + } return nil } @@ -401,18 +409,23 @@ func (c *E2EConfig) GetInt64PtrVariable(varName string) *int64 { wCount, err := strconv.ParseInt(wCountStr, 10, 64) Expect(err).NotTo(HaveOccurred()) - return Int64Ptr(wCount) -} - -func Int64Ptr(n int64) *int64 { - return &n + return pointer.Int64Ptr(wCount) } // GetKubernetesVersion returns the kubernetes version provided in e2e config. func (c *E2EConfig) GetKubernetesVersion() string { - version, ok := c.Variables[kubernetesVersion] + version, ok := c.Variables[KubernetesVersion] if !ok { return "" } return version } + +// GetCNIPath returns the CNI path provided in e2e config. +func (c *E2EConfig) GetCNIPath() string { + path, ok := c.Variables[CNIPath] + if !ok { + return "" + } + return path +} diff --git a/test/framework/controlpane_helpers.go b/test/framework/controlpane_helpers.go index 11aedc7a5024..537c504cdc54 100644 --- a/test/framework/controlpane_helpers.go +++ b/test/framework/controlpane_helpers.go @@ -23,9 +23,12 @@ import ( . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" + "sigs.k8s.io/cluster-api/bootstrap/kubeadm/types/v1beta1" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha3" + "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -226,3 +229,140 @@ func controlPlaneMachineOptions() []client.ListOption { client.HasLabels{clusterv1.MachineControlPlaneLabelName}, } } + +// DiscoveryAndWaitForControlPlaneInitializedInput is the input type for DiscoveryAndWaitForControlPlaneInitialized. +type DiscoveryAndWaitForControlPlaneInitializedInput struct { + Lister Lister + Cluster *clusterv1.Cluster +} + +// DiscoveryAndWaitForControlPlaneInitialized discovers the KubeadmControlPlane object attached to a cluster and waits for it to be initialized. +func DiscoveryAndWaitForControlPlaneInitialized(ctx context.Context, input DiscoveryAndWaitForControlPlaneInitializedInput, intervals ...interface{}) *controlplanev1.KubeadmControlPlane { + Expect(ctx).NotTo(BeNil(), "ctx is required for DiscoveryAndWaitForControlPlaneInitialized") + Expect(input.Lister).ToNot(BeNil(), "Invalid argument. input.Lister can't be nil when calling DiscoveryAndWaitForControlPlaneInitialized") + Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. 
input.Cluster can't be nil when calling DiscoveryAndWaitForControlPlaneInitialized") + + controlPlane := GetKubeadmControlPlaneByCluster(ctx, GetKubeadmControlPlaneByClusterInput{ + Lister: input.Lister, + ClusterName: input.Cluster.Name, + Namespace: input.Cluster.Namespace, + }) + Expect(controlPlane).ToNot(BeNil()) + + fmt.Fprintf(GinkgoWriter, "Waiting for the first control plane machine managed by %s/%s to be provisioned\n", controlPlane.Namespace, controlPlane.Name) + WaitForOneKubeadmControlPlaneMachineToExist(ctx, WaitForOneKubeadmControlPlaneMachineToExistInput{ + Lister: input.Lister, + Cluster: input.Cluster, + ControlPlane: controlPlane, + }, intervals...) + + return controlPlane +} + +// WaitForControlPlaneAndMachinesReadyInput is the input type for WaitForControlPlaneAndMachinesReady. +type WaitForControlPlaneAndMachinesReadyInput struct { + GetLister GetLister + Cluster *clusterv1.Cluster + ControlPlane *controlplanev1.KubeadmControlPlane +} + +// WaitForControlPlaneAndMachinesReady waits for a KubeadmControlPlane object to be ready (all the machine provisioned and one node ready). +func WaitForControlPlaneAndMachinesReady(ctx context.Context, input WaitForControlPlaneAndMachinesReadyInput, intervals ...interface{}) { + Expect(ctx).NotTo(BeNil(), "ctx is required for WaitForControlPlaneReady") + Expect(input.GetLister).ToNot(BeNil(), "Invalid argument. input.GetLister can't be nil when calling WaitForControlPlaneReady") + Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling WaitForControlPlaneReady") + Expect(input.ControlPlane).ToNot(BeNil(), "Invalid argument. 
input.ControlPlane can't be nil when calling WaitForControlPlaneReady") + + if input.ControlPlane.Spec.Replicas != nil && int(*input.ControlPlane.Spec.Replicas) > 1 { + fmt.Fprintf(GinkgoWriter, "Waiting for the remaining control plane machines managed by %s/%s to be provisioned\n", input.ControlPlane.Namespace, input.ControlPlane.Name) + WaitForKubeadmControlPlaneMachinesToExist(ctx, WaitForKubeadmControlPlaneMachinesToExistInput{ + Lister: input.GetLister, + Cluster: input.Cluster, + ControlPlane: input.ControlPlane, + }, intervals...) + } + + fmt.Fprintf(GinkgoWriter, "Waiting for control plane %s/%s to be ready (implies underlying nodes to be ready as well)\n", input.ControlPlane.Namespace, input.ControlPlane.Name) + waitForControlPlaneToBeReadyInput := WaitForControlPlaneToBeReadyInput{ + Getter: input.GetLister, + ControlPlane: input.ControlPlane, + } + WaitForControlPlaneToBeReady(ctx, waitForControlPlaneToBeReadyInput, intervals...) +} + +// UpgradeControlPlaneAndWaitForUpgradeInput is the input type for UpgradeControlPlaneAndWaitForUpgrade. +type UpgradeControlPlaneAndWaitForUpgradeInput struct { + ClusterProxy ClusterProxy + Cluster *clusterv1.Cluster + ControlPlane *controlplanev1.KubeadmControlPlane + KubernetesUpgradeVersion string + EtcdImageTag string + DNSImageTag string + WaitForMachinesToBeUpgraded []interface{} + WaitForDNSUpgrade []interface{} + WaitForEtcdUpgrade []interface{} +} + +// UpgradeControlPlaneAndWaitForUpgrade upgrades a KubeadmControlPlane and waits for it to be upgraded. +func UpgradeControlPlaneAndWaitForUpgrade(ctx context.Context, input UpgradeControlPlaneAndWaitForUpgradeInput) { + Expect(ctx).NotTo(BeNil(), "ctx is required for UpgradeControlPlaneAndWaitForUpgrade") + Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling UpgradeControlPlaneAndWaitForUpgrade") + Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. 
input.Cluster can't be nil when calling UpgradeControlPlaneAndWaitForUpgrade") + Expect(input.ControlPlane).ToNot(BeNil(), "Invalid argument. input.ControlPlane can't be nil when calling UpgradeControlPlaneAndWaitForUpgrade") + Expect(input.KubernetesUpgradeVersion).ToNot(BeNil(), "Invalid argument. input.KubernetesUpgradeVersion can't be empty when calling UpgradeControlPlaneAndWaitForUpgrade") + Expect(input.EtcdImageTag).ToNot(BeNil(), "Invalid argument. input.EtcdImageTag can't be empty when calling UpgradeControlPlaneAndWaitForUpgrade") + Expect(input.DNSImageTag).ToNot(BeNil(), "Invalid argument. input.DNSImageTag can't be empty when calling UpgradeControlPlaneAndWaitForUpgrade") + + mgmtClient := input.ClusterProxy.GetClient() + + fmt.Fprintf(GinkgoWriter, "Patching the new kubernetes version to KCP\n") + patchHelper, err := patch.NewHelper(input.ControlPlane, mgmtClient) + Expect(err).ToNot(HaveOccurred()) + + input.ControlPlane.Spec.Version = input.KubernetesUpgradeVersion + input.ControlPlane.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd = v1beta1.Etcd{ + Local: &v1beta1.LocalEtcd{ + ImageMeta: v1beta1.ImageMeta{ + ImageTag: input.EtcdImageTag, + }, + }, + } + input.ControlPlane.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS = v1beta1.DNS{ + ImageMeta: v1beta1.ImageMeta{ + ImageTag: input.DNSImageTag, + }, + } + + Expect(patchHelper.Patch(ctx, input.ControlPlane)).To(Succeed()) + + fmt.Fprintf(GinkgoWriter, "Waiting for machines to have the upgraded kubernetes version\n") + WaitForMachinesToBeUpgraded(ctx, WaitForMachinesToBeUpgradedInput{ + Lister: mgmtClient, + Cluster: input.Cluster, + MachineCount: int(*input.ControlPlane.Spec.Replicas), + KubernetesUpgradeVersion: input.KubernetesUpgradeVersion, + }, input.WaitForMachinesToBeUpgraded...) 
+ + fmt.Fprintf(GinkgoWriter, "Waiting for kube-proxy to have the upgraded kubernetes version\n") + workloadCluster := input.ClusterProxy.GetWorkloadCluster(context.TODO(), input.Cluster.Namespace, input.Cluster.Name) + workloadClient := workloadCluster.GetClient() + WaitForKubeProxyUpgrade(ctx, WaitForKubeProxyUpgradeInput{ + Getter: workloadClient, + KubernetesVersion: input.KubernetesUpgradeVersion, + }, input.WaitForDNSUpgrade...) + + fmt.Fprintf(GinkgoWriter, "Waiting for CoreDNS to have the upgraded image tag\n") + WaitForDNSUpgrade(ctx, WaitForDNSUpgradeInput{ + Getter: workloadClient, + DNSVersion: input.DNSImageTag, + }) + + fmt.Fprintf(GinkgoWriter, "Waiting for etcd to have the upgraded image tag\n") + lblSelector, err := labels.Parse("component=etcd") + Expect(err).ToNot(HaveOccurred()) + WaitForPodListCondition(ctx, WaitForPodListConditionInput{ + Lister: workloadClient, + ListOptions: &client.ListOptions{LabelSelector: lblSelector}, + Condition: EtcdImageTagCondition(input.EtcdImageTag, int(*input.ControlPlane.Spec.Replicas)), + }, input.WaitForEtcdUpgrade...) +} diff --git a/test/framework/deployment_helpers.go b/test/framework/deployment_helpers.go index fa03dddb702c..26cec38dbbf9 100644 --- a/test/framework/deployment_helpers.go +++ b/test/framework/deployment_helpers.go @@ -77,7 +77,7 @@ type WatchDeploymentLogsInput struct { // in a separate goroutine so they can all be streamed concurrently. This only causes a test failure if there are errors // retrieving the deployment, its pods, or setting up a log file. If there is an error with the log streaming itself, // that does not cause the test to fail. 
-func WatchDeploymentLogs(ctx context.Context, input WatchDeploymentLogsInput) error { +func WatchDeploymentLogs(ctx context.Context, input WatchDeploymentLogsInput) { Expect(ctx).NotTo(BeNil(), "ctx is required for WatchControllerLogs") Expect(input.ClientSet).NotTo(BeNil(), "input.ClientSet is required for WatchControllerLogs") Expect(input.Deployment).NotTo(BeNil(), "input.Name is required for WatchControllerLogs") @@ -131,7 +131,6 @@ func WatchDeploymentLogs(ctx context.Context, input WatchDeploymentLogsInput) er }(pod, container) } } - return nil } // WaitForDNSUpgradeInput is the input for WaitForDNSUpgrade. diff --git a/test/framework/machinedeployment_helpers.go b/test/framework/machinedeployment_helpers.go index 5e799c960d14..cd10dc1ab878 100644 --- a/test/framework/machinedeployment_helpers.go +++ b/test/framework/machinedeployment_helpers.go @@ -110,3 +110,30 @@ func WaitForMachineDeploymentNodesToExist(ctx context.Context, input WaitForMach return count, nil }, intervals...).Should(Equal(int(*input.MachineDeployment.Spec.Replicas))) } + +// DiscoveryAndWaitForMachineDeploymentsInput is the input type for DiscoveryAndWaitForMachineDeployments. +type DiscoveryAndWaitForMachineDeploymentsInput struct { + Lister Lister + Cluster *clusterv1.Cluster +} + +// DiscoveryAndWaitForMachineDeployments discovers the MachineDeployments existing in a cluster and waits for them to be ready (all the machine provisioned). +func DiscoveryAndWaitForMachineDeployments(ctx context.Context, input DiscoveryAndWaitForMachineDeploymentsInput, intervals ...interface{}) []*clusterv1.MachineDeployment { + Expect(ctx).NotTo(BeNil(), "ctx is required for DiscoveryAndWaitForMachineDeployments") + Expect(input.Lister).ToNot(BeNil(), "Invalid argument. input.Lister can't be nil when calling DiscoveryAndWaitForMachineDeployments") + Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. 
input.Cluster can't be nil when calling DiscoveryAndWaitForMachineDeployments") + + machineDeployments := GetMachineDeploymentsByCluster(ctx, GetMachineDeploymentsByClusterInput{ + Lister: input.Lister, + ClusterName: input.Cluster.Name, + Namespace: input.Cluster.Namespace, + }) + for _, deployment := range machineDeployments { + WaitForMachineDeploymentNodesToExist(ctx, WaitForMachineDeploymentNodesToExistInput{ + Lister: input.Lister, + Cluster: input.Cluster, + MachineDeployment: deployment, + }, intervals...) + } + return machineDeployments +} diff --git a/test/framework/namespace_helpers.go b/test/framework/namespace_helpers.go index d1011a1d1eab..18c389ce88b2 100644 --- a/test/framework/namespace_helpers.go +++ b/test/framework/namespace_helpers.go @@ -161,3 +161,35 @@ func WatchNamespaceEvents(ctx context.Context, input WatchNamespaceEventsInput) <-ctx.Done() stopInformer <- struct{}{} } + +// CreateNamespaceAndWatchEventsInput is the input type for CreateNamespaceAndWatchEvents. +type CreateNamespaceAndWatchEventsInput struct { + Creator Creator + ClientSet *kubernetes.Clientset + Name string + LogFolder string +} + +// CreateNamespaceAndWatchEvents creates a namespace and setups a watch for the namespace events. +func CreateNamespaceAndWatchEvents(ctx context.Context, input CreateNamespaceAndWatchEventsInput) (*corev1.Namespace, context.CancelFunc) { + Expect(ctx).NotTo(BeNil(), "ctx is required for CreateNamespaceAndWatchEvents") + Expect(input.Creator).ToNot(BeNil(), "Invalid argument. input.Creator can't be nil when calling CreateNamespaceAndWatchEvents") + Expect(input.ClientSet).ToNot(BeNil(), "Invalid argument. input.ClientSet can't be nil when calling ClientSet") + Expect(input.Name).ToNot(BeEmpty(), "Invalid argument. input.Name can't be empty when calling ClientSet") + Expect(os.MkdirAll(input.LogFolder, 0755)).To(Succeed(), "Invalid argument. 
input.LogFolder can't be created in CreateNamespaceAndWatchEvents") + + namespace := CreateNamespace(ctx, CreateNamespaceInput{Creator: input.Creator, Name: input.Name}, "40s", "10s") + Expect(namespace).ToNot(BeNil(), "Failed to create namespace %q", input.Name) + + fmt.Fprintf(GinkgoWriter, "Creating event watcher for namespace %q\n", input.Name) + watchesCtx, cancelWatches := context.WithCancel(ctx) + go func() { + defer GinkgoRecover() + WatchNamespaceEvents(watchesCtx, WatchNamespaceEventsInput{ + ClientSet: input.ClientSet, + Name: namespace.Name, + LogFolder: input.LogFolder, + }) + }() + return namespace, cancelWatches +} diff --git a/test/infrastructure/docker/go.sum b/test/infrastructure/docker/go.sum index d2dec9d315c5..58bd7ad6f471 100644 --- a/test/infrastructure/docker/go.sum +++ b/test/infrastructure/docker/go.sum @@ -211,6 +211,7 @@ github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.8 h1:CGgOkSJeqMRmt0D9XLWExdT4m4F1vd3FV3VPt+0VxkQ= github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= @@ -473,6 +474,7 @@ golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 
h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=