Adding e2e test cases

Amulyam24 committed Jan 12, 2022
1 parent 6ebefa3 commit f5009a7
Showing 5 changed files with 329 additions and 9 deletions.
21 changes: 15 additions & 6 deletions Makefile
@@ -27,7 +27,6 @@ ARTIFACTS ?= $(REPO_ROOT)/_artifacts
TOOLS_DIR := hack/tools
TOOLS_BIN_DIR := $(TOOLS_DIR)/bin
GO_INSTALL = ./scripts/go_install.sh
E2E_CONF_FILE ?= $(REPO_ROOT)/test/e2e/config/ibmcloud-e2e.yaml
E2E_CONF_FILE_ENVSUBST := $(REPO_ROOT)/test/e2e/config/ibmcloud-e2e-envsubst.yaml

GOLANGCI_LINT := $(TOOLS_BIN_DIR)/golangci-lint
@@ -135,6 +134,14 @@ generate-go: $(MOCKGEN)

images: docker-build

set-flavor:
ifeq ($(E2E_FLAVOR), vpc)
	$(eval E2E_CONF_FILE=$(REPO_ROOT)/test/e2e/config/ibmcloud-e2e-vpc.yaml)
else
	$(eval E2E_CONF_FILE=$(REPO_ROOT)/test/e2e/config/ibmcloud-e2e-powervs.yaml)
endif
	@echo "Setting e2e test flavour to ${E2E_CONF_FILE}"

## --------------------------------------
## Linting
## --------------------------------------
@@ -155,20 +162,22 @@ test: generate fmt vet manifests
GINKGO_FOCUS ?= Workload cluster creation
GINKGO_NODES ?= 3
GINKGO_NOCOLOR ?= false
E2E_FLAVOR ?= powervs
GINKGO_ARGS ?= -v -trace -progress -tags=e2e -focus=$(GINKGO_FOCUS) -nodes=$(GINKGO_NODES) --noColor=$(GINKGO_NOCOLOR)
ARTIFACTS ?= $(REPO_ROOT)/_artifacts
SKIP_CLEANUP ?= false
SKIP_CREATE_MGMT_CLUSTER ?= false

# Run the end-to-end tests
.PHONY: test-e2e
test-e2e: $(KUBECTL) $(GINKGO) $(ENVSUBST) e2e-image
test-e2e: $(KUBECTL) $(GINKGO) $(ENVSUBST) set-flavor e2e-image
	$(ENVSUBST) < $(E2E_CONF_FILE) > $(E2E_CONF_FILE_ENVSUBST)
	$(GINKGO) $(GINKGO_ARGS) ./test/e2e -- \
		-e2e.artifacts-folder="$(ARTIFACTS)" \
		-e2e.config="$(E2E_CONF_FILE_ENVSUBST)" \
		-e2e.skip-resource-cleanup=$(SKIP_CLEANUP) \
		-e2e.use-existing-cluster=$(SKIP_CREATE_MGMT_CLUSTER)
		-e2e.artifacts-folder="$(ARTIFACTS)" \
		-e2e.config="$(E2E_CONF_FILE_ENVSUBST)" \
		-e2e.skip-resource-cleanup=$(SKIP_CLEANUP) \
		-e2e.use-existing-cluster=$(SKIP_CREATE_MGMT_CLUSTER) \
		-e2e.flavor="$(E2E_FLAVOR)"
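
Note the ordering here: set-flavor runs before the e2e image is built, envsubst then renders the chosen config into $(E2E_CONF_FILE_ENVSUBST) using values from the caller's environment, and that rendered copy is what Ginkgo receives via -e2e.config. A hedged example of a PowerVS invocation; the exported values are placeholders, not real resource names:

    export IBMPOWERVS_SSHKEY_NAME="my-ssh-key"             # hypothetical key name
    export IBMPOWERVS_SERVICE_INSTANCE_ID="<instance-id>"  # hypothetical instance
    export IBMPOWERVS_IMAGE_NAME="<capi-image-name>"       # hypothetical image
    make test-e2e E2E_FLAVOR=powervs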

## --------------------------------------
## Docker
68 changes: 68 additions & 0 deletions test/e2e/config/ibmcloud-e2e-powervs.yaml
@@ -0,0 +1,68 @@
managementClusterName: capi-ibmcloud-e2e

images:
  # Use local built images for e2e tests
  - name: gcr.io/k8s-staging-capi-ibmcloud/cluster-api-ibmcloud-controller:e2e
    loadBehavior: mustLoad

providers:
  - name: cluster-api
    type: CoreProvider
    versions:
      - name: v1.0.2
        value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.0.2/core-components.yaml
        type: url
        files:
          - sourcePath: "${PWD}/test/e2e/data/shared/metadata.yaml"
  - name: kubeadm
    type: BootstrapProvider
    versions:
      - name: v1.0.2
        value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.0.2/bootstrap-components.yaml
        type: url
        files:
          - sourcePath: "${PWD}/test/e2e/data/shared/metadata.yaml"
  - name: kubeadm
    type: ControlPlaneProvider
    versions:
      - name: v1.0.2
        value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.0.2/control-plane-components.yaml
        type: url
        files:
          - sourcePath: "${PWD}/test/e2e/data/shared/metadata.yaml"
  - name: ibmcloud
    type: InfrastructureProvider
    versions:
      - name: v0.2.0
        value: "${PWD}/config/default"
        files:
          - sourcePath: "${PWD}/metadata.yaml"
            targetName: "metadata.yaml"
          - sourcePath: "${PWD}/templates/cluster-template-powervs.yaml"
            targetName: "cluster-template-powervs.yaml"

variables:
  KUBERNETES_VERSION: "${KUBERNETES_VERSION:-v1.22.4}"
  # Cluster Addons
  CNI: "${PWD}/test/e2e/data/cni/calico/calico.yaml"
  IP_FAMILY: "IPv4"
  # Following variables should be set based on the flavour being tested
  IBMPOWERVS_SSHKEY_NAME: "${IBMPOWERVS_SSHKEY_NAME:-}"
  IBMPOWERVS_VIP: "${IBMPOWERVS_VIP:-}"
  IBMPOWERVS_VIP_EXTERNAL: "${IBMPOWERVS_VIP_EXTERNAL:-}"
  IBMPOWERVS_VIP_CIDR: "${IBMPOWERVS_VIP_CIDR:-}"
  IBMPOWERVS_IMAGE_NAME: "${IBMPOWERVS_IMAGE_NAME:-}"
  IBMPOWERVS_SERVICE_INSTANCE_ID: "${IBMPOWERVS_SERVICE_INSTANCE_ID:-}"
  IBMPOWERVS_NETWORK_NAME: "${IBMPOWERVS_NETWORK_NAME:-}"

intervals:
  default/wait-controllers: ["3m", "10s"]
  default/wait-cluster: ["20m", "10s"]
  default/wait-control-plane: ["30m", "10s"]
  default/wait-worker-nodes: ["30m", "10s"]
  default/wait-delete-cluster: ["20m", "10s"]
  default/wait-machine-upgrade: ["50m", "10s"]
  default/wait-machine-remediation: ["30m", "10s"]
  default/wait-deployment: ["5m", "10s"]
  default/wait-job: ["5m", "10s"]
  default/wait-service: ["3m", "10s"]
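
Two conventions in this config are worth noting. First, the "${VAR:-}" placeholders assume the envsubst tool installed via hack/tools understands bash-style defaults (plain GNU envsubst does not): an unset variable renders as an empty string instead of a literal placeholder, so the provider-specific values above must be exported before the suite runs. Second, each intervals entry follows the Cluster API e2e framework's ["<timeout>", "<polling interval>"] convention; for example, default/wait-control-plane: ["30m", "10s"] waits up to 30 minutes for the control plane to come up, re-checking every 10 seconds.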
test/e2e/config/ibmcloud-e2e-vpc.yaml
@@ -38,8 +38,6 @@ providers:
        files:
          - sourcePath: "${PWD}/metadata.yaml"
            targetName: "metadata.yaml"
          - sourcePath: "${PWD}/templates/cluster-template-powervs.yaml"
            targetName: "cluster-template-powervs.yaml"
          - sourcePath: "${PWD}/templates/cluster-template.yaml"
            targetName: "cluster-template-vpc.yaml"

@@ -48,7 +46,15 @@ variables:
  # Cluster Addons
  CNI: "${PWD}/test/e2e/data/cni/calico/calico.yaml"
  IP_FAMILY: "IPv4"

  # Following variables should be set based on the flavour being tested
  IBMVPC_REGION: "${IBMVPC_REGION:-}"
  IBMVPC_ZONE: "${IBMVPC_ZONE:-}"
  IBMVPC_RESOURCEGROUP: "${IBMVPC_RESOURCEGROUP:-}"
  IBMVPC_NAME: "${IBMVPC_NAME:-}"
  IBMVPC_IMAGE_ID: "${IBMVPC_IMAGE_ID:-}"
  IBMVPC_PROFILE: "${IBMVPC_PROFILE:-}"
  IBMVPC_SSHKEY_ID: "${IBMVPC_SSHKEY_ID:-}"

intervals:
  default/wait-controllers: ["3m", "10s"]
  default/wait-cluster: ["20m", "10s"]
231 changes: 231 additions & 0 deletions test/e2e/e2e_test.go
@@ -0,0 +1,231 @@
//go:build e2e
// +build e2e

/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
	"context"
	"fmt"
	"os"
	"path/filepath"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/utils/pointer"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"

	capi_e2e "sigs.k8s.io/cluster-api/test/e2e"
	"sigs.k8s.io/cluster-api/test/framework"
	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
	"sigs.k8s.io/cluster-api/util"
)

var _ = Describe("Workload cluster creation", func() {
	var (
		ctx                 = context.TODO()
		specName            = "create-workload-cluster"
		namespace           *corev1.Namespace
		cancelWatches       context.CancelFunc
		result              *clusterctl.ApplyClusterTemplateAndWaitResult
		clusterName         string
		clusterctlLogFolder string
		cniPath             string
	)

	BeforeEach(func() {
		Expect(e2eConfig).ToNot(BeNil(), "Invalid argument. e2eConfig can't be nil when calling %s spec", specName)
		Expect(clusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. clusterctlConfigPath must be an existing file when calling %s spec", specName)
		Expect(bootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. bootstrapClusterProxy can't be nil when calling %s spec", specName)
		Expect(os.MkdirAll(artifactFolder, 0755)).To(Succeed(), "Invalid argument. artifactFolder can't be created for %s spec", specName)

		Expect(e2eConfig.Variables).To(HaveKey(KubernetesVersion))

		clusterName = fmt.Sprintf("capi-ibmcloud-e2e-%s", util.RandomString(6))

		// Set up a namespace to host objects for this spec and create a watcher for the namespace events.
		namespace, cancelWatches = setupSpecNamespace(ctx, specName, bootstrapClusterProxy, artifactFolder)

		result = new(clusterctl.ApplyClusterTemplateAndWaitResult)

		// We need to override the clusterctl apply log folder to avoid exposing our credentials.
		clusterctlLogFolder = filepath.Join(os.TempDir(), "clusters", bootstrapClusterProxy.GetName())

		// The path to the CNI file is defined in the config.
		Expect(e2eConfig.Variables).To(HaveKey(capi_e2e.CNIPath), "Missing %s variable in the config", capi_e2e.CNIPath)
		cniPath = e2eConfig.GetVariable(capi_e2e.CNIPath)
	})

	AfterEach(func() {
		cleanInput := cleanupInput{
			SpecName:        specName,
			Cluster:         result.Cluster,
			ClusterProxy:    bootstrapClusterProxy,
			Namespace:       namespace,
			CancelWatches:   cancelWatches,
			IntervalsGetter: e2eConfig.GetIntervals,
			SkipCleanup:     skipCleanup,
			ArtifactFolder:  artifactFolder,
		}

		dumpSpecResourcesAndCleanup(ctx, cleanInput)
	})

	Context("Creating a single control-plane cluster", func() {
		It("Should create a cluster with 1 worker node and can be scaled", func() {
			By("Initializing with 1 worker node")
			clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
				ClusterProxy: bootstrapClusterProxy,
				ConfigCluster: clusterctl.ConfigClusterInput{
					LogFolder:                clusterctlLogFolder,
					ClusterctlConfigPath:     clusterctlConfigPath,
					KubeconfigPath:           bootstrapClusterProxy.GetKubeconfigPath(),
					InfrastructureProvider:   clusterctl.DefaultInfrastructureProvider,
					Flavor:                   flavor,
					Namespace:                namespace.Name,
					ClusterName:              clusterName,
					KubernetesVersion:        e2eConfig.GetVariable(KubernetesVersion),
					ControlPlaneMachineCount: pointer.Int64Ptr(1),
					WorkerMachineCount:       pointer.Int64Ptr(1),
				},
				CNIManifestPath:              cniPath,
				WaitForClusterIntervals:      e2eConfig.GetIntervals(specName, "wait-cluster"),
				WaitForControlPlaneIntervals: e2eConfig.GetIntervals(specName, "wait-control-plane"),
				WaitForMachineDeployments:    e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
			}, result)

			By("Scaling worker node to 3")
			clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
				ClusterProxy: bootstrapClusterProxy,
				ConfigCluster: clusterctl.ConfigClusterInput{
					LogFolder:                clusterctlLogFolder,
					ClusterctlConfigPath:     clusterctlConfigPath,
					KubeconfigPath:           bootstrapClusterProxy.GetKubeconfigPath(),
					InfrastructureProvider:   clusterctl.DefaultInfrastructureProvider,
					Flavor:                   flavor,
					Namespace:                namespace.Name,
					ClusterName:              clusterName,
					KubernetesVersion:        e2eConfig.GetVariable(KubernetesVersion),
					ControlPlaneMachineCount: pointer.Int64Ptr(1),
					WorkerMachineCount:       pointer.Int64Ptr(3),
				},
				CNIManifestPath:              cniPath,
				WaitForClusterIntervals:      e2eConfig.GetIntervals(specName, "wait-cluster"),
				WaitForControlPlaneIntervals: e2eConfig.GetIntervals(specName, "wait-control-plane"),
				WaitForMachineDeployments:    e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
			}, result)
		})
	})

Context("Creating a highly available control-plane cluster", func() {
It("Should create a cluster with 3 control-plane nodes and 1 worker node", func() {
By("Creating a high available cluster")
clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
ClusterProxy: bootstrapClusterProxy,
ConfigCluster: clusterctl.ConfigClusterInput{
LogFolder: clusterctlLogFolder,
ClusterctlConfigPath: clusterctlConfigPath,
KubeconfigPath: bootstrapClusterProxy.GetKubeconfigPath(),
InfrastructureProvider: clusterctl.DefaultInfrastructureProvider,
Flavor: flavor,
Namespace: namespace.Name,
ClusterName: clusterName,
KubernetesVersion: e2eConfig.GetVariable(KubernetesVersion),
ControlPlaneMachineCount: pointer.Int64Ptr(3),
WorkerMachineCount: pointer.Int64Ptr(1),
},
CNIManifestPath: cniPath,
WaitForClusterIntervals: e2eConfig.GetIntervals(specName, "wait-cluster"),
WaitForControlPlaneIntervals: e2eConfig.GetIntervals(specName, "wait-control-plane"),
WaitForMachineDeployments: e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
}, result)
})
})
})
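
The flavor value threaded into ConfigClusterInput above is the one the Makefile passes as -e2e.flavor: clusterctl uses it to pick the cluster-template-<flavor>.yaml registered in the e2e config, so powervs runs exercise the PowerVS template and vpc runs the VPC one. (The flag parsing itself lives in the suite setup file, which is not part of this diff.)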

func Byf(format string, a ...interface{}) {
	By(fmt.Sprintf(format, a...))
}

type cleanupInput struct {
	SpecName          string
	ClusterProxy      framework.ClusterProxy
	ArtifactFolder    string
	Namespace         *corev1.Namespace
	CancelWatches     context.CancelFunc
	Cluster           *clusterv1.Cluster
	IntervalsGetter   func(spec, key string) []interface{}
	SkipCleanup       bool
	AdditionalCleanup func()
}

func setupSpecNamespace(ctx context.Context, specName string, clusterProxy framework.ClusterProxy, artifactFolder string) (*corev1.Namespace, context.CancelFunc) {
	Byf("Creating a namespace for hosting the %q test spec", specName)
	namespace, cancelWatches := framework.CreateNamespaceAndWatchEvents(ctx, framework.CreateNamespaceAndWatchEventsInput{
		Creator:   clusterProxy.GetClient(),
		ClientSet: clusterProxy.GetClientSet(),
		Name:      fmt.Sprintf("%s-%s", specName, util.RandomString(6)),
		LogFolder: filepath.Join(artifactFolder, "clusters", clusterProxy.GetName()),
	})

	return namespace, cancelWatches
}

func dumpSpecResourcesAndCleanup(ctx context.Context, input cleanupInput) {
	defer func() {
		input.CancelWatches()
	}()

	if input.Cluster == nil {
		By("Unable to dump workload cluster logs as the cluster is nil")
	} else {
		Byf("Dumping logs from the %q workload cluster", input.Cluster.Name)
		input.ClusterProxy.CollectWorkloadClusterLogs(ctx, input.Cluster.Namespace, input.Cluster.Name, filepath.Join(input.ArtifactFolder, "clusters", input.Cluster.Name))
	}

	Byf("Dumping all the Cluster API resources in the %q namespace", input.Namespace.Name)
	// Dump all Cluster API related resources to artifacts before deleting them.
	framework.DumpAllResources(ctx, framework.DumpAllResourcesInput{
		Lister:    input.ClusterProxy.GetClient(),
		Namespace: input.Namespace.Name,
		LogPath:   filepath.Join(input.ArtifactFolder, "clusters", input.ClusterProxy.GetName(), "resources"),
	})

	if input.SkipCleanup {
		return
	}

	Byf("Deleting all clusters in the %s namespace", input.Namespace.Name)
	framework.DeleteAllClustersAndWait(ctx, framework.DeleteAllClustersAndWaitInput{
		Client:    input.ClusterProxy.GetClient(),
		Namespace: input.Namespace.Name,
	}, input.IntervalsGetter(input.SpecName, "wait-delete-cluster")...)

	Byf("Deleting namespace used for hosting the %q test spec", input.SpecName)
	framework.DeleteNamespace(ctx, framework.DeleteNamespaceInput{
		Deleter: input.ClusterProxy.GetClient(),
		Name:    input.Namespace.Name,
	})

	if input.AdditionalCleanup != nil {
		Byf("Running additional cleanup for the %q test spec", input.SpecName)
		input.AdditionalCleanup()
	}
}
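
The cleanup path honours the switches exposed by the Makefile, which helps when a run needs to be inspected or repeated. A usage sketch; the focus string is illustrative:

    # keep the workload cluster and dumped artifacts around for debugging
    make test-e2e SKIP_CLEANUP=true

    # reuse an existing management cluster and run only the HA spec
    make test-e2e SKIP_CREATE_MGMT_CLUSTER=true GINKGO_FOCUS="highly available"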