Skip to content

Commit

Permalink
Generate the docker infrastructure-components.yaml using kustomize.
Browse files Browse the repository at this point in the history
Added a README.md with instructions to run tests.
Minor changes to delete and move tests based on feedback.
  • Loading branch information
Arvinderpal committed Feb 12, 2020
1 parent 5df11f0 commit 6a69ce8
Show file tree
Hide file tree
Showing 6 changed files with 79 additions and 572 deletions.
26 changes: 26 additions & 0 deletions cmd/clusterctl/test/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
# Running the tests

NOTE: the e2e tests may not work on a Mac; they should however work just fine on any Linux distro. Mac support will be added in a follow-up PR.

Currently, overrides are needed for the cluster-api, kubeadm-bootstrap and kubeadm-control-plane providers. The override for the infra provider docker must be removed as it is generated locally by the e2e test script:

cmd/clusterctl/hack/local-overrides.py
rm -rf $HOME/.cluster-api/overrides/docker

The entire test suite can be run using the script:

./run-e2e.sh

To run specific tests, use the `GINKGO_FOCUS` environment variable:

GINKGO_FOCUS="clusterctl create cluster" ./run-e2e.sh

## Skip local build of CAPD

By default, a local capd image will be built and loaded into kind. This can be skipped as so:

SKIP_DOCKER_BUILD=1 ./run-e2e.sh

You can also specify a pre-built image and skip the build:

SKIP_DOCKER_BUILD=1 MANAGER_IMAGE=gcr.io/my-project-name/docker-provider-manager-amd64:dev ./run-e2e.sh
36 changes: 15 additions & 21 deletions cmd/clusterctl/test/e2e/delete_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -82,15 +82,14 @@ var _ = Describe("clusterctl delete", func() {
})
It("should delete of all infra provider components except the hosting namespace and the CRDs.", func() {
Eventually(
func() error {
err := mgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "capd-system", Name: "capd-controller-manager"}, &appsv1.Deployment{})
if err == nil || !apierrors.IsNotFound(err) {
// deployment still exists or some other error other than not found occurred.
return fmt.Errorf("%v", err)
func() bool {
if !apierrors.IsNotFound(mgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "capd-system", Name: "capd-controller-manager"}, &appsv1.Deployment{})) {
return false
}
return nil
// TODO: check that namespace and CRD are still present.
return true
}, 3*time.Minute, 5*time.Second,
).ShouldNot(HaveOccurred())
).Should(BeTrue())
})
})
Context("deletes everything", func() {
Expand All @@ -104,28 +103,23 @@ var _ = Describe("clusterctl delete", func() {
})
It("should reset the management cluster to its original state", func() {
Eventually(
func() error {
func() bool {
// TODO: check all components are deleted.
err := mgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "capd-system", Name: "capd-controller-manager"}, &appsv1.Deployment{})
if err == nil || !apierrors.IsNotFound(err) {
return fmt.Errorf("%v", err)
if !apierrors.IsNotFound(mgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "capd-system", Name: "capd-controller-manager"}, &appsv1.Deployment{})) {
return false
}
// TODO: check namespace of all components are deleted.
err = mgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Name: "capd-system"}, &corev1.Namespace{})
if err == nil || !apierrors.IsNotFound(err) {
return fmt.Errorf("%v", err)
if !apierrors.IsNotFound(mgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Name: "capd-system"}, &corev1.Namespace{})) {
return false
}
// TODO: check that all CRDs are deleted.
err = mgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: "foo-cluster"}, &infrav1.DockerCluster{})
if err == nil {
return fmt.Errorf("%v", err)
}
err := mgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: "foo-cluster"}, &infrav1.DockerCluster{})
if _, ok := err.(*meta.NoResourceMatchError); !ok {
return err
return false
}
return nil
return true
}, 3*time.Minute, 5*time.Second,
).ShouldNot(HaveOccurred())
).Should(BeTrue())
})
})
})
3 changes: 3 additions & 0 deletions cmd/clusterctl/test/e2e/e2e_suite_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,10 @@ var _ = BeforeSuite(func() {
// Docker image to load into the kind cluster for testing
managerImage = os.Getenv("MANAGER_IMAGE")
if managerImage == "" {
fmt.Fprintf(GinkgoWriter, "MANAGER_IMAGE not specified, using default %v\n", "gcr.io/k8s-staging-capi-docker/capd-manager-amd64:dev")
managerImage = "gcr.io/k8s-staging-capi-docker/capd-manager-amd64:dev"
} else {
fmt.Fprintf(GinkgoWriter, "Using MANAGER_IMAGE %v\n", managerImage)
}
kindConfigFile = os.Getenv("KIND_CONFIG_FILE")
if kindConfigFile == "" {
Expand Down
31 changes: 13 additions & 18 deletions cmd/clusterctl/test/e2e/move_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -142,30 +142,25 @@ var _ = Describe("clusterctl move", func() {
).Should(BeTrue())
// Should delete all Cluster API objects from the previous management cluster.
Eventually(
func() error {
err := fromMgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: workloadInfo.workloadClusterName}, &clusterv1.Cluster{})
if err == nil || !apierrors.IsNotFound(err) {
return fmt.Errorf("%v", err)
func() bool {
if !apierrors.IsNotFound(fromMgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: workloadInfo.workloadClusterName}, &clusterv1.Cluster{})) {
return false
}
err = fromMgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: "controlplane-0"}, &clusterv1.Machine{})
if err == nil || !apierrors.IsNotFound(err) {
return fmt.Errorf("%v", err)
if !apierrors.IsNotFound(fromMgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: "controlplane-0"}, &clusterv1.Machine{})) {
return false
}
err = fromMgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: "controlplane-0-config"}, &bootstrapv1.KubeadmConfig{})
if err == nil || !apierrors.IsNotFound(err) {
return fmt.Errorf("%v", err)
if !apierrors.IsNotFound(fromMgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: "controlplane-0-config"}, &bootstrapv1.KubeadmConfig{})) {
return false
}
err = fromMgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: workloadInfo.workloadClusterName}, &infrav1.DockerCluster{})
if err == nil || !apierrors.IsNotFound(err) {
return fmt.Errorf("%v", err)
if !apierrors.IsNotFound(fromMgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: workloadInfo.workloadClusterName}, &infrav1.DockerCluster{})) {
return false
}
err = fromMgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: "controlplane-0"}, &infrav1.DockerMachine{})
if err == nil || !apierrors.IsNotFound(err) {
return fmt.Errorf("%v", err)
if !apierrors.IsNotFound(fromMgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: "controlplane-0"}, &infrav1.DockerMachine{})) {
return false
}
return nil
return true
}, 3*time.Minute, 5*time.Second,
).ShouldNot(HaveOccurred())
).Should(BeTrue())
})
})
})
41 changes: 22 additions & 19 deletions cmd/clusterctl/test/run-e2e.sh
Original file line number Diff line number Diff line change
Expand Up @@ -18,26 +18,44 @@ set -o errexit
set -o nounset
set -o pipefail

export CAPD_VERSION=v0.3.0

REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/../../..
cd "${REPO_ROOT}" || exit 1
REPO_ROOT_ABS=${PWD}
ARTIFACTS="${ARTIFACTS:-${REPO_ROOT_ABS}/_artifacts}"
mkdir -p "$ARTIFACTS/logs/"

if [ -z "${SKIP_DOCKER_BUILD-}" ]; then
REGISTRY=gcr.io/"$(gcloud config get-value project)"
export REGISTRY
export DOCKER_MANAGER_IMAGE=docker-provider-manager
export TAG=dev
export ARCH=amd64
# This will load the capd image into the kind mgmt cluster.
export MANAGER_IMAGE=${REGISTRY}/${DOCKER_MANAGER_IMAGE}-${ARCH}:${TAG}
export PULL_POLICY=IfNotPresent
cd "${REPO_ROOT_ABS}/test/infrastructure/docker"
CONTROLLER_IMG=${REGISTRY}/${DOCKER_MANAGER_IMAGE} make docker-build
fi

cat <<EOF > "clusterctl-settings.json"
{
"providers": [ "cluster-api", "kubeadm-bootstrap", "kubeadm-control-plane", "docker"]
}
EOF

# Create a local filesystem repository for the docker provider and update clusterctl.yaml
mkdir -p "$ARTIFACTS/testdata/"
cp -r "${REPO_ROOT_ABS}/cmd/clusterctl/test/testdata" "$ARTIFACTS/"
export CLUSTERCTL_CONFIG="${ARTIFACTS}/testdata/clusterctl.yaml"
LOCAL_CAPD_REPO_PATH="${ARTIFACTS}/testdata/docker"
mkdir -p "${LOCAL_CAPD_REPO_PATH}"
cp -r "${REPO_ROOT_ABS}/cmd/clusterctl/test/testdata/docker/${CAPD_VERSION}" "${LOCAL_CAPD_REPO_PATH}"
# We build the infrastructure-components.yaml from the capd folder and put in local repo folder
kustomize build "${REPO_ROOT_ABS}/test/infrastructure/docker/config/default/" > "${LOCAL_CAPD_REPO_PATH}/${CAPD_VERSION}/infrastructure-components.yaml"
export CLUSTERCTL_CONFIG="${ARTIFACTS}/testdata/clusterctl.yaml"
cat <<EOF > "${CLUSTERCTL_CONFIG}"
providers:
- name: docker
url: ${ARTIFACTS}/testdata/docker/v0.3.0/infrastructure-components.yaml
url: ${LOCAL_CAPD_REPO_PATH}/${CAPD_VERSION}/infrastructure-components.yaml
type: InfrastructureProvider
DOCKER_SERVICE_DOMAIN: "cluster.local"
Expand All @@ -58,21 +76,6 @@ nodes:
containerPath: /var/run/docker.sock
EOF

REGISTRY=gcr.io/"$(gcloud config get-value project)"
export REGISTRY
export DOCKER_MANAGER_IMAGE=docker-provider-manager
export TAG=dev
export ARCH=amd64

# This will load the capd image into the kind mgmt cluster.
export MANAGER_IMAGE=${REGISTRY}/${DOCKER_MANAGER_IMAGE}-${ARCH}:${TAG}
export PULL_POLICY=IfNotPresent

if [ -z "${SKIP_DOCKER_BUILD-}" ]; then
cd "${REPO_ROOT_ABS}/test/infrastructure/docker"
CONTROLLER_IMG=${REGISTRY}/${DOCKER_MANAGER_IMAGE} make docker-build
fi

GINKGO_FOCUS=${GINKGO_FOCUS:-""}

cd "${REPO_ROOT_ABS}/cmd/clusterctl/test/e2e"
Expand Down
Loading

0 comments on commit 6a69ce8

Please sign in to comment.