diff --git a/cmd/clusterctl/test/e2e/config_cluster_test.go b/cmd/clusterctl/test/e2e/config_cluster_test.go index dac1cac5c16c..2e410dccff7e 100644 --- a/cmd/clusterctl/test/e2e/config_cluster_test.go +++ b/cmd/clusterctl/test/e2e/config_cluster_test.go @@ -28,100 +28,59 @@ import ( "github.com/pkg/errors" v1 "k8s.io/api/core/v1" - clusterctlclient "sigs.k8s.io/cluster-api/cmd/clusterctl/pkg/client" - clusterctlrepo "sigs.k8s.io/cluster-api/cmd/clusterctl/pkg/client/repository" - "sigs.k8s.io/cluster-api/test/framework" - "sigs.k8s.io/cluster-api/test/framework/management/kind" - "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/kind/pkg/cluster" ) var _ = Describe("clusterctl create cluster", func() { var ( - cfgFile string // default is $HOME/clusterctl.yaml - mgmtCluster *kind.Cluster - mgmtClient client.Client - workloadClusterName string - kubernetesVersion string - coreProvider string - bootstrapProviders []string - controlPlaneProviders []string - infrastructureProviders []string - tmpDir string - template clusterctlrepo.Template + mgmtInfo testMgmtClusterInfo + workloadInfo testWorkloadClusterInfo ) BeforeEach(func() { - workloadClusterName = "e2e-workload-cluster" - kubernetesVersion = "1.14.2" - coreProvider = "cluster-api:v0.3.0" - bootstrapProviders = []string{"kubeadm-bootstrap:v0.3.0"} - controlPlaneProviders = []string{"kubeadm-control-plane:v0.3.0"} - infrastructureProviders = []string{"docker:v0.3.0"} var err error - + // Mgmt cluster info object + mgmtInfo = testMgmtClusterInfo{ + clusterctlConfigFile: clusterctlConfigFile, + coreProvider: "cluster-api:v0.3.0", + bootstrapProviders: []string{"kubeadm-bootstrap:v0.3.0"}, + controlPlaneProviders: []string{"kubeadm-control-plane:v0.3.0"}, + infrastructureProviders: []string{"docker:v0.3.0"}, + } // Create the mgmt cluster and client - mgmtCluster, err = CreateKindCluster(kindConfigFile) + mgmtInfo.mgmtCluster, err = CreateKindCluster(kindConfigFile) Expect(err).ToNot(HaveOccurred()) - mgmtClient, err = mgmtCluster.GetClient() + mgmtInfo.mgmtClient, err = mgmtInfo.mgmtCluster.GetClient() Expect(err).NotTo(HaveOccurred()) - // Let's setup some varibles for the workload cluster template - os.Setenv("DOCKER_SERVICE_CIDRS", "\"10.96.0.0/12\"") - os.Setenv("DOCKER_POD_CIDRS", "\"192.168.0.0/16\"") - // Create clusterctl.yaml - tmpDir = createTempDir() - cfgFile = createLocalTestClusterCtlConfig(tmpDir, "clusterctl.yaml", "DOCKER_SERVICE_DOMAIN: \"docker.cluster.local\"") - - c, err := clusterctlclient.New(cfgFile) - Expect(err).ToNot(HaveOccurred()) - initOpt := clusterctlclient.InitOptions{ - Kubeconfig: mgmtCluster.KubeconfigPath, - CoreProvider: coreProvider, - BootstrapProviders: bootstrapProviders, - ControlPlaneProviders: controlPlaneProviders, - InfrastructureProviders: infrastructureProviders, - } - _, err = c.Init(initOpt) - Expect(err).ToNot(HaveOccurred()) - framework.WaitForAPIServiceAvailable(ctx, mgmtCluster, "v1beta1.webhook.cert-manager.io") - CheckAndWaitCAPITestDeploymentsExist(mgmtClient) + initTestMgmtCluster(ctx, mgmtInfo) - options := clusterctlclient.GetClusterTemplateOptions{ - Kubeconfig: mgmtCluster.KubeconfigPath, - InfrastructureProvider: infrastructureProviders[0], - ClusterName: workloadClusterName, - Flavor: "", - // TargetNamespace: targetNamespace, - KubernetesVersion: kubernetesVersion, - ControlPlaneMachineCount: 1, - WorkerMachineCount: 0, + // Workload cluster info object + workloadInfo = testWorkloadClusterInfo{ + workloadClusterName: "e2e-workload-cluster", + kubernetesVersion: 
"1.14.2", + controlPlaneMachineCount: 1, + workerMachineCount: 0, } - - template, err = c.GetClusterTemplate(options) - Expect(err).ToNot(HaveOccurred()) - yaml, err := template.Yaml() - Expect(err).ToNot(HaveOccurred()) - // Create our workload cluster - err = mgmtCluster.Apply(ctx, yaml) - Expect(err).ToNot(HaveOccurred()) - + // Let's setup some varibles for the workload cluster template + Expect(os.Setenv("DOCKER_SERVICE_CIDRS", "\"10.96.0.0/12\"")).To(Succeed()) + Expect(os.Setenv("DOCKER_POD_CIDRS", "\"192.168.0.0/16\"")).To(Succeed()) + createTestWorkloadCluster(ctx, mgmtInfo, workloadInfo) }) AfterEach(func() { fmt.Fprintf(GinkgoWriter, "Tearing down kind mgmt cluster\n") - mgmtCluster.Teardown(ctx) + mgmtInfo.mgmtCluster.Teardown(ctx) fmt.Fprintf(GinkgoWriter, "Tearing down kind workload cluster\n") - if err := cluster.NewProvider().Delete(workloadClusterName, ""); err != nil { + if err := cluster.NewProvider().Delete(workloadInfo.workloadClusterName, ""); err != nil { // Treat this as a non critical error - fmt.Fprintf(GinkgoWriter, "Deleting the kind cluster %q failed. You may need to remove this by hand.\n", workloadClusterName) + fmt.Fprintf(GinkgoWriter, "Deleting the kind cluster %q failed. You may need to remove this by hand.\n", workloadInfo.workloadClusterName) } - os.RemoveAll(tmpDir) }) Context("using specific core, control-plane, bootstrap, and capd provider version", func() { It("should create a workload cluster", func() { Eventually(func() ([]v1.Node, error) { - workloadClient, err := mgmtCluster.GetWorkloadClient(ctx, "default", workloadClusterName) + workloadClient, err := mgmtInfo.mgmtCluster.GetWorkloadClient(ctx, "default", workloadInfo.workloadClusterName) if err != nil { return nil, errors.Wrap(err, "failed to get workload client") } diff --git a/cmd/clusterctl/test/e2e/constant.go b/cmd/clusterctl/test/e2e/constant.go deleted file mode 100644 index c26908ff6214..000000000000 --- a/cmd/clusterctl/test/e2e/constant.go +++ /dev/null @@ -1,89 +0,0 @@ -// +build e2e - -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package e2e - -const testClusterYAML = `apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 -kind: DockerCluster -metadata: - name: foo-cluster - namespace: default ---- -apiVersion: cluster.x-k8s.io/v1alpha3 -kind: Cluster -metadata: - name: foo-cluster - namespace: default -spec: - clusterNetwork: - services: - cidrBlocks: ["10.96.0.0/12"] - pods: - cidrBlocks: ["192.168.0.0/16"] - serviceDomain: docker.cluster.local - infrastructureRef: - apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 - kind: DockerCluster - name: foo-cluster - namespace: default ---- -apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 -kind: DockerMachine -metadata: - name: controlplane-0 - namespace: default ---- -apiVersion: cluster.x-k8s.io/v1alpha3 -kind: Machine -metadata: - labels: - cluster.x-k8s.io/cluster-name: foo-cluster - cluster.x-k8s.io/control-plane: "true" - name: controlplane-0 - namespace: default -spec: - version: "v1.14.2" - clusterName: foo-cluster - bootstrap: - configRef: - apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 - kind: KubeadmConfig - name: controlplane-0-config - namespace: default - infrastructureRef: - apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 - kind: DockerMachine - name: controlplane-0 - namespace: default ---- -apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 -kind: KubeadmConfig -metadata: - name: controlplane-0-config - namespace: default -spec: - clusterConfiguration: - controllerManager: - extraArgs: - enable-hostpath-provisioner: "true" - initConfiguration: - nodeRegistration: - kubeletExtraArgs: - eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% ---- -` diff --git a/cmd/clusterctl/test/e2e/delete_test.go b/cmd/clusterctl/test/e2e/delete_test.go index 67b89cccd2d0..ea1d902bc59b 100644 --- a/cmd/clusterctl/test/e2e/delete_test.go +++ b/cmd/clusterctl/test/e2e/delete_test.go @@ -26,76 +26,64 @@ import ( . 
"github.com/onsi/gomega" appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" - "sigs.k8s.io/cluster-api/test/framework" - "sigs.k8s.io/cluster-api/test/framework/management/kind" + "k8s.io/apimachinery/pkg/api/meta" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/kind/pkg/cluster" clusterctlclient "sigs.k8s.io/cluster-api/cmd/clusterctl/pkg/client" + infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1alpha3" ) var _ = Describe("clusterctl delete", func() { var ( - cfgFile string - mgmtCluster *kind.Cluster - mgmtClient client.Client + mgmtInfo testMgmtClusterInfo deleteOptions clusterctlclient.DeleteOptions - cctlClient clusterctlclient.Client ) BeforeEach(func() { var err error + // Mgmt cluster info object + mgmtInfo = testMgmtClusterInfo{ + clusterctlConfigFile: clusterctlConfigFile, + coreProvider: "cluster-api:v0.3.0", + bootstrapProviders: []string{"kubeadm-bootstrap:v0.3.0"}, + controlPlaneProviders: []string{"kubeadm-control-plane:v0.3.0"}, + infrastructureProviders: []string{"docker:v0.3.0"}, + } // Create the mgmt cluster and client - mgmtCluster, err = CreateKindCluster(kindConfigFile) + mgmtInfo.mgmtCluster, err = CreateKindCluster(kindConfigFile) Expect(err).ToNot(HaveOccurred()) - mgmtClient, err = mgmtCluster.GetClient() + mgmtInfo.mgmtClient, err = mgmtInfo.mgmtCluster.GetClient() Expect(err).NotTo(HaveOccurred()) - cctlClient, err = clusterctlclient.New(cfgFile) - Expect(err).ToNot(HaveOccurred()) - initOptmgmtCluster := clusterctlclient.InitOptions{ - Kubeconfig: mgmtCluster.KubeconfigPath, - CoreProvider: "cluster-api:v0.3.0", - BootstrapProviders: []string{"kubeadm-bootstrap:v0.3.0"}, - ControlPlaneProviders: []string{"kubeadm-control-plane:v0.3.0"}, - InfrastructureProviders: []string{"docker:v0.3.0"}, - } - _, err = cctlClient.Init(initOptmgmtCluster) - Expect(err).ToNot(HaveOccurred()) - framework.WaitForAPIServiceAvailable(ctx, mgmtCluster, "v1beta1.webhook.cert-manager.io") - CheckAndWaitCAPITestDeploymentsExist(mgmtClient) - // Create a capi Cluster and capd objects - err = mgmtCluster.Apply(ctx, []byte(testClusterYAML)) - Expect(err).ToNot(HaveOccurred()) + + initTestMgmtCluster(ctx, mgmtInfo) }, setupTimeout) JustBeforeEach(func() { - err := cctlClient.Delete(deleteOptions) + c, err := clusterctlclient.New(mgmtInfo.clusterctlConfigFile) + Expect(err).ToNot(HaveOccurred()) + err = c.Delete(deleteOptions) Expect(err).ToNot(HaveOccurred()) }) AfterEach(func() { fmt.Fprintf(GinkgoWriter, "Tearing down kind clusters\n") - mgmtCluster.Teardown(ctx) - fmt.Fprintf(GinkgoWriter, "Tearing down kind workload cluster\n") - if err := cluster.NewProvider().Delete("foo-cluster", ""); err != nil { - // Treat this as a non critical error - fmt.Fprintf(GinkgoWriter, "Deleting the kind cluster \"foo-cluster\" failed. 
You may need to remove this by hand.\n") - } + mgmtInfo.mgmtCluster.Teardown(ctx) }) Context("deletes the infra provider", func() { BeforeEach(func() { deleteOptions = clusterctlclient.DeleteOptions{ - Kubeconfig: mgmtCluster.KubeconfigPath, + Kubeconfig: mgmtInfo.mgmtCluster.KubeconfigPath, Providers: []string{"docker"}, } }) It("should delete of all infra provider components except the hosting namespace and the CRDs.", func() { Eventually( func() error { - err := mgmtClient.Get(ctx, client.ObjectKey{Namespace: "capd-system", Name: "capd-controller-manager"}, &appsv1.Deployment{}) + err := mgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "capd-system", Name: "capd-controller-manager"}, &appsv1.Deployment{}) if err == nil || !apierrors.IsNotFound(err) { // deployment still exists or some other error other than not found occured. return fmt.Errorf("%v", err) @@ -105,4 +93,39 @@ var _ = Describe("clusterctl delete", func() { ).ShouldNot(HaveOccurred()) }) }) + Context("deletes everything", func() { + BeforeEach(func() { + deleteOptions = clusterctlclient.DeleteOptions{ + Kubeconfig: mgmtInfo.mgmtCluster.KubeconfigPath, + ForceDeleteNamespace: true, + ForceDeleteCRD: true, + Providers: []string{}, + } + }) + It("should reset the management cluster to its original state", func() { + Eventually( + func() error { + // TODO: check all components are deleted. + err := mgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "capd-system", Name: "capd-controller-manager"}, &appsv1.Deployment{}) + if err == nil || !apierrors.IsNotFound(err) { + return fmt.Errorf("%v", err) + } + // TODO: check namespace of all components are deleted. + err = mgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Name: "capd-system"}, &corev1.Namespace{}) + if err == nil || !apierrors.IsNotFound(err) { + return fmt.Errorf("%v", err) + } + // TODO: check that all CRDs are deleted. 
+ err = mgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: "foo-cluster"}, &infrav1.DockerCluster{}) + if err == nil { + return fmt.Errorf("%v", err) + } + if _, ok := err.(*meta.NoResourceMatchError); !ok { + return err + } + return nil + }, 3*time.Minute, 5*time.Second, + ).ShouldNot(HaveOccurred()) + }) + }) }) diff --git a/cmd/clusterctl/test/e2e/e2e_suite_test.go b/cmd/clusterctl/test/e2e/e2e_suite_test.go index 6af654f9148b..2e7b122cfe6f 100644 --- a/cmd/clusterctl/test/e2e/e2e_suite_test.go +++ b/cmd/clusterctl/test/e2e/e2e_suite_test.go @@ -38,9 +38,10 @@ const ( ) var ( - ctx context.Context - managerImage string - kindConfigFile string + ctx context.Context + managerImage string + kindConfigFile string + clusterctlConfigFile string ) var _ = BeforeSuite(func() { @@ -56,6 +57,12 @@ var _ = BeforeSuite(func() { } else { fmt.Fprintf(GinkgoWriter, "Using KIND_CONFIG_FILE: %v\n", kindConfigFile) } + clusterctlConfigFile = os.Getenv("CLUSTERCTL_CONFIG") + if clusterctlConfigFile == "" { + fmt.Fprintf(GinkgoWriter, "CLUSTERCTL_CONFIG not found.\n") + } else { + fmt.Fprintf(GinkgoWriter, "Using CLUSTERCTL_CONFIG: %v\n", clusterctlConfigFile) + } }, setupTimeout) var _ = AfterSuite(func() { diff --git a/cmd/clusterctl/test/e2e/helpers.go b/cmd/clusterctl/test/e2e/helpers.go index 8045116240db..0ca10e2645b9 100644 --- a/cmd/clusterctl/test/e2e/helpers.go +++ b/cmd/clusterctl/test/e2e/helpers.go @@ -31,6 +31,7 @@ import ( appsv1 "k8s.io/api/apps/v1" clientgoscheme "k8s.io/client-go/kubernetes/scheme" + clusterctlclient "sigs.k8s.io/cluster-api/cmd/clusterctl/pkg/client" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/management/kind" infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1alpha3" @@ -91,3 +92,59 @@ func createLocalTestClusterCtlConfig(tmpDir, path, msg string) string { Expect(err).NotTo(HaveOccurred()) return dst } + +type testMgmtClusterInfo struct { + mgmtCluster *kind.Cluster + mgmtClient client.Client + clusterctlConfigFile string + coreProvider string + bootstrapProviders []string + controlPlaneProviders []string + infrastructureProviders []string +} + +func initTestMgmtCluster(ctx context.Context, mgmtInfo testMgmtClusterInfo) { + var err error + c, err := clusterctlclient.New(mgmtInfo.clusterctlConfigFile) + Expect(err).ToNot(HaveOccurred()) + initOpt := clusterctlclient.InitOptions{ + Kubeconfig: mgmtInfo.mgmtCluster.KubeconfigPath, + CoreProvider: mgmtInfo.coreProvider, + BootstrapProviders: mgmtInfo.bootstrapProviders, + ControlPlaneProviders: mgmtInfo.controlPlaneProviders, + InfrastructureProviders: mgmtInfo.infrastructureProviders, + } + _, err = c.Init(initOpt) + Expect(err).ToNot(HaveOccurred()) + framework.WaitForAPIServiceAvailable(ctx, mgmtInfo.mgmtCluster, "v1beta1.webhook.cert-manager.io") + CheckAndWaitCAPITestDeploymentsExist(mgmtInfo.mgmtClient) +} + +type testWorkloadClusterInfo struct { + workloadClusterName string + kubernetesVersion string + controlPlaneMachineCount int + workerMachineCount int +} + +func createTestWorkloadCluster(ctx context.Context, mgmtInfo testMgmtClusterInfo, workloadInfo testWorkloadClusterInfo) { + var err error + c, err := clusterctlclient.New(mgmtInfo.clusterctlConfigFile) + Expect(err).ToNot(HaveOccurred()) + options := clusterctlclient.GetClusterTemplateOptions{ + Kubeconfig: mgmtInfo.mgmtCluster.KubeconfigPath, + InfrastructureProvider: mgmtInfo.infrastructureProviders[0], + ClusterName: workloadInfo.workloadClusterName, + Flavor: "", + 
KubernetesVersion: workloadInfo.kubernetesVersion, + ControlPlaneMachineCount: workloadInfo.controlPlaneMachineCount, + WorkerMachineCount: workloadInfo.workerMachineCount, + } + template, err := c.GetClusterTemplate(options) + Expect(err).ToNot(HaveOccurred()) + yaml, err := template.Yaml() + Expect(err).ToNot(HaveOccurred()) + // Create our workload cluster + err = mgmtInfo.mgmtCluster.Apply(ctx, yaml) + Expect(err).ToNot(HaveOccurred()) +} diff --git a/cmd/clusterctl/test/e2e/init_test.go b/cmd/clusterctl/test/e2e/init_test.go deleted file mode 100644 index ca31e1a7d99a..000000000000 --- a/cmd/clusterctl/test/e2e/init_test.go +++ /dev/null @@ -1,77 +0,0 @@ -// +build e2e - -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package e2e - -import ( - "fmt" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "sigs.k8s.io/cluster-api/test/framework" - "sigs.k8s.io/cluster-api/test/framework/management/kind" - "sigs.k8s.io/controller-runtime/pkg/client" - - clusterctlclient "sigs.k8s.io/cluster-api/cmd/clusterctl/pkg/client" -) - -var _ = Describe("clusterctl init", func() { - var ( - cfgFile string // default is $HOME/.clusterctl.yaml - mgmtCluster *kind.Cluster - mgmtClient client.Client - ) - - BeforeEach(func() { - var err error - // Create the mgmt cluster and client - mgmtCluster, err = CreateKindCluster(kindConfigFile) - Expect(err).ToNot(HaveOccurred()) - mgmtClient, err = mgmtCluster.GetClient() - Expect(err).NotTo(HaveOccurred()) - - c, err := clusterctlclient.New(cfgFile) - Expect(err).ToNot(HaveOccurred()) - initOpt := clusterctlclient.InitOptions{ - Kubeconfig: mgmtCluster.KubeconfigPath, - CoreProvider: "cluster-api:v0.3.0", - BootstrapProviders: []string{"kubeadm-bootstrap:v0.3.0"}, - ControlPlaneProviders: []string{"kubeadm-control-plane:v0.3.0"}, - InfrastructureProviders: []string{"docker:v0.3.0"}, - } - componentList, err := c.Init(initOpt) - Expect(err).ToNot(HaveOccurred()) - // TODO: remove below print statements - for _, components := range componentList { - fmt.Fprintf(GinkgoWriter, " - %s %s installed (%s)\n", components.Name(), components.Type(), components.Version()) - } - }, setupTimeout) - - AfterEach(func() { - fmt.Fprintf(GinkgoWriter, "Tearing down kind cluster\n") - mgmtCluster.Teardown(ctx) - }) - - Context("mgmt cluster", func() { - It("should install core, bootstrap and capd components and CRDs", func() { - framework.WaitForAPIServiceAvailable(ctx, mgmtCluster, "v1beta1.webhook.cert-manager.io") - CheckAndWaitCAPITestDeploymentsExist(mgmtClient) - }) - }) -}) diff --git a/cmd/clusterctl/test/e2e/move_test.go b/cmd/clusterctl/test/e2e/move_test.go index 8f652f4b8e12..85881ddf5290 100644 --- a/cmd/clusterctl/test/e2e/move_test.go +++ b/cmd/clusterctl/test/e2e/move_test.go @@ -26,8 +26,6 @@ import ( . 
"github.com/onsi/gomega" apierrors "k8s.io/apimachinery/pkg/api/errors" - "sigs.k8s.io/cluster-api/test/framework" - "sigs.k8s.io/cluster-api/test/framework/management/kind" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/kind/pkg/cluster" @@ -39,58 +37,58 @@ import ( var _ = Describe("clusterctl move", func() { var ( - cfgFile string - fromMgmtCluster *kind.Cluster - fromMgmtClient client.Client - toMgmtCluster *kind.Cluster - toMgmtClient client.Client + fromMgmtInfo testMgmtClusterInfo + toMgmtInfo testMgmtClusterInfo + workloadInfo testWorkloadClusterInfo ) BeforeEach(func() { var err error + // "from" mgmt cluster info object + fromMgmtInfo = testMgmtClusterInfo{ + clusterctlConfigFile: clusterctlConfigFile, + coreProvider: "cluster-api:v0.3.0", + bootstrapProviders: []string{"kubeadm-bootstrap:v0.3.0"}, + controlPlaneProviders: []string{"kubeadm-control-plane:v0.3.0"}, + infrastructureProviders: []string{"docker:v0.3.0"}, + } // Create the "from" mgmt cluster and client - fromMgmtCluster, err = CreateKindCluster(kindConfigFile) + fromMgmtInfo.mgmtCluster, err = CreateKindCluster(kindConfigFile) Expect(err).ToNot(HaveOccurred()) - fromMgmtClient, err = fromMgmtCluster.GetClient() + fromMgmtInfo.mgmtClient, err = fromMgmtInfo.mgmtCluster.GetClient() Expect(err).NotTo(HaveOccurred()) - c, err := clusterctlclient.New(cfgFile) - Expect(err).ToNot(HaveOccurred()) - initOptFromMgmtCluster := clusterctlclient.InitOptions{ - Kubeconfig: fromMgmtCluster.KubeconfigPath, - CoreProvider: "cluster-api:v0.3.0", - BootstrapProviders: []string{"kubeadm-bootstrap:v0.3.0"}, - ControlPlaneProviders: []string{"kubeadm-control-plane:v0.3.0"}, - InfrastructureProviders: []string{"docker:v0.3.0"}, + initTestMgmtCluster(ctx, fromMgmtInfo) + + // Create workload cluster on "from" mgmt cluster + workloadInfo = testWorkloadClusterInfo{ + workloadClusterName: "e2e-workload-cluster", + kubernetesVersion: "1.14.2", + controlPlaneMachineCount: 1, + workerMachineCount: 0, + } + createTestWorkloadCluster(ctx, fromMgmtInfo, workloadInfo) + + // "to" mgmt cluster info object + toMgmtInfo = testMgmtClusterInfo{ + clusterctlConfigFile: clusterctlConfigFile, + coreProvider: "cluster-api:v0.3.0", + bootstrapProviders: []string{"kubeadm-bootstrap:v0.3.0"}, + controlPlaneProviders: []string{"kubeadm-control-plane:v0.3.0"}, + infrastructureProviders: []string{"docker:v0.3.0"}, } - _, err = c.Init(initOptFromMgmtCluster) - Expect(err).ToNot(HaveOccurred()) - framework.WaitForAPIServiceAvailable(ctx, fromMgmtCluster, "v1beta1.webhook.cert-manager.io") - CheckAndWaitCAPITestDeploymentsExist(fromMgmtClient) - // Create a capi Cluster and capd DockerCluster objects - err = fromMgmtCluster.Apply(ctx, []byte(testClusterYAML)) - Expect(err).ToNot(HaveOccurred()) - // Create the "to" mgmt cluster and client - toMgmtCluster, err = CreateKindCluster(kindConfigFile) + toMgmtInfo.mgmtCluster, err = CreateKindCluster(kindConfigFile) Expect(err).ToNot(HaveOccurred()) - toMgmtClient, err = toMgmtCluster.GetClient() + toMgmtInfo.mgmtClient, err = toMgmtInfo.mgmtCluster.GetClient() Expect(err).NotTo(HaveOccurred()) - initOptToMgmtCluster := clusterctlclient.InitOptions{ - Kubeconfig: toMgmtCluster.KubeconfigPath, - CoreProvider: "cluster-api:v0.3.0", - BootstrapProviders: []string{"kubeadm-bootstrap:v0.3.0"}, - ControlPlaneProviders: []string{"kubeadm-control-plane:v0.3.0"}, - InfrastructureProviders: []string{"docker:v0.3.0"}, - } - _, err = c.Init(initOptToMgmtCluster) - Expect(err).ToNot(HaveOccurred()) - 
framework.WaitForAPIServiceAvailable(ctx, toMgmtCluster, "v1beta1.webhook.cert-manager.io") - CheckAndWaitCAPITestDeploymentsExist(toMgmtClient) + initTestMgmtCluster(ctx, toMgmtInfo) + // Do the move + c, err := clusterctlclient.New(fromMgmtInfo.clusterctlConfigFile) + Expect(err).ToNot(HaveOccurred()) err = c.Move(clusterctlclient.MoveOptions{ - FromKubeconfig: fromMgmtCluster.KubeconfigPath, - ToKubeconfig: toMgmtCluster.KubeconfigPath, - // Namespace: "TODO", + FromKubeconfig: fromMgmtInfo.mgmtCluster.KubeconfigPath, + ToKubeconfig: toMgmtInfo.mgmtCluster.KubeconfigPath, }) Expect(err).ToNot(HaveOccurred()) @@ -98,12 +96,12 @@ var _ = Describe("clusterctl move", func() { AfterEach(func() { fmt.Fprintf(GinkgoWriter, "Tearing down kind clusters\n") - fromMgmtCluster.Teardown(ctx) - toMgmtCluster.Teardown(ctx) + fromMgmtInfo.mgmtCluster.Teardown(ctx) + toMgmtInfo.mgmtCluster.Teardown(ctx) fmt.Fprintf(GinkgoWriter, "Tearing down kind workload cluster\n") - if err := cluster.NewProvider().Delete("foo-cluster", ""); err != nil { + if err := cluster.NewProvider().Delete(workloadInfo.workloadClusterName, ""); err != nil { // Treat this as a non critical error - fmt.Fprintf(GinkgoWriter, "Deleting the kind cluster \"foo-cluster\" failed. You may need to remove this by hand.\n") + fmt.Fprintf(GinkgoWriter, "Deleting the kind cluster %q failed. You may need to remove this by hand.\n", workloadInfo.workloadClusterName) } }) @@ -111,19 +109,19 @@ var _ = Describe("clusterctl move", func() { It("should move all Cluster API objects to the new mgmt cluster, unpause the Cluster and delete all objects from previous mgmt cluster", func() { Eventually( func() error { - if err := toMgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: "foo-cluster"}, &clusterv1.Cluster{}); err != nil { + if err := toMgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: workloadInfo.workloadClusterName}, &clusterv1.Cluster{}); err != nil { return err } - if err := toMgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: "controlplane-0"}, &clusterv1.Machine{}); err != nil { + if err := toMgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: "controlplane-0"}, &clusterv1.Machine{}); err != nil { return err } - if err := toMgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: "controlplane-0-config"}, &bootstrapv1.KubeadmConfig{}); err != nil { + if err := toMgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: "controlplane-0-config"}, &bootstrapv1.KubeadmConfig{}); err != nil { return err } - if err := toMgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: "foo-cluster"}, &infrav1.DockerCluster{}); err != nil { + if err := toMgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: workloadInfo.workloadClusterName}, &infrav1.DockerCluster{}); err != nil { return err } - if err := toMgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: "controlplane-0"}, &infrav1.DockerMachine{}); err != nil { + if err := toMgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: "controlplane-0"}, &infrav1.DockerMachine{}); err != nil { return err } return nil @@ -133,7 +131,7 @@ var _ = Describe("clusterctl move", func() { Eventually( func() (bool, error) { testCluster := &clusterv1.Cluster{} - if err := toMgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: "foo-cluster"}, testCluster); err != nil { + if err := toMgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", 
Name: workloadInfo.workloadClusterName}, testCluster); err != nil { return false, err } if testCluster.Spec.Paused { @@ -145,23 +143,23 @@ var _ = Describe("clusterctl move", func() { // Should delete all Cluster API objects from the previous management cluster. Eventually( func() error { - err := fromMgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: "foo-cluster"}, &clusterv1.Cluster{}) + err := fromMgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: workloadInfo.workloadClusterName}, &clusterv1.Cluster{}) if err == nil || !apierrors.IsNotFound(err) { return fmt.Errorf("%v", err) } - err = fromMgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: "controlplane-0"}, &clusterv1.Machine{}) + err = fromMgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: "controlplane-0"}, &clusterv1.Machine{}) if err == nil || !apierrors.IsNotFound(err) { return fmt.Errorf("%v", err) } - err = fromMgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: "controlplane-0-config"}, &bootstrapv1.KubeadmConfig{}) + err = fromMgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: "controlplane-0-config"}, &bootstrapv1.KubeadmConfig{}) if err == nil || !apierrors.IsNotFound(err) { return fmt.Errorf("%v", err) } - err = fromMgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: "foo-cluster"}, &infrav1.DockerCluster{}) + err = fromMgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: workloadInfo.workloadClusterName}, &infrav1.DockerCluster{}) if err == nil || !apierrors.IsNotFound(err) { return fmt.Errorf("%v", err) } - err = fromMgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: "controlplane-0"}, &infrav1.DockerMachine{}) + err = fromMgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: "controlplane-0"}, &infrav1.DockerMachine{}) if err == nil || !apierrors.IsNotFound(err) { return fmt.Errorf("%v", err) } diff --git a/cmd/clusterctl/test/run-e2e.sh b/cmd/clusterctl/test/run-e2e.sh index 1518a1a82ee1..f327472039e0 100755 --- a/cmd/clusterctl/test/run-e2e.sh +++ b/cmd/clusterctl/test/run-e2e.sh @@ -21,7 +21,7 @@ set -o pipefail REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/../../.. 
cd "${REPO_ROOT}" || exit 1 REPO_ROOT_ABS=${PWD} -ARTIFACTS="${ARTIFACTS:-${PWD}/_artifacts}" +ARTIFACTS="${ARTIFACTS:-${REPO_ROOT_ABS}/_artifacts}" mkdir -p "$ARTIFACTS/logs/" cat < "clusterctl-settings.json" @@ -30,7 +30,20 @@ cat < "clusterctl-settings.json" } EOF -./cmd/clusterctl/hack/local-overrides.py +# Create a local filesystem repository for the docker provider and update clusterctl.yaml +mkdir -p "$ARTIFACTS/testdata/" +cp -r "${REPO_ROOT_ABS}/cmd/clusterctl/test/testdata" "$ARTIFACTS/" +export CLUSTERCTL_CONFIG="${ARTIFACTS}/testdata/clusterctl.yaml" +cat < "${CLUSTERCTL_CONFIG}" +providers: +- name: docker + url: ${ARTIFACTS}/testdata/docker/v0.3.0/infrastructure-components.yaml + type: InfrastructureProvider + +DOCKER_SERVICE_DOMAIN: "cluster.local" +DOCKER_SERVICE_CIDRS: "10.128.0.0/12" +DOCKER_POD_CIDRS: "192.168.0.0/16" +EOF export KIND_CONFIG_FILE="${ARTIFACTS}/kind-cluster-with-extramounts.yaml" cat < "${KIND_CONFIG_FILE}" diff --git a/cmd/clusterctl/test/testdata/docker/v0.3.0/cluster-template.yaml b/cmd/clusterctl/test/testdata/docker/v0.3.0/cluster-template.yaml new file mode 100644 index 000000000000..24d26af7e5b3 --- /dev/null +++ b/cmd/clusterctl/test/testdata/docker/v0.3.0/cluster-template.yaml @@ -0,0 +1,107 @@ +# Creates a cluster with one control-plane node and one worker node +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: DockerCluster +metadata: + name: ${ CLUSTER_NAME } + namespace: default +--- +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: Cluster +metadata: + name: ${ CLUSTER_NAME } + namespace: default +spec: + clusterNetwork: + services: + cidrBlocks: [${ DOCKER_SERVICE_CIDRS }] + pods: + cidrBlocks: [${ DOCKER_POD_CIDRS }] + serviceDomain: ${ DOCKER_SERVICE_DOMAIN } + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: DockerCluster + name: ${ CLUSTER_NAME } + namespace: default +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: DockerMachine +metadata: + name: controlplane-0 + namespace: default +--- +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: Machine +metadata: + labels: + cluster.x-k8s.io/cluster-name: ${ CLUSTER_NAME } + cluster.x-k8s.io/control-plane: "true" + name: controlplane-0 + namespace: default +spec: + version: ${ KUBERNETES_VERSION } + clusterName: ${ CLUSTER_NAME } + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 + kind: KubeadmConfig + name: controlplane-0-config + namespace: default + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: DockerMachine + name: controlplane-0 + namespace: default +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 +kind: KubeadmConfig +metadata: + name: controlplane-0-config + namespace: default +spec: + clusterConfiguration: + controllerManager: + extraArgs: + enable-hostpath-provisioner: "true" + initConfiguration: + nodeRegistration: + kubeletExtraArgs: + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: DockerMachine +metadata: + name: worker-0 + namespace: default +--- +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: Machine +metadata: + labels: + cluster.x-k8s.io/cluster-name: ${ CLUSTER_NAME } + name: worker-0 + namespace: default +spec: + version: "v1.14.2" + clusterName: ${ CLUSTER_NAME } + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 + kind: KubeadmConfig + name: worker-0-config + namespace: default + infrastructureRef: + apiVersion: 
infrastructure.cluster.x-k8s.io/v1alpha3 + kind: DockerMachine + name: worker-0 + namespace: default +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 +kind: KubeadmConfig +metadata: + name: worker-0-config + namespace: default +spec: + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% diff --git a/cmd/clusterctl/test/testdata/docker/v0.3.0/infrastructure-components.yaml b/cmd/clusterctl/test/testdata/docker/v0.3.0/infrastructure-components.yaml new file mode 100644 index 000000000000..3faf895c22d1 --- /dev/null +++ b/cmd/clusterctl/test/testdata/docker/v0.3.0/infrastructure-components.yaml @@ -0,0 +1,514 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + cluster.x-k8s.io/provider: infrastructure-docker + control-plane: controller-manager + name: capd-system +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.2.4 + creationTimestamp: null + labels: + cluster.x-k8s.io/provider: infrastructure-docker + name: dockerclusters.infrastructure.cluster.x-k8s.io +spec: + group: infrastructure.cluster.x-k8s.io + names: + categories: + - cluster-api + kind: DockerCluster + listKind: DockerClusterList + plural: dockerclusters + singular: dockercluster + preserveUnknownFields: false + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: DockerCluster is the Schema for the dockerclusters API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: DockerClusterSpec defines the desired state of DockerCluster. + properties: + controlPlaneEndpoint: + description: ControlPlaneEndpoint represents the endpoint used to communicate + with the control plane. + properties: + host: + description: Host is the hostname on which the API server is serving. + type: string + port: + description: Port is the port on which the API server is serving. + type: integer + required: + - host + - port + type: object + failureDomains: + additionalProperties: + description: FailureDomainSpec is the Schema for Cluster API failure + domains. It allows controllers to understand how many failure domains + a cluster can optionally span across. + properties: + attributes: + additionalProperties: + type: string + description: Attributes is a free form map of attributes an infrastructure + provider might use or require. + type: object + controlPlane: + description: ControlPlane determines if this failure domain is + suitable for use by control plane machines. + type: boolean + type: object + description: FailureDomains are not usually defined on the spec. The + docker provider is special since failure domains don't mean anything + in a local docker environment.
Instead, the docker cluster controller + will simply copy these into the Status and allow the Cluster API controllers + to do what they will with the defined failure domains. + type: object + type: object + status: + description: DockerClusterStatus defines the observed state of DockerCluster. + properties: + failureDomains: + additionalProperties: + description: FailureDomainSpec is the Schema for Cluster API failure + domains. It allows controllers to understand how many failure domains + a cluster can optionally span across. + properties: + attributes: + additionalProperties: + type: string + description: Attributes is a free form map of attributes an infrastructure + provider might use or require. + type: object + controlPlane: + description: ControlPlane determines if this failure domain is + suitable for use by control plane machines. + type: boolean + type: object + description: FailureDomains don't mean much in CAPD since it's all local, + but we can see how the rest of cluster API will use this if we populate + it. + type: object + ready: + description: Ready denotes that the docker cluster (infrastructure) + is ready. + type: boolean + required: + - ready + type: object + type: object + version: v1alpha3 + versions: + - name: v1alpha3 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.2.4 + creationTimestamp: null + labels: + cluster.x-k8s.io/provider: infrastructure-docker + name: dockermachines.infrastructure.cluster.x-k8s.io +spec: + group: infrastructure.cluster.x-k8s.io + names: + categories: + - cluster-api + kind: DockerMachine + listKind: DockerMachineList + plural: dockermachines + singular: dockermachine + preserveUnknownFields: false + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: DockerMachine is the Schema for the dockermachines API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: DockerMachineSpec defines the desired state of DockerMachine + properties: + customImage: + description: CustomImage allows customizing the container image that + is used for running the machine + type: string + providerID: + description: ProviderID will be the container name in ProviderID format + (docker:////) + type: string + type: object + status: + description: DockerMachineStatus defines the observed state of DockerMachine + properties: + ready: + description: Ready denotes that the machine (docker container) is ready + type: boolean + required: + - ready + type: object + type: object + version: v1alpha3 + versions: + - name: v1alpha3 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.2.4 + creationTimestamp: null + labels: + cluster.x-k8s.io/provider: infrastructure-docker + name: dockermachinetemplates.infrastructure.cluster.x-k8s.io +spec: + group: infrastructure.cluster.x-k8s.io + names: + categories: + - cluster-api + kind: DockerMachineTemplate + listKind: DockerMachineTemplateList + plural: dockermachinetemplates + singular: dockermachinetemplate + preserveUnknownFields: false + scope: Namespaced + validation: + openAPIV3Schema: + description: DockerMachineTemplate is the Schema for the dockermachinetemplates + API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: DockerMachineTemplateSpec defines the desired state of DockerMachineTemplate + properties: + template: + description: DockerMachineTemplateResource describes the data needed + to create a DockerMachine from a template + properties: + spec: + description: Spec is the specification of the desired behavior of + the machine. 
+ properties: + customImage: + description: CustomImage allows customizing the container image + that is used for running the machine + type: string + providerID: + description: ProviderID will be the container name in ProviderID + format (docker:////) + type: string + type: object + required: + - spec + type: object + required: + - template + type: object + type: object + version: v1alpha3 + versions: + - name: v1alpha3 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + cluster.x-k8s.io/provider: infrastructure-docker + name: capd-leader-election-role + namespace: capd-system +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - configmaps/status + verbs: + - get + - update + - patch +- apiGroups: + - "" + resources: + - events + verbs: + - create +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + cluster.x-k8s.io/provider: infrastructure-docker + name: capd-manager-role +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch +- apiGroups: + - cluster.x-k8s.io + resources: + - clusters + - machines + verbs: + - get + - list + - watch +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - dockerclusters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - dockerclusters/status + verbs: + - get + - patch + - update +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - dockermachines + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - dockermachines/status + verbs: + - get + - patch + - update +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + cluster.x-k8s.io/provider: infrastructure-docker + name: capd-proxy-role +rules: +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + cluster.x-k8s.io/provider: infrastructure-docker + name: capd-leader-election-rolebinding + namespace: capd-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: capd-leader-election-role +subjects: +- kind: ServiceAccount + name: default + namespace: capd-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + cluster.x-k8s.io/provider: infrastructure-docker + name: capd-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: capd-manager-role +subjects: +- kind: ServiceAccount + name: default + namespace: capd-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + cluster.x-k8s.io/provider: infrastructure-docker + name: capd-proxy-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: capd-proxy-role +subjects: +- kind: ServiceAccount + name: default + namespace: capd-system +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/port: "8443" + prometheus.io/scheme: https + 
prometheus.io/scrape: "true" + labels: + cluster.x-k8s.io/provider: infrastructure-docker + control-plane: controller-manager + name: capd-controller-manager-metrics-service + namespace: capd-system +spec: + ports: + - name: https + port: 8443 + targetPort: https + selector: + cluster.x-k8s.io/provider: infrastructure-docker + control-plane: controller-manager +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + cluster.x-k8s.io/provider: infrastructure-docker + control-plane: controller-manager + name: capd-controller-manager + namespace: capd-system +spec: + replicas: 1 + selector: + matchLabels: + cluster.x-k8s.io/provider: infrastructure-docker + control-plane: controller-manager + template: + metadata: + labels: + cluster.x-k8s.io/provider: infrastructure-docker + control-plane: controller-manager + spec: + containers: + - args: + - --secure-listen-address=0.0.0.0:8443 + - --upstream=http://127.0.0.1:8080/ + - --logtostderr=true + - --v=10 + image: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.0 + name: kube-rbac-proxy + ports: + - containerPort: 8443 + name: https + - args: + - --metrics-addr=0 + - -v=4 + image: gcr.io/arvinders-1st-project/docker-provider-manager-amd64:dev + imagePullPolicy: IfNotPresent + name: manager + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/run/docker.sock + name: dockersock + terminationGracePeriodSeconds: 10 + volumes: + - hostPath: + path: /var/run/docker.sock + name: dockersock