From 8b1ba697ca45f10f6b6f05a46a1a3d866c1b1d7c Mon Sep 17 00:00:00 2001 From: David Justice Date: Wed, 19 Aug 2020 12:30:33 -0400 Subject: [PATCH] =?UTF-8?q?=E2=9C=A8=20add=20docker=20machine=20pool=20and?= =?UTF-8?q?=20e2e=20tests=20for=20capi=20machine=20pool?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- exp/util/util.go | 37 ++ test/e2e/Makefile | 1 + test/e2e/config/docker.yaml | 4 + .../bases/cluster-with-kcp.yaml | 2 +- .../data/infrastructure-docker/bases/mp.yaml | 39 ++ .../kustomization.yaml | 4 + test/e2e/kcp_upgrade.go | 28 +- test/e2e/machine_pool.go | 132 +++++++ test/e2e/machine_pool_test.go | 37 ++ test/e2e/md_upgrades.go | 26 +- test/e2e/mhc_remediations.go | 18 +- test/e2e/quick_start.go | 16 +- test/e2e/self_hosted.go | 23 +- .../clusterctl/clusterctl_helpers.go | 27 +- test/framework/clusterctl/e2e_config.go | 14 +- test/framework/machinedeployment_helpers.go | 2 +- test/framework/machinepool_helpers.go | 268 ++++++++++++++ test/infrastructure/docker/Makefile | 6 +- ...e.cluster.x-k8s.io_dockermachinepools.yaml | 203 +++++++++++ .../docker/config/crd/kustomization.yaml | 1 + .../cainjection_in_dockermachinepools.yaml | 8 + .../webhook_in_dockermachinepools.yaml | 19 + .../manager/manager_auth_proxy_patch.yaml | 1 + .../docker/config/rbac/role.yaml | 20 ++ .../controllers/dockermachine_controller.go | 2 +- .../docker/docker/kind_manager.go | 22 +- test/infrastructure/docker/docker/machine.go | 78 +++- .../docker/docker/types/node.go | 4 +- test/infrastructure/docker/docker/util.go | 20 +- .../docker/examples/machine-pool.yaml | 102 ++++++ .../docker/examples/simple-cluster.yaml | 5 + test/infrastructure/docker/exp/PROJECT | 7 + test/infrastructure/docker/exp/README.md | 21 ++ .../api/v1alpha3/dockermachinepool_types.go | 146 ++++++++ .../exp/api/v1alpha3/groupversion_info.go | 36 ++ .../exp/api/v1alpha3/zz_generated.deepcopy.go | 195 ++++++++++ .../dockermachinepool_controller.go | 232 ++++++++++++ .../docker/exp/controllers/exp.go | 22 ++ .../docker/exp/docker/nodepool.go | 332 ++++++++++++++++++ test/infrastructure/docker/go.mod | 2 + test/infrastructure/docker/go.sum | 1 + .../docker/hack/boilerplate.go.txt | 2 +- test/infrastructure/docker/main.go | 44 ++- 43 files changed, 2118 insertions(+), 91 deletions(-) create mode 100644 test/e2e/data/infrastructure-docker/bases/mp.yaml create mode 100644 test/e2e/data/infrastructure-docker/cluster-template-machine-pool/kustomization.yaml create mode 100644 test/e2e/machine_pool.go create mode 100644 test/e2e/machine_pool_test.go create mode 100644 test/framework/machinepool_helpers.go create mode 100644 test/infrastructure/docker/config/crd/bases/exp.infrastructure.cluster.x-k8s.io_dockermachinepools.yaml create mode 100644 test/infrastructure/docker/config/crd/patches/cainjection_in_dockermachinepools.yaml create mode 100644 test/infrastructure/docker/config/crd/patches/webhook_in_dockermachinepools.yaml create mode 100644 test/infrastructure/docker/examples/machine-pool.yaml create mode 100644 test/infrastructure/docker/exp/PROJECT create mode 100644 test/infrastructure/docker/exp/README.md create mode 100644 test/infrastructure/docker/exp/api/v1alpha3/dockermachinepool_types.go create mode 100644 test/infrastructure/docker/exp/api/v1alpha3/groupversion_info.go create mode 100644 test/infrastructure/docker/exp/api/v1alpha3/zz_generated.deepcopy.go create mode 100644 test/infrastructure/docker/exp/controllers/dockermachinepool_controller.go create mode 100644 
test/infrastructure/docker/exp/controllers/exp.go
 create mode 100644 test/infrastructure/docker/exp/docker/nodepool.go

diff --git a/exp/util/util.go b/exp/util/util.go
index ea91791048f3..658a385d7488 100644
--- a/exp/util/util.go
+++ b/exp/util/util.go
@@ -19,11 +19,15 @@ package util
 import (
 	"context"
 
+	"github.com/go-logr/logr"
 	"github.com/pkg/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1alpha3"
 	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 )
 
 // GetOwnerMachinePool returns the MachinePool objects owning the current resource.
@@ -52,3 +56,36 @@ func GetMachinePoolByName(ctx context.Context, c client.Client, namespace, name
 	}
 	return m, nil
 }
+
+// MachinePoolToInfrastructureMapFunc returns a handler.ToRequestsFunc that watches for
+// MachinePool events and returns reconciliation requests for an infrastructure provider object.
+func MachinePoolToInfrastructureMapFunc(gvk schema.GroupVersionKind, log logr.Logger) handler.ToRequestsFunc {
+	log = log.WithValues("machine-pool-to-infra-map-func", gvk.String())
+	return func(o handler.MapObject) []reconcile.Request {
+		log := log.WithValues("namespace", o.Meta.GetNamespace(), "name", o.Meta.GetName())
+		m, ok := o.Object.(*clusterv1exp.MachinePool)
+		if !ok {
+			log.V(4).Info("not a machine pool")
+			return nil
+		}
+
+		gk := gvk.GroupKind()
+		ref := m.Spec.Template.Spec.InfrastructureRef
+		// Return early if the GroupKind doesn't match what we expect.
+		infraGK := ref.GroupVersionKind().GroupKind()
+		if gk != infraGK {
+			log.V(4).Info("infra kind doesn't match filter group kind", "infraGroupKind", infraGK.String())
+			return nil
+		}
+
+		log.V(4).Info("projecting object", "namespace", m.Namespace, "name", ref.Name)
+		return []reconcile.Request{
+			{
+				NamespacedName: client.ObjectKey{
+					Namespace: m.Namespace,
+					Name:      ref.Name,
+				},
+			},
+		}
+	}
+}

diff --git a/test/e2e/Makefile b/test/e2e/Makefile
index 9592937620f3..0c5b4643b2a6 100644
--- a/test/e2e/Makefile
+++ b/test/e2e/Makefile
@@ -59,6 +59,7 @@ cluster-templates: $(KUSTOMIZE) ## Generate cluster templates
 	$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/cluster-template-kcp-adoption/step1 --load_restrictor none > $(DOCKER_TEMPLATES)/cluster-template-kcp-adoption.yaml
 	echo "---" >> $(DOCKER_TEMPLATES)/cluster-template-kcp-adoption.yaml
 	$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/cluster-template-kcp-adoption/step2 --load_restrictor none >> $(DOCKER_TEMPLATES)/cluster-template-kcp-adoption.yaml
+	$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/cluster-template-machine-pool --load_restrictor none > $(DOCKER_TEMPLATES)/cluster-template-machine-pool.yaml
 
 ## --------------------------------------
 ## Testing

diff --git a/test/e2e/config/docker.yaml b/test/e2e/config/docker.yaml
index 1529764a9991..0f9c390ffa55 100644
--- a/test/e2e/config/docker.yaml
+++ b/test/e2e/config/docker.yaml
@@ -70,6 +70,7 @@ providers:
       - sourcePath: "../data/infrastructure-docker/cluster-template.yaml"
       - sourcePath: "../data/infrastructure-docker/cluster-template-mhc.yaml"
       - sourcePath: "../data/infrastructure-docker/cluster-template-kcp-adoption.yaml"
+      - sourcePath: "../data/infrastructure-docker/cluster-template-machine-pool.yaml"
 
 variables:
   KUBERNETES_VERSION: "v1.18.2"
@@ -83,12 +84,15 @@ variables:
   DOCKER_POD_CIDRS: "192.168.0.0/16"
   CNI: "./data/cni/kindnet/kindnet.yaml"
   EXP_CLUSTER_RESOURCE_SET: "true"
+  EXP_MACHINE_POOL: "true"
 
 intervals:
default/wait-controllers: ["3m", "10s"] default/wait-cluster: ["5m", "10s"] default/wait-control-plane: ["10m", "10s"] default/wait-worker-nodes: ["5m", "10s"] + default/wait-machine-pool-nodes: ["5m", "10s"] default/wait-delete-cluster: ["3m", "10s"] default/wait-machine-upgrade: ["20m", "10s"] + default/wait-machine-pool-upgrade: ["5m", "10s"] default/wait-machine-remediation: ["5m", "10s"] diff --git a/test/e2e/data/infrastructure-docker/bases/cluster-with-kcp.yaml b/test/e2e/data/infrastructure-docker/bases/cluster-with-kcp.yaml index 65c0920e8278..5fb45f7a77ea 100644 --- a/test/e2e/data/infrastructure-docker/bases/cluster-with-kcp.yaml +++ b/test/e2e/data/infrastructure-docker/bases/cluster-with-kcp.yaml @@ -61,7 +61,7 @@ spec: controllerManager: extraArgs: {enable-hostpath-provisioner: 'true'} apiServer: - certSANs: [localhost, 127.0.0.1] + certSANs: [localhost, 127.0.0.1, 0.0.0.0] initConfiguration: nodeRegistration: criSocket: /var/run/containerd/containerd.sock diff --git a/test/e2e/data/infrastructure-docker/bases/mp.yaml b/test/e2e/data/infrastructure-docker/bases/mp.yaml new file mode 100644 index 000000000000..44249ad2b9b7 --- /dev/null +++ b/test/e2e/data/infrastructure-docker/bases/mp.yaml @@ -0,0 +1,39 @@ +--- +# MachinePool which references the DockerMachinePool and KubeadmConfigTemplate below +apiVersion: exp.cluster.x-k8s.io/v1alpha3 +kind: MachinePool +metadata: + name: "${CLUSTER_NAME}-mp-0" +spec: + clusterName: '${CLUSTER_NAME}' + replicas: ${WORKER_MACHINE_COUNT} + template: + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 + kind: KubeadmConfig + name: "${CLUSTER_NAME}-mp-0-config" + clusterName: '${CLUSTER_NAME}' + infrastructureRef: + apiVersion: exp.infrastructure.cluster.x-k8s.io/v1alpha3 + kind: DockerMachinePool + name: "${CLUSTER_NAME}-dmp-0" + version: "${KUBERNETES_VERSION}" +--- +# DockerMachinePool using default values referenced by the MachinePool +apiVersion: exp.infrastructure.cluster.x-k8s.io/v1alpha3 +kind: DockerMachinePool +metadata: + name: "${CLUSTER_NAME}-dmp-0" +--- +# KubeadmConfigTemplate referenced by the MachinePool +apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 +kind: KubeadmConfig +metadata: + name: "${CLUSTER_NAME}-mp-0-config" +spec: + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% diff --git a/test/e2e/data/infrastructure-docker/cluster-template-machine-pool/kustomization.yaml b/test/e2e/data/infrastructure-docker/cluster-template-machine-pool/kustomization.yaml new file mode 100644 index 000000000000..2323bae83aea --- /dev/null +++ b/test/e2e/data/infrastructure-docker/cluster-template-machine-pool/kustomization.yaml @@ -0,0 +1,4 @@ +resources: + - ../bases/cluster-with-kcp.yaml + - ../bases/mp.yaml + - ../bases/crs.yaml diff --git a/test/e2e/kcp_upgrade.go b/test/e2e/kcp_upgrade.go index ab8a69bff8a4..136fea7f6288 100644 --- a/test/e2e/kcp_upgrade.go +++ b/test/e2e/kcp_upgrade.go @@ -27,8 +27,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/utils/pointer" - clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" - controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha3" + "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" "sigs.k8s.io/cluster-api/util" @@ -46,12 +45,11 @@ type KCPUpgradeSpecInput struct { // KCPUpgradeSpec implements a test that verifies KCP to properly upgrade a control plane with 3 machines. 
func KCPUpgradeSpec(ctx context.Context, inputGetter func() KCPUpgradeSpecInput) { var ( - specName = "kcp-upgrade" - input KCPUpgradeSpecInput - namespace *corev1.Namespace - cancelWatches context.CancelFunc - cluster *clusterv1.Cluster - controlPlane *controlplanev1.KubeadmControlPlane + specName = "kcp-upgrade" + input KCPUpgradeSpecInput + namespace *corev1.Namespace + cancelWatches context.CancelFunc + clusterResources *clusterctl.ApplyClusterTemplateAndWaitResult ) BeforeEach(func() { @@ -73,7 +71,7 @@ func KCPUpgradeSpec(ctx context.Context, inputGetter func() KCPUpgradeSpecInput) It("Should successfully upgrade Kubernetes, DNS, kube-proxy, and etcd in a single control plane cluster", func() { By("Creating a workload cluster") - cluster, controlPlane, _ = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ + clusterResources = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ ClusterProxy: input.BootstrapClusterProxy, ConfigCluster: clusterctl.ConfigClusterInput{ LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()), @@ -95,8 +93,8 @@ func KCPUpgradeSpec(ctx context.Context, inputGetter func() KCPUpgradeSpecInput) By("Upgrading Kubernetes, DNS, kube-proxy, and etcd versions") framework.UpgradeControlPlaneAndWaitForUpgrade(ctx, framework.UpgradeControlPlaneAndWaitForUpgradeInput{ ClusterProxy: input.BootstrapClusterProxy, - Cluster: cluster, - ControlPlane: controlPlane, + Cluster: clusterResources.Cluster, + ControlPlane: clusterResources.ControlPlane, EtcdImageTag: input.E2EConfig.GetVariable(EtcdVersionUpgradeTo), DNSImageTag: input.E2EConfig.GetVariable(CoreDNSVersionUpgradeTo), KubernetesUpgradeVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo), @@ -112,7 +110,7 @@ func KCPUpgradeSpec(ctx context.Context, inputGetter func() KCPUpgradeSpecInput) By("Creating a workload cluster") - cluster, controlPlane, _ = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ + clusterResources = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ ClusterProxy: input.BootstrapClusterProxy, ConfigCluster: clusterctl.ConfigClusterInput{ LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()), @@ -134,8 +132,8 @@ func KCPUpgradeSpec(ctx context.Context, inputGetter func() KCPUpgradeSpecInput) By("Upgrading Kubernetes, DNS, kube-proxy, and etcd versions") framework.UpgradeControlPlaneAndWaitForUpgrade(ctx, framework.UpgradeControlPlaneAndWaitForUpgradeInput{ ClusterProxy: input.BootstrapClusterProxy, - Cluster: cluster, - ControlPlane: controlPlane, + Cluster: clusterResources.Cluster, + ControlPlane: clusterResources.ControlPlane, EtcdImageTag: input.E2EConfig.GetVariable(EtcdVersionUpgradeTo), DNSImageTag: input.E2EConfig.GetVariable(CoreDNSVersionUpgradeTo), KubernetesUpgradeVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo), @@ -149,6 +147,6 @@ func KCPUpgradeSpec(ctx context.Context, inputGetter func() KCPUpgradeSpecInput) AfterEach(func() { // Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself. 
-	dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
+	dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
 	})
 }

diff --git a/test/e2e/machine_pool.go b/test/e2e/machine_pool.go
new file mode 100644
index 000000000000..3b16eccf15f5
--- /dev/null
+++ b/test/e2e/machine_pool.go
@@ -0,0 +1,132 @@
+// +build e2e
+
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/utils/pointer"
+
+	"sigs.k8s.io/cluster-api/test/framework"
+	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
+	"sigs.k8s.io/cluster-api/util"
+)
+
+const (
+	MinNumberOfReplicas = "MIN_NUMBER_OF_REPLICAS"
+	MaxNumberOfReplicas = "MAX_NUMBER_OF_REPLICAS"
+)
+
+// MachinePoolInput is the input for MachinePoolSpec.
+type MachinePoolInput struct {
+	E2EConfig             *clusterctl.E2EConfig
+	ClusterctlConfigPath  string
+	BootstrapClusterProxy framework.ClusterProxy
+	ArtifactFolder        string
+	SkipCleanup           bool
+}
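+
+// Note: MinNumberOfReplicas and MaxNumberOfReplicas are declared above but not yet
+// consumed by this spec; presumably they are meant to bound the scale steps below.
+// A hypothetical sketch, using the GetInt32PtrVariable helper added in this commit:
+//
+//	scaleUpTo := workerMachineCount + 1
+//	if max := input.E2EConfig.GetInt32PtrVariable(MaxNumberOfReplicas); max != nil && scaleUpTo > *max {
+//		scaleUpTo = *max
+//	}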
+
+// MachinePoolSpec implements a test that verifies MachinePool scaling up and down, and version updates.
+func MachinePoolSpec(ctx context.Context, inputGetter func() MachinePoolInput) {
+	var (
+		specName         = "machine-pool"
+		input            MachinePoolInput
+		namespace        *corev1.Namespace
+		cancelWatches    context.CancelFunc
+		clusterResources *clusterctl.ApplyClusterTemplateAndWaitResult
+	)
+
+	BeforeEach(func() {
+		Expect(ctx).NotTo(BeNil(), "ctx is required for %s spec", specName)
+		input = inputGetter()
+		Expect(input.E2EConfig).ToNot(BeNil(), "Invalid argument. input.E2EConfig can't be nil when calling %s spec", specName)
+		Expect(input.ClusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. input.ClusterctlConfigPath must be an existing file when calling %s spec", specName)
+		Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. input.BootstrapClusterProxy can't be nil when calling %s spec", specName)
+		Expect(os.MkdirAll(input.ArtifactFolder, 0755)).To(Succeed(), "Invalid argument. input.ArtifactFolder can't be created for %s spec", specName)
+		Expect(input.E2EConfig.Variables).To(HaveKey(KubernetesVersionUpgradeTo))
+		Expect(input.E2EConfig.Variables).To(HaveKey(KubernetesVersionUpgradeFrom))
+
+		// Set up a namespace to host objects for this spec and create a watcher for the namespace events.
+		namespace, cancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder)
+	})
+
+	It("Should successfully create a cluster with machine pool machines", func() {
+		By("Creating a workload cluster")
+		workerMachineCount := int32(2)
+		clusterResources = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
+			ClusterProxy: input.BootstrapClusterProxy,
+			ConfigCluster: clusterctl.ConfigClusterInput{
+				LogFolder:                filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()),
+				ClusterctlConfigPath:     input.ClusterctlConfigPath,
+				KubeconfigPath:           input.BootstrapClusterProxy.GetKubeconfigPath(),
+				InfrastructureProvider:   clusterctl.DefaultInfrastructureProvider,
+				Flavor:                   "machine-pool",
+				Namespace:                namespace.Name,
+				ClusterName:              fmt.Sprintf("%s-%s", specName, util.RandomString(6)),
+				KubernetesVersion:        input.E2EConfig.GetVariable(KubernetesVersionUpgradeFrom),
+				ControlPlaneMachineCount: pointer.Int64Ptr(1),
+				WorkerMachineCount:       pointer.Int64Ptr(int64(workerMachineCount)),
+			},
+			WaitForClusterIntervals:      input.E2EConfig.GetIntervals(specName, "wait-cluster"),
+			WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, "wait-control-plane"),
+			WaitForMachinePools:          input.E2EConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
+		})
+
+		By("Scaling the machine pool up")
+		framework.ScaleMachinePoolAndWait(ctx, framework.ScaleMachinePoolAndWaitInput{
+			ClusterProxy:              input.BootstrapClusterProxy,
+			Cluster:                   clusterResources.Cluster,
+			Replicas:                  workerMachineCount + 1,
+			MachinePools:              clusterResources.MachinePools,
+			WaitForMachinePoolToScale: input.E2EConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
+		})
+
+		By("Scaling the machine pool down")
+		framework.ScaleMachinePoolAndWait(ctx, framework.ScaleMachinePoolAndWaitInput{
+			ClusterProxy:              input.BootstrapClusterProxy,
+			Cluster:                   clusterResources.Cluster,
+			Replicas:                  workerMachineCount - 1,
+			MachinePools:              clusterResources.MachinePools,
+			WaitForMachinePoolToScale: input.E2EConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
+		})
+
+		By("Upgrading the instances")
+		framework.UpgradeMachinePoolAndWait(ctx, framework.UpgradeMachinePoolAndWaitInput{
+			ClusterProxy:                   input.BootstrapClusterProxy,
+			Cluster:                        clusterResources.Cluster,
+			UpgradeVersion:                 input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo),
+			WaitForMachinePoolToBeUpgraded: input.E2EConfig.GetIntervals(specName, "wait-machine-pool-upgrade"),
+			MachinePools:                   clusterResources.MachinePools,
+		})
+
+		By("PASSED!")
+	})
+
+	AfterEach(func() {
+		// Dumps all the resources in the spec namespace, then cleans up the cluster object and the spec namespace itself.
+		dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
+	})
+}

diff --git a/test/e2e/machine_pool_test.go b/test/e2e/machine_pool_test.go
new file mode 100644
index 000000000000..326797bb3ee2
--- /dev/null
+++ b/test/e2e/machine_pool_test.go
@@ -0,0 +1,37 @@
+// +build e2e
+
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "context" + + . "github.com/onsi/ginkgo" +) + +var _ = Describe("When testing MachinePools", func() { + MachinePoolSpec(context.TODO(), func() MachinePoolInput { + return MachinePoolInput{ + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + } + }) +}) diff --git a/test/e2e/md_upgrades.go b/test/e2e/md_upgrades.go index d33bcf7398f4..01e317e5836c 100644 --- a/test/e2e/md_upgrades.go +++ b/test/e2e/md_upgrades.go @@ -27,7 +27,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/utils/pointer" - clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" + "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" "sigs.k8s.io/cluster-api/util" @@ -45,11 +45,11 @@ type MachineDeploymentUpgradesSpecInput struct { // MachineDeploymentUpgradesSpec implements a test that verifies that MachineDeployment upgrades are successful. func MachineDeploymentUpgradesSpec(ctx context.Context, inputGetter func() MachineDeploymentUpgradesSpecInput) { var ( - specName = "md-upgrades" - input MachineDeploymentUpgradesSpecInput - namespace *corev1.Namespace - cancelWatches context.CancelFunc - cluster *clusterv1.Cluster + specName = "md-upgrades" + input MachineDeploymentUpgradesSpecInput + namespace *corev1.Namespace + cancelWatches context.CancelFunc + clusterResources *clusterctl.ApplyClusterTemplateAndWaitResult ) BeforeEach(func() { @@ -71,9 +71,7 @@ func MachineDeploymentUpgradesSpec(ctx context.Context, inputGetter func() Machi It("Should successfully upgrade Machines upon changes in relevant MachineDeployment fields", func() { By("Creating a workload cluster") - - var mds []*clusterv1.MachineDeployment - cluster, _, mds = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ + clusterResources = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ ClusterProxy: input.BootstrapClusterProxy, ConfigCluster: clusterctl.ConfigClusterInput{ LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()), @@ -95,24 +93,24 @@ func MachineDeploymentUpgradesSpec(ctx context.Context, inputGetter func() Machi By("Upgrading MachineDeployment's Kubernetes version to a valid version") framework.UpgradeMachineDeploymentsAndWait(context.TODO(), framework.UpgradeMachineDeploymentsAndWaitInput{ ClusterProxy: input.BootstrapClusterProxy, - Cluster: cluster, + Cluster: clusterResources.Cluster, UpgradeVersion: input.E2EConfig.GetVariable(KubernetesVersion), WaitForMachinesToBeUpgraded: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"), - MachineDeployments: mds, + MachineDeployments: clusterResources.MachineDeployments, }) By("Upgrading MachineDeployment Infrastructure ref and wait for rolling upgrade") framework.UpgradeMachineDeploymentInfrastructureRefAndWait(context.TODO(), framework.UpgradeMachineDeploymentInfrastructureRefAndWaitInput{ ClusterProxy: input.BootstrapClusterProxy, - Cluster: cluster, + Cluster: 
clusterResources.Cluster, WaitForMachinesToBeUpgraded: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"), - MachineDeployments: mds, + MachineDeployments: clusterResources.MachineDeployments, }) By("PASSED!") }) AfterEach(func() { // Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself. - dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, cluster, input.E2EConfig.GetIntervals, input.SkipCleanup) + dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup) }) } diff --git a/test/e2e/mhc_remediations.go b/test/e2e/mhc_remediations.go index 79caa529be59..f7792a901430 100644 --- a/test/e2e/mhc_remediations.go +++ b/test/e2e/mhc_remediations.go @@ -27,7 +27,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/utils/pointer" - clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" + "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" "sigs.k8s.io/cluster-api/util" @@ -45,11 +45,11 @@ type MachineRemediationSpecInput struct { // MachineRemediationSpec implements a test that verifies that Machines are remediated by MHC during unhealthy conditions. func MachineRemediationSpec(ctx context.Context, inputGetter func() MachineRemediationSpecInput) { var ( - specName = "mhc-remediation" - input MachineRemediationSpecInput - namespace *corev1.Namespace - cancelWatches context.CancelFunc - cluster *clusterv1.Cluster + specName = "mhc-remediation" + input MachineRemediationSpecInput + namespace *corev1.Namespace + cancelWatches context.CancelFunc + clusterResources *clusterctl.ApplyClusterTemplateAndWaitResult ) BeforeEach(func() { @@ -69,7 +69,7 @@ func MachineRemediationSpec(ctx context.Context, inputGetter func() MachineRemed By("Creating a workload cluster") - cluster, _, _ = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ + clusterResources = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ ClusterProxy: input.BootstrapClusterProxy, ConfigCluster: clusterctl.ConfigClusterInput{ LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()), @@ -91,7 +91,7 @@ func MachineRemediationSpec(ctx context.Context, inputGetter func() MachineRemed By("Waiting for MachineHealthCheck remediation") framework.DiscoverMachineHealthChecksAndWaitForRemediation(ctx, framework.DiscoverMachineHealthCheckAndWaitForRemediationInput{ ClusterProxy: input.BootstrapClusterProxy, - Cluster: cluster, + Cluster: clusterResources.Cluster, WaitForMachineRemediation: input.E2EConfig.GetIntervals(specName, "wait-machine-remediation"), }) @@ -100,6 +100,6 @@ func MachineRemediationSpec(ctx context.Context, inputGetter func() MachineRemed AfterEach(func() { // Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself. 
- dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, cluster, input.E2EConfig.GetIntervals, input.SkipCleanup) + dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup) }) } diff --git a/test/e2e/quick_start.go b/test/e2e/quick_start.go index 5faea5972c6c..00bea159179d 100644 --- a/test/e2e/quick_start.go +++ b/test/e2e/quick_start.go @@ -27,7 +27,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/utils/pointer" - clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" + "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" "sigs.k8s.io/cluster-api/util" @@ -47,11 +47,11 @@ type QuickStartSpecInput struct { // This test is meant to provide a first, fast signal to detect regression; it is recommended to use it as a PR blocker test. func QuickStartSpec(ctx context.Context, inputGetter func() QuickStartSpecInput) { var ( - specName = "quick-start" - input QuickStartSpecInput - namespace *corev1.Namespace - cancelWatches context.CancelFunc - cluster *clusterv1.Cluster + specName = "quick-start" + input QuickStartSpecInput + namespace *corev1.Namespace + cancelWatches context.CancelFunc + clusterResources *clusterctl.ApplyClusterTemplateAndWaitResult ) BeforeEach(func() { @@ -72,7 +72,7 @@ func QuickStartSpec(ctx context.Context, inputGetter func() QuickStartSpecInput) By("Creating a workload cluster") - cluster, _, _ = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ + clusterResources = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ ClusterProxy: input.BootstrapClusterProxy, ConfigCluster: clusterctl.ConfigClusterInput{ LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()), @@ -96,6 +96,6 @@ func QuickStartSpec(ctx context.Context, inputGetter func() QuickStartSpecInput) AfterEach(func() { // Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself. - dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, cluster, input.E2EConfig.GetIntervals, input.SkipCleanup) + dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup) }) } diff --git a/test/e2e/self_hosted.go b/test/e2e/self_hosted.go index 0909f0c67cdd..6708b7c09c97 100644 --- a/test/e2e/self_hosted.go +++ b/test/e2e/self_hosted.go @@ -48,11 +48,11 @@ type SelfHostedSpecInput struct { // SelfHostedSpec implements a test that verifies Cluster API creating a cluster, pivoting to a self-hosted cluster. 
func SelfHostedSpec(ctx context.Context, inputGetter func() SelfHostedSpecInput) { var ( - specName = "self-hosted" - input SelfHostedSpecInput - namespace *corev1.Namespace - cancelWatches context.CancelFunc - cluster *clusterv1.Cluster + specName = "self-hosted" + input SelfHostedSpecInput + namespace *corev1.Namespace + cancelWatches context.CancelFunc + clusterResources *clusterctl.ApplyClusterTemplateAndWaitResult selfHostedClusterProxy framework.ClusterProxy selfHostedNamespace *corev1.Namespace @@ -77,7 +77,7 @@ func SelfHostedSpec(ctx context.Context, inputGetter func() SelfHostedSpecInput) By("Creating a workload cluster") - cluster, _, _ = clusterctl.ApplyClusterTemplateAndWait(context.TODO(), clusterctl.ApplyClusterTemplateAndWaitInput{ + clusterResources = clusterctl.ApplyClusterTemplateAndWait(context.TODO(), clusterctl.ApplyClusterTemplateAndWaitInput{ ClusterProxy: input.BootstrapClusterProxy, ConfigCluster: clusterctl.ConfigClusterInput{ LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()), @@ -101,6 +101,7 @@ func SelfHostedSpec(ctx context.Context, inputGetter func() SelfHostedSpecInput) // In case of the cluster id a DockerCluster, we should load controller images into the nodes. // Nb. this can be achieved also by changing the DockerMachine spec, but for the time being we are using // this approach because this allows to have a single source of truth for images, the e2e config + cluster := clusterResources.Cluster if cluster.Spec.InfrastructureRef.Kind == "DockerCluster" { bootstrap.LoadImagesToKindCluster(context.TODO(), bootstrap.LoadImagesToKindClusterInput{ Name: cluster.Name, @@ -172,7 +173,7 @@ func SelfHostedSpec(ctx context.Context, inputGetter func() SelfHostedSpecInput) framework.DumpAllResources(ctx, framework.DumpAllResourcesInput{ Lister: selfHostedClusterProxy.GetClient(), Namespace: namespace.Name, - LogPath: filepath.Join(input.ArtifactFolder, "clusters", cluster.Name, "resources"), + LogPath: filepath.Join(input.ArtifactFolder, "clusters", clusterResources.Cluster.Name, "resources"), }) } if selfHostedCluster != nil { @@ -191,7 +192,7 @@ func SelfHostedSpec(ctx context.Context, inputGetter func() SelfHostedSpecInput) By("Moving the cluster back to bootstrap") clusterctl.Move(ctx, clusterctl.MoveInput{ - LogFolder: filepath.Join(input.ArtifactFolder, "clusters", cluster.Name), + LogFolder: filepath.Join(input.ArtifactFolder, "clusters", clusterResources.Cluster.Name), ClusterctlConfigPath: input.ClusterctlConfigPath, FromKubeconfigPath: selfHostedClusterProxy.GetKubeconfigPath(), ToKubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(), @@ -199,10 +200,10 @@ func SelfHostedSpec(ctx context.Context, inputGetter func() SelfHostedSpecInput) }) log.Logf("Waiting for the cluster to be reconciled after moving back to booststrap") - cluster = framework.DiscoveryAndWaitForCluster(ctx, framework.DiscoveryAndWaitForClusterInput{ + clusterResources.Cluster = framework.DiscoveryAndWaitForCluster(ctx, framework.DiscoveryAndWaitForClusterInput{ Getter: input.BootstrapClusterProxy.GetClient(), Namespace: namespace.Name, - Name: cluster.Name, + Name: clusterResources.Cluster.Name, }, input.E2EConfig.GetIntervals(specName, "wait-cluster")...) } if selfHostedCancelWatches != nil { @@ -210,6 +211,6 @@ func SelfHostedSpec(ctx context.Context, inputGetter func() SelfHostedSpecInput) } // Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself. 
-	dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
+	dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
 	})
 }

diff --git a/test/framework/clusterctl/clusterctl_helpers.go b/test/framework/clusterctl/clusterctl_helpers.go
index c33cbc9912f3..ae60ccda002c 100644
--- a/test/framework/clusterctl/clusterctl_helpers.go
+++ b/test/framework/clusterctl/clusterctl_helpers.go
@@ -27,6 +27,7 @@ import (
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
 	"sigs.k8s.io/cluster-api/cmd/clusterctl/client/config"
 	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha3"
+	clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1alpha3"
 	"sigs.k8s.io/cluster-api/test/framework"
 	"sigs.k8s.io/cluster-api/test/framework/internal/log"
 )
@@ -109,11 +110,19 @@ type ApplyClusterTemplateAndWaitInput struct {
 	WaitForClusterIntervals      []interface{}
 	WaitForControlPlaneIntervals []interface{}
 	WaitForMachineDeployments    []interface{}
+	WaitForMachinePools          []interface{}
+}
+
+type ApplyClusterTemplateAndWaitResult struct {
+	Cluster            *clusterv1.Cluster
+	ControlPlane       *controlplanev1.KubeadmControlPlane
+	MachineDeployments []*clusterv1.MachineDeployment
+	MachinePools       []*clusterv1exp.MachinePool
 }
 
 // ApplyClusterTemplateAndWait gets a cluster template using clusterctl, and waits for the cluster to be ready.
 // Important! this method assumes the cluster uses a KubeadmControlPlane and MachineDeployments.
-func ApplyClusterTemplateAndWait(ctx context.Context, input ApplyClusterTemplateAndWaitInput) (*clusterv1.Cluster, *controlplanev1.KubeadmControlPlane, []*clusterv1.MachineDeployment) {
+func ApplyClusterTemplateAndWait(ctx context.Context, input ApplyClusterTemplateAndWaitInput) *ApplyClusterTemplateAndWaitResult {
 	Expect(ctx).NotTo(BeNil(), "ctx is required for ApplyClusterTemplateAndWait")
 	Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling ApplyClusterTemplateAndWait")
 
@@ -174,11 +183,23 @@
 		ControlPlane: controlPlane,
 	}, input.WaitForControlPlaneIntervals...)
 
-	log.Logf("Waiting for the worker machines to be provisioned")
+	log.Logf("Waiting for the machine deployments to be provisioned")
 	machineDeployments := framework.DiscoveryAndWaitForMachineDeployments(ctx, framework.DiscoveryAndWaitForMachineDeploymentsInput{
 		Lister:  input.ClusterProxy.GetClient(),
 		Cluster: cluster,
 	}, input.WaitForMachineDeployments...)
 
-	return cluster, controlPlane, machineDeployments
+	log.Logf("Waiting for the machine pools to be provisioned")
+	machinePools := framework.DiscoveryAndWaitForMachinePools(ctx, framework.DiscoveryAndWaitForMachinePoolsInput{
+		Getter:  input.ClusterProxy.GetClient(),
+		Lister:  input.ClusterProxy.GetClient(),
+		Cluster: cluster,
+	}, input.WaitForMachinePools...)
+
+	return &ApplyClusterTemplateAndWaitResult{
+		Cluster:            cluster,
+		ControlPlane:       controlPlane,
+		MachineDeployments: machineDeployments,
+		MachinePools:       machinePools,
+	}
 }
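The return-type change above ripples through every caller of ApplyClusterTemplateAndWait; a
minimal before/after sketch of the call-site migration (variable names as used in the specs
earlier in this patch):

	// Before: positional results.
	cluster, controlPlane, machineDeployments := clusterctl.ApplyClusterTemplateAndWait(ctx, input)

	// After: a single result struct, which also surfaces the new MachinePools field.
	clusterResources := clusterctl.ApplyClusterTemplateAndWait(ctx, input)
	cluster := clusterResources.Cluster
	controlPlane := clusterResources.ControlPlane
	machineDeployments := clusterResources.MachineDeployments
	machinePools := clusterResources.MachinePools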
diff --git a/test/framework/clusterctl/e2e_config.go b/test/framework/clusterctl/e2e_config.go
index 6efd49a56c19..f8273ac326c0 100644
--- a/test/framework/clusterctl/e2e_config.go
+++ b/test/framework/clusterctl/e2e_config.go
@@ -390,7 +390,7 @@ func (c *E2EConfig) GetVariable(varName string) string {
 	return version
 }
 
-// GetVariable returns an Int64Ptr variable from the e2e config file.
+// GetInt64PtrVariable returns an Int64Ptr variable from the e2e config file.
 func (c *E2EConfig) GetInt64PtrVariable(varName string) *int64 {
 	wCountStr := c.GetVariable(varName)
 	if wCountStr == "" {
@@ -401,3 +401,15 @@
 	Expect(err).NotTo(HaveOccurred())
 	return pointer.Int64Ptr(wCount)
 }
+
+// GetInt32PtrVariable returns an Int32Ptr variable from the e2e config file.
+func (c *E2EConfig) GetInt32PtrVariable(varName string) *int32 {
+	wCountStr := c.GetVariable(varName)
+	if wCountStr == "" {
+		return nil
+	}
+
+	wCount, err := strconv.ParseUint(wCountStr, 10, 32)
+	Expect(err).NotTo(HaveOccurred())
+	return pointer.Int32Ptr(int32(wCount))
+}

diff --git a/test/framework/machinedeployment_helpers.go b/test/framework/machinedeployment_helpers.go
index a2154b09eeab..8b63774061ac 100644
--- a/test/framework/machinedeployment_helpers.go
+++ b/test/framework/machinedeployment_helpers.go
@@ -55,7 +55,7 @@ func CreateMachineDeployment(ctx context.Context, input CreateMachineDeploymentI
 	Expect(input.Creator.Create(ctx, input.InfraMachineTemplate)).To(Succeed())
 }
 
-// GetMachineDeploymentsByClusterInput is the input for GetMachineDeploymentsByCluster.
+// GetMachineDeploymentsByClusterInput is the input for GetMachineDeploymentsByCluster
 type GetMachineDeploymentsByClusterInput struct {
 	Lister      Lister
 	ClusterName string

diff --git a/test/framework/machinepool_helpers.go b/test/framework/machinepool_helpers.go
new file mode 100644
index 000000000000..efb7f754fb35
--- /dev/null
+++ b/test/framework/machinepool_helpers.go
@@ -0,0 +1,268 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package framework
+
+import (
+	"context"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	"github.com/pkg/errors"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
+	clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1alpha3"
+	"sigs.k8s.io/cluster-api/test/framework/internal/log"
+	"sigs.k8s.io/cluster-api/util/patch"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// GetMachinePoolsByClusterInput is the input for GetMachinePoolsByCluster.
+type GetMachinePoolsByClusterInput struct {
+	Lister      Lister
+	ClusterName string
+	Namespace   string
+}
+
+// GetMachinePoolsByCluster returns the MachinePool objects for a cluster.
+// Important! This method relies on labels that are created by the CAPI controllers during the first reconciliation, so
+// it is necessary to ensure this has already happened before calling it.
+func GetMachinePoolsByCluster(ctx context.Context, input GetMachinePoolsByClusterInput) []*clusterv1exp.MachinePool {
+	Expect(ctx).NotTo(BeNil(), "ctx is required for GetMachinePoolsByCluster")
+	Expect(input.Lister).ToNot(BeNil(), "Invalid argument. input.Lister can't be nil when calling GetMachinePoolsByCluster")
+	Expect(input.Namespace).ToNot(BeEmpty(), "Invalid argument. input.Namespace can't be empty when calling GetMachinePoolsByCluster")
+	Expect(input.ClusterName).ToNot(BeEmpty(), "Invalid argument. input.ClusterName can't be empty when calling GetMachinePoolsByCluster")
+
+	mpList := &clusterv1exp.MachinePoolList{}
+	Expect(input.Lister.List(ctx, mpList, byClusterOptions(input.ClusterName, input.Namespace)...)).To(Succeed(), "Failed to list MachinePool objects for Cluster %s/%s", input.Namespace, input.ClusterName)
+
+	mps := make([]*clusterv1exp.MachinePool, len(mpList.Items))
+	for i := range mpList.Items {
+		mps[i] = &mpList.Items[i]
+	}
+	return mps
+}
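+
+// Note: byClusterOptions is an unexported helper defined elsewhere in this package
+// (it is the same filter used by GetMachineDeploymentsByCluster); it presumably
+// expands to list options roughly like:
+//
+//	[]client.ListOption{
+//		client.InNamespace(namespace),
+//		client.MatchingLabels{clusterv1.ClusterLabelName: clusterName},
+//	}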
+
+// WaitForMachinePoolNodesToExistInput is the input for WaitForMachinePoolNodesToExist.
+type WaitForMachinePoolNodesToExistInput struct {
+	Getter      Getter
+	MachinePool *clusterv1exp.MachinePool
+}
+
+// WaitForMachinePoolNodesToExist waits until all nodes associated with a machine pool exist.
+func WaitForMachinePoolNodesToExist(ctx context.Context, input WaitForMachinePoolNodesToExistInput, intervals ...interface{}) {
+	Expect(ctx).NotTo(BeNil(), "ctx is required for WaitForMachinePoolNodesToExist")
+	Expect(input.Getter).ToNot(BeNil(), "Invalid argument. input.Getter can't be nil when calling WaitForMachinePoolNodesToExist")
+	Expect(input.MachinePool).ToNot(BeNil(), "Invalid argument. input.MachinePool can't be nil when calling WaitForMachinePoolNodesToExist")
+
+	By("waiting for the machine pool workload nodes to exist")
+	Eventually(func() (int, error) {
+		nn := client.ObjectKey{
+			Namespace: input.MachinePool.Namespace,
+			Name:      input.MachinePool.Name,
+		}
+
+		if err := input.Getter.Get(ctx, nn, input.MachinePool); err != nil {
+			return 0, err
+		}
+
+		return int(input.MachinePool.Status.ReadyReplicas), nil
+	}, intervals...).Should(Equal(int(*input.MachinePool.Spec.Replicas)))
+}
+
+// DiscoveryAndWaitForMachinePoolsInput is the input type for DiscoveryAndWaitForMachinePools.
+type DiscoveryAndWaitForMachinePoolsInput struct {
+	Getter  Getter
+	Lister  Lister
+	Cluster *clusterv1.Cluster
+}
+
+// DiscoveryAndWaitForMachinePools discovers the MachinePools existing in a cluster and waits for them to be ready (all the machines provisioned).
+func DiscoveryAndWaitForMachinePools(ctx context.Context, input DiscoveryAndWaitForMachinePoolsInput, intervals ...interface{}) []*clusterv1exp.MachinePool {
+	Expect(ctx).NotTo(BeNil(), "ctx is required for DiscoveryAndWaitForMachinePools")
+	Expect(input.Lister).ToNot(BeNil(), "Invalid argument. input.Lister can't be nil when calling DiscoveryAndWaitForMachinePools")
+	Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling DiscoveryAndWaitForMachinePools")
+
+	machinePools := GetMachinePoolsByCluster(ctx, GetMachinePoolsByClusterInput{
+		Lister:      input.Lister,
+		ClusterName: input.Cluster.Name,
+		Namespace:   input.Cluster.Namespace,
+	})
+	for _, machinepool := range machinePools {
+		WaitForMachinePoolNodesToExist(ctx, WaitForMachinePoolNodesToExistInput{
+			Getter:      input.Getter,
+			MachinePool: machinepool,
+		}, intervals...)
+	}
+	return machinePools
+}
+
+// UpgradeMachinePoolAndWaitInput is the input type for UpgradeMachinePoolAndWait.
+type UpgradeMachinePoolAndWaitInput struct {
+	ClusterProxy                   ClusterProxy
+	Cluster                        *clusterv1.Cluster
+	UpgradeVersion                 string
+	MachinePools                   []*clusterv1exp.MachinePool
+	WaitForMachinePoolToBeUpgraded []interface{}
+}
+
+// UpgradeMachinePoolAndWait upgrades a machine pool and waits for its instances to be upgraded.
+func UpgradeMachinePoolAndWait(ctx context.Context, input UpgradeMachinePoolAndWaitInput) {
+	Expect(ctx).NotTo(BeNil(), "ctx is required for UpgradeMachinePoolAndWait")
+	Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling UpgradeMachinePoolAndWait")
+	Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling UpgradeMachinePoolAndWait")
+	Expect(input.UpgradeVersion).ToNot(BeNil(), "Invalid argument. input.UpgradeVersion can't be nil when calling UpgradeMachinePoolAndWait")
+	Expect(input.MachinePools).ToNot(BeEmpty(), "Invalid argument. input.MachinePools can't be empty when calling UpgradeMachinePoolAndWait")
+
+	mgmtClient := input.ClusterProxy.GetClient()
+	for _, mp := range input.MachinePools {
+		log.Logf("Patching the new kubernetes version to MachinePool %s/%s", mp.Namespace, mp.Name)
+		patchHelper, err := patch.NewHelper(mp, mgmtClient)
+		Expect(err).ToNot(HaveOccurred())
+
+		mp.Spec.Template.Spec.Version = &input.UpgradeVersion
+		Expect(patchHelper.Patch(ctx, mp)).To(Succeed())
+	}
+
+	for _, mp := range input.MachinePools {
+		// Note: the new version has already been patched onto the MachinePool above.
+		log.Logf("Waiting for Kubernetes versions of machines in MachinePool %s/%s to be upgraded to %s",
+			mp.Namespace, mp.Name, input.UpgradeVersion)
+		WaitForMachinePoolInstancesToBeUpgraded(ctx, WaitForMachinePoolInstancesToBeUpgradedInput{
+			Getter:                   mgmtClient,
+			Cluster:                  input.Cluster,
+			MachineCount:             int(*mp.Spec.Replicas),
+			KubernetesUpgradeVersion: input.UpgradeVersion,
+			MachinePool:              *mp,
+		}, input.WaitForMachinePoolToBeUpgraded...)
+	}
+}
+
+// ScaleMachinePoolAndWaitInput is the input type for ScaleMachinePoolAndWait.
+type ScaleMachinePoolAndWaitInput struct {
+	ClusterProxy              ClusterProxy
+	Cluster                   *clusterv1.Cluster
+	Replicas                  int32
+	MachinePools              []*clusterv1exp.MachinePool
+	WaitForMachinePoolToScale []interface{}
+}
+
+// ScaleMachinePoolAndWait scales a machine pool and waits for its instances to scale up or down.
+func ScaleMachinePoolAndWait(ctx context.Context, input ScaleMachinePoolAndWaitInput) {
+	Expect(ctx).NotTo(BeNil(), "ctx is required for ScaleMachinePoolAndWait")
+	Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling ScaleMachinePoolAndWait")
+	Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling ScaleMachinePoolAndWait")
+	Expect(input.MachinePools).ToNot(BeEmpty(), "Invalid argument. input.MachinePools can't be empty when calling ScaleMachinePoolAndWait")
+
+	mgmtClient := input.ClusterProxy.GetClient()
+	for _, mp := range input.MachinePools {
+		log.Logf("Patching the new replica count to MachinePool %s/%s", mp.Namespace, mp.Name)
+		patchHelper, err := patch.NewHelper(mp, mgmtClient)
+		Expect(err).ToNot(HaveOccurred())
+
+		mp.Spec.Replicas = &input.Replicas
+		Expect(patchHelper.Patch(ctx, mp)).To(Succeed())
+	}
+
+	for _, mp := range input.MachinePools {
+		WaitForMachinePoolNodesToExist(ctx, WaitForMachinePoolNodesToExistInput{
+			Getter:      mgmtClient,
+			MachinePool: mp,
+		}, input.WaitForMachinePoolToScale...)
+	}
+}
+
+// WaitForMachinePoolInstancesToBeUpgradedInput is the input for WaitForMachinePoolInstancesToBeUpgraded.
+type WaitForMachinePoolInstancesToBeUpgradedInput struct {
+	Getter                   Getter
+	Cluster                  *clusterv1.Cluster
+	KubernetesUpgradeVersion string
+	MachineCount             int
+	MachinePool              clusterv1exp.MachinePool
+}
+
+// WaitForMachinePoolInstancesToBeUpgraded waits until all instances belonging to a MachinePool are upgraded to the correct kubernetes version.
+func WaitForMachinePoolInstancesToBeUpgraded(ctx context.Context, input WaitForMachinePoolInstancesToBeUpgradedInput, intervals ...interface{}) {
+	Expect(ctx).NotTo(BeNil(), "ctx is required for WaitForMachinePoolInstancesToBeUpgraded")
+	Expect(input.Getter).ToNot(BeNil(), "Invalid argument. input.Getter can't be nil when calling WaitForMachinePoolInstancesToBeUpgraded")
+	Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling WaitForMachinePoolInstancesToBeUpgraded")
+	Expect(input.KubernetesUpgradeVersion).ToNot(BeNil(), "Invalid argument. input.KubernetesUpgradeVersion can't be nil when calling WaitForMachinePoolInstancesToBeUpgraded")
+	Expect(input.MachinePool).ToNot(BeNil(), "Invalid argument. input.MachinePool can't be nil when calling WaitForMachinePoolInstancesToBeUpgraded")
+	Expect(input.MachineCount).To(BeNumerically(">", 0), "Invalid argument. input.MachineCount can't be smaller than 1 when calling WaitForMachinePoolInstancesToBeUpgraded")
+
+	log.Logf("Ensuring all MachinePool instances have the upgraded kubernetes version %s", input.KubernetesUpgradeVersion)
+	Eventually(func() (int, error) {
+		versions := GetMachinePoolInstanceVersions(ctx, GetMachinesPoolInstancesInput{
+			Getter:      input.Getter,
+			Namespace:   input.Cluster.Namespace,
+			MachinePool: input.MachinePool,
+		})
+
+		matches := 0
+		for _, version := range versions {
+			if version == input.KubernetesUpgradeVersion {
+				matches++
+			}
+		}
+
+		if matches != len(versions) {
+			return 0, errors.New("old version instances remain")
+		}
+
+		return matches, nil
+	}, intervals...).Should(Equal(input.MachineCount))
+}
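+
+// Note: GetMachinePoolInstanceVersions below reads the infrastructure machine pool
+// object as unstructured data and assumes its status publishes per-instance versions,
+// matching the shape of the DockerMachinePool CRD added later in this patch:
+//
+//	status:
+//	  instances:
+//	  - version: v1.18.2
+//	    ...
+//
+// An infrastructure provider that does not expose status.instances[].version will not
+// pass this upgrade wait.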
+
+// GetMachinesPoolInstancesInput is the input for GetMachinesPoolInstances.
+type GetMachinesPoolInstancesInput struct {
+	Getter      Getter
+	Namespace   string
+	MachinePool clusterv1exp.MachinePool
+}
+
+// GetMachinePoolInstanceVersions returns the Kubernetes versions of the machine pool instances.
+func GetMachinePoolInstanceVersions(ctx context.Context, input GetMachinesPoolInstancesInput) []string {
+	Expect(ctx).NotTo(BeNil(), "ctx is required for GetMachinePoolInstanceVersions")
+	Expect(input.Namespace).ToNot(BeEmpty(), "Invalid argument. input.Namespace can't be empty when calling GetMachinePoolInstanceVersions")
+	Expect(input.MachinePool).ToNot(BeNil(), "Invalid argument. input.MachinePool can't be nil when calling GetMachinePoolInstanceVersions")
+
+	obj := getUnstructuredRef(ctx, input.Getter, &input.MachinePool.Spec.Template.Spec.InfrastructureRef, input.Namespace)
+	instances, found, err := unstructured.NestedSlice(obj.Object, "status", "instances")
+	Expect(err).ToNot(HaveOccurred(), "failed to extract machines from unstructured")
+	if !found {
+		return nil
+	}
+
+	versions := make([]string, len(instances))
+	for i, instance := range instances {
+		version, found, err := unstructured.NestedString(instance.(map[string]interface{}), "version")
+		Expect(err).ToNot(HaveOccurred(), "failed to extract versions from unstructured instance")
+		Expect(found).To(BeTrue(), "unable to find nested version string in unstructured instance")
+		versions[i] = version
+	}
+
+	return versions
+}
+
+func getUnstructuredRef(ctx context.Context, getter Getter, ref *corev1.ObjectReference, namespace string) *unstructured.Unstructured {
+	obj := new(unstructured.Unstructured)
+	obj.SetAPIVersion(ref.APIVersion)
+	obj.SetKind(ref.Kind)
+	obj.SetName(ref.Name)
+	key := client.ObjectKey{Name: obj.GetName(), Namespace: namespace}
+	Expect(getter.Get(ctx, key, obj)).ToNot(HaveOccurred(), "failed to retrieve %s object %q/%q", obj.GetKind(), key.Namespace, key.Name)
+	return obj
+}

diff --git a/test/infrastructure/docker/Makefile b/test/infrastructure/docker/Makefile
index 0d75fec84584..88b0a7606243 100644
--- a/test/infrastructure/docker/Makefile
+++ b/test/infrastructure/docker/Makefile
@@ -36,6 +36,7 @@ export DOCKER_CLI_EXPERIMENTAL := enabled
 TOOLS_DIR := hack/tools
 TOOLS_BIN_DIR := $(TOOLS_DIR)/bin
 BIN_DIR := bin
+EXP_DIR := exp
 
 # Binaries.
 CONTROLLER_GEN := $(TOOLS_BIN_DIR)/controller-gen
@@ -108,7 +109,8 @@ generate: $(CONTROLLER_GEN) ## Generate code
 generate-go: $(CONTROLLER_GEN) $(CONVERSION_GEN) ## Runs Go related generate targets
 	$(CONTROLLER_GEN) \
 		object:headerFile=$(ROOT)/hack/boilerplate/boilerplate.generatego.txt \
-		paths=./api/...
+		paths=./api/... \
+		paths=./$(EXP_DIR)/api/...
 	$(CONVERSION_GEN) \
 		--input-dirs=./api/v1alpha3 \
 		--output-file-base=zz_generated.conversion \
@@ -118,6 +120,8 @@ generate-go: $(CONTROLLER_GEN) $(CONVERSION_GEN) ## Runs Go related generate tar
 generate-manifests: $(CONTROLLER_GEN) ## Generate manifests e.g. CRD, RBAC etc.
 	$(CONTROLLER_GEN) \
 		paths=./api/... \
+		paths=./$(EXP_DIR)/api/... \
+		paths=./$(EXP_DIR)/controllers/... \
 		paths=./controllers/...
\ crd:crdVersions=v1 \ rbac:roleName=manager-role \ diff --git a/test/infrastructure/docker/config/crd/bases/exp.infrastructure.cluster.x-k8s.io_dockermachinepools.yaml b/test/infrastructure/docker/config/crd/bases/exp.infrastructure.cluster.x-k8s.io_dockermachinepools.yaml new file mode 100644 index 000000000000..5cac0723db8c --- /dev/null +++ b/test/infrastructure/docker/config/crd/bases/exp.infrastructure.cluster.x-k8s.io_dockermachinepools.yaml @@ -0,0 +1,203 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.2.9 + creationTimestamp: null + name: dockermachinepools.exp.infrastructure.cluster.x-k8s.io +spec: + group: exp.infrastructure.cluster.x-k8s.io + names: + categories: + - cluster-api + kind: DockerMachinePool + listKind: DockerMachinePoolList + plural: dockermachinepools + singular: dockermachinepool + scope: Namespaced + versions: + - name: v1alpha3 + schema: + openAPIV3Schema: + description: DockerMachinePool is the Schema for the dockermachinepools API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: DockerMachinePoolSpec defines the desired state of DockerMachinePool + properties: + providerID: + description: ProviderID is the identification ID of the Machine Pool + type: string + providerIDList: + description: ProviderIDList is the list of identification IDs of machine + instances managed by this Machine Pool + items: + type: string + type: array + template: + description: Template contains the details used to build a replica + machine within the Machine Pool + properties: + customImage: + description: CustomImage allows customizing the container image + that is used for running the machine + type: string + extraMounts: + description: ExtraMounts describes additional mount points for + the node container These may be used to bind a hostPath + items: + description: Mount specifies a host volume to mount into a container. + This is a simplified version of kind v1alpha4.Mount types + properties: + containerPath: + description: Path of the mount within the container. + type: string + hostPath: + description: Path of the mount on the host. If the hostPath + doesn't exist, then runtimes should report error. If the + hostpath is a symbolic link, runtimes should follow the + symlink and mount the real destination to container. + type: string + readOnly: + description: If set, the mount is read-only. + type: boolean + type: object + type: array + preLoadImages: + description: PreLoadImages allows to pre-load images in a newly + created machine. This can be used to speed up tests by avoiding + e.g. to download CNI images on all the containers. 
+ items: + type: string + type: array + type: object + type: object + status: + description: DockerMachinePoolStatus defines the observed state of DockerMachinePool + properties: + conditions: + description: Conditions defines current service state of the DockerMachinePool. + items: + description: Condition defines an observation of a Cluster API resource + operational state. + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. This should be when the underlying condition changed. + If that is not known, then using the time when the API field + changed is acceptable. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. This field may be empty. + type: string + reason: + description: The reason for the condition's last transition + in CamelCase. The specific API may choose whether or not this + field is considered a guaranteed API. This field may not be + empty. + type: string + severity: + description: Severity provides an explicit classification of + Reason code, so the users or machines can immediately understand + the current situation and act accordingly. The Severity field + MUST be set only when Status=False. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. + type: string + required: + - status + - type + type: object + type: array + instances: + description: Instances contains the status for each instance in the + pool + items: + properties: + addresses: + description: Addresses contains the associated addresses for + the docker machine. + items: + description: MachineAddress contains information for the node's + address. + properties: + address: + description: The machine address. + type: string + type: + description: Machine address type, one of Hostname, ExternalIP + or InternalIP. + type: string + required: + - address + - type + type: object + type: array + bootstrapped: + description: Bootstrapped is true when the kubeadm bootstrapping + has been run against this machine + type: boolean + instanceName: + description: InstanceName is the identification of the Machine + Instance within the Machine Pool + type: string + providerID: + description: ProviderID is the provider identification of the + Machine Pool Instance + type: string + ready: + description: Ready denotes that the machine (docker container) + is ready + type: boolean + version: + description: Version defines the Kubernetes version for the + Machine Instance + type: string + type: object + type: array + observedGeneration: + description: The generation observed by the deployment controller. + format: int64 + type: integer + ready: + description: Ready denotes that the machine pool is ready + type: boolean + replicas: + description: Replicas is the most recently observed number of replicas. 
+ format: int32 + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/test/infrastructure/docker/config/crd/kustomization.yaml b/test/infrastructure/docker/config/crd/kustomization.yaml index 257fa4b536de..8323fd3162e5 100644 --- a/test/infrastructure/docker/config/crd/kustomization.yaml +++ b/test/infrastructure/docker/config/crd/kustomization.yaml @@ -10,6 +10,7 @@ resources: - bases/infrastructure.cluster.x-k8s.io_dockermachines.yaml - bases/infrastructure.cluster.x-k8s.io_dockerclusters.yaml - bases/infrastructure.cluster.x-k8s.io_dockermachinetemplates.yaml +- bases/exp.infrastructure.cluster.x-k8s.io_dockermachinepools.yaml # +kubebuilder:scaffold:crdkustomizeresource patchesStrategicMerge: [] diff --git a/test/infrastructure/docker/config/crd/patches/cainjection_in_dockermachinepools.yaml b/test/infrastructure/docker/config/crd/patches/cainjection_in_dockermachinepools.yaml new file mode 100644 index 000000000000..14bbeb5ca607 --- /dev/null +++ b/test/infrastructure/docker/config/crd/patches/cainjection_in_dockermachinepools.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: dockermachinepools.exp.infrastructure.cluster.x-k8s.io diff --git a/test/infrastructure/docker/config/crd/patches/webhook_in_dockermachinepools.yaml b/test/infrastructure/docker/config/crd/patches/webhook_in_dockermachinepools.yaml new file mode 100644 index 000000000000..6f25a71cd84f --- /dev/null +++ b/test/infrastructure/docker/config/crd/patches/webhook_in_dockermachinepools.yaml @@ -0,0 +1,19 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. 
+apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: dockermachinepools.exp.infrastructure.cluster.x-k8s.io +spec: + conversion: + strategy: Webhook + webhook: + conversionReviewVersions: ["v1", "v1beta1"] + clientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert \ No newline at end of file diff --git a/test/infrastructure/docker/config/manager/manager_auth_proxy_patch.yaml b/test/infrastructure/docker/config/manager/manager_auth_proxy_patch.yaml index 6c8f9cebdf34..42d3f1771a34 100644 --- a/test/infrastructure/docker/config/manager/manager_auth_proxy_patch.yaml +++ b/test/infrastructure/docker/config/manager/manager_auth_proxy_patch.yaml @@ -21,5 +21,6 @@ spec: name: https - name: manager args: + - "--feature-gates=MachinePool=${EXP_MACHINE_POOL:=false}" - "--metrics-addr=0" - "-v=4" diff --git a/test/infrastructure/docker/config/rbac/role.yaml b/test/infrastructure/docker/config/rbac/role.yaml index 18b12e9d8e05..c9f498b8dd26 100644 --- a/test/infrastructure/docker/config/rbac/role.yaml +++ b/test/infrastructure/docker/config/rbac/role.yaml @@ -23,6 +23,26 @@ rules: - get - list - watch +- apiGroups: + - exp.cluster.x-k8s.io + resources: + - '*' + verbs: + - get + - list + - watch +- apiGroups: + - exp.infrastructure.cluster.x-k8s.io + resources: + - '*' + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - infrastructure.cluster.x-k8s.io resources: diff --git a/test/infrastructure/docker/controllers/dockermachine_controller.go b/test/infrastructure/docker/controllers/dockermachine_controller.go index 7b0b733948aa..beee8051ea0b 100644 --- a/test/infrastructure/docker/controllers/dockermachine_controller.go +++ b/test/infrastructure/docker/controllers/dockermachine_controller.go @@ -138,7 +138,7 @@ func (r *DockerMachineReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, re } // Create a helper for managing the docker container hosting the machine. 
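+	// Machines owned by a DockerMachine carry no machine pool labels, so nil is passed for the new labels argument; only DockerMachinePool supplies label filters.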
- externalMachine, err := docker.NewMachine(cluster.Name, machine.Name, dockerMachine.Spec.CustomImage, log) + externalMachine, err := docker.NewMachine(cluster.Name, machine.Name, dockerMachine.Spec.CustomImage, nil, log) if err != nil { return ctrl.Result{}, errors.Wrapf(err, "failed to create helper for managing the externalMachine") } diff --git a/test/infrastructure/docker/docker/kind_manager.go b/test/infrastructure/docker/docker/kind_manager.go index 2ac04d2c1c11..f44f5aad25c2 100644 --- a/test/infrastructure/docker/docker/kind_manager.go +++ b/test/infrastructure/docker/docker/kind_manager.go @@ -34,7 +34,7 @@ const ControlPlanePort = 6443 type Manager struct{} -func (m *Manager) CreateControlPlaneNode(name, image, clusterLabel, listenAddress string, port int32, mounts []v1alpha4.Mount, portMappings []v1alpha4.PortMapping) (*types.Node, error) { +func (m *Manager) CreateControlPlaneNode(name, image, clusterLabel, listenAddress string, port int32, mounts []v1alpha4.Mount, portMappings []v1alpha4.PortMapping, labels map[string]string) (*types.Node, error) { // gets a random host port for the API server if port == 0 { p, err := getPort() @@ -53,7 +53,7 @@ func (m *Manager) CreateControlPlaneNode(name, image, clusterLabel, listenAddres node, err := createNode( name, image, clusterLabel, constants.ControlPlaneNodeRoleValue, mounts, portMappingsWithAPIServer, // publish selected port for the API server - "--expose", fmt.Sprintf("%d", port), + append([]string{"--expose", fmt.Sprintf("%d", port)}, labelsAsArgs(labels)...)..., ) if err != nil { return nil, err @@ -62,8 +62,8 @@ func (m *Manager) CreateControlPlaneNode(name, image, clusterLabel, listenAddres return node, nil } -func (m *Manager) CreateWorkerNode(name, image, clusterLabel string, mounts []v1alpha4.Mount, portMappings []v1alpha4.PortMapping) (*types.Node, error) { - return createNode(name, image, clusterLabel, constants.WorkerNodeRoleValue, mounts, portMappings) +func (m *Manager) CreateWorkerNode(name, image, clusterLabel string, mounts []v1alpha4.Mount, portMappings []v1alpha4.PortMapping, labels map[string]string) (*types.Node, error) { + return createNode(name, image, clusterLabel, constants.WorkerNodeRoleValue, mounts, portMappings, labelsAsArgs(labels)...) 
 }
 
 func (m *Manager) CreateExternalLoadBalancerNode(name, image, clusterLabel, listenAddress string, port int32) (*types.Node, error) {
@@ -152,7 +152,19 @@ func createNode(name, image, clusterLabel, role string, mounts []v1alpha4.Mount,
 		return nil, err
 	}
 
-	return types.NewNode(name, role), nil
+	return types.NewNode(name, image, role), nil
+}
+
+// labelsAsArgs transforms a map of labels into extraArgs
+func labelsAsArgs(labels map[string]string) []string {
+	args := make([]string, len(labels)*2)
+	i := 0
+	for key, val := range labels {
+		args[i] = "--label"
+		args[i+1] = fmt.Sprintf("%s=%s", key, val)
+		i += 2
+	}
+	return args
 }
 
 // helper used to get a free TCP port for the API server
diff --git a/test/infrastructure/docker/docker/machine.go b/test/infrastructure/docker/docker/machine.go
index 238a61e54d81..57c1342d0a7d 100644
--- a/test/infrastructure/docker/docker/machine.go
+++ b/test/infrastructure/docker/docker/machine.go
@@ -44,8 +44,8 @@ const (
 )
 
 type nodeCreator interface {
-	CreateControlPlaneNode(name, image, clusterLabel, listenAddress string, port int32, mounts []v1alpha4.Mount, portMappings []v1alpha4.PortMapping) (node *types.Node, err error)
-	CreateWorkerNode(name, image, clusterLabel string, mounts []v1alpha4.Mount, portMappings []v1alpha4.PortMapping) (node *types.Node, err error)
+	CreateControlPlaneNode(name, image, clusterLabel, listenAddress string, port int32, mounts []v1alpha4.Mount, portMappings []v1alpha4.PortMapping, labels map[string]string) (node *types.Node, err error)
+	CreateWorkerNode(name, image, clusterLabel string, mounts []v1alpha4.Mount, portMappings []v1alpha4.PortMapping, labels map[string]string) (node *types.Node, err error)
 }
 
 // Machine implement a service for managing the docker containers hosting a kubernetes nodes.
@@ -54,13 +54,14 @@ type Machine struct {
 	cluster   string
 	machine   string
 	image     string
+	labels    map[string]string
 	container *types.Node
 
 	nodeCreator nodeCreator
 }
 
 // NewMachine returns a new Machine service for the given Cluster/DockerCluster pair.
-func NewMachine(cluster, machine, image string, logger logr.Logger) (*Machine, error) {
+func NewMachine(cluster, machine, image string, labels map[string]string, logger logr.Logger) (*Machine, error) {
 	if cluster == "" {
 		return nil, errors.New("cluster is required when creating a docker.Machine")
 	}
@@ -71,10 +72,15 @@ func NewMachine(cluster, machine, image string, logger logr.Logger) (*Machine, e
 		return nil, errors.New("logger is required when creating a docker.Machine")
 	}
 
-	container, err := getContainer(
+	filters := []string{
 		withLabel(clusterLabel(cluster)),
 		withName(machineContainerName(cluster, machine)),
-	)
+	}
+	for key, val := range labels {
+		filters = append(filters, withLabel(toLabel(key, val)))
+	}
+
+	container, err := getContainer(filters...)
 	if err != nil {
 		return nil, err
 	}
@@ -84,16 +90,76 @@ func NewMachine(cluster, machine, image string, logger logr.Logger) (*Machine, e
 		machine:     machine,
 		image:       image,
 		container:   container,
+		labels:      labels,
 		log:         logger,
 		nodeCreator: &Manager{},
 	}, nil
 }
 
+// ListMachinesByCluster returns the machines in the given cluster whose containers carry all of the provided labels.
+func ListMachinesByCluster(cluster string, labels map[string]string, logger logr.Logger) ([]*Machine, error) {
+	if cluster == "" {
+		return nil, errors.New("cluster is required when listing machines in the cluster")
+	}
+
+	if logger == nil {
+		return nil, errors.New("logger is required when listing machines in the cluster")
+	}
+
+	filters := []string{
+		withLabel(clusterLabel(cluster)),
+	}
+	for key, val := range labels {
+		filters = append(filters, withLabel(toLabel(key, val)))
+	}
+
+	containers, err := listContainers(filters...)
+	if err != nil {
+		return nil, err
+	}
+
+	machines := make([]*Machine, len(containers))
+	for i, container := range containers {
+		machines[i] = &Machine{
+			cluster:     cluster,
+			machine:     machineFromContainerName(cluster, container.Name),
+			image:       container.Image,
+			labels:      labels,
+			container:   container,
+			log:         logger,
+			nodeCreator: &Manager{},
+		}
+	}
+
+	return machines, nil
+}
+
+// IsControlPlane returns true if the container for this machine is a control plane node
+func (m *Machine) IsControlPlane() bool {
+	if !m.Exists() {
+		return false
+	}
+	return m.container.ClusterRole == constants.ControlPlaneNodeRoleValue
+}
+
+// ImageVersion returns the version tag of the machine's image, or the default image tag if no custom image is set
+func (m *Machine) ImageVersion() string {
+	if m.image == "" {
+		return defaultImageTag
+	}
+
+	return m.image[strings.LastIndex(m.image, ":")+1:]
+}
+
 // Exists returns true if the container for this machine exists.
 func (m *Machine) Exists() bool {
 	return m.container != nil
 }
 
+// Name returns the name of the machine
+func (m *Machine) Name() string {
+	return m.machine
+}
+
 // ContainerName return the name of the container for this machine
 func (m *Machine) ContainerName() string {
 	return machineContainerName(m.cluster, m.machine)
@@ -135,6 +201,7 @@ func (m *Machine) Create(ctx context.Context, role string, version *string, moun
 			0,
 			kindMounts(mounts),
 			nil,
+			m.labels,
 		)
 		if err != nil {
 			return errors.WithStack(err)
@@ -147,6 +214,7 @@ func (m *Machine) Create(ctx context.Context, role string, version *string, moun
 			clusterLabel(m.cluster),
 			kindMounts(mounts),
 			nil,
+			m.labels,
 		)
 		if err != nil {
 			return errors.WithStack(err)
 		}
diff --git a/test/infrastructure/docker/docker/types/node.go b/test/infrastructure/docker/docker/types/node.go
index 1b9ab1e28818..0ef405c56343 100644
--- a/test/infrastructure/docker/docker/types/node.go
+++ b/test/infrastructure/docker/docker/types/node.go
@@ -34,13 +34,15 @@ type Node struct {
 	Name        string
 	ClusterRole string
 	InternalIP  string
+	Image       string
 	Commander   *containerCmder
 }
 
 // NewNode returns a Node with defaults.
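+// The image parameter records the container image backing the node, so callers such as machine pools can compare a running container against their template.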
-func NewNode(name, role string) *Node {
+func NewNode(name, image, role string) *Node {
 	return &Node{
 		Name:        name,
+		Image:       image,
 		ClusterRole: role,
 		Commander:   ContainerCmder(name),
 	}
diff --git a/test/infrastructure/docker/docker/util.go b/test/infrastructure/docker/docker/util.go
index 4e8821a05361..8910001adaf1 100644
--- a/test/infrastructure/docker/docker/util.go
+++ b/test/infrastructure/docker/docker/util.go
@@ -30,12 +30,16 @@ const nodeRoleLabelKey = "io.x-k8s.kind.role"
 
 // clusterLabel returns the label applied to all the containers in a cluster
 func clusterLabel(name string) string {
-	return fmt.Sprintf("%s=%s", clusterLabelKey, name)
+	return toLabel(clusterLabelKey, name)
 }
 
 // roleLabel returns the label applied to all the containers with a specific role
 func roleLabel(role string) string {
-	return fmt.Sprintf("%s=%s", nodeRoleLabelKey, role)
+	return toLabel(nodeRoleLabelKey, role)
+}
+
+func toLabel(key, val string) string {
+	return fmt.Sprintf("%s=%s", key, val)
 }
 
 func machineContainerName(cluster, machine string) string {
@@ -45,6 +49,11 @@ func machineContainerName(cluster, machine string) string {
 	return fmt.Sprintf("%s-%s", cluster, machine)
 }
 
+func machineFromContainerName(cluster, containerName string) string {
+	machine := strings.TrimPrefix(containerName, cluster)
+	return strings.TrimPrefix(machine, "-")
+}
+
 // withName returns a filter on name for listContainers & getContainer
 func withName(name string) string {
 	return fmt.Sprintf("name=^%s$", name)
@@ -101,7 +110,7 @@ func list(visit func(string, *types.Node), filters ...string) error {
 		// filter for nodes with the cluster label
 		"--filter", "label=" + clusterLabelKey,
 		// format to include friendly name and the cluster name
-		"--format", fmt.Sprintf(`{{.Names}}\t{{.Label "%s"}}`, clusterLabelKey),
+		"--format", fmt.Sprintf(`{{.Names}}\t{{.Label "%s"}}\t{{.Image}}`, clusterLabelKey),
 	}
 	for _, filter := range filters {
 		args = append(args, "--filter", filter)
@@ -113,12 +122,13 @@ func list(visit func(string, *types.Node), filters ...string) error {
 	}
 	for _, line := range lines {
 		parts := strings.Split(line, "\t")
-		if len(parts) != 2 {
+		if len(parts) != 3 {
 			return errors.Errorf("invalid output when listing nodes: %s", line)
 		}
 		names := strings.Split(parts[0], ",")
 		cluster := parts[1]
-		visit(cluster, types.NewNode(names[0], "undetermined"))
+		image := parts[2]
+		visit(cluster, types.NewNode(names[0], image, "undetermined"))
 	}
 	return nil
 }
diff --git a/test/infrastructure/docker/examples/machine-pool.yaml b/test/infrastructure/docker/examples/machine-pool.yaml
new file mode 100644
index 000000000000..de5e76641ef0
--- /dev/null
+++ b/test/infrastructure/docker/examples/machine-pool.yaml
@@ -0,0 +1,102 @@
+# Creates a cluster with a one-node control plane and a worker machine pool with two replicas
+apiVersion: cluster.x-k8s.io/v1alpha3
+kind: Cluster
+metadata:
+  name: my-cluster
+  namespace: default
+spec:
+  clusterNetwork:
+    services:
+      cidrBlocks: ["10.96.0.0/12"]
+    pods:
+      cidrBlocks: ["192.168.0.0/16"]
+    serviceDomain: cluster.local
+  controlPlaneRef:
+    apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
+    kind: KubeadmControlPlane
+    name: controlplane
+    namespace: default
+  infrastructureRef:
+    apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+    kind: DockerCluster
+    name: my-cluster
+    namespace: default
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
+kind: KubeadmControlPlane
+metadata:
+  name: controlplane
+  namespace: default
+spec:
+  replicas: 1
+  version: v1.18.8
+  infrastructureTemplate:
+    apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+    kind: DockerMachineTemplate
+    name: controlplane
+    namespace: default
+  kubeadmConfigSpec:
+    clusterConfiguration:
+      controllerManager:
+        extraArgs:
+          enable-hostpath-provisioner: "true"
+    initConfiguration:
+      nodeRegistration:
+        kubeletExtraArgs:
+          eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+kind: DockerCluster
+metadata:
+  name: my-cluster
+  namespace: default
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+kind: DockerMachineTemplate
+metadata:
+  name: controlplane
+  namespace: default
+spec:
+  template:
+    spec: {}
+---
+apiVersion: exp.cluster.x-k8s.io/v1alpha3
+kind: MachinePool
+metadata:
+  name: worker-mp-0
+  namespace: default
+spec:
+  clusterName: my-cluster
+  replicas: 2
+  template:
+    spec:
+      bootstrap:
+        configRef:
+          apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
+          kind: KubeadmConfig
+          name: worker-mp-0-config
+          namespace: default
+      clusterName: my-cluster
+      infrastructureRef:
+        apiVersion: exp.infrastructure.cluster.x-k8s.io/v1alpha3
+        kind: DockerMachinePool
+        name: worker-dmp-0
+        namespace: default
+      version: v1.18.8
+---
+apiVersion: exp.infrastructure.cluster.x-k8s.io/v1alpha3
+kind: DockerMachinePool
+metadata:
+  name: worker-dmp-0
+  namespace: default
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
+kind: KubeadmConfig
+metadata:
+  name: worker-mp-0-config
+  namespace: default
+spec:
+  joinConfiguration:
+    nodeRegistration:
+      kubeletExtraArgs:
+        eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
diff --git a/test/infrastructure/docker/examples/simple-cluster.yaml b/test/infrastructure/docker/examples/simple-cluster.yaml
index dbb34cc5f0d3..800b96bc4a81 100644
--- a/test/infrastructure/docker/examples/simple-cluster.yaml
+++ b/test/infrastructure/docker/examples/simple-cluster.yaml
@@ -52,6 +52,11 @@ spec:
       namespace: default
   kubeadmConfigSpec:
     clusterConfiguration:
+      apiServer:
+        certSANs:
+        - localhost
+        - 127.0.0.1
+        - 0.0.0.0
       controllerManager:
         extraArgs:
           enable-hostpath-provisioner: "true"
diff --git a/test/infrastructure/docker/exp/PROJECT b/test/infrastructure/docker/exp/PROJECT
new file mode 100644
index 000000000000..30c1be97a308
--- /dev/null
+++ b/test/infrastructure/docker/exp/PROJECT
@@ -0,0 +1,7 @@
+domain: cluster.x-k8s.io
+repo: sigs.k8s.io/cluster-api/test/infrastructure/docker/exp
+resources:
+- group: exp.infrastructure
+  kind: DockerMachinePool
+  version: v1alpha3
+version: "2"
diff --git a/test/infrastructure/docker/exp/README.md b/test/infrastructure/docker/exp/README.md
new file mode 100644
index 000000000000..b819cf94b89d
--- /dev/null
+++ b/test/infrastructure/docker/exp/README.md
@@ -0,0 +1,21 @@
+# exp
+
+This subrepository holds experimental code and API types.
+
+**Warning**: Packages here are experimental and unreliable. Some may one day be promoted to the main repository, or they may be modified arbitrarily or even disappear altogether.
+
+In short, code in this subrepository is not subject to any compatibility or deprecation promise.
+
+Experiments follow a strict lifecycle: Alpha -> Beta prior to Graduation.
+
+For more information on graduation criteria, see: [Contributing Guidelines](../CONTRIBUTING.md#experiments)
+
+## Active Features
+- DockerMachinePool (alpha)
+
+## Create a new Resource
+Below is an example of creating a `DockerMachinePool` resource in the experimental group.
+``` +kubebuilder create api --kind DockerMachinePool --group exp.infrastructure --version v1alpha3 \ + --controller=true --resource=true --make=false +``` \ No newline at end of file diff --git a/test/infrastructure/docker/exp/api/v1alpha3/dockermachinepool_types.go b/test/infrastructure/docker/exp/api/v1alpha3/dockermachinepool_types.go new file mode 100644 index 000000000000..a1089a3d2a05 --- /dev/null +++ b/test/infrastructure/docker/exp/api/v1alpha3/dockermachinepool_types.go @@ -0,0 +1,146 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha3 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" + infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1alpha3" +) + +const ( + // MachinePoolFinalizer allows ReconcileDockerMachinePool to clean up resources + MachinePoolFinalizer = "dockermachinepool.infrastructure.cluster.x-k8s.io" +) + +// DockerMachineTemplate defines the desired state of DockerMachine +type DockerMachineTemplate struct { + // CustomImage allows customizing the container image that is used for + // running the machine + // +optional + CustomImage string `json:"customImage,omitempty"` + + // PreLoadImages allows to pre-load images in a newly created machine. This can be used to + // speed up tests by avoiding e.g. to download CNI images on all the containers. + // +optional + PreLoadImages []string `json:"preLoadImages,omitempty"` + + // ExtraMounts describes additional mount points for the node container + // These may be used to bind a hostPath + // +optional + ExtraMounts []infrav1.Mount `json:"extraMounts,omitempty"` +} + +// DockerMachinePoolSpec defines the desired state of DockerMachinePool +type DockerMachinePoolSpec struct { + // Template contains the details used to build a replica machine within the Machine Pool + // +optional + Template DockerMachineTemplate `json:"template"` + + // ProviderID is the identification ID of the Machine Pool + // +optional + ProviderID string `json:"providerID,omitempty"` + + // ProviderIDList is the list of identification IDs of machine instances managed by this Machine Pool + //+optional + ProviderIDList []string `json:"providerIDList,omitempty"` +} + +// DockerMachinePoolStatus defines the observed state of DockerMachinePool +type DockerMachinePoolStatus struct { + // Ready denotes that the machine pool is ready + // +optional + Ready bool `json:"ready"` + + // Replicas is the most recently observed number of replicas. + // +optional + Replicas int32 `json:"replicas"` + + // The generation observed by the deployment controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // Instances contains the status for each instance in the pool + // +optional + Instances []*DockerMachinePoolInstanceStatus `json:"instances,omitempty"` + + // Conditions defines current service state of the DockerMachinePool. 
+ // +optional + Conditions clusterv1.Conditions `json:"conditions,omitempty"` +} + +type DockerMachinePoolInstanceStatus struct { + // Addresses contains the associated addresses for the docker machine. + // +optional + Addresses []clusterv1.MachineAddress `json:"addresses,omitempty"` + + // InstanceName is the identification of the Machine Instance within the Machine Pool + InstanceName string `json:"instanceName,omitempty"` + + // ProviderID is the provider identification of the Machine Pool Instance + // +optional + ProviderID *string `json:"providerID,omitempty"` + + // Version defines the Kubernetes version for the Machine Instance + // +optional + Version *string `json:"version,omitempty"` + + // Ready denotes that the machine (docker container) is ready + // +optional + Ready bool `json:"ready"` + + // Bootstrapped is true when the kubeadm bootstrapping has been run + // against this machine + // +optional + Bootstrapped bool `json:"bootstrapped,omitempty"` +} + +// +kubebuilder:resource:path=dockermachinepools,scope=Namespaced,categories=cluster-api +// +kubebuilder:object:root=true +// +kubebuilder:storageversion +// +kubebuilder:subresource:status + +// DockerMachinePool is the Schema for the dockermachinepools API +type DockerMachinePool struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec DockerMachinePoolSpec `json:"spec,omitempty"` + Status DockerMachinePoolStatus `json:"status,omitempty"` +} + +func (c *DockerMachinePool) GetConditions() clusterv1.Conditions { + return c.Status.Conditions +} + +func (c *DockerMachinePool) SetConditions(conditions clusterv1.Conditions) { + c.Status.Conditions = conditions +} + +// +kubebuilder:object:root=true + +// DockerMachinePoolList contains a list of DockerMachinePool +type DockerMachinePoolList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DockerMachinePool `json:"items"` +} + +func init() { + SchemeBuilder.Register(&DockerMachinePool{}, &DockerMachinePoolList{}) +} diff --git a/test/infrastructure/docker/exp/api/v1alpha3/groupversion_info.go b/test/infrastructure/docker/exp/api/v1alpha3/groupversion_info.go new file mode 100644 index 000000000000..6a6a8d3c9fcb --- /dev/null +++ b/test/infrastructure/docker/exp/api/v1alpha3/groupversion_info.go @@ -0,0 +1,36 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package v1alpha3 contains API Schema definitions for the exp.infrastructure v1alpha3 API group +// +kubebuilder:object:generate=true +// +groupName=exp.infrastructure.cluster.x-k8s.io +package v1alpha3 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "exp.infrastructure.cluster.x-k8s.io", Version: "v1alpha3"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/test/infrastructure/docker/exp/api/v1alpha3/zz_generated.deepcopy.go b/test/infrastructure/docker/exp/api/v1alpha3/zz_generated.deepcopy.go new file mode 100644 index 000000000000..237cf2180525 --- /dev/null +++ b/test/infrastructure/docker/exp/api/v1alpha3/zz_generated.deepcopy.go @@ -0,0 +1,195 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" + cluster_apiapiv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3" + apiv1alpha3 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1alpha3" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerMachinePool) DeepCopyInto(out *DockerMachinePool) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerMachinePool. +func (in *DockerMachinePool) DeepCopy() *DockerMachinePool { + if in == nil { + return nil + } + out := new(DockerMachinePool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DockerMachinePool) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DockerMachinePoolInstanceStatus) DeepCopyInto(out *DockerMachinePoolInstanceStatus) { + *out = *in + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]cluster_apiapiv1alpha3.MachineAddress, len(*in)) + copy(*out, *in) + } + if in.ProviderID != nil { + in, out := &in.ProviderID, &out.ProviderID + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerMachinePoolInstanceStatus. +func (in *DockerMachinePoolInstanceStatus) DeepCopy() *DockerMachinePoolInstanceStatus { + if in == nil { + return nil + } + out := new(DockerMachinePoolInstanceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerMachinePoolList) DeepCopyInto(out *DockerMachinePoolList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DockerMachinePool, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerMachinePoolList. +func (in *DockerMachinePoolList) DeepCopy() *DockerMachinePoolList { + if in == nil { + return nil + } + out := new(DockerMachinePoolList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DockerMachinePoolList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerMachinePoolSpec) DeepCopyInto(out *DockerMachinePoolSpec) { + *out = *in + in.Template.DeepCopyInto(&out.Template) + if in.ProviderIDList != nil { + in, out := &in.ProviderIDList, &out.ProviderIDList + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerMachinePoolSpec. +func (in *DockerMachinePoolSpec) DeepCopy() *DockerMachinePoolSpec { + if in == nil { + return nil + } + out := new(DockerMachinePoolSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerMachinePoolStatus) DeepCopyInto(out *DockerMachinePoolStatus) { + *out = *in + if in.Instances != nil { + in, out := &in.Instances, &out.Instances + *out = make([]*DockerMachinePoolInstanceStatus, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(DockerMachinePoolInstanceStatus) + (*in).DeepCopyInto(*out) + } + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(cluster_apiapiv1alpha3.Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerMachinePoolStatus. 
+func (in *DockerMachinePoolStatus) DeepCopy() *DockerMachinePoolStatus { + if in == nil { + return nil + } + out := new(DockerMachinePoolStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerMachineTemplate) DeepCopyInto(out *DockerMachineTemplate) { + *out = *in + if in.PreLoadImages != nil { + in, out := &in.PreLoadImages, &out.PreLoadImages + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExtraMounts != nil { + in, out := &in.ExtraMounts, &out.ExtraMounts + *out = make([]apiv1alpha3.Mount, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerMachineTemplate. +func (in *DockerMachineTemplate) DeepCopy() *DockerMachineTemplate { + if in == nil { + return nil + } + out := new(DockerMachineTemplate) + in.DeepCopyInto(out) + return out +} diff --git a/test/infrastructure/docker/exp/controllers/dockermachinepool_controller.go b/test/infrastructure/docker/exp/controllers/dockermachinepool_controller.go new file mode 100644 index 000000000000..abfbdd138163 --- /dev/null +++ b/test/infrastructure/docker/exp/controllers/dockermachinepool_controller.go @@ -0,0 +1,232 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package controllers
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/go-logr/logr"
+	"github.com/pkg/errors"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/utils/pointer"
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
+	clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1alpha3"
+	utilexp "sigs.k8s.io/cluster-api/exp/util"
+	infrav1exp "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1alpha3"
+	"sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/docker"
+	"sigs.k8s.io/cluster-api/util"
+	"sigs.k8s.io/cluster-api/util/patch"
+	"sigs.k8s.io/cluster-api/util/predicates"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
+	"sigs.k8s.io/controller-runtime/pkg/source"
+)
+
+// DockerMachinePoolReconciler reconciles a DockerMachinePool object
+type DockerMachinePoolReconciler struct {
+	client.Client
+	Log    logr.Logger
+	Scheme *runtime.Scheme
+}
+
+// +kubebuilder:rbac:groups=exp.infrastructure.cluster.x-k8s.io,resources=dockermachinepools,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=exp.infrastructure.cluster.x-k8s.io,resources=dockermachinepools/status,verbs=get;update;patch
+// +kubebuilder:rbac:groups=exp.cluster.x-k8s.io,resources=machinepools;machinepools/status,verbs=get;list;watch
+// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch
+func (r *DockerMachinePoolReconciler) Reconcile(req ctrl.Request) (res ctrl.Result, rerr error) {
+	ctx := context.Background()
+	log := r.Log.WithName("dockermachinepool").WithValues("docker-machine-pool", req.NamespacedName)
+
+	// Fetch the DockerMachinePool instance.
+	dockerMachinePool := &infrav1exp.DockerMachinePool{}
+	if err := r.Client.Get(ctx, req.NamespacedName, dockerMachinePool); err != nil {
+		if apierrors.IsNotFound(err) {
+			return ctrl.Result{}, nil
+		}
+		return ctrl.Result{}, err
+	}
+
+	// Fetch the MachinePool.
+	machinePool, err := utilexp.GetOwnerMachinePool(ctx, r.Client, dockerMachinePool.ObjectMeta)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+	if machinePool == nil {
+		log.Info("Waiting for MachinePool Controller to set OwnerRef on DockerMachinePool")
+		return ctrl.Result{}, nil
+	}
+
+	log = log.WithValues("machine-pool", machinePool.Name)
+
+	// Fetch the Cluster.
+	cluster, err := util.GetClusterFromMetadata(ctx, r.Client, machinePool.ObjectMeta)
+	if err != nil {
+		log.Info("DockerMachinePool owner MachinePool is missing cluster label or cluster does not exist")
+		return ctrl.Result{}, err
+	}
+
+	if cluster == nil {
+		log.Info(fmt.Sprintf("Please associate this machine pool with a cluster using the label %s: <name of cluster>", clusterv1.ClusterLabelName))
+		return ctrl.Result{}, nil
+	}
+
+	log = log.WithValues("cluster", cluster.Name)
+
+	// Initialize the patch helper
+	patchHelper, err := patch.NewHelper(dockerMachinePool, r)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+
+	// Always attempt to Patch the DockerMachinePool object and status after each reconciliation.
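+	// Patching in a deferred function ensures that spec and status changes are persisted even when reconciliation exits early with an error.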
+	defer func() {
+		if err := patchDockerMachinePool(ctx, patchHelper, dockerMachinePool); err != nil {
+			log.Error(err, "failed to patch DockerMachinePool")
+			if rerr == nil {
+				rerr = err
+			}
+		}
+	}()
+
+	// Add the finalizer first, if it is not already present, to avoid a race condition between init and delete
+	if !controllerutil.ContainsFinalizer(dockerMachinePool, infrav1exp.MachinePoolFinalizer) {
+		controllerutil.AddFinalizer(dockerMachinePool, infrav1exp.MachinePoolFinalizer)
+		return ctrl.Result{}, nil
+	}
+
+	// Handle deleted machine pools
+	if !dockerMachinePool.ObjectMeta.DeletionTimestamp.IsZero() {
+		return r.reconcileDelete(ctx, cluster, machinePool, dockerMachinePool, log)
+	}
+
+	// Handle non-deleted machine pools
+	return r.reconcileNormal(ctx, cluster, machinePool, dockerMachinePool, log)
+}
+
+// SetupWithManager will add watches for this controller
+func (r *DockerMachinePoolReconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error {
+	clusterToDockerMachinePools, err := util.ClusterToObjectsMapper(mgr.GetClient(), &infrav1exp.DockerMachinePoolList{}, mgr.GetScheme())
+	if err != nil {
+		return err
+	}
+
+	c, err := ctrl.NewControllerManagedBy(mgr).
+		For(&infrav1exp.DockerMachinePool{}).
+		WithOptions(options).
+		WithEventFilter(predicates.ResourceNotPaused(r.Log)).
+		Watches(
+			&source.Kind{Type: &clusterv1exp.MachinePool{}},
+			&handler.EnqueueRequestsFromMapFunc{
+				ToRequests: utilexp.MachinePoolToInfrastructureMapFunc(infrav1exp.GroupVersion.WithKind("DockerMachinePool"), r.Log),
+			},
+		).
+		Build(r)
+	if err != nil {
+		return err
+	}
+	return c.Watch(
+		&source.Kind{Type: &clusterv1.Cluster{}},
+		&handler.EnqueueRequestsFromMapFunc{
+			ToRequests: clusterToDockerMachinePools,
+		},
+		predicates.ClusterUnpausedAndInfrastructureReady(r.Log),
+	)
+}
+
+func (r *DockerMachinePoolReconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Cluster, machinePool *clusterv1exp.MachinePool, dockerMachinePool *infrav1exp.DockerMachinePool, log logr.Logger) (ctrl.Result, error) {
+	pool, err := docker.NewNodePool(r, cluster, machinePool, dockerMachinePool, log)
+	if err != nil {
+		return ctrl.Result{}, errors.Wrap(err, "failed to build new node pool")
+	}
+
+	if err := pool.Delete(ctx); err != nil {
+		return ctrl.Result{}, errors.Wrap(err, "failed to delete all machines in the node pool")
+	}
+
+	controllerutil.RemoveFinalizer(dockerMachinePool, infrav1exp.MachinePoolFinalizer)
+	return ctrl.Result{}, nil
+}
+
+func (r *DockerMachinePoolReconciler) reconcileNormal(ctx context.Context, cluster *clusterv1.Cluster, machinePool *clusterv1exp.MachinePool, dockerMachinePool *infrav1exp.DockerMachinePool, log logr.Logger) (ctrl.Result, error) {
+	// Make sure bootstrap data is available and populated.
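+	// The MachinePool controller populates dataSecretName once the bootstrap provider has generated the bootstrap secret.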
+	if machinePool.Spec.Template.Spec.Bootstrap.DataSecretName == nil {
+		log.Info("Waiting for the Bootstrap provider controller to set bootstrap data")
+		return ctrl.Result{}, nil
+	}
+
+	if machinePool.Spec.Replicas == nil {
+		machinePool.Spec.Replicas = pointer.Int32Ptr(1)
+	}
+
+	pool, err := docker.NewNodePool(r, cluster, machinePool, dockerMachinePool, log)
+	if err != nil {
+		return ctrl.Result{}, errors.Wrap(err, "failed to build new node pool")
+	}
+
+	// If we don't have enough machines matching the spec, build them
+	if err := pool.ReconcileMachines(ctx); err != nil {
+		if errors.Is(err, &docker.TransientError{}) {
+			log.V(4).Info("requeue in 5 seconds due to a transient docker machine reconcile error")
+			return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
+		}
+
+		return ctrl.Result{}, errors.Wrap(err, "failed to reconcile machines")
+	}
+
+	if err := pool.DeleteExtraMachines(ctx); err != nil {
+		return ctrl.Result{}, errors.Wrap(err, "failed to delete overprovisioned or out of spec machines")
+	}
+
+	dockerMachinePool.Spec.ProviderIDList = []string{}
+	for _, instance := range dockerMachinePool.Status.Instances {
+		if instance.ProviderID != nil && instance.Ready {
+			dockerMachinePool.Spec.ProviderIDList = append(dockerMachinePool.Spec.ProviderIDList, *instance.ProviderID)
+		}
+	}
+
+	dockerMachinePool.Status.Replicas = int32(len(dockerMachinePool.Status.Instances))
+
+	if dockerMachinePool.Spec.ProviderID == "" {
+		// This is a fake provider ID which does not tie back to any docker infrastructure. In cloud providers,
+		// this ID would tie back to the resource which manages the machine pool implementation. For example,
+		// Azure uses a VirtualMachineScaleSet to manage a set of like machines.
+		dockerMachinePool.Spec.ProviderID = getDockerMachinePoolProviderID(cluster.Name, dockerMachinePool.Name)
+	}
+
+	dockerMachinePool.Status.Ready = len(dockerMachinePool.Spec.ProviderIDList) == int(*machinePool.Spec.Replicas)
+	return ctrl.Result{}, nil
+}
+
+func getDockerMachinePoolProviderID(clusterName, dockerMachinePoolName string) string {
+	return fmt.Sprintf("docker:////%s-dmp-%s", clusterName, dockerMachinePoolName)
+}
+
+func patchDockerMachinePool(ctx context.Context, patchHelper *patch.Helper, dockerMachinePool *infrav1exp.DockerMachinePool) error {
+	// TODO: add conditions
+
+	// Patch the object, ignoring conflicts on the conditions owned by this controller.
+	return patchHelper.Patch(
+		ctx,
+		dockerMachinePool,
+	)
+}
diff --git a/test/infrastructure/docker/exp/controllers/exp.go b/test/infrastructure/docker/exp/controllers/exp.go
new file mode 100644
index 000000000000..9e80544c97ba
--- /dev/null
+++ b/test/infrastructure/docker/exp/controllers/exp.go
@@ -0,0 +1,22 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllers
+
+// This file adds RBAC permissions to the Docker Infrastructure manager to operate on all objects in the experimental API group.
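+// controller-gen uses these markers to generate the rules in config/rbac/role.yaml.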
+
+// +kubebuilder:rbac:groups=exp.infrastructure.cluster.x-k8s.io,resources=*,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=exp.cluster.x-k8s.io,resources=*,verbs=get;list;watch
diff --git a/test/infrastructure/docker/exp/docker/nodepool.go b/test/infrastructure/docker/exp/docker/nodepool.go
new file mode 100644
index 000000000000..b764d46273e1
--- /dev/null
+++ b/test/infrastructure/docker/exp/docker/nodepool.go
@@ -0,0 +1,332 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package docker
+
+import (
+	"context"
+	"encoding/base64"
+	"fmt"
+	"time"
+
+	"github.com/go-logr/logr"
+	"github.com/pkg/errors"
+	corev1 "k8s.io/api/core/v1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
+	clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1alpha3"
+	"sigs.k8s.io/cluster-api/test/infrastructure/docker/docker"
+	infrav1exp "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1alpha3"
+	"sigs.k8s.io/cluster-api/util"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/kind/pkg/cluster/constants"
+)
+
+const (
+	dockerMachinePoolLabel = "docker.cluster.x-k8s.io/machine-pool"
+)
+
+// TransientError represents an error from docker provisioning which should be retried
+type TransientError struct {
+	InstanceName string
+	Reason       string
+}
+
+func (e *TransientError) Error() string {
+	return fmt.Sprintf("transient error for instance %s: %s", e.InstanceName, e.Reason)
+}
+
+func (e *TransientError) Is(target error) bool {
+	_, ok := target.(*TransientError)
+	return ok
+}
+
+// NodePool is a wrapper around a collection of like machines which are owned by a DockerMachinePool. A node pool
+// provides a friendly way of managing (adding, deleting, reimaging) a set of docker machines. The node pool will also
+// sync the docker machine pool status Instances field with the state of the docker machines.
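+// Machines are associated with their pool through a container label keyed by dockerMachinePoolLabel.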
+type NodePool struct {
+	client            client.Client
+	cluster           *clusterv1.Cluster
+	machinePool       *clusterv1exp.MachinePool
+	dockerMachinePool *infrav1exp.DockerMachinePool
+	labelFilters      map[string]string
+	machines          []*docker.Machine
+	logger            logr.Logger
+}
+
+// NewNodePool creates a new node pool instance
+func NewNodePool(kClient client.Client, cluster *clusterv1.Cluster, mp *clusterv1exp.MachinePool, dmp *infrav1exp.DockerMachinePool, log logr.Logger) (*NodePool, error) {
+	np := &NodePool{
+		client:            kClient,
+		cluster:           cluster,
+		machinePool:       mp,
+		dockerMachinePool: dmp,
+		labelFilters:      map[string]string{dockerMachinePoolLabel: dmp.Name},
+		logger:            log.WithValues("node-pool", dmp.Name),
+	}
+
+	if err := np.refresh(); err != nil {
+		return np, errors.Wrap(err, "failed to refresh the node pool")
+	}
+	return np, nil
+}
+
+// ReconcileMachines will build enough machines to satisfy the machine pool / docker machine pool spec and update the
+// docker machine pool status
+func (np *NodePool) ReconcileMachines(ctx context.Context) error {
+	matchingMachineCount := int32(len(np.machinesMatchingInfrastructureSpec()))
+	if matchingMachineCount < *np.machinePool.Spec.Replicas {
+		for i := int32(0); i < *np.machinePool.Spec.Replicas-matchingMachineCount; i++ {
+			if err := np.addMachine(ctx); err != nil {
+				return errors.Wrap(err, "failed to create a new docker machine")
+			}
+		}
+	}
+
+	for _, machine := range np.machinesMatchingInfrastructureSpec() {
+		if err := np.reconcileMachine(ctx, machine); err != nil {
+			if errors.Is(err, &TransientError{}) {
+				return err
+			}
+			return errors.Wrap(err, "failed to reconcile machine")
+		}
+	}
+
+	return np.refresh()
+}
+
+// DeleteExtraMachines will delete all of the machines outside of the machine pool / docker machine pool spec and update
+// the docker machine pool status.
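+// Machines whose image no longer matches the template are removed first, followed by any instances beyond the desired replica count.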
+func (np *NodePool) DeleteExtraMachines(ctx context.Context) error {
+	outOfSpecMachineNames := map[string]interface{}{}
+	for _, outOfSpecMachine := range np.machinesNotMatchingInfrastructureSpec() {
+		externalMachine, err := docker.NewMachine(np.cluster.Name, outOfSpecMachine.Name(), np.dockerMachinePool.Spec.Template.CustomImage, np.labelFilters, np.logger)
+		if err != nil {
+			return errors.Wrapf(err, "failed to create helper for managing the externalMachine named %s", outOfSpecMachine.Name())
+		}
+
+		if err := externalMachine.Delete(ctx); err != nil {
+			return errors.Wrapf(err, "failed to delete machine %s", outOfSpecMachine.Name())
+		}
+
+		outOfSpecMachineNames[outOfSpecMachine.Name()] = nil
+	}
+
+	var stats []*infrav1exp.DockerMachinePoolInstanceStatus
+	for _, machineStatus := range np.dockerMachinePool.Status.Instances {
+		if _, ok := outOfSpecMachineNames[machineStatus.InstanceName]; !ok {
+			stats = append(stats, machineStatus)
+		}
+	}
+
+	// Guard the slicing below: if there are no more instances than desired replicas, there is
+	// nothing overprovisioned to delete.
+	if int32(len(stats)) > *np.machinePool.Spec.Replicas {
+		for _, overprovisioned := range stats[*np.machinePool.Spec.Replicas:] {
+			externalMachine, err := docker.NewMachine(np.cluster.Name, overprovisioned.InstanceName, np.dockerMachinePool.Spec.Template.CustomImage, np.labelFilters, np.logger)
+			if err != nil {
+				return errors.Wrapf(err, "failed to create helper for managing the externalMachine named %s", overprovisioned.InstanceName)
+			}
+
+			if err := externalMachine.Delete(ctx); err != nil {
+				return errors.Wrapf(err, "failed to delete machine %s", overprovisioned.InstanceName)
+			}
+		}
+		stats = stats[:*np.machinePool.Spec.Replicas]
+	}
+
+	np.dockerMachinePool.Status.Instances = stats
+	return np.refresh()
+}
+
+// Delete will delete all of the machines in the node pool
+func (np *NodePool) Delete(ctx context.Context) error {
+	for _, machine := range np.machines {
+		externalMachine, err := docker.NewMachine(np.cluster.Name, machine.Name(), np.dockerMachinePool.Spec.Template.CustomImage, np.labelFilters, np.logger)
+		if err != nil {
+			return errors.Wrapf(err, "failed to create helper for managing the externalMachine named %s", machine.Name())
+		}
+
+		if err := externalMachine.Delete(ctx); err != nil {
+			return errors.Wrapf(err, "failed to delete machine %s", machine.Name())
+		}
+	}
+
+	return nil
+}
+
+// machinesMatchingInfrastructureSpec returns all of the docker.Machines which match the machine pool / docker machine pool spec
+func (np *NodePool) machinesMatchingInfrastructureSpec() []*docker.Machine {
+	var matchingMachines []*docker.Machine
+	for _, machine := range np.machines {
+		if !machine.IsControlPlane() && machine.ImageVersion() == *np.machinePool.Spec.Template.Spec.Version {
+			matchingMachines = append(matchingMachines, machine)
+		}
+	}
+
+	return matchingMachines
+}
+
+// machinesNotMatchingInfrastructureSpec returns all of the machines which do not match the machine pool / docker machine pool spec
+func (np *NodePool) machinesNotMatchingInfrastructureSpec() []*docker.Machine {
+	var notMatchingMachines []*docker.Machine
+	for _, machine := range np.machines {
+		if !machine.IsControlPlane() && machine.ImageVersion() != *np.machinePool.Spec.Template.Spec.Version {
+			notMatchingMachines = append(notMatchingMachines, machine)
+		}
+	}
+
+	return notMatchingMachines
+}
+
+// addMachine will add a new machine to the node pool and update the docker machine pool status
+func (np *NodePool) addMachine(ctx context.Context) error {
+	instanceName := fmt.Sprintf("worker-%s", util.RandomString(6))
+	externalMachine, err := docker.NewMachine(np.cluster.Name, instanceName, np.dockerMachinePool.Spec.Template.CustomImage, np.labelFilters, np.logger)
+	if err != nil {
+		return errors.Wrapf(err, "failed to create helper for managing the externalMachine named %s", instanceName)
+	}
+
+	if err := externalMachine.Create(ctx, constants.WorkerNodeRoleValue, np.machinePool.Spec.Template.Spec.Version, np.dockerMachinePool.Spec.Template.ExtraMounts); err != nil {
+		return errors.Wrapf(err, "failed to create docker machine with instance name %s", instanceName)
+	}
+
+	np.dockerMachinePool.Status.Instances = append(np.dockerMachinePool.Status.Instances, &infrav1exp.DockerMachinePoolInstanceStatus{
+		InstanceName: instanceName,
+		Version:      np.machinePool.Spec.Template.Spec.Version,
+	})
+
+	return np.refresh()
+}
+
+// refresh asks docker to list all the machines matching the node pool label and updates the cached list of node pool
+// machines
+func (np *NodePool) refresh() error {
+	machines, err := docker.ListMachinesByCluster(np.cluster.Name, np.labelFilters, np.logger)
+	if err != nil {
+		return errors.Wrap(err, "failed to list all machines in the cluster")
+	}
+
+	np.machines = machines
+	return nil
+}
+
+// reconcileMachine will build and provision a docker machine and update the docker machine pool status for that instance
+func (np *NodePool) reconcileMachine(ctx context.Context, machine *docker.Machine) error {
+	machineStatus := getInstanceStatusByMachineName(np.dockerMachinePool, machine.Name())
+	if machineStatus == nil {
+		machineStatus = &infrav1exp.DockerMachinePoolInstanceStatus{
+			InstanceName: machine.Name(),
+			Version:      np.machinePool.Spec.Template.Spec.Version,
+		}
+		np.dockerMachinePool.Status.Instances = append(np.dockerMachinePool.Status.Instances, machineStatus)
+	}
+
+	externalMachine, err := docker.NewMachine(np.cluster.Name, machine.Name(), np.dockerMachinePool.Spec.Template.CustomImage, np.labelFilters, np.logger)
+	if err != nil {
+		return errors.Wrapf(err, "failed to create helper for managing the externalMachine named %s", machine.Name())
+	}
+
+	// Run the bootstrap script only if the machine has not already been bootstrapped
+	if !machineStatus.Bootstrapped {
+		if err := externalMachine.PreloadLoadImages(ctx, np.dockerMachinePool.Spec.Template.PreLoadImages); err != nil {
+			return errors.Wrapf(err, "failed to pre-load images into the docker machine with instance name %s", machine.Name())
+		}
+
+		bootstrapData, err := getBootstrapData(ctx, np.client, np.machinePool)
+		if err != nil {
+			return errors.Wrapf(err, "failed to get bootstrap data for instance named %s", machine.Name())
+		}
+
+		timeoutctx, cancel := context.WithTimeout(ctx, 3*time.Minute)
+		defer cancel()
+		// Run the bootstrap script. Simulates cloud-init.
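+		// bootstrapData is the base64-encoded cloud-config read from the bootstrap secret by getBootstrapData below.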
+ if err := externalMachine.ExecBootstrap(timeoutctx, bootstrapData); err != nil { + return errors.Wrapf(err, "failed to exec DockerMachinePool instance bootstrap for instance named %s", machine.Name()) + } + machineStatus.Bootstrapped = true + } + + if machineStatus.Addresses == nil { + // set address in machine status + machineAddress, err := externalMachine.Address(ctx) + if err != nil { + return &TransientError{ + InstanceName: machine.Name(), + Reason: "failed to fetch addresses for container", + } + } + + machineStatus.Addresses = []clusterv1.MachineAddress{ + { + Type: clusterv1.MachineHostName, + Address: externalMachine.ContainerName(), + }, + { + Type: clusterv1.MachineInternalIP, + Address: machineAddress, + }, + { + Type: clusterv1.MachineExternalIP, + Address: machineAddress, + }, + } + } + + if machineStatus.ProviderID == nil { + // Usually a cloud provider will do this, but there is no docker-cloud provider. + // Requeue if there is an error, as this is likely momentary load balancer + // state changes during control plane provisioning. + if err := externalMachine.SetNodeProviderID(ctx); err != nil { + np.logger.V(4).Info("transient error setting the provider id") + return &TransientError{ + InstanceName: machine.Name(), + Reason: "failed to patch the Kubernetes node with the machine providerID", + } + } + // Set ProviderID so the Cluster API Machine Controller can pull it + providerID := externalMachine.ProviderID() + machineStatus.ProviderID = &providerID + } + + machineStatus.Ready = true + return nil +} + +// getBootstrapData fetches the bootstrap data for the machine pool +func getBootstrapData(ctx context.Context, kClient client.Client, machinePool *clusterv1exp.MachinePool) (string, error) { + if machinePool.Spec.Template.Spec.Bootstrap.DataSecretName == nil { + return "", errors.New("error retrieving bootstrap data: linked MachinePool's bootstrap.dataSecretName is nil") + } + + s := &corev1.Secret{} + key := client.ObjectKey{Namespace: machinePool.GetNamespace(), Name: *machinePool.Spec.Template.Spec.Bootstrap.DataSecretName} + if err := kClient.Get(ctx, key, s); err != nil { + return "", errors.Wrapf(err, "failed to retrieve bootstrap data secret for DockerMachinePool instance %s/%s", machinePool.GetNamespace(), machinePool.GetName()) + } + + value, ok := s.Data["value"] + if !ok { + return "", errors.New("error retrieving bootstrap data: secret value key is missing") + } + + return base64.StdEncoding.EncodeToString(value), nil +} + +// getInstanceStatusByMachineName returns the instance status for a given machine by name or nil if it doesn't exist +func getInstanceStatusByMachineName(dockerMachinePool *infrav1exp.DockerMachinePool, machineName string) *infrav1exp.DockerMachinePoolInstanceStatus { + for _, machine := range dockerMachinePool.Status.Instances { + if machine.InstanceName == machineName { + return machine + } + } + + return nil +} diff --git a/test/infrastructure/docker/go.mod b/test/infrastructure/docker/go.mod index a66fbe6a04a3..9d50e2141167 100644 --- a/test/infrastructure/docker/go.mod +++ b/test/infrastructure/docker/go.mod @@ -6,10 +6,12 @@ require ( github.com/go-logr/logr v0.1.0 github.com/onsi/gomega v1.10.1 github.com/pkg/errors v0.9.1 + github.com/spf13/pflag v1.0.5 k8s.io/api v0.17.9 k8s.io/apimachinery v0.17.9 k8s.io/client-go v0.17.9 k8s.io/klog v1.0.0 + k8s.io/utils v0.0.0-20200619165400-6e3d28b6ed19 sigs.k8s.io/cluster-api v0.3.3 sigs.k8s.io/controller-runtime v0.5.11 sigs.k8s.io/kind v0.7.1-0.20200303021537-981bd80d3802 diff --git 
diff --git a/test/infrastructure/docker/go.sum b/test/infrastructure/docker/go.sum
index b7f71f5d6460..01dbb686f152 100644
--- a/test/infrastructure/docker/go.sum
+++ b/test/infrastructure/docker/go.sum
@@ -596,6 +596,7 @@ k8s.io/client-go v0.17.9/go.mod h1:3cM92qAd1XknA5IRkRfpJhl9OQjkYy97ZEUio70wVnI=
 k8s.io/cluster-bootstrap v0.17.9 h1:IH/MwGor5/7bwHClz0PO/8pKq+SU1eSB1rs645pGu8Y=
 k8s.io/cluster-bootstrap v0.17.9/go.mod h1:Q6nXn/sqVfMvT1VIJVPxFboYAoqH06PCjZnaYzbpZC0=
 k8s.io/code-generator v0.17.9/go.mod h1:iiHz51+oTx+Z9D0vB3CH3O4HDDPWrvZyUgUYaIE9h9M=
+k8s.io/component-base v0.17.9 h1:1CmgQ367Eo6UWkfO1sl7Z99KJpbwkrs9aMY5LZTQR9s=
 k8s.io/component-base v0.17.9/go.mod h1:Wg22ePDK0mfTa+bEFgZHGwr0h40lXnYy6D7D+f7itFk=
 k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
 k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
diff --git a/test/infrastructure/docker/hack/boilerplate.go.txt b/test/infrastructure/docker/hack/boilerplate.go.txt
index 0926592d3895..4b76f1fdd88a 100644
--- a/test/infrastructure/docker/hack/boilerplate.go.txt
+++ b/test/infrastructure/docker/hack/boilerplate.go.txt
@@ -1,5 +1,5 @@
 /*
-Copyright The Kubernetes Authors.
+Copyright YEAR The Kubernetes Authors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
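The spf13/pflag dependency added in the go.mod hunk above lets main.go (next
hunk) fold klog's standard-library flags and the manager's own flags into a
single flag set. A standalone sketch of that interop pattern, independent of
this patch:

package main

import (
	"flag"

	"github.com/spf13/pflag"
	"k8s.io/klog"
)

func main() {
	// klog registers its flags (-v, -logtostderr, ...) on the standard Go flag set.
	klog.InitFlags(nil)

	// Fold the standard Go flags into pflag so a single Parse handles both families.
	pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
	pflag.Parse()
}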
15m)") - flag.StringVar(&healthAddr, "health-addr", ":9440", "The address the health endpoint binds to.") - flag.Parse() + initFlags(pflag.CommandLine) + pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + pflag.Parse() ctrl.SetLogger(klogr.New()) @@ -95,6 +98,17 @@ func main() { } } +func initFlags(fs *pflag.FlagSet) { + fs.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.") + fs.IntVar(&concurrency, "concurrency", 10, "The number of docker machines to process simultaneously") + fs.BoolVar(&enableLeaderElection, "enable-leader-election", false, + "Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.") + fs.DurationVar(&syncPeriod, "sync-period", 10*time.Minute, + "The minimum interval at which watched resources are reconciled (e.g. 15m)") + fs.StringVar(&healthAddr, "health-addr", ":9440", "The address the health endpoint binds to.") + feature.MutableGates.AddFlag(fs) +} + func setupChecks(mgr ctrl.Manager) { if err := mgr.AddReadyzCheck("ping", healthz.Ping); err != nil { setupLog.Error(err, "unable to create ready check") @@ -125,6 +139,18 @@ func setupReconcilers(mgr ctrl.Manager) { setupLog.Error(err, "unable to create controller", "controller", "DockerCluster") os.Exit(1) } + + if feature.Gates.Enabled(feature.MachinePool) { + if err := (&expcontrollers.DockerMachinePoolReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("DockerMachinePool"), + }).SetupWithManager(mgr, controller.Options{ + MaxConcurrentReconciles: concurrency, + }); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "DockerMachinePool") + os.Exit(1) + } + } } func setupWebhooks(mgr ctrl.Manager) {