diff --git a/exp/util/util.go b/exp/util/util.go
index ea91791048f3..76d18fc838de 100644
--- a/exp/util/util.go
+++ b/exp/util/util.go
@@ -18,12 +18,15 @@ package util
 
 import (
 	"context"
 
+	"github.com/go-logr/logr"
 	"github.com/pkg/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1alpha3"
 	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 )
 
 // GetOwnerMachinePool returns the MachinePool objects owning the current resource.
@@ -52,3 +55,36 @@ func GetMachinePoolByName(ctx context.Context, c client.Client, namespace, name
 	}
 	return m, nil
 }
+
+// MachinePoolToInfrastructureMapFunc returns a handler.ToRequestsFunc that maps
+// MachinePool events to reconciliation requests for an infrastructure provider object.
+func MachinePoolToInfrastructureMapFunc(gvk schema.GroupVersionKind, log logr.Logger) handler.ToRequestsFunc {
+	log = log.WithValues("machine-pool-to-infra-map-func", gvk.String())
+	return func(o handler.MapObject) []reconcile.Request {
+		log := log.WithValues("namespace", o.Meta.GetNamespace(), "name", o.Meta.GetName())
+		m, ok := o.Object.(*clusterv1exp.MachinePool)
+		if !ok {
+			log.V(4).Info("not a machine pool")
+			return nil
+		}
+
+		gk := gvk.GroupKind()
+		ref := m.Spec.Template.Spec.InfrastructureRef
+		// Return early if the GroupKind doesn't match what we expect.
+		infraGK := ref.GroupVersionKind().GroupKind()
+		if gk != infraGK {
+			log.V(4).Info("infra kind doesn't match filter group kind", "infraGroupKind", infraGK.String())
+			return nil
+		}
+
+		log.V(4).Info("projecting object", "namespace", m.Namespace, "name", ref.Name)
+		return []reconcile.Request{
+			{
+				NamespacedName: client.ObjectKey{
+					Namespace: m.Namespace,
+					Name:      ref.Name,
+				},
+			},
+		}
+	}
+}
diff --git a/test/e2e/config/docker-ci.yaml b/test/e2e/config/docker-ci.yaml
index a51deb424822..209ceff0399d 100644
--- a/test/e2e/config/docker-ci.yaml
+++ b/test/e2e/config/docker-ci.yaml
@@ -72,6 +72,7 @@ providers:
   - sourcePath: "../data/infrastructure-docker/cluster-template-ci.yaml"
     targetName: "cluster-template.yaml"
   - sourcePath: "../data/infrastructure-docker/cluster-template-kcp-adoption.yaml"
+  - sourcePath: "../data/infrastructure-docker/cluster-template-machine-pool.yaml"
 
 variables:
   KUBERNETES_VERSION: "v1.18.2"
@@ -79,19 +80,22 @@ variables:
   COREDNS_VERSION_UPGRADE_TO: "1.6.7"
   KUBERNETES_VERSION_UPGRADE_TO: "v1.18.2"
   KUBERNETES_VERSION_UPGRADE_FROM: "v1.17.2"
+  MIN_NUMBER_OF_REPLICAS: "0"
+  MAX_NUMBER_OF_REPLICAS: "5"
   DOCKER_SERVICE_DOMAIN: "cluster.local"
   DOCKER_SERVICE_CIDRS: "10.128.0.0/12"
   # IMPORTANT!
This values should match the one used by the CNI provider DOCKER_POD_CIDRS: "192.168.0.0/16" CNI: "./data/cni/kindnet/kindnet.yaml" EXP_CLUSTER_RESOURCE_SET: "true" + EXP_MACHINE_POOL: "true" intervals: default/wait-controllers: ["3m", "10s"] default/wait-cluster: ["3m", "10s"] default/wait-control-plane: ["10m", "10s"] default/wait-worker-nodes: ["5m", "10s"] - default/wait-machine-pool-nodes: ["5m", "10s"] + default/wait-machine-pool-nodes: ["3m", "10s"] default/wait-delete-cluster: ["3m", "10s"] default/wait-machine-upgrade: ["20m", "10s"] default/wait-machine-remediation: ["5m", "10s"] diff --git a/test/e2e/config/docker-dev.yaml b/test/e2e/config/docker-dev.yaml index 7ab2c0505402..e6cc950f8b25 100644 --- a/test/e2e/config/docker-dev.yaml +++ b/test/e2e/config/docker-dev.yaml @@ -105,6 +105,8 @@ variables: COREDNS_VERSION_UPGRADE_TO: "1.6.7" KUBERNETES_VERSION_UPGRADE_TO: "v1.18.2" KUBERNETES_VERSION_UPGRADE_FROM: "v1.17.2" + MIN_NUMBER_OF_REPLICAS: "0" + MAX_NUMBER_OF_REPLICAS: "5" DOCKER_SERVICE_DOMAIN: "cluster.local" DOCKER_SERVICE_CIDRS: "10.128.0.0/12" # IMPORTANT! This values should match the one used by the CNI provider @@ -112,6 +114,7 @@ variables: #CNI: "./data/cni/calico/calico.yaml" CNI: "./data/cni/kindnet/kindnet.yaml" EXP_CLUSTER_RESOURCE_SET: "true" + EXP_MACHINE_POOL: "true" intervals: default/wait-controllers: ["3m", "10s"] diff --git a/test/e2e/data/infrastructure-docker/cluster-template-machine-pool.yaml b/test/e2e/data/infrastructure-docker/cluster-template-machine-pool.yaml new file mode 100644 index 000000000000..d0ca8f3f84e4 --- /dev/null +++ b/test/e2e/data/infrastructure-docker/cluster-template-machine-pool.yaml @@ -0,0 +1,118 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: DockerCluster +metadata: + name: '${ CLUSTER_NAME }' +--- +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: Cluster +metadata: + name: '${ CLUSTER_NAME }' + labels: + cni: "${CLUSTER_NAME}-crs-0" +spec: + clusterNetwork: + services: + cidrBlocks: ['${ DOCKER_SERVICE_CIDRS }'] + pods: + cidrBlocks: ['${ DOCKER_POD_CIDRS }'] + serviceDomain: '${ DOCKER_SERVICE_DOMAIN }' + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: DockerCluster + name: '${ CLUSTER_NAME }' + controlPlaneRef: + kind: KubeadmControlPlane + apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 + name: "${CLUSTER_NAME}-control-plane" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: DockerMachineTemplate +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + template: + spec: + extraMounts: + - containerPath: "/var/run/docker.sock" + hostPath: "/var/run/docker.sock" +--- +kind: KubeadmControlPlane +apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 +metadata: + name: "${ CLUSTER_NAME }-control-plane" +spec: + replicas: ${ CONTROL_PLANE_MACHINE_COUNT } + infrastructureTemplate: + kind: DockerMachineTemplate + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + name: "${CLUSTER_NAME}-control-plane" + kubeadmConfigSpec: + clusterConfiguration: + controllerManager: + extraArgs: {enable-hostpath-provisioner: 'true'} + apiServer: + certSANs: [localhost, 127.0.0.1] + initConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: {eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'} + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: {eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'} + version: 
"${KUBERNETES_VERSION}" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: "cni-${CLUSTER_NAME}-crs-0" +data: ${CNI_RESOURCES} +--- +apiVersion: addons.cluster.x-k8s.io/v1alpha3 +kind: ClusterResourceSet +metadata: + name: "${CLUSTER_NAME}-crs-0" +spec: + strategy: ApplyOnce + clusterSelector: + matchLabels: + cni: "${CLUSTER_NAME}-crs-0" + resources: + - name: "cni-${CLUSTER_NAME}-crs-0" + kind: ConfigMap +--- +apiVersion: exp.cluster.x-k8s.io/v1alpha3 +kind: MachinePool +metadata: + name: "${ CLUSTER_NAME }-mp-0" +spec: + clusterName: '${ CLUSTER_NAME }' + replicas: ${ WORKER_MACHINE_COUNT } + template: + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 + kind: KubeadmConfig + name: "${ CLUSTER_NAME }-mp-0-config" + clusterName: '${ CLUSTER_NAME }' + infrastructureRef: + apiVersion: exp.infrastructure.cluster.x-k8s.io/v1alpha3 + kind: DockerMachinePool + name: "${ CLUSTER_NAME }-dmp-0" + version: "${KUBERNETES_VERSION}" +--- +apiVersion: exp.infrastructure.cluster.x-k8s.io/v1alpha3 +kind: DockerMachinePool +metadata: + name: "${ CLUSTER_NAME }-dmp-0" +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 +kind: KubeadmConfig +metadata: + name: "${ CLUSTER_NAME }-mp-0-config" +spec: + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% diff --git a/test/e2e/kcp_upgrade.go b/test/e2e/kcp_upgrade.go index 90c989ffc265..136fea7f6288 100644 --- a/test/e2e/kcp_upgrade.go +++ b/test/e2e/kcp_upgrade.go @@ -45,11 +45,11 @@ type KCPUpgradeSpecInput struct { // KCPUpgradeSpec implements a test that verifies KCP to properly upgrade a control plane with 3 machines. func KCPUpgradeSpec(ctx context.Context, inputGetter func() KCPUpgradeSpecInput) { var ( - specName = "kcp-upgrade" - input KCPUpgradeSpecInput - namespace *corev1.Namespace - cancelWatches context.CancelFunc - result *clusterctl.ApplyClusterTemplateAndWaitResult + specName = "kcp-upgrade" + input KCPUpgradeSpecInput + namespace *corev1.Namespace + cancelWatches context.CancelFunc + clusterResources *clusterctl.ApplyClusterTemplateAndWaitResult ) BeforeEach(func() { @@ -71,7 +71,7 @@ func KCPUpgradeSpec(ctx context.Context, inputGetter func() KCPUpgradeSpecInput) It("Should successfully upgrade Kubernetes, DNS, kube-proxy, and etcd in a single control plane cluster", func() { By("Creating a workload cluster") - result = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ + clusterResources = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ ClusterProxy: input.BootstrapClusterProxy, ConfigCluster: clusterctl.ConfigClusterInput{ LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()), @@ -93,8 +93,8 @@ func KCPUpgradeSpec(ctx context.Context, inputGetter func() KCPUpgradeSpecInput) By("Upgrading Kubernetes, DNS, kube-proxy, and etcd versions") framework.UpgradeControlPlaneAndWaitForUpgrade(ctx, framework.UpgradeControlPlaneAndWaitForUpgradeInput{ ClusterProxy: input.BootstrapClusterProxy, - Cluster: result.Cluster, - ControlPlane: result.ControlPlane, + Cluster: clusterResources.Cluster, + ControlPlane: clusterResources.ControlPlane, EtcdImageTag: input.E2EConfig.GetVariable(EtcdVersionUpgradeTo), DNSImageTag: input.E2EConfig.GetVariable(CoreDNSVersionUpgradeTo), KubernetesUpgradeVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo), @@ -110,7 +110,7 @@ func KCPUpgradeSpec(ctx 
context.Context, inputGetter func() KCPUpgradeSpecInput) By("Creating a workload cluster") - result = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ + clusterResources = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ ClusterProxy: input.BootstrapClusterProxy, ConfigCluster: clusterctl.ConfigClusterInput{ LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()), @@ -132,8 +132,8 @@ func KCPUpgradeSpec(ctx context.Context, inputGetter func() KCPUpgradeSpecInput) By("Upgrading Kubernetes, DNS, kube-proxy, and etcd versions") framework.UpgradeControlPlaneAndWaitForUpgrade(ctx, framework.UpgradeControlPlaneAndWaitForUpgradeInput{ ClusterProxy: input.BootstrapClusterProxy, - Cluster: result.Cluster, - ControlPlane: result.ControlPlane, + Cluster: clusterResources.Cluster, + ControlPlane: clusterResources.ControlPlane, EtcdImageTag: input.E2EConfig.GetVariable(EtcdVersionUpgradeTo), DNSImageTag: input.E2EConfig.GetVariable(CoreDNSVersionUpgradeTo), KubernetesUpgradeVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo), @@ -147,6 +147,6 @@ func KCPUpgradeSpec(ctx context.Context, inputGetter func() KCPUpgradeSpecInput) AfterEach(func() { // Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself. - dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, result.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup) + dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup) }) } diff --git a/test/e2e/machine_pool.go b/test/e2e/machine_pool.go new file mode 100644 index 000000000000..714896bb98cb --- /dev/null +++ b/test/e2e/machine_pool.go @@ -0,0 +1,106 @@ +// +build e2e + +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "context" + "fmt" + "os" + "path/filepath" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + "k8s.io/utils/pointer" + + "sigs.k8s.io/cluster-api/test/framework" + "sigs.k8s.io/cluster-api/test/framework/clusterctl" + "sigs.k8s.io/cluster-api/util" +) + +const ( + MinNumberOfReplicas = "MIN_NUMBER_OF_REPLICAS" + MaxNumberOfReplicas = "MAX_NUMBER_OF_REPLICAS" +) + +// MachinePoolInput is the input for MachinePoolSpec +type MachinePoolInput struct { + E2EConfig *clusterctl.E2EConfig + ClusterctlConfigPath string + BootstrapClusterProxy framework.ClusterProxy + ArtifactFolder string + SkipCleanup bool +} + +// MachinePoolSpec implements a test that verifies MachinePool scale up, down and version update +func MachinePoolSpec(ctx context.Context, inputGetter func() MachinePoolInput) { + var ( + specName = "machine-pool" + input MachinePoolInput + namespace *corev1.Namespace + cancelWatches context.CancelFunc + clusterResources *clusterctl.ApplyClusterTemplateAndWaitResult + ) + + BeforeEach(func() { + Expect(ctx).NotTo(BeNil(), "ctx is required for %s spec", specName) + input = inputGetter() + Expect(input.E2EConfig).ToNot(BeNil(), "Invalid argument. input.E2EConfig can't be nil when calling %s spec", specName) + Expect(input.ClusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. input.ClusterctlConfigPath must be an existing file when calling %s spec", specName) + Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. input.BootstrapClusterProxy can't be nil when calling %s spec", specName) + Expect(os.MkdirAll(input.ArtifactFolder, 0755)).To(Succeed(), "Invalid argument. input.ArtifactFolder can't be created for %s spec", specName) + Expect(input.E2EConfig.Variables).To(HaveKey(KubernetesVersionUpgradeTo)) + Expect(input.E2EConfig.Variables).To(HaveKey(KubernetesVersionUpgradeFrom)) + Expect(input.E2EConfig.Variables).To(HaveKey(MinNumberOfReplicas)) + Expect(input.E2EConfig.Variables).To(HaveKey(MaxNumberOfReplicas)) + + // Setup a Namespace where to host objects for this spec and create a watcher for the namespace events. + namespace, cancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder) + }) + + It("Should successfully create a cluster with machine pool machines", func() { + By("Creating a workload cluster") + clusterResources = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ + ClusterProxy: input.BootstrapClusterProxy, + ConfigCluster: clusterctl.ConfigClusterInput{ + LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()), + ClusterctlConfigPath: input.ClusterctlConfigPath, + KubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(), + InfrastructureProvider: clusterctl.DefaultInfrastructureProvider, + Flavor: "machine-pool", + Namespace: namespace.Name, + ClusterName: fmt.Sprintf("%s-%s", specName, util.RandomString(6)), + KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeFrom), + ControlPlaneMachineCount: pointer.Int64Ptr(1), + WorkerMachineCount: pointer.Int64Ptr(2), + }, + WaitForClusterIntervals: input.E2EConfig.GetIntervals(specName, "wait-cluster"), + WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, "wait-control-plane"), + WaitForMachinePools: input.E2EConfig.GetIntervals(specName, "wait-machine-pool-nodes"), + }) + + By("PASSED!") + }) + + AfterEach(func() { + // Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself. 
+		dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
+	})
+}
diff --git a/test/e2e/machine_pool_test.go b/test/e2e/machine_pool_test.go
new file mode 100644
index 000000000000..5b0b408d0003
--- /dev/null
+++ b/test/e2e/machine_pool_test.go
@@ -0,0 +1,39 @@
+// +build e2e
+
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e
+
+import (
+	"context"
+
+	. "github.com/onsi/ginkgo"
+)
+
+var _ = Describe("When testing MachinePools", func() {
+
+	MachinePoolSpec(context.TODO(), func() MachinePoolInput {
+		return MachinePoolInput{
+			E2EConfig:             e2eConfig,
+			ClusterctlConfigPath:  clusterctlConfigPath,
+			BootstrapClusterProxy: bootstrapClusterProxy,
+			ArtifactFolder:        artifactFolder,
+			SkipCleanup:           skipCleanup,
+		}
+	})
+
+})
diff --git a/test/e2e/md_upgrades.go b/test/e2e/md_upgrades.go
index c2c8795c8659..01e317e5836c 100644
--- a/test/e2e/md_upgrades.go
+++ b/test/e2e/md_upgrades.go
@@ -45,11 +45,11 @@ type MachineDeploymentUpgradesSpecInput struct {
 // MachineDeploymentUpgradesSpec implements a test that verifies that MachineDeployment upgrades are successful.
 func MachineDeploymentUpgradesSpec(ctx context.Context, inputGetter func() MachineDeploymentUpgradesSpecInput) {
 	var (
-		specName      = "md-upgrades"
-		input         MachineDeploymentUpgradesSpecInput
-		namespace     *corev1.Namespace
-		cancelWatches context.CancelFunc
-		result        *clusterctl.ApplyClusterTemplateAndWaitResult
+		specName         = "md-upgrades"
+		input            MachineDeploymentUpgradesSpecInput
+		namespace        *corev1.Namespace
+		cancelWatches    context.CancelFunc
+		clusterResources *clusterctl.ApplyClusterTemplateAndWaitResult
 	)
 
 	BeforeEach(func() {
@@ -71,7 +71,7 @@ func MachineDeploymentUpgradesSpec(ctx context.Context, inputGetter func() Machi
 	It("Should successfully upgrade Machines upon changes in relevant MachineDeployment fields", func() {
 		By("Creating a workload cluster")
-		result = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
+		clusterResources = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
 			ClusterProxy: input.BootstrapClusterProxy,
 			ConfigCluster: clusterctl.ConfigClusterInput{
 				LogFolder:                filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()),
@@ -93,24 +93,24 @@ func MachineDeploymentUpgradesSpec(ctx context.Context, inputGetter func() Machi
 		By("Upgrading MachineDeployment's Kubernetes version to a valid version")
 		framework.UpgradeMachineDeploymentsAndWait(context.TODO(), framework.UpgradeMachineDeploymentsAndWaitInput{
 			ClusterProxy:                input.BootstrapClusterProxy,
-			Cluster:                     result.Cluster,
+			Cluster:                     clusterResources.Cluster,
 			UpgradeVersion:              input.E2EConfig.GetVariable(KubernetesVersion),
 			WaitForMachinesToBeUpgraded: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"),
-			MachineDeployments:          result.MachineDeployments,
+			MachineDeployments:
clusterResources.MachineDeployments, }) By("Upgrading MachineDeployment Infrastructure ref and wait for rolling upgrade") framework.UpgradeMachineDeploymentInfrastructureRefAndWait(context.TODO(), framework.UpgradeMachineDeploymentInfrastructureRefAndWaitInput{ ClusterProxy: input.BootstrapClusterProxy, - Cluster: result.Cluster, + Cluster: clusterResources.Cluster, WaitForMachinesToBeUpgraded: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"), - MachineDeployments: result.MachineDeployments, + MachineDeployments: clusterResources.MachineDeployments, }) By("PASSED!") }) AfterEach(func() { // Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself. - dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, result.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup) + dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup) }) } diff --git a/test/e2e/mhc_remediations.go b/test/e2e/mhc_remediations.go index c3c352030350..402e01fdf40a 100644 --- a/test/e2e/mhc_remediations.go +++ b/test/e2e/mhc_remediations.go @@ -45,11 +45,11 @@ type MachineRemediationSpecInput struct { // MachineRemediationSpec implements a test that verifies that Machines are remediated by MHC during unhealthy conditions. func MachineRemediationSpec(ctx context.Context, inputGetter func() MachineRemediationSpecInput) { var ( - specName = "mhc-remediation" - input MachineRemediationSpecInput - namespace *corev1.Namespace - cancelWatches context.CancelFunc - result *clusterctl.ApplyClusterTemplateAndWaitResult + specName = "mhc-remediation" + input MachineRemediationSpecInput + namespace *corev1.Namespace + cancelWatches context.CancelFunc + clusterResources *clusterctl.ApplyClusterTemplateAndWaitResult ) BeforeEach(func() { @@ -69,7 +69,7 @@ func MachineRemediationSpec(ctx context.Context, inputGetter func() MachineRemed By("Creating a workload cluster") - result = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ + clusterResources = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ ClusterProxy: input.BootstrapClusterProxy, ConfigCluster: clusterctl.ConfigClusterInput{ LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()), @@ -91,7 +91,7 @@ func MachineRemediationSpec(ctx context.Context, inputGetter func() MachineRemed By("Waiting for MachineHealthCheck remediation") framework.DiscoverMachineHealthChecksAndWaitForRemediation(ctx, framework.DiscoverMachineHealthCheckAndWaitForRemediationInput{ ClusterProxy: input.BootstrapClusterProxy, - Cluster: result.Cluster, + Cluster: clusterResources.Cluster, WaitForMachineRemediation: input.E2EConfig.GetIntervals(specName, "wait-machine-remediation"), }) @@ -100,6 +100,6 @@ func MachineRemediationSpec(ctx context.Context, inputGetter func() MachineRemed AfterEach(func() { // Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself. 
- dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, result.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup) + dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup) }) } diff --git a/test/e2e/quick_start.go b/test/e2e/quick_start.go index e11137567ec1..00bea159179d 100644 --- a/test/e2e/quick_start.go +++ b/test/e2e/quick_start.go @@ -47,11 +47,11 @@ type QuickStartSpecInput struct { // This test is meant to provide a first, fast signal to detect regression; it is recommended to use it as a PR blocker test. func QuickStartSpec(ctx context.Context, inputGetter func() QuickStartSpecInput) { var ( - specName = "quick-start" - input QuickStartSpecInput - namespace *corev1.Namespace - cancelWatches context.CancelFunc - result *clusterctl.ApplyClusterTemplateAndWaitResult + specName = "quick-start" + input QuickStartSpecInput + namespace *corev1.Namespace + cancelWatches context.CancelFunc + clusterResources *clusterctl.ApplyClusterTemplateAndWaitResult ) BeforeEach(func() { @@ -72,7 +72,7 @@ func QuickStartSpec(ctx context.Context, inputGetter func() QuickStartSpecInput) By("Creating a workload cluster") - result = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ + clusterResources = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ ClusterProxy: input.BootstrapClusterProxy, ConfigCluster: clusterctl.ConfigClusterInput{ LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()), @@ -96,6 +96,6 @@ func QuickStartSpec(ctx context.Context, inputGetter func() QuickStartSpecInput) AfterEach(func() { // Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself. - dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, result.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup) + dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup) }) } diff --git a/test/e2e/self_hosted.go b/test/e2e/self_hosted.go index 7be74d4c6ef2..cd444542ba04 100644 --- a/test/e2e/self_hosted.go +++ b/test/e2e/self_hosted.go @@ -47,11 +47,11 @@ type SelfHostedSpecInput struct { // SelfHostedSpec implements a test that verifies Cluster API creating a cluster, pivoting to a self-hosted cluster. 
func SelfHostedSpec(ctx context.Context, inputGetter func() SelfHostedSpecInput) { var ( - specName = "self-hosted" - input SelfHostedSpecInput - namespace *corev1.Namespace - cancelWatches context.CancelFunc - result *clusterctl.ApplyClusterTemplateAndWaitResult + specName = "self-hosted" + input SelfHostedSpecInput + namespace *corev1.Namespace + cancelWatches context.CancelFunc + clusterResources *clusterctl.ApplyClusterTemplateAndWaitResult selfHostedClusterProxy framework.ClusterProxy selfHostedNamespace *corev1.Namespace @@ -76,7 +76,7 @@ func SelfHostedSpec(ctx context.Context, inputGetter func() SelfHostedSpecInput) By("Creating a workload cluster") - result = clusterctl.ApplyClusterTemplateAndWait(context.TODO(), clusterctl.ApplyClusterTemplateAndWaitInput{ + clusterResources = clusterctl.ApplyClusterTemplateAndWait(context.TODO(), clusterctl.ApplyClusterTemplateAndWaitInput{ ClusterProxy: input.BootstrapClusterProxy, ConfigCluster: clusterctl.ConfigClusterInput{ LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()), @@ -101,7 +101,7 @@ func SelfHostedSpec(ctx context.Context, inputGetter func() SelfHostedSpecInput) // In case of the cluster id a DockerCluster, we should load controller images into the nodes. // Nb. this can be achieved also by changing the DockerMachine spec, but for the time being we are using // this approach because this allows to have a single source of truth for images, the e2e config - cluster := result.Cluster + cluster := clusterResources.Cluster if cluster.Spec.InfrastructureRef.Kind == "DockerCluster" { bootstrap.LoadImagesToKindCluster(context.TODO(), bootstrap.LoadImagesToKindClusterInput{ Name: cluster.Name, @@ -162,13 +162,13 @@ func SelfHostedSpec(ctx context.Context, inputGetter func() SelfHostedSpecInput) framework.DumpAllResources(ctx, framework.DumpAllResourcesInput{ Lister: selfHostedClusterProxy.GetClient(), Namespace: namespace.Name, - LogPath: filepath.Join(input.ArtifactFolder, "clusters", cluster.Name, "resources"), + LogPath: filepath.Join(input.ArtifactFolder, "clusters", clusterResources.Cluster.Name, "resources"), }) } if selfHostedCluster != nil { By("Moving the cluster back to bootstrap") clusterctl.Move(ctx, clusterctl.MoveInput{ - LogFolder: filepath.Join(input.ArtifactFolder, "clusters", cluster.Name), + LogFolder: filepath.Join(input.ArtifactFolder, "clusters", clusterResources.Cluster.Name), ClusterctlConfigPath: input.ClusterctlConfigPath, FromKubeconfigPath: selfHostedClusterProxy.GetKubeconfigPath(), ToKubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(), @@ -176,10 +176,10 @@ func SelfHostedSpec(ctx context.Context, inputGetter func() SelfHostedSpecInput) }) log.Logf("Waiting for the cluster infrastructure to be provisioned") - result.Cluster = framework.DiscoveryAndWaitForCluster(ctx, framework.DiscoveryAndWaitForClusterInput{ + clusterResources.Cluster = framework.DiscoveryAndWaitForCluster(ctx, framework.DiscoveryAndWaitForClusterInput{ Getter: input.BootstrapClusterProxy.GetClient(), Namespace: namespace.Name, - Name: result.Cluster.Name, + Name: clusterResources.Cluster.Name, }, input.E2EConfig.GetIntervals(specName, "wait-cluster")...) } if selfHostedCancelWatches != nil { @@ -187,6 +187,6 @@ func SelfHostedSpec(ctx context.Context, inputGetter func() SelfHostedSpecInput) } // Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself. 
- dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, result.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup) + dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup) }) } diff --git a/test/framework/machinedeployment_helpers.go b/test/framework/machinedeployment_helpers.go index 78466b32859a..9821dd1e3bb9 100644 --- a/test/framework/machinedeployment_helpers.go +++ b/test/framework/machinedeployment_helpers.go @@ -29,7 +29,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/utils/pointer" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" - clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1alpha3" "sigs.k8s.io/cluster-api/test/framework/internal/log" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/patch" @@ -56,8 +55,8 @@ func CreateMachineDeployment(ctx context.Context, input CreateMachineDeploymentI Expect(input.Creator.Create(ctx, input.InfraMachineTemplate)).To(Succeed()) } -// GetResourcesByClusterInput is the input for GetMachineDeploymentsByCluster and GetMachinePoolsByCluster. -type GetResourcesByClusterInput struct { +// GetMachineDeploymentsByClusterInput is the input for GetMachineDeploymentsByCluster +type GetMachineDeploymentsByClusterInput struct { Lister Lister ClusterName string Namespace string @@ -66,7 +65,7 @@ type GetResourcesByClusterInput struct { // GetMachineDeploymentsByCluster returns the MachineDeployments objects for a cluster. // Important! this method relies on labels that are created by the CAPI controllers during the first reconciliation, so // it is necessary to ensure this is already happened before calling it. -func GetMachineDeploymentsByCluster(ctx context.Context, input GetResourcesByClusterInput) []*clusterv1.MachineDeployment { +func GetMachineDeploymentsByCluster(ctx context.Context, input GetMachineDeploymentsByClusterInput) []*clusterv1.MachineDeployment { deploymentList := &clusterv1.MachineDeploymentList{} Expect(input.Lister.List(ctx, deploymentList, byClusterOptions(input.ClusterName, input.Namespace)...)).To(Succeed(), "Failed to list MachineDeployments object for Cluster %s/%s", input.Namespace, input.ClusterName) @@ -77,20 +76,6 @@ func GetMachineDeploymentsByCluster(ctx context.Context, input GetResourcesByClu return deployments } -// GetMachinePoolsByCluster returns the MachinePools objects for a cluster. -// Important! this method relies on labels that are created by the CAPI controllers during the first reconciliation, so -// it is necessary to ensure this is already happened before calling it. -func GetMachinePoolsByCluster(ctx context.Context, input GetResourcesByClusterInput) []*clusterv1exp.MachinePool { - mpList := &clusterv1exp.MachinePoolList{} - Expect(input.Lister.List(ctx, mpList, byClusterOptions(input.ClusterName, input.Namespace)...)).To(Succeed(), "Failed to list MachinePools object for Cluster %s/%s", input.Namespace, input.ClusterName) - - mps := make([]*clusterv1exp.MachinePool, len(mpList.Items)) - for i := range mpList.Items { - mps[i] = &mpList.Items[i] - } - return mps -} - // WaitForMachineDeploymentNodesToExistInput is the input for WaitForMachineDeploymentNodesToExist. 
type WaitForMachineDeploymentNodesToExistInput struct { Lister Lister @@ -136,37 +121,6 @@ func WaitForMachineDeploymentNodesToExist(ctx context.Context, input WaitForMach }, intervals...).Should(Equal(int(*input.MachineDeployment.Spec.Replicas))) } -// WaitForMachinePoolNodesToExistInput is the input for WaitForMachinePoolNodesToExist. -type WaitForMachinePoolNodesToExistInput struct { - Getter Getter - MachinePool *clusterv1exp.MachinePool -} - -// WaitForMachinePoolNodesToExist waits until all nodes associated with a machine pool exist. -func WaitForMachinePoolNodesToExist(ctx context.Context, input WaitForMachinePoolNodesToExistInput, intervals ...interface{}) { - Expect(ctx).NotTo(BeNil(), "ctx is required for WaitForMachinePoolNodesToExist") - Expect(input.Getter).ToNot(BeNil(), "Invalid argument. input.Getter can't be nil when calling WaitForMachinePoolNodesToExist") - Expect(input.MachinePool).ToNot(BeNil(), "Invalid argument. input.MachinePool can't be nil when calling WaitForMachinePoolNodesToExist") - - By("waiting for the machine pool workload nodes to exist") - Eventually(func() (int, error) { - nn := client.ObjectKey{ - Namespace: input.MachinePool.Namespace, - Name: input.MachinePool.Name, - } - - if err := input.Getter.Get(ctx, nn, input.MachinePool); err != nil { - return 0, err - } - - if input.MachinePool.Status.NodeRefs != nil { - return len(input.MachinePool.Status.NodeRefs), nil - } - - return 0, nil - }, intervals...).Should(Equal(int(*input.MachinePool.Spec.Replicas))) -} - // DiscoveryAndWaitForMachineDeploymentsInput is the input type for DiscoveryAndWaitForMachineDeployments. type DiscoveryAndWaitForMachineDeploymentsInput struct { Lister Lister @@ -179,7 +133,7 @@ func DiscoveryAndWaitForMachineDeployments(ctx context.Context, input DiscoveryA Expect(input.Lister).ToNot(BeNil(), "Invalid argument. input.Lister can't be nil when calling DiscoveryAndWaitForMachineDeployments") Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling DiscoveryAndWaitForMachineDeployments") - machineDeployments := GetMachineDeploymentsByCluster(ctx, GetResourcesByClusterInput{ + machineDeployments := GetMachineDeploymentsByCluster(ctx, GetMachineDeploymentsByClusterInput{ Lister: input.Lister, ClusterName: input.Cluster.Name, Namespace: input.Cluster.Namespace, @@ -194,33 +148,6 @@ func DiscoveryAndWaitForMachineDeployments(ctx context.Context, input DiscoveryA return machineDeployments } -// DiscoveryAndWaitForMachinePoolsInput is the input type for DiscoveryAndWaitForMachinePools. -type DiscoveryAndWaitForMachinePoolsInput struct { - Getter Getter - Lister Lister - Cluster *clusterv1.Cluster -} - -// DiscoveryAndWaitForMachinePools discovers the MachinePools existing in a cluster and waits for them to be ready (all the machine provisioned). -func DiscoveryAndWaitForMachinePools(ctx context.Context, input DiscoveryAndWaitForMachinePoolsInput, intervals ...interface{}) []*clusterv1exp.MachinePool { - Expect(ctx).NotTo(BeNil(), "ctx is required for DiscoveryAndWaitForMachinePools") - Expect(input.Lister).ToNot(BeNil(), "Invalid argument. input.Lister can't be nil when calling DiscoveryAndWaitForMachineDeployments") - Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. 
input.Cluster can't be nil when calling DiscoveryAndWaitForMachineDeployments")
-
-	machinePools := GetMachinePoolsByCluster(ctx, GetResourcesByClusterInput{
-		Lister:      input.Lister,
-		ClusterName: input.Cluster.Name,
-		Namespace:   input.Cluster.Namespace,
-	})
-	for _, machinepool := range machinePools {
-		WaitForMachinePoolNodesToExist(ctx, WaitForMachinePoolNodesToExistInput{
-			Getter:      input.Getter,
-			MachinePool: machinepool,
-		}, intervals...)
-	}
-	return machinePools
-}
-
 // UpgradeMachineDeploymentsAndWaitInput is the input type for UpgradeMachineDeploymentsAndWait.
 type UpgradeMachineDeploymentsAndWaitInput struct {
 	ClusterProxy          ClusterProxy
diff --git a/test/framework/machinepool_helpers.go b/test/framework/machinepool_helpers.go
new file mode 100644
index 000000000000..b45da19d41bb
--- /dev/null
+++ b/test/framework/machinepool_helpers.go
@@ -0,0 +1,103 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package framework
+
+import (
+	"context"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
+	clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1alpha3"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// GetMachinePoolsByClusterInput is the input for GetMachinePoolsByCluster
+type GetMachinePoolsByClusterInput struct {
+	Lister      Lister
+	ClusterName string
+	Namespace   string
+}
+
+// GetMachinePoolsByCluster returns the MachinePool objects for a cluster.
+// Important! this method relies on labels that are created by the CAPI controllers during the first reconciliation, so
+// it is necessary to ensure this has already happened before calling it.
+func GetMachinePoolsByCluster(ctx context.Context, input GetMachinePoolsByClusterInput) []*clusterv1exp.MachinePool {
+	mpList := &clusterv1exp.MachinePoolList{}
+	Expect(input.Lister.List(ctx, mpList, byClusterOptions(input.ClusterName, input.Namespace)...)).To(Succeed(), "Failed to list MachinePool objects for Cluster %s/%s", input.Namespace, input.ClusterName)
+
+	mps := make([]*clusterv1exp.MachinePool, len(mpList.Items))
+	for i := range mpList.Items {
+		mps[i] = &mpList.Items[i]
+	}
+	return mps
+}
+
+// WaitForMachinePoolNodesToExistInput is the input for WaitForMachinePoolNodesToExist.
+type WaitForMachinePoolNodesToExistInput struct {
+	Getter      Getter
+	MachinePool *clusterv1exp.MachinePool
+}
+
+// WaitForMachinePoolNodesToExist waits until all nodes associated with a machine pool exist.
+func WaitForMachinePoolNodesToExist(ctx context.Context, input WaitForMachinePoolNodesToExistInput, intervals ...interface{}) {
+	Expect(ctx).NotTo(BeNil(), "ctx is required for WaitForMachinePoolNodesToExist")
+	Expect(input.Getter).ToNot(BeNil(), "Invalid argument. input.Getter can't be nil when calling WaitForMachinePoolNodesToExist")
+	Expect(input.MachinePool).ToNot(BeNil(), "Invalid argument. 
input.MachinePool can't be nil when calling WaitForMachinePoolNodesToExist")
+
+	By("waiting for the machine pool workload nodes to exist")
+	Eventually(func() (int, error) {
+		nn := client.ObjectKey{
+			Namespace: input.MachinePool.Namespace,
+			Name:      input.MachinePool.Name,
+		}
+
+		if err := input.Getter.Get(ctx, nn, input.MachinePool); err != nil {
+			return 0, err
+		}
+
+		return int(input.MachinePool.Status.ReadyReplicas), nil
+	}, intervals...).Should(Equal(int(*input.MachinePool.Spec.Replicas)))
+}
+
+// DiscoveryAndWaitForMachinePoolsInput is the input type for DiscoveryAndWaitForMachinePools.
+type DiscoveryAndWaitForMachinePoolsInput struct {
+	Getter  Getter
+	Lister  Lister
+	Cluster *clusterv1.Cluster
+}
+
+// DiscoveryAndWaitForMachinePools discovers the MachinePools existing in a cluster and waits for them to be ready (all the machines provisioned).
+func DiscoveryAndWaitForMachinePools(ctx context.Context, input DiscoveryAndWaitForMachinePoolsInput, intervals ...interface{}) []*clusterv1exp.MachinePool {
+	Expect(ctx).NotTo(BeNil(), "ctx is required for DiscoveryAndWaitForMachinePools")
+	Expect(input.Lister).ToNot(BeNil(), "Invalid argument. input.Lister can't be nil when calling DiscoveryAndWaitForMachinePools")
+	Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling DiscoveryAndWaitForMachinePools")
+
+	machinePools := GetMachinePoolsByCluster(ctx, GetMachinePoolsByClusterInput{
+		Lister:      input.Lister,
+		ClusterName: input.Cluster.Name,
+		Namespace:   input.Cluster.Namespace,
+	})
+	for _, machinepool := range machinePools {
+		WaitForMachinePoolNodesToExist(ctx, WaitForMachinePoolNodesToExistInput{
+			Getter:      input.Getter,
+			MachinePool: machinepool,
+		}, intervals...)
+	}
+	return machinePools
+}
diff --git a/test/infrastructure/docker/Makefile b/test/infrastructure/docker/Makefile
index 0d75fec84584..88b0a7606243 100644
--- a/test/infrastructure/docker/Makefile
+++ b/test/infrastructure/docker/Makefile
@@ -36,6 +36,7 @@ export DOCKER_CLI_EXPERIMENTAL := enabled
 TOOLS_DIR := hack/tools
 TOOLS_BIN_DIR := $(TOOLS_DIR)/bin
 BIN_DIR := bin
+EXP_DIR := exp
 
 # Binaries.
 CONTROLLER_GEN := $(TOOLS_BIN_DIR)/controller-gen
@@ -108,7 +109,8 @@ generate: $(CONTROLLER_GEN) ## Generate code
 generate-go: $(CONTROLLER_GEN) $(CONVERSION_GEN) ## Runs Go related generate targets
 	$(CONTROLLER_GEN) \
 		object:headerFile=$(ROOT)/hack/boilerplate/boilerplate.generatego.txt \
-		paths=./api/...
+		paths=./api/... \
+		paths=./$(EXP_DIR)/api/...
 	$(CONVERSION_GEN) \
 		--input-dirs=./api/v1alpha3 \
 		--output-file-base=zz_generated.conversion \
@@ -118,6 +120,8 @@ generate-go: $(CONTROLLER_GEN) $(CONVERSION_GEN) ## Runs Go related generate tar
 generate-manifests: $(CONTROLLER_GEN) ## Generate manifests e.g. CRD, RBAC etc.
 	$(CONTROLLER_GEN) \
 		paths=./api/... \
+		paths=./$(EXP_DIR)/api/... \
+		paths=./$(EXP_DIR)/controllers/... \
 		paths=./controllers/... 
\
 		crd:crdVersions=v1 \
 		rbac:roleName=manager-role \
diff --git a/test/infrastructure/docker/config/crd/bases/exp.infrastructure.cluster.x-k8s.io_dockermachinepools.yaml b/test/infrastructure/docker/config/crd/bases/exp.infrastructure.cluster.x-k8s.io_dockermachinepools.yaml
new file mode 100644
index 000000000000..88959d6c2293
--- /dev/null
+++ b/test/infrastructure/docker/config/crd/bases/exp.infrastructure.cluster.x-k8s.io_dockermachinepools.yaml
@@ -0,0 +1,200 @@
+
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.2.9
+  creationTimestamp: null
+  name: dockermachinepools.exp.infrastructure.cluster.x-k8s.io
+spec:
+  group: exp.infrastructure.cluster.x-k8s.io
+  names:
+    categories:
+    - cluster-api
+    kind: DockerMachinePool
+    listKind: DockerMachinePoolList
+    plural: dockermachinepools
+    singular: dockermachinepool
+  scope: Namespaced
+  versions:
+  - name: v1alpha3
+    schema:
+      openAPIV3Schema:
+        description: DockerMachinePool is the Schema for the dockermachinepools API
+        properties:
+          apiVersion:
+            description: 'APIVersion defines the versioned schema of this representation
+              of an object. Servers should convert recognized schemas to the latest
+              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+            type: string
+          kind:
+            description: 'Kind is a string value representing the REST resource this
+              object represents. Servers may infer this from the endpoint the client
+              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: DockerMachinePoolSpec defines the desired state of DockerMachinePool
+            properties:
+              providerID:
+                description: ProviderID is the identification ID of the Machine Pool
+                type: string
+              providerIDList:
+                description: ProviderIDList are the identification IDs of Machine
+                  Pool machine instances
+                items:
+                  type: string
+                type: array
+              template:
+                description: Template contains the details used to build a replica
+                  machine within the Machine Pool
+                properties:
+                  customImage:
+                    description: CustomImage allows customizing the container image
+                      that is used for running the machine
+                    type: string
+                  extraMounts:
+                    description: ExtraMounts describes additional mount points for
+                      the node container These may be used to bind a hostPath
+                    items:
+                      description: Mount specifies a host volume to mount into a
+                        container. This is a simplified version of kind v1alpha4.Mount
+                        types
+                      properties:
+                        containerPath:
+                          description: Path of the mount within the container.
+                          type: string
+                        hostPath:
+                          description: Path of the mount on the host. If the hostPath
+                            doesn't exist, then runtimes should report error. If the
+                            hostpath is a symbolic link, runtimes should follow the
+                            symlink and mount the real destination to container.
+                          type: string
+                        readOnly:
+                          description: If set, the mount is read-only.
+                          type: boolean
+                      type: object
+                    type: array
+                  preLoadImages:
+                    description: PreLoadImages allows to pre-load images in a newly
+                      created machine. This can be used to speed up tests by avoiding
+                      e.g. to download CNI images on all the containers. 
+ items: + type: string + type: array + type: object + type: object + status: + description: DockerMachinePoolStatus defines the observed state of DockerMachinePool + properties: + conditions: + description: Conditions defines current service state of the DockerMachinePool. + items: + description: Condition defines an observation of a Cluster API resource + operational state. + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. This should be when the underlying condition changed. + If that is not known, then using the time when the API field + changed is acceptable. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. This field may be empty. + type: string + reason: + description: The reason for the condition's last transition + in CamelCase. The specific API may choose whether or not this + field is considered a guaranteed API. This field may not be + empty. + type: string + severity: + description: Severity provides an explicit classification of + Reason code, so the users or machines can immediately understand + the current situation and act accordingly. The Severity field + MUST be set only when Status=False. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. + type: string + required: + - status + - type + type: object + type: array + machines: + description: Machines contains the status for each machine in the + pool + items: + properties: + addresses: + description: Addresses contains the associated addresses for + the docker machine. + items: + description: MachineAddress contains information for the node's + address. + properties: + address: + description: The machine address. + type: string + type: + description: Machine address type, one of Hostname, ExternalIP + or InternalIP. + type: string + required: + - address + - type + type: object + type: array + bootstrapped: + description: Bootstrapped is true when the kubeadm bootstrapping + has been run against this machine + type: boolean + instanceID: + description: InstanceID is the identification of the Machine + Instance within the Machine Pool + format: int32 + type: integer + providerID: + description: ProviderID is the provider identification of the + Machine Pool Instance + type: string + ready: + description: Ready denotes that the machine (docker container) + is ready + type: boolean + version: + description: Version defines the Kubernetes version for the + Machine Instance + type: string + type: object + type: array + observedGeneration: + description: The generation observed by the deployment controller. 
+ format: int64 + type: integer + ready: + description: Ready denotes that the machine pool is ready + type: boolean + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/test/infrastructure/docker/config/crd/kustomization.yaml b/test/infrastructure/docker/config/crd/kustomization.yaml index 257fa4b536de..8323fd3162e5 100644 --- a/test/infrastructure/docker/config/crd/kustomization.yaml +++ b/test/infrastructure/docker/config/crd/kustomization.yaml @@ -10,6 +10,7 @@ resources: - bases/infrastructure.cluster.x-k8s.io_dockermachines.yaml - bases/infrastructure.cluster.x-k8s.io_dockerclusters.yaml - bases/infrastructure.cluster.x-k8s.io_dockermachinetemplates.yaml +- bases/exp.infrastructure.cluster.x-k8s.io_dockermachinepools.yaml # +kubebuilder:scaffold:crdkustomizeresource patchesStrategicMerge: [] diff --git a/test/infrastructure/docker/config/crd/patches/cainjection_in_dockermachinepools.yaml b/test/infrastructure/docker/config/crd/patches/cainjection_in_dockermachinepools.yaml new file mode 100644 index 000000000000..14bbeb5ca607 --- /dev/null +++ b/test/infrastructure/docker/config/crd/patches/cainjection_in_dockermachinepools.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: dockermachinepools.exp.infrastructure.cluster.x-k8s.io diff --git a/test/infrastructure/docker/config/crd/patches/webhook_in_dockermachinepools.yaml b/test/infrastructure/docker/config/crd/patches/webhook_in_dockermachinepools.yaml new file mode 100644 index 000000000000..6f25a71cd84f --- /dev/null +++ b/test/infrastructure/docker/config/crd/patches/webhook_in_dockermachinepools.yaml @@ -0,0 +1,19 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. 
+apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: dockermachinepools.exp.infrastructure.cluster.x-k8s.io +spec: + conversion: + strategy: Webhook + webhook: + conversionReviewVersions: ["v1", "v1beta1"] + clientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert \ No newline at end of file diff --git a/test/infrastructure/docker/config/manager/manager_auth_proxy_patch.yaml b/test/infrastructure/docker/config/manager/manager_auth_proxy_patch.yaml index 6c8f9cebdf34..42d3f1771a34 100644 --- a/test/infrastructure/docker/config/manager/manager_auth_proxy_patch.yaml +++ b/test/infrastructure/docker/config/manager/manager_auth_proxy_patch.yaml @@ -21,5 +21,6 @@ spec: name: https - name: manager args: + - "--feature-gates=MachinePool=${EXP_MACHINE_POOL:=false}" - "--metrics-addr=0" - "-v=4" diff --git a/test/infrastructure/docker/config/rbac/role.yaml b/test/infrastructure/docker/config/rbac/role.yaml index 18b12e9d8e05..e38c761daa26 100644 --- a/test/infrastructure/docker/config/rbac/role.yaml +++ b/test/infrastructure/docker/config/rbac/role.yaml @@ -14,6 +14,14 @@ rules: - get - list - watch +- apiGroups: + - exp.cluster.x-k8s.io + resources: + - machinepools + verbs: + - get + - list + - watch - apiGroups: - cluster.x-k8s.io resources: @@ -63,3 +71,23 @@ rules: - get - patch - update +- apiGroups: + - exp.infrastructure.cluster.x-k8s.io + resources: + - dockermachinepools + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - exp.infrastructure.cluster.x-k8s.io + resources: + - dockermachinepools/status + verbs: + - get + - patch + - update \ No newline at end of file diff --git a/test/infrastructure/docker/examples/machine-pool.yaml b/test/infrastructure/docker/examples/machine-pool.yaml new file mode 100644 index 000000000000..de5e76641ef0 --- /dev/null +++ b/test/infrastructure/docker/examples/machine-pool.yaml @@ -0,0 +1,102 @@ +# Creates a cluster with one control-plane node and one worker node +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: Cluster +metadata: + name: my-cluster + namespace: default +spec: + clusterNetwork: + services: + cidrBlocks: ["10.96.0.0/12"] + pods: + cidrBlocks: ["192.168.0.0/16"] + serviceDomain: cluster.local + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 + kind: KubeadmControlPlane + name: controlplane + namespace: default + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: DockerCluster + name: my-cluster + namespace: default +--- +apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 +kind: KubeadmControlPlane +metadata: + name: controlplane + namespace: default +spec: + replicas: 1 + version: v1.18.8 + infrastructureTemplate: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: DockerMachineTemplate + name: controlplane + namespace: default + kubeadmConfigSpec: + clusterConfiguration: + controllerManager: + extraArgs: + enable-hostpath-provisioner: true + initConfiguration: + nodeRegistration: + kubeletExtraArgs: + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: DockerCluster +metadata: + name: my-cluster + namespace: default +--- +apiVersion: 
infrastructure.cluster.x-k8s.io/v1alpha3 +kind: DockerMachineTemplate +metadata: + name: controlplane + namespace: default +spec: + template: + spec: {} +--- +apiVersion: exp.cluster.x-k8s.io/v1alpha3 +kind: MachinePool +metadata: + name: worker-mp-0 + namespace: default +spec: + clusterName: my-cluster + replicas: 2 + template: + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 + kind: KubeadmConfig + name: worker-mp-0-config + namespace: default + clusterName: my-cluster + infrastructureRef: + apiVersion: exp.infrastructure.cluster.x-k8s.io/v1alpha3 + kind: DockerMachinePool + name: worker-dmp-0 + namespace: default + version: v1.18.8 +--- +apiVersion: exp.infrastructure.cluster.x-k8s.io/v1alpha3 +kind: DockerMachinePool +metadata: + name: worker-dmp-0 + namespace: default +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 +kind: KubeadmConfig +metadata: + name: worker-mp-0-config + namespace: default +spec: + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% diff --git a/test/infrastructure/docker/exp/PROJECT b/test/infrastructure/docker/exp/PROJECT new file mode 100644 index 000000000000..30c1be97a308 --- /dev/null +++ b/test/infrastructure/docker/exp/PROJECT @@ -0,0 +1,7 @@ +domain: cluster.x-k8s.io +repo: sigs.k8s.io/cluster-api/test/infrastructure/docker/exp +resources: +- group: exp.infrastructure + kind: DockerMachinePool + version: v1alpha3 +version: "2" diff --git a/test/infrastructure/docker/exp/README.md b/test/infrastructure/docker/exp/README.md new file mode 100644 index 000000000000..b819cf94b89d --- /dev/null +++ b/test/infrastructure/docker/exp/README.md @@ -0,0 +1,21 @@ +# exp + +This subrepository holds experimental code and API types. + +**Warning**: Packages here are experimental and unreliable. Some may one day be promoted to the main repository, or they may be modified arbitrarily or even disappear altogether. + +In short, code in this subrepository is not subject to any compatibility or deprecation promise. + +Experiments follow a strict lifecycle: Alpha -> Beta prior to Graduation. + +For more information on graduation criteria, see: [Contributing Guidelines](../CONTRIBUTING.md#experiments) + +## Active Features + DockerMachinePool (alpha) + +## Create a new Resource +Below is an example of creating a `DockerMachinePool` resource in the experimental group. +``` +kubebuilder create api --kind DockerMachinePool --group exp.infrastructure --version v1alpha3 \ + --controller=true --resource=true --make=false +``` \ No newline at end of file diff --git a/test/infrastructure/docker/exp/api/v1alpha3/dockermachinepool_types.go b/test/infrastructure/docker/exp/api/v1alpha3/dockermachinepool_types.go new file mode 100644 index 000000000000..0b052b32f44f --- /dev/null +++ b/test/infrastructure/docker/exp/api/v1alpha3/dockermachinepool_types.go @@ -0,0 +1,143 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/test/infrastructure/docker/exp/PROJECT b/test/infrastructure/docker/exp/PROJECT
new file mode 100644
index 000000000000..30c1be97a308
--- /dev/null
+++ b/test/infrastructure/docker/exp/PROJECT
@@ -0,0 +1,7 @@
+domain: cluster.x-k8s.io
+repo: sigs.k8s.io/cluster-api/test/infrastructure/docker/exp
+resources:
+- group: exp.infrastructure
+  kind: DockerMachinePool
+  version: v1alpha3
+version: "2"
diff --git a/test/infrastructure/docker/exp/README.md b/test/infrastructure/docker/exp/README.md
new file mode 100644
index 000000000000..b819cf94b89d
--- /dev/null
+++ b/test/infrastructure/docker/exp/README.md
@@ -0,0 +1,21 @@
+# exp
+
+This subrepository holds experimental code and API types.
+
+**Warning**: Packages here are experimental and unreliable. Some may one day be promoted to the main repository, or they may be modified arbitrarily or even disappear altogether.
+
+In short, code in this subrepository is not subject to any compatibility or deprecation promise.
+
+Experiments follow a strict lifecycle: Alpha -> Beta, prior to Graduation.
+
+For more information on graduation criteria, see: [Contributing Guidelines](../CONTRIBUTING.md#experiments)
+
+## Active Features
+- DockerMachinePool (alpha)
+
+## Create a new Resource
+Below is an example of creating a `DockerMachinePool` resource in the experimental group.
+```
+kubebuilder create api --kind DockerMachinePool --group exp.infrastructure --version v1alpha3 \
+  --controller=true --resource=true --make=false
+```
\ No newline at end of file
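The kubebuilder command scaffolds the API types and a controller stub, but the new group still has to be registered with a manager's scheme before the controller can run. A minimal sketch of that wiring (the real version is in the `main.go` changes at the end of this patch):

```go
package main

import (
	infrav1exp "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1alpha3"
	ctrl "sigs.k8s.io/controller-runtime"
)

func setup() error {
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{})
	if err != nil {
		return err
	}
	// Register the exp.infrastructure.cluster.x-k8s.io/v1alpha3 types.
	return infrav1exp.AddToScheme(mgr.GetScheme())
}
```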
diff --git a/test/infrastructure/docker/exp/api/v1alpha3/dockermachinepool_types.go b/test/infrastructure/docker/exp/api/v1alpha3/dockermachinepool_types.go
new file mode 100644
index 000000000000..0b052b32f44f
--- /dev/null
+++ b/test/infrastructure/docker/exp/api/v1alpha3/dockermachinepool_types.go
@@ -0,0 +1,143 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha3
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
+	infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1alpha3"
+)
+
+const (
+	// MachinePoolFinalizer allows ReconcileDockerMachinePool to clean up resources.
+	MachinePoolFinalizer = "dockermachinepool.infrastructure.cluster.x-k8s.io"
+)
+
+// DockerMachineTemplate defines the desired state of a DockerMachine in the pool.
+type DockerMachineTemplate struct {
+	// CustomImage allows customizing the container image that is used for
+	// running the machine.
+	// +optional
+	CustomImage string `json:"customImage,omitempty"`
+
+	// PreLoadImages allows pre-loading images into a newly created machine. This can be used to
+	// speed up tests by avoiding, e.g., downloading CNI images on all the containers.
+	// +optional
+	PreLoadImages []string `json:"preLoadImages,omitempty"`
+
+	// ExtraMounts describes additional mount points for the node container.
+	// These may be used to bind a hostPath.
+	// +optional
+	ExtraMounts []infrav1.Mount `json:"extraMounts,omitempty"`
+}
+
+// DockerMachinePoolSpec defines the desired state of DockerMachinePool
+type DockerMachinePoolSpec struct {
+	// Template contains the details used to build a replica machine within the Machine Pool
+	// +optional
+	Template DockerMachineTemplate `json:"template"`
+
+	// ProviderID is the identification ID of the Machine Pool
+	// +optional
+	ProviderID string `json:"providerID,omitempty"`
+
+	// ProviderIDList are the identification IDs of the Machine Pool's machine instances
+	// +optional
+	ProviderIDList []string `json:"providerIDList,omitempty"`
+}
+
+// DockerMachinePoolStatus defines the observed state of DockerMachinePool
+type DockerMachinePoolStatus struct {
+	// Ready denotes that the machine pool is ready
+	// +optional
+	Ready bool `json:"ready"`
+
+	// ObservedGeneration is the latest generation observed by the DockerMachinePool controller.
+	// +optional
+	ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+
+	// Machines contains the status for each machine in the pool
+	// +optional
+	Machines []*DockerMachinePoolInstanceStatus `json:"machines,omitempty"`
+
+	// Conditions defines current service state of the DockerMachinePool.
+	// +optional
+	Conditions clusterv1.Conditions `json:"conditions,omitempty"`
+}
+
+// DockerMachinePoolInstanceStatus is the observed state of a single machine (container) in the pool.
+type DockerMachinePoolInstanceStatus struct {
+	// Addresses contains the associated addresses for the docker machine.
+	// +optional
+	Addresses []clusterv1.MachineAddress `json:"addresses,omitempty"`
+
+	// InstanceID is the identification of the Machine Instance within the Machine Pool
+	// +optional
+	InstanceID *int32 `json:"instanceID,omitempty"`
+
+	// ProviderID is the provider identification of the Machine Pool Instance
+	// +optional
+	ProviderID *string `json:"providerID,omitempty"`
+
+	// Version defines the Kubernetes version for the Machine Instance
+	// +optional
+	Version *string `json:"version,omitempty"`
+
+	// Ready denotes that the machine (docker container) is ready
+	// +optional
+	Ready bool `json:"ready"`
+
+	// Bootstrapped is true when kubeadm bootstrapping has been run
+	// against this machine
+	// +optional
+	Bootstrapped bool `json:"bootstrapped,omitempty"`
+}
+
+// +kubebuilder:resource:path=dockermachinepools,scope=Namespaced,categories=cluster-api
+// +kubebuilder:object:root=true
+// +kubebuilder:storageversion
+// +kubebuilder:subresource:status
+
+// DockerMachinePool is the Schema for the dockermachinepools API
+type DockerMachinePool struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   DockerMachinePoolSpec   `json:"spec,omitempty"`
+	Status DockerMachinePoolStatus `json:"status,omitempty"`
+}
+
+// GetConditions returns the conditions of the DockerMachinePool.
+func (c *DockerMachinePool) GetConditions() clusterv1.Conditions {
+	return c.Status.Conditions
+}
+
+// SetConditions replaces the conditions of the DockerMachinePool.
+func (c *DockerMachinePool) SetConditions(conditions clusterv1.Conditions) {
+	c.Status.Conditions = conditions
+}
+
+// +kubebuilder:object:root=true
+
+// DockerMachinePoolList contains a list of DockerMachinePool
+type DockerMachinePoolList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []DockerMachinePool `json:"items"`
+}
+
+func init() {
+	SchemeBuilder.Register(&DockerMachinePool{}, &DockerMachinePoolList{})
+}
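The `GetConditions`/`SetConditions` accessors exist so the object satisfies the getter/setter interfaces used by cluster-api's `util/conditions` helpers (the controller below still carries a TODO to actually set conditions). A minimal sketch; the `ReplicasReady` condition type is hypothetical, used only for illustration:

```go
package example

import (
	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
	infrav1exp "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1alpha3"
	"sigs.k8s.io/cluster-api/util/conditions"
)

func markReady(pool *infrav1exp.DockerMachinePool) bool {
	// MarkTrue stores the condition in pool.Status.Conditions via the
	// SetConditions accessor defined above.
	conditions.MarkTrue(pool, clusterv1.ConditionType("ReplicasReady"))
	return conditions.IsTrue(pool, clusterv1.ConditionType("ReplicasReady"))
}
```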
diff --git a/test/infrastructure/docker/exp/api/v1alpha3/groupversion_info.go b/test/infrastructure/docker/exp/api/v1alpha3/groupversion_info.go
new file mode 100644
index 000000000000..6a6a8d3c9fcb
--- /dev/null
+++ b/test/infrastructure/docker/exp/api/v1alpha3/groupversion_info.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package v1alpha3 contains API Schema definitions for the exp.infrastructure v1alpha3 API group
+// +kubebuilder:object:generate=true
+// +groupName=exp.infrastructure.cluster.x-k8s.io
+package v1alpha3
+
+import (
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"sigs.k8s.io/controller-runtime/pkg/scheme"
+)
+
+var (
+	// GroupVersion is the group version used to register these objects.
+	GroupVersion = schema.GroupVersion{Group: "exp.infrastructure.cluster.x-k8s.io", Version: "v1alpha3"}
+
+	// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
+	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
+
+	// AddToScheme adds the types in this group-version to the given scheme.
+	AddToScheme = SchemeBuilder.AddToScheme
+)
diff --git a/test/infrastructure/docker/exp/api/v1alpha3/zz_generated.deepcopy.go b/test/infrastructure/docker/exp/api/v1alpha3/zz_generated.deepcopy.go
new file mode 100644
index 000000000000..293faf4a6005
--- /dev/null
+++ b/test/infrastructure/docker/exp/api/v1alpha3/zz_generated.deepcopy.go
@@ -0,0 +1,200 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package v1alpha3
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	cluster_apiapiv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3"
+	apiv1alpha3 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1alpha3"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DockerMachinePool) DeepCopyInto(out *DockerMachinePool) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerMachinePool.
+func (in *DockerMachinePool) DeepCopy() *DockerMachinePool {
+	if in == nil {
+		return nil
+	}
+	out := new(DockerMachinePool)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DockerMachinePool) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DockerMachinePoolInstanceStatus) DeepCopyInto(out *DockerMachinePoolInstanceStatus) {
+	*out = *in
+	if in.Addresses != nil {
+		in, out := &in.Addresses, &out.Addresses
+		*out = make([]cluster_apiapiv1alpha3.MachineAddress, len(*in))
+		copy(*out, *in)
+	}
+	if in.InstanceID != nil {
+		in, out := &in.InstanceID, &out.InstanceID
+		*out = new(int32)
+		**out = **in
+	}
+	if in.ProviderID != nil {
+		in, out := &in.ProviderID, &out.ProviderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.Version != nil {
+		in, out := &in.Version, &out.Version
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerMachinePoolInstanceStatus.
+func (in *DockerMachinePoolInstanceStatus) DeepCopy() *DockerMachinePoolInstanceStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(DockerMachinePoolInstanceStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DockerMachinePoolList) DeepCopyInto(out *DockerMachinePoolList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]DockerMachinePool, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerMachinePoolList.
+func (in *DockerMachinePoolList) DeepCopy() *DockerMachinePoolList {
+	if in == nil {
+		return nil
+	}
+	out := new(DockerMachinePoolList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DockerMachinePoolList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DockerMachinePoolSpec) DeepCopyInto(out *DockerMachinePoolSpec) {
+	*out = *in
+	in.Template.DeepCopyInto(&out.Template)
+	if in.ProviderIDList != nil {
+		in, out := &in.ProviderIDList, &out.ProviderIDList
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerMachinePoolSpec.
+func (in *DockerMachinePoolSpec) DeepCopy() *DockerMachinePoolSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(DockerMachinePoolSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DockerMachinePoolStatus) DeepCopyInto(out *DockerMachinePoolStatus) {
+	*out = *in
+	if in.Machines != nil {
+		in, out := &in.Machines, &out.Machines
+		*out = make([]*DockerMachinePoolInstanceStatus, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(DockerMachinePoolInstanceStatus)
+				(*in).DeepCopyInto(*out)
+			}
+		}
+	}
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make(cluster_apiapiv1alpha3.Conditions, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerMachinePoolStatus.
+func (in *DockerMachinePoolStatus) DeepCopy() *DockerMachinePoolStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(DockerMachinePoolStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DockerMachineTemplate) DeepCopyInto(out *DockerMachineTemplate) {
+	*out = *in
+	if in.PreLoadImages != nil {
+		in, out := &in.PreLoadImages, &out.PreLoadImages
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.ExtraMounts != nil {
+		in, out := &in.ExtraMounts, &out.ExtraMounts
+		*out = make([]apiv1alpha3.Mount, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerMachineTemplate.
+func (in *DockerMachineTemplate) DeepCopy() *DockerMachineTemplate {
+	if in == nil {
+		return nil
+	}
+	out := new(DockerMachineTemplate)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/test/infrastructure/docker/exp/controllers/dockermachinepool_controller.go b/test/infrastructure/docker/exp/controllers/dockermachinepool_controller.go
new file mode 100644
index 000000000000..12cd3b90897d
--- /dev/null
+++ b/test/infrastructure/docker/exp/controllers/dockermachinepool_controller.go
@@ -0,0 +1,420 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllers
+
+import (
+	"context"
+	"encoding/base64"
+	"fmt"
+	"time"
+
+	"github.com/go-logr/logr"
+	"github.com/pkg/errors"
+	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/utils/pointer"
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
+	clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1alpha3"
+	utilexp "sigs.k8s.io/cluster-api/exp/util"
+	"sigs.k8s.io/cluster-api/test/infrastructure/docker/docker"
+	infrav1exp "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1alpha3"
+	"sigs.k8s.io/cluster-api/util"
+	"sigs.k8s.io/cluster-api/util/patch"
+	"sigs.k8s.io/cluster-api/util/predicates"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
+	"sigs.k8s.io/controller-runtime/pkg/source"
+	"sigs.k8s.io/kind/pkg/cluster/constants"
+)
+
+type (
+	// DockerMachinePoolReconciler reconciles a DockerMachinePool object
+	DockerMachinePoolReconciler struct {
+		client.Client
+		Log    logr.Logger
+		Scheme *runtime.Scheme
+	}
+
+	// TransientError signals a condition on a pool instance that is expected
+	// to resolve on a future reconciliation, so callers should requeue
+	// instead of failing outright.
+	TransientError struct {
+		InstanceName string
+		Reason       string
+	}
+)
+
+func (e *TransientError) Error() string {
+	return fmt.Sprintf("transient error for instance %s: %s", e.InstanceName, e.Reason)
+}
+
+// Is reports whether target is a *TransientError, letting errors.Is match
+// any TransientError regardless of its fields.
+func (e *TransientError) Is(target error) bool {
+	_, ok := target.(*TransientError)
+	return ok
+}
+
+// +kubebuilder:rbac:groups=exp.infrastructure.cluster.x-k8s.io,resources=dockermachinepools,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=exp.infrastructure.cluster.x-k8s.io,resources=dockermachinepools/status,verbs=get;update;patch
+// +kubebuilder:rbac:groups=exp.cluster.x-k8s.io,resources=machinepools;machinepools/status,verbs=get;list;watch
+// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch
+func (r *DockerMachinePoolReconciler) Reconcile(req ctrl.Request) (res ctrl.Result, rerr error) {
+	ctx := context.Background()
+	log := r.Log.WithName("dockermachinepool").WithValues("docker-machine-pool", req.NamespacedName)
+
+	// Fetch the DockerMachinePool instance.
+	dockerMachinePool := &infrav1exp.DockerMachinePool{}
+	if err := r.Client.Get(ctx, req.NamespacedName, dockerMachinePool); err != nil {
+		if apierrors.IsNotFound(err) {
+			return ctrl.Result{}, nil
+		}
+		return ctrl.Result{}, err
+	}
+
+	// Fetch the MachinePool.
+	machinePool, err := utilexp.GetOwnerMachinePool(ctx, r.Client, dockerMachinePool.ObjectMeta)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+	if machinePool == nil {
+		log.Info("Waiting for MachinePool Controller to set OwnerRef on DockerMachinePool")
+		return ctrl.Result{}, nil
+	}
+
+	log = log.WithValues("machine-pool", machinePool.Name)
+
+	// Fetch the Cluster.
+	cluster, err := util.GetClusterFromMetadata(ctx, r.Client, machinePool.ObjectMeta)
+	if err != nil {
+		log.Info("DockerMachinePool owner MachinePool is missing cluster label or cluster does not exist")
+		return ctrl.Result{}, err
+	}
+
+	if cluster == nil {
+		log.Info(fmt.Sprintf("Please associate this machine pool with a cluster using the label %s: <name of cluster>", clusterv1.ClusterLabelName))
+		return ctrl.Result{}, nil
+	}
+
+	log = log.WithValues("cluster", cluster.Name)
+
+	// Initialize the patch helper.
+	patchHelper, err := patch.NewHelper(dockerMachinePool, r)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+
+	// Always attempt to patch the DockerMachinePool object and its status after each reconciliation.
+	defer func() {
+		if err := patchDockerMachinePool(ctx, patchHelper, dockerMachinePool); err != nil {
+			log.Error(err, "failed to patch DockerMachinePool")
+			if rerr == nil {
+				rerr = err
+			}
+		}
+	}()
+
+	// Add the finalizer first, if it does not exist, to avoid a race condition between init and delete.
+	if !controllerutil.ContainsFinalizer(dockerMachinePool, infrav1exp.MachinePoolFinalizer) {
+		controllerutil.AddFinalizer(dockerMachinePool, infrav1exp.MachinePoolFinalizer)
+		return ctrl.Result{}, nil
+	}
+
+	// Handle deleted machine pools.
+	if !dockerMachinePool.ObjectMeta.DeletionTimestamp.IsZero() {
+		return r.reconcileDelete(ctx, cluster, machinePool, dockerMachinePool, log)
+	}
+
+	// Handle non-deleted machine pools.
+	return r.reconcileNormal(ctx, cluster, machinePool, dockerMachinePool, log)
+}
+
+// SetupWithManager will add watches for this controller.
+func (r *DockerMachinePoolReconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error {
+	clusterToDockerMachinePools, err := util.ClusterToObjectsMapper(mgr.GetClient(), &infrav1exp.DockerMachinePoolList{}, mgr.GetScheme())
+	if err != nil {
+		return err
+	}
+
+	c, err := ctrl.NewControllerManagedBy(mgr).
+		For(&infrav1exp.DockerMachinePool{}).
+		WithOptions(options).
+		WithEventFilter(predicates.ResourceNotPaused(r.Log)).
+		Watches(
+			&source.Kind{Type: &clusterv1exp.MachinePool{}},
+			&handler.EnqueueRequestsFromMapFunc{
+				ToRequests: utilexp.MachinePoolToInfrastructureMapFunc(infrav1exp.GroupVersion.WithKind("DockerMachinePool"), r.Log),
+			},
+		).
+		Build(r)
+	if err != nil {
+		return err
+	}
+	return c.Watch(
+		&source.Kind{Type: &clusterv1.Cluster{}},
+		&handler.EnqueueRequestsFromMapFunc{
+			ToRequests: clusterToDockerMachinePools,
+		},
+		predicates.ClusterUnpausedAndInfrastructureReady(r.Log),
+	)
+}
+
+func (r *DockerMachinePoolReconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Cluster, machinePool *clusterv1exp.MachinePool, dockerMachinePool *infrav1exp.DockerMachinePool, log logr.Logger) (ctrl.Result, error) {
+	// Delete every machine in the pool before removing the finalizer, so no
+	// containers are orphaned when the DockerMachinePool goes away.
+	realMachineCount := int32(len(dockerMachinePool.Status.Machines))
+	for i := int32(0); i < realMachineCount; i++ {
+		if err := deleteDockerWorkerMachine(ctx, cluster.Name, i, dockerMachinePool, log); err != nil {
+			return ctrl.Result{}, errors.Wrapf(err, "failed to delete docker worker machine")
+		}
+	}
+
+	controllerutil.RemoveFinalizer(dockerMachinePool, infrav1exp.MachinePoolFinalizer)
+	return ctrl.Result{}, nil
+}
+
+func (r *DockerMachinePoolReconciler) reconcileNormal(ctx context.Context, cluster *clusterv1.Cluster, machinePool *clusterv1exp.MachinePool, dockerMachinePool *infrav1exp.DockerMachinePool, log logr.Logger) (ctrl.Result, error) {
+	// Make sure bootstrap data is available and populated.
+	if machinePool.Spec.Template.Spec.Bootstrap.DataSecretName == nil {
+		log.Info("Waiting for the Bootstrap provider controller to set bootstrap data")
+		return ctrl.Result{}, nil
+	}
+
+	if machinePool.Spec.Replicas == nil {
+		machinePool.Spec.Replicas = pointer.Int32Ptr(1)
+	}
+
+	needToRequeue := false
+	for i := int32(0); i < *machinePool.Spec.Replicas; i++ {
+		// Reconcile the docker container hosting this pool instance.
+		if err := r.reconcileDockerWorkerMachine(ctx, cluster.Name, i, machinePool, dockerMachinePool, log); err != nil {
+			if !errors.Is(err, &TransientError{}) {
+				// Something unexpected happened.
+				return ctrl.Result{}, errors.Wrapf(err, "failed to create docker worker machine")
+			}
+			// Ran into a transient error, which should resolve on a future reconciliation.
+			needToRequeue = true
+		}
+	}
+
+	// Check if there are more machines than the spec requires, and delete the excess.
+	realMachineCount := int32(len(dockerMachinePool.Status.Machines))
+	if realMachineCount > 0 && realMachineCount > *machinePool.Spec.Replicas {
+		for i := *machinePool.Spec.Replicas; i < realMachineCount; i++ {
+			if err := deleteDockerWorkerMachine(ctx, cluster.Name, i, dockerMachinePool, log); err != nil {
+				return ctrl.Result{}, errors.Wrapf(err, "failed to delete over-provisioned docker worker machine")
+			}
+		}
+	}
+
+	if dockerMachinePool.Spec.ProviderID == "" {
+		dockerMachinePool.Spec.ProviderID = getDockerMachinePoolProviderID(cluster.Name, dockerMachinePool.Name)
+	}
+
+	dockerMachinePool.Spec.ProviderIDList = []string{}
+	for _, instance := range dockerMachinePool.Status.Machines {
+		if instance.ProviderID != nil && instance.Ready {
+			dockerMachinePool.Spec.ProviderIDList = append(dockerMachinePool.Spec.ProviderIDList, *instance.ProviderID)
+		}
+	}
+
+	if needToRequeue {
+		return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
+	}
+
+	dockerMachinePool.Status.Ready = len(dockerMachinePool.Spec.ProviderIDList) == int(*machinePool.Spec.Replicas)
+	log.Info("successfully reconciled DockerMachinePool")
+	return ctrl.Result{}, nil
+}
+
+func (r *DockerMachinePoolReconciler) reconcileDockerWorkerMachine(ctx context.Context, clusterName string, instanceID int32, machinePool *clusterv1exp.MachinePool, dockerMachinePool *infrav1exp.DockerMachinePool, log logr.Logger) error {
+	instanceName := fmt.Sprintf("%s-%d", dockerMachinePool.Name, instanceID)
+	externalMachine, err := docker.NewMachine(clusterName, instanceName, dockerMachinePool.Spec.Template.CustomImage, log)
+	if err != nil {
+		return errors.Wrapf(err, "failed to create helper for managing the externalMachine named %s", instanceName)
+	}
+
+	log = log.WithValues("docker-machine-pool-instance", instanceName)
+	if externalMachine.Exists() {
+		log.Info("instance exists")
+		instanceStatus := getInstanceStatusByID(dockerMachinePool, instanceID)
+
+		// Delete the container if its status was never recorded or if its version no longer matches the spec.
+		if instanceStatus == nil || instanceStatus.Version == nil || *instanceStatus.Version != *machinePool.Spec.Template.Spec.Version {
+			currentVersion := "none"
+			if instanceStatus != nil && instanceStatus.Version != nil {
+				currentVersion = *instanceStatus.Version
+			}
+			log.Info("deleting container since versions don't match", "current", currentVersion, "desired", *machinePool.Spec.Template.Spec.Version)
+			if err := externalMachine.Delete(ctx); err != nil {
+				return errors.Wrapf(err, "failed to delete incorrectly versioned docker machine with instance name %s", instanceName)
+			}
+		}
+
+		if instanceStatus != nil && instanceStatus.Ready {
+			// Everything is done already.
+			return nil
+		}
+	}
+
+	if !externalMachine.Exists() {
+		log.Info("instance does not exist")
+		if err := externalMachine.Create(ctx, constants.WorkerNodeRoleValue, machinePool.Spec.Template.Spec.Version, dockerMachinePool.Spec.Template.ExtraMounts); err != nil {
+			return errors.Wrapf(err, "failed to create docker machine with instance name %s", instanceName)
+		}
+	}
+
+	instanceStatus := getInstanceStatusByID(dockerMachinePool, instanceID)
+	if instanceStatus == nil {
+		instanceStatus = &infrav1exp.DockerMachinePoolInstanceStatus{
+			InstanceID: pointer.Int32Ptr(instanceID),
+			Version:    machinePool.Spec.Template.Spec.Version,
+		}
+
+		// Preload images into the container.
+		if len(dockerMachinePool.Spec.Template.PreLoadImages) > 0 {
+			if err := externalMachine.PreloadLoadImages(ctx, dockerMachinePool.Spec.Template.PreLoadImages); err != nil {
+				return errors.Wrapf(err, "failed to pre-load images into the docker machine with instance name %s", instanceName)
+			}
+		}
+
+		dockerMachinePool.Status.Machines = append(dockerMachinePool.Status.Machines, instanceStatus)
+	}
+
+	// Run the bootstrap script only if the machine hasn't been bootstrapped yet.
+	if !instanceStatus.Bootstrapped {
+		bootstrapData, err := r.getBootstrapData(ctx, machinePool)
+		if err != nil {
+			return errors.Wrapf(err, "failed to get bootstrap data for instance named %s", instanceName)
+		}
+
+		timeoutctx, cancel := context.WithTimeout(ctx, 3*time.Minute)
+		defer cancel()
+		// Run the bootstrap script. Simulates cloud-init.
+		if err := externalMachine.ExecBootstrap(timeoutctx, bootstrapData); err != nil {
+			return errors.Wrapf(err, "failed to exec DockerMachinePool instance bootstrap for instance named %s", instanceName)
+		}
+		instanceStatus.Bootstrapped = true
+	}
+
+	if instanceStatus.Addresses == nil {
+		// Set the addresses in the instance status.
+		machineAddress, err := externalMachine.Address(ctx)
+		if err != nil {
+			return &TransientError{
+				InstanceName: instanceName,
+				Reason:       "failed to fetch addresses for container",
+			}
+		}
+
+		instanceStatus.Addresses = []clusterv1.MachineAddress{
+			{
+				Type:    clusterv1.MachineHostName,
+				Address: externalMachine.ContainerName(),
+			},
+			{
+				Type:    clusterv1.MachineInternalIP,
+				Address: machineAddress,
+			},
+			{
+				Type:    clusterv1.MachineExternalIP,
+				Address: machineAddress,
+			},
+		}
+	}
+
+	if instanceStatus.ProviderID == nil {
+		// Usually a cloud provider will do this, but there is no docker-cloud provider.
+		// Requeue if there is an error, as this is likely momentary load balancer
+		// state changes during control plane provisioning.
+		if err := externalMachine.SetNodeProviderID(ctx); err != nil {
+			log.Info("transient error setting the provider id")
+			return &TransientError{
+				InstanceName: instanceName,
+				Reason:       "failed to patch the Kubernetes node with the machine providerID",
+			}
+		}
+		// Set ProviderID so the Cluster API MachinePool controller can pull it.
+		providerID := externalMachine.ProviderID()
+		instanceStatus.ProviderID = &providerID
+	}
+
+	instanceStatus.Ready = true
+	return nil
+}
+
+func (r *DockerMachinePoolReconciler) getBootstrapData(ctx context.Context, machinePool *clusterv1exp.MachinePool) (string, error) {
+	if machinePool.Spec.Template.Spec.Bootstrap.DataSecretName == nil {
+		return "", errors.New("error retrieving bootstrap data: linked MachinePool's bootstrap.dataSecretName is nil")
+	}
+
+	s := &corev1.Secret{}
+	key := client.ObjectKey{Namespace: machinePool.GetNamespace(), Name: *machinePool.Spec.Template.Spec.Bootstrap.DataSecretName}
+	if err := r.Client.Get(ctx, key, s); err != nil {
+		return "", errors.Wrapf(err, "failed to retrieve bootstrap data secret for DockerMachinePool instance %s/%s", machinePool.GetNamespace(), machinePool.GetName())
+	}
+
+	value, ok := s.Data["value"]
+	if !ok {
+		return "", errors.New("error retrieving bootstrap data: secret value key is missing")
+	}
+
+	return base64.StdEncoding.EncodeToString(value), nil
+}
+
+func getDockerMachinePoolProviderID(clusterName, dockerMachinePoolName string) string {
+	return fmt.Sprintf("docker:////%s-dmp-%s", clusterName, dockerMachinePoolName)
+}
+
+func deleteDockerWorkerMachine(ctx context.Context, clusterName string, instanceID int32, dockerMachinePool *infrav1exp.DockerMachinePool, log logr.Logger) error {
+	instanceName := fmt.Sprintf("%s-%d", dockerMachinePool.Name, instanceID)
+	externalMachine, err := docker.NewMachine(clusterName, instanceName, dockerMachinePool.Spec.Template.CustomImage, log)
+	if err != nil {
+		return errors.Wrapf(err, "failed to create helper for managing the externalMachine named %s", instanceName)
+	}
+
+	log = log.WithValues("docker-machine-pool-instance", instanceName)
+	if externalMachine.Exists() {
+		log.Info("instance exists")
+		if err := externalMachine.Delete(ctx); err != nil {
+			return errors.Wrapf(err, "failed to delete docker machine with instance name %s", instanceName)
+		}
+	}
+
+	return nil
+}
+
+func getInstanceStatusByID(dockerMachinePool *infrav1exp.DockerMachinePool, instanceID int32) *infrav1exp.DockerMachinePoolInstanceStatus {
+	for _, machine := range dockerMachinePool.Status.Machines {
+		if machine.InstanceID != nil && *machine.InstanceID == instanceID {
+			return machine
+		}
+	}
+
+	return nil
+}
+
+func patchDockerMachinePool(ctx context.Context, patchHelper *patch.Helper, dockerMachinePool *infrav1exp.DockerMachinePool) error {
+	// TODO: add conditions
+
+	// Patch the object; once conditions are added, conflicts on conditions
+	// owned by this controller should be ignored here.
+	return patchHelper.Patch(
+		ctx,
+		dockerMachinePool,
+	)
+}
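The requeue behavior in `reconcileNormal` relies on `TransientError` implementing `Is`, so `errors.Is` matches any `*TransientError` regardless of its fields. A minimal sketch of the pattern (the helper name is hypothetical):

```go
package controllers

import (
	"time"

	"github.com/pkg/errors"
	ctrl "sigs.k8s.io/controller-runtime"
)

// flakyStep is a hypothetical stand-in for reconcileDockerWorkerMachine.
func flakyStep() error {
	return &TransientError{InstanceName: "worker-dmp-0-0", Reason: "container address not yet available"}
}

func exampleRequeue() (ctrl.Result, error) {
	if err := flakyStep(); err != nil {
		// errors.Is walks the error chain and calls (*TransientError).Is,
		// which matches on type alone.
		if errors.Is(err, &TransientError{}) {
			return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
		}
		return ctrl.Result{}, err
	}
	return ctrl.Result{}, nil
}
```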
diff --git a/test/infrastructure/docker/exp/doc.go b/test/infrastructure/docker/exp/doc.go
new file mode 100644
index 000000000000..8a32e6835abf
--- /dev/null
+++ b/test/infrastructure/docker/exp/doc.go
@@ -0,0 +1,17 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package exp
diff --git a/test/infrastructure/docker/go.mod b/test/infrastructure/docker/go.mod
index 11a373b0dba5..250c78434d2f 100644
--- a/test/infrastructure/docker/go.mod
+++ b/test/infrastructure/docker/go.mod
@@ -6,10 +6,12 @@ require (
 	github.com/go-logr/logr v0.1.0
 	github.com/onsi/gomega v1.10.1
 	github.com/pkg/errors v0.9.1
+	github.com/spf13/pflag v1.0.5
 	k8s.io/api v0.17.9
 	k8s.io/apimachinery v0.17.9
 	k8s.io/client-go v0.17.9
 	k8s.io/klog v1.0.0
+	k8s.io/utils v0.0.0-20200619165400-6e3d28b6ed19
 	sigs.k8s.io/cluster-api v0.3.3
 	sigs.k8s.io/controller-runtime v0.5.10
 	sigs.k8s.io/kind v0.7.1-0.20200303021537-981bd80d3802
diff --git a/test/infrastructure/docker/go.sum b/test/infrastructure/docker/go.sum
index f9f0da050fa2..fdaa44efb121 100644
--- a/test/infrastructure/docker/go.sum
+++ b/test/infrastructure/docker/go.sum
@@ -600,6 +600,7 @@ k8s.io/client-go v0.17.9/go.mod h1:3cM92qAd1XknA5IRkRfpJhl9OQjkYy97ZEUio70wVnI=
 k8s.io/cluster-bootstrap v0.17.8 h1:qee9dmkOVwngBf98zbwrij1s898EZ2aHg+ymXw1UBLU=
 k8s.io/cluster-bootstrap v0.17.8/go.mod h1:SC9J2Lt/MBOkxcCB04+5mYULLfDQL5kdM0BjtKaVCVU=
 k8s.io/code-generator v0.17.9/go.mod h1:iiHz51+oTx+Z9D0vB3CH3O4HDDPWrvZyUgUYaIE9h9M=
+k8s.io/component-base v0.17.9 h1:1CmgQ367Eo6UWkfO1sl7Z99KJpbwkrs9aMY5LZTQR9s=
 k8s.io/component-base v0.17.9/go.mod h1:Wg22ePDK0mfTa+bEFgZHGwr0h40lXnYy6D7D+f7itFk=
 k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
 k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
diff --git a/test/infrastructure/docker/hack/boilerplate.go.txt b/test/infrastructure/docker/hack/boilerplate.go.txt
index 0926592d3895..4b76f1fdd88a 100644
--- a/test/infrastructure/docker/hack/boilerplate.go.txt
+++ b/test/infrastructure/docker/hack/boilerplate.go.txt
@@ -1,5 +1,5 @@
 /*
-Copyright The Kubernetes Authors.
+Copyright YEAR The Kubernetes Authors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/test/infrastructure/docker/main.go b/test/infrastructure/docker/main.go
index a78125ffc0bd..536eed9e4dcc 100644
--- a/test/infrastructure/docker/main.go
+++ b/test/infrastructure/docker/main.go
@@ -18,6 +18,7 @@ package main
 
 import (
 	"flag"
+	"github.com/spf13/pflag"
 	"math/rand"
 	"os"
 	"time"
@@ -28,8 +29,12 @@ import (
 	"k8s.io/klog"
 	"k8s.io/klog/klogr"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
+	clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1alpha3"
+	"sigs.k8s.io/cluster-api/feature"
 	infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1alpha3"
 	"sigs.k8s.io/cluster-api/test/infrastructure/docker/controllers"
+	infrav1exp "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1alpha3"
+	expcontrollers "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/controllers"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/controller"
 	"sigs.k8s.io/controller-runtime/pkg/healthz"
@@ -49,24 +54,22 @@ var (
 )
 
 func init() {
+	klog.InitFlags(nil)
+
 	_ = scheme.AddToScheme(myscheme)
 	_ = infrav1.AddToScheme(myscheme)
+	_ = infrav1exp.AddToScheme(myscheme)
 	_ = clusterv1.AddToScheme(myscheme)
+	_ = clusterv1exp.AddToScheme(myscheme)
 	// +kubebuilder:scaffold:scheme
 }
 
 func main() {
 	rand.Seed(time.Now().UnixNano())
 
-	klog.InitFlags(nil)
-	flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.")
-	flag.IntVar(&concurrency, "concurrency", 10, "The number of docker machines to process simultaneously")
-	flag.BoolVar(&enableLeaderElection, "enable-leader-election", false,
-		"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
-	flag.DurationVar(&syncPeriod, "sync-period", 10*time.Minute,
-		"The minimum interval at which watched resources are reconciled (e.g. 15m)")
-	flag.StringVar(&healthAddr, "health-addr", ":9440", "The address the health endpoint binds to.")
-	flag.Parse()
+	initFlags(pflag.CommandLine)
+	pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
+	pflag.Parse()
 
 	ctrl.SetLogger(klogr.New())
@@ -95,6 +98,17 @@ func main() {
 	}
 }
 
+func initFlags(fs *pflag.FlagSet) {
+	fs.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.")
+	fs.IntVar(&concurrency, "concurrency", 10, "The number of docker machines to process simultaneously")
+	fs.BoolVar(&enableLeaderElection, "enable-leader-election", false,
+		"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
15m)") + fs.StringVar(&healthAddr, "health-addr", ":9440", "The address the health endpoint binds to.") + feature.MutableGates.AddFlag(fs) +} + func setupChecks(mgr ctrl.Manager) { if err := mgr.AddReadyzCheck("ping", healthz.Ping); err != nil { setupLog.Error(err, "unable to create ready check") @@ -125,6 +139,18 @@ func setupReconcilers(mgr ctrl.Manager) { setupLog.Error(err, "unable to create controller", "controller", "DockerCluster") os.Exit(1) } + + if feature.Gates.Enabled(feature.MachinePool) { + if err := (&expcontrollers.DockerMachinePoolReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("DockerMachinePool"), + }).SetupWithManager(mgr, controller.Options{ + MaxConcurrentReconciles: concurrency, + }); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "DockerMachinePool") + os.Exit(1) + } + } } func setupWebhooks(mgr ctrl.Manager) {