add exp api for docker machine pool
devigned committed Sep 16, 2020
1 parent f8560ff commit 4238b16
Showing 32 changed files with 1,711 additions and 139 deletions.
36 changes: 36 additions & 0 deletions exp/util/util.go
@@ -18,12 +18,15 @@ package util

import (
	"context"

	"github.com/go-logr/logr"
	"github.com/pkg/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1alpha3"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// GetOwnerMachinePool returns the MachinePool object owning the current resource.
@@ -52,3 +55,36 @@ func GetMachinePoolByName(ctx context.Context, c client.Client, namespace, name
	}
	return m, nil
}

// MachinePoolToInfrastructureMapFunc returns a handler.ToRequestsFunc that watches for
// MachinePool events and returns reconciliation requests for an infrastructure provider object.
func MachinePoolToInfrastructureMapFunc(gvk schema.GroupVersionKind, log logr.Logger) handler.ToRequestsFunc {
	log = log.WithValues("machine-pool-to-infra-map-func", gvk.String())
	return func(o handler.MapObject) []reconcile.Request {
		log := log.WithValues("namespace", o.Meta.GetNamespace(), "name", o.Meta.GetName())
		m, ok := o.Object.(*clusterv1exp.MachinePool)
		if !ok {
			log.V(4).Info("not a machine pool")
			return nil
		}

		gk := gvk.GroupKind()
		ref := m.Spec.Template.Spec.InfrastructureRef
		// Return early if the GroupKind doesn't match what we expect.
		infraGK := ref.GroupVersionKind().GroupKind()
		if gk != infraGK {
			// logr expects key/value pairs after the message; give the GroupKind a key.
			log.V(4).Info("infra kind doesn't match filter group kind", "infraGroupKind", infraGK.String())
			return nil
		}

		log.V(4).Info("projecting object", "namespace", m.Namespace, "name", ref.Name)
		return []reconcile.Request{
			{
				NamespacedName: client.ObjectKey{
					Namespace: m.Namespace,
					Name:      ref.Name,
				},
			},
		}
	}
}
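
For context on how a provider consumes this helper: below is a minimal sketch of wiring the map func into a controller-runtime builder, using the v0.5/v0.6-era handler and source APIs that match this commit. The DockerMachinePoolReconciler stub, its Log field, and the docker provider import path are assumptions for illustration, not code from this commit.

package controllers

import (
	"github.com/go-logr/logr"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/source"

	clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1alpha3"
	exputil "sigs.k8s.io/cluster-api/exp/util"
	infrav1exp "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1alpha3"
)

// DockerMachinePoolReconciler is a stub standing in for the provider's real
// reconciler; only the watch wiring below matters for this example.
type DockerMachinePoolReconciler struct {
	client.Client
	Log logr.Logger
}

func (r *DockerMachinePoolReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
	return ctrl.Result{}, nil // real reconciliation elided
}

// SetupWithManager watches DockerMachinePools directly and, via the new map
// func, requeues the DockerMachinePool referenced by any MachinePool event.
func (r *DockerMachinePoolReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&infrav1exp.DockerMachinePool{}).
		Watches(
			&source.Kind{Type: &clusterv1exp.MachinePool{}},
			&handler.EnqueueRequestsFromMapFunc{
				ToRequests: exputil.MachinePoolToInfrastructureMapFunc(
					infrav1exp.GroupVersion.WithKind("DockerMachinePool"),
					r.Log,
				),
			},
		).
		Complete(r)
}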
6 changes: 5 additions & 1 deletion test/e2e/config/docker-ci.yaml
@@ -72,26 +72,30 @@ providers:
  - sourcePath: "../data/infrastructure-docker/cluster-template-ci.yaml"
    targetName: "cluster-template.yaml"
  - sourcePath: "../data/infrastructure-docker/cluster-template-kcp-adoption.yaml"
  - sourcePath: "../data/infrastructure-docker/cluster-template-machine-pool.yaml"

variables:
  KUBERNETES_VERSION: "v1.18.2"
  ETCD_VERSION_UPGRADE_TO: "3.4.3-0"
  COREDNS_VERSION_UPGRADE_TO: "1.6.7"
  KUBERNETES_VERSION_UPGRADE_TO: "v1.18.2"
  KUBERNETES_VERSION_UPGRADE_FROM: "v1.17.2"
  MIN_NUMBER_OF_REPLICAS: "0"
  MAX_NUMBER_OF_REPLICAS: "5"
  DOCKER_SERVICE_DOMAIN: "cluster.local"
  DOCKER_SERVICE_CIDRS: "10.128.0.0/12"
  # IMPORTANT! This value should match the one used by the CNI provider
  DOCKER_POD_CIDRS: "192.168.0.0/16"
  CNI: "./data/cni/kindnet/kindnet.yaml"
  EXP_CLUSTER_RESOURCE_SET: "true"
  EXP_MACHINE_POOL: "true"

intervals:
  default/wait-controllers: ["3m", "10s"]
  default/wait-cluster: ["3m", "10s"]
  default/wait-control-plane: ["10m", "10s"]
  default/wait-worker-nodes: ["5m", "10s"]
-  default/wait-machine-pool-nodes: ["5m", "10s"]
+  default/wait-machine-pool-nodes: ["3m", "10s"]
  default/wait-delete-cluster: ["3m", "10s"]
  default/wait-machine-upgrade: ["20m", "10s"]
  default/wait-machine-remediation: ["5m", "10s"]
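
The EXP_MACHINE_POOL variable above maps to cluster-api's MachinePool feature gate, so the experimental controllers only run when it is enabled. A minimal sketch of that guard, assuming the exported Gates and MachinePool names from cluster-api's feature package (not code from this commit):

package main

import (
	"fmt"

	"sigs.k8s.io/cluster-api/feature"
)

func main() {
	// EXP_MACHINE_POOL=true is translated by the manifests into
	// --feature-gates=MachinePool=true, which flips this gate.
	if feature.Gates.Enabled(feature.MachinePool) {
		fmt.Println("MachinePool controllers would be registered here")
	}
}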
3 changes: 3 additions & 0 deletions test/e2e/config/docker-dev.yaml
@@ -105,13 +105,16 @@ variables:
  COREDNS_VERSION_UPGRADE_TO: "1.6.7"
  KUBERNETES_VERSION_UPGRADE_TO: "v1.18.2"
  KUBERNETES_VERSION_UPGRADE_FROM: "v1.17.2"
  MIN_NUMBER_OF_REPLICAS: "0"
  MAX_NUMBER_OF_REPLICAS: "5"
  DOCKER_SERVICE_DOMAIN: "cluster.local"
  DOCKER_SERVICE_CIDRS: "10.128.0.0/12"
  # IMPORTANT! This value should match the one used by the CNI provider
  DOCKER_POD_CIDRS: "192.168.0.0/16"
  #CNI: "./data/cni/calico/calico.yaml"
  CNI: "./data/cni/kindnet/kindnet.yaml"
  EXP_CLUSTER_RESOURCE_SET: "true"
  EXP_MACHINE_POOL: "true"

intervals:
  default/wait-controllers: ["3m", "10s"]
118 changes: 118 additions & 0 deletions test/e2e/data/infrastructure-docker/cluster-template-machine-pool.yaml
@@ -0,0 +1,118 @@
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
kind: DockerCluster
metadata:
  name: '${ CLUSTER_NAME }'
---
apiVersion: cluster.x-k8s.io/v1alpha3
kind: Cluster
metadata:
  name: '${ CLUSTER_NAME }'
  labels:
    cni: "${CLUSTER_NAME}-crs-0"
spec:
  clusterNetwork:
    services:
      cidrBlocks: ['${ DOCKER_SERVICE_CIDRS }']
    pods:
      cidrBlocks: ['${ DOCKER_POD_CIDRS }']
    serviceDomain: '${ DOCKER_SERVICE_DOMAIN }'
  infrastructureRef:
    apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
    kind: DockerCluster
    name: '${ CLUSTER_NAME }'
  controlPlaneRef:
    kind: KubeadmControlPlane
    apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
    name: "${CLUSTER_NAME}-control-plane"
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
kind: DockerMachineTemplate
metadata:
  name: "${CLUSTER_NAME}-control-plane"
spec:
  template:
    spec:
      extraMounts:
        - containerPath: "/var/run/docker.sock"
          hostPath: "/var/run/docker.sock"
---
kind: KubeadmControlPlane
apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
metadata:
  name: "${ CLUSTER_NAME }-control-plane"
spec:
  replicas: ${ CONTROL_PLANE_MACHINE_COUNT }
  infrastructureTemplate:
    kind: DockerMachineTemplate
    apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
    name: "${CLUSTER_NAME}-control-plane"
  kubeadmConfigSpec:
    clusterConfiguration:
      controllerManager:
        extraArgs: {enable-hostpath-provisioner: 'true'}
      apiServer:
        certSANs: [localhost, 127.0.0.1]
    initConfiguration:
      nodeRegistration:
        criSocket: /var/run/containerd/containerd.sock
        kubeletExtraArgs: {eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'}
    joinConfiguration:
      nodeRegistration:
        criSocket: /var/run/containerd/containerd.sock
        kubeletExtraArgs: {eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'}
  version: "${KUBERNETES_VERSION}"
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: "cni-${CLUSTER_NAME}-crs-0"
data: ${CNI_RESOURCES}
---
apiVersion: addons.cluster.x-k8s.io/v1alpha3
kind: ClusterResourceSet
metadata:
  name: "${CLUSTER_NAME}-crs-0"
spec:
  strategy: ApplyOnce
  clusterSelector:
    matchLabels:
      cni: "${CLUSTER_NAME}-crs-0"
  resources:
    - name: "cni-${CLUSTER_NAME}-crs-0"
      kind: ConfigMap
---
apiVersion: exp.cluster.x-k8s.io/v1alpha3
kind: MachinePool
metadata:
  name: "${ CLUSTER_NAME }-mp-0"
spec:
  clusterName: '${ CLUSTER_NAME }'
  replicas: ${ WORKER_MACHINE_COUNT }
  template:
    spec:
      bootstrap:
        configRef:
          apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
          kind: KubeadmConfig
          name: "${ CLUSTER_NAME }-mp-0-config"
      clusterName: '${ CLUSTER_NAME }'
      infrastructureRef:
        apiVersion: exp.infrastructure.cluster.x-k8s.io/v1alpha3
        kind: DockerMachinePool
        name: "${ CLUSTER_NAME }-dmp-0"
      version: "${KUBERNETES_VERSION}"
---
apiVersion: exp.infrastructure.cluster.x-k8s.io/v1alpha3
kind: DockerMachinePool
metadata:
  name: "${ CLUSTER_NAME }-dmp-0"
---
apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
kind: KubeadmConfig
metadata:
  name: "${ CLUSTER_NAME }-mp-0-config"
spec:
  joinConfiguration:
    nodeRegistration:
      kubeletExtraArgs:
        eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
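
The MachinePool's infrastructureRef above is the ownership chain that the exp/util helpers in this commit traverse at runtime: the MachinePool controller sets an owner reference on the DockerMachinePool, and GetOwnerMachinePool walks it back. A short sketch (the wrapper function and package are hypothetical; only GetOwnerMachinePool comes from this commit's util package):

package controllers

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1alpha3"
	exputil "sigs.k8s.io/cluster-api/exp/util"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// ownerMachinePoolFor resolves the MachinePool that owns an infrastructure
// pool, e.g. "${CLUSTER_NAME}-mp-0" owning "${CLUSTER_NAME}-dmp-0" above.
func ownerMachinePoolFor(ctx context.Context, c client.Client, meta metav1.ObjectMeta) (*clusterv1exp.MachinePool, error) {
	mp, err := exputil.GetOwnerMachinePool(ctx, c, meta)
	if err != nil {
		return nil, err
	}
	// mp is nil while the MachinePool controller has not yet set the owner ref;
	// a reconciler would requeue in that case.
	return mp, nil
}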
24 changes: 12 additions & 12 deletions test/e2e/kcp_upgrade.go
@@ -45,11 +45,11 @@ type KCPUpgradeSpecInput struct {
// KCPUpgradeSpec implements a test that verifies that KCP properly upgrades a control plane with 3 machines.
func KCPUpgradeSpec(ctx context.Context, inputGetter func() KCPUpgradeSpecInput) {
	var (
-		specName      = "kcp-upgrade"
-		input         KCPUpgradeSpecInput
-		namespace     *corev1.Namespace
-		cancelWatches context.CancelFunc
-		result        *clusterctl.ApplyClusterTemplateAndWaitResult
+		specName         = "kcp-upgrade"
+		input            KCPUpgradeSpecInput
+		namespace        *corev1.Namespace
+		cancelWatches    context.CancelFunc
+		clusterResources *clusterctl.ApplyClusterTemplateAndWaitResult
	)

	BeforeEach(func() {
@@ -71,7 +71,7 @@ func KCPUpgradeSpec(ctx context.Context, inputGetter func() KCPUpgradeSpecInput)
It("Should successfully upgrade Kubernetes, DNS, kube-proxy, and etcd in a single control plane cluster", func() {

By("Creating a workload cluster")
result = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
clusterResources = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
ClusterProxy: input.BootstrapClusterProxy,
ConfigCluster: clusterctl.ConfigClusterInput{
LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()),
@@ -93,8 +93,8 @@ func KCPUpgradeSpec(ctx context.Context, inputGetter func() KCPUpgradeSpecInput)
		By("Upgrading Kubernetes, DNS, kube-proxy, and etcd versions")
		framework.UpgradeControlPlaneAndWaitForUpgrade(ctx, framework.UpgradeControlPlaneAndWaitForUpgradeInput{
			ClusterProxy: input.BootstrapClusterProxy,
-			Cluster:      result.Cluster,
-			ControlPlane: result.ControlPlane,
+			Cluster:      clusterResources.Cluster,
+			ControlPlane: clusterResources.ControlPlane,
			EtcdImageTag: input.E2EConfig.GetVariable(EtcdVersionUpgradeTo),
			DNSImageTag:  input.E2EConfig.GetVariable(CoreDNSVersionUpgradeTo),
			KubernetesUpgradeVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo),
@@ -110,7 +110,7 @@ func KCPUpgradeSpec(ctx context.Context, inputGetter func() KCPUpgradeSpecInput)

		By("Creating a workload cluster")

-		result = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
+		clusterResources = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
			ClusterProxy: input.BootstrapClusterProxy,
			ConfigCluster: clusterctl.ConfigClusterInput{
				LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()),
@@ -132,8 +132,8 @@ func KCPUpgradeSpec(ctx context.Context, inputGetter func() KCPUpgradeSpecInput)
		By("Upgrading Kubernetes, DNS, kube-proxy, and etcd versions")
		framework.UpgradeControlPlaneAndWaitForUpgrade(ctx, framework.UpgradeControlPlaneAndWaitForUpgradeInput{
			ClusterProxy: input.BootstrapClusterProxy,
-			Cluster:      result.Cluster,
-			ControlPlane: result.ControlPlane,
+			Cluster:      clusterResources.Cluster,
+			ControlPlane: clusterResources.ControlPlane,
			EtcdImageTag: input.E2EConfig.GetVariable(EtcdVersionUpgradeTo),
			DNSImageTag:  input.E2EConfig.GetVariable(CoreDNSVersionUpgradeTo),
			KubernetesUpgradeVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo),
@@ -147,6 +147,6 @@ func KCPUpgradeSpec(ctx context.Context, inputGetter func() KCPUpgradeSpecInput)

	AfterEach(func() {
		// Dumps all the resources in the spec namespace, then cleans up the cluster object and the spec namespace itself.
-		dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, result.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
+		dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
	})
}
106 changes: 106 additions & 0 deletions test/e2e/machine_pool.go
@@ -0,0 +1,106 @@
// +build e2e

/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
	"context"
	"fmt"
	"os"
	"path/filepath"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/utils/pointer"

	"sigs.k8s.io/cluster-api/test/framework"
	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
	"sigs.k8s.io/cluster-api/util"
)

const (
	MinNumberOfReplicas = "MIN_NUMBER_OF_REPLICAS"
	MaxNumberOfReplicas = "MAX_NUMBER_OF_REPLICAS"
)

// MachinePoolInput is the input for MachinePoolSpec.
type MachinePoolInput struct {
	E2EConfig             *clusterctl.E2EConfig
	ClusterctlConfigPath  string
	BootstrapClusterProxy framework.ClusterProxy
	ArtifactFolder        string
	SkipCleanup           bool
}

// MachinePoolSpec implements a test that verifies MachinePool scale up, scale down, and version update.
func MachinePoolSpec(ctx context.Context, inputGetter func() MachinePoolInput) {
	var (
		specName         = "machine-pool"
		input            MachinePoolInput
		namespace        *corev1.Namespace
		cancelWatches    context.CancelFunc
		clusterResources *clusterctl.ApplyClusterTemplateAndWaitResult
	)

	BeforeEach(func() {
		Expect(ctx).NotTo(BeNil(), "ctx is required for %s spec", specName)
		input = inputGetter()
		Expect(input.E2EConfig).ToNot(BeNil(), "Invalid argument. input.E2EConfig can't be nil when calling %s spec", specName)
		Expect(input.ClusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. input.ClusterctlConfigPath must be an existing file when calling %s spec", specName)
		Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. input.BootstrapClusterProxy can't be nil when calling %s spec", specName)
		Expect(os.MkdirAll(input.ArtifactFolder, 0755)).To(Succeed(), "Invalid argument. input.ArtifactFolder can't be created for %s spec", specName)
		Expect(input.E2EConfig.Variables).To(HaveKey(KubernetesVersionUpgradeTo))
		Expect(input.E2EConfig.Variables).To(HaveKey(KubernetesVersionUpgradeFrom))
		Expect(input.E2EConfig.Variables).To(HaveKey(MinNumberOfReplicas))
		Expect(input.E2EConfig.Variables).To(HaveKey(MaxNumberOfReplicas))

		// Set up a Namespace to host objects for this spec and create a watcher for the namespace events.
		namespace, cancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder)
	})

	It("Should successfully create a cluster with machine pool machines", func() {
		By("Creating a workload cluster")
		clusterResources = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
			ClusterProxy: input.BootstrapClusterProxy,
			ConfigCluster: clusterctl.ConfigClusterInput{
				LogFolder:                filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()),
				ClusterctlConfigPath:     input.ClusterctlConfigPath,
				KubeconfigPath:           input.BootstrapClusterProxy.GetKubeconfigPath(),
				InfrastructureProvider:   clusterctl.DefaultInfrastructureProvider,
				Flavor:                   "machine-pool",
				Namespace:                namespace.Name,
				ClusterName:              fmt.Sprintf("%s-%s", specName, util.RandomString(6)),
				KubernetesVersion:        input.E2EConfig.GetVariable(KubernetesVersionUpgradeFrom),
				ControlPlaneMachineCount: pointer.Int64Ptr(1),
				WorkerMachineCount:       pointer.Int64Ptr(2),
			},
			WaitForClusterIntervals:      input.E2EConfig.GetIntervals(specName, "wait-cluster"),
			WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, "wait-control-plane"),
			WaitForMachinePools:          input.E2EConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
		})

		By("PASSED!")
	})

	AfterEach(func() {
		// Dumps all the resources in the spec namespace, then cleans up the cluster object and the spec namespace itself.
		dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
	})
}
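
The spec's doc comment promises scale up, scale down, and version-update coverage, but this first commit only asserts creation (hence the MIN_NUMBER_OF_REPLICAS/MAX_NUMBER_OF_REPLICAS variables being required but unused here). A follow-up scaling step could look like the hedged sketch below; the helper is hypothetical and not part of the test framework, using only the generic controller-runtime client and gomega.

package e2e

import (
	"context"
	"time"

	. "github.com/onsi/gomega"

	clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1alpha3"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// scaleMachinePoolAndWait is a hypothetical follow-up helper (not in this
// commit): it patches spec.replicas on a MachinePool and waits until
// status.replicas reports the new count.
func scaleMachinePoolAndWait(ctx context.Context, c client.Client, key client.ObjectKey, replicas int32) {
	mp := &clusterv1exp.MachinePool{}
	Expect(c.Get(ctx, key, mp)).To(Succeed())

	// Patch only the replica count, leaving the rest of the spec untouched.
	patch := client.MergeFrom(mp.DeepCopy())
	mp.Spec.Replicas = &replicas
	Expect(c.Patch(ctx, mp, patch)).To(Succeed())

	// Poll until the MachinePool controller reflects the new count in status.
	Eventually(func() int32 {
		if err := c.Get(ctx, key, mp); err != nil {
			return -1
		}
		return mp.Status.Replicas
	}, 10*time.Minute, 10*time.Second).Should(Equal(replicas))
}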