Skip to content

Commit

Permalink
Port etcd check for upgrade test.
Browse files Browse the repository at this point in the history
  • Loading branch information
Sedef committed Apr 15, 2020
1 parent 85ce8e2 commit feff3db
Show file tree
Hide file tree
Showing 8 changed files with 142 additions and 92 deletions.
40 changes: 25 additions & 15 deletions test/e2e/common.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,17 +22,18 @@ import (
"context"
"fmt"
"io/ioutil"
"k8s.io/apimachinery/pkg/labels"
"os"
"path/filepath"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
clusterctlconfig "sigs.k8s.io/cluster-api/cmd/clusterctl/client/config"

corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
"sigs.k8s.io/cluster-api/bootstrap/kubeadm/types/v1beta1"
clusterctlconfig "sigs.k8s.io/cluster-api/cmd/clusterctl/client/config"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha3"
"sigs.k8s.io/cluster-api/test/framework"
"sigs.k8s.io/cluster-api/test/framework/clusterctl"
Expand Down Expand Up @@ -391,7 +392,7 @@ type UpgradeControlPlaneAndWaitForUpgradeInput struct {
}

// UpgradeControlPlaneAndWaitForUpgrade upgrades a KubeadmControlPlane and waits for it to be upgraded.
func UpgradeControlPlaneAndWaitForUpgrade(ctx context.Context, input UpgradeControlPlaneAndWaitForUpgradeInput) []clusterv1.Machine {
func UpgradeControlPlaneAndWaitForUpgrade(ctx context.Context, input UpgradeControlPlaneAndWaitForUpgradeInput) {
Expect(ctx).NotTo(BeNil(), "ctx is required for UpgradeControlPlaneAndWaitForUpgrade")
Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling UpgradeControlPlaneAndWaitForUpgrade")
Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling UpgradeControlPlaneAndWaitForUpgrade")
Expand All @@ -401,13 +402,12 @@ func UpgradeControlPlaneAndWaitForUpgrade(ctx context.Context, input UpgradeCont
Expect(input.DNSImageTag).ToNot(BeNil(), "Invalid argument. input.DNSImageTag can't be empty when calling UpgradeControlPlaneAndWaitForUpgrade")
Expect(input.IntervalsGetter).ToNot(BeNil(), "Invalid argument. input.IntervalsGetter can't be nil when calling UpgradeControlPlaneAndWaitForUpgrade")

client := input.ClusterProxy.GetClient()
mgmtClient := input.ClusterProxy.GetClient()

fmt.Fprintf(GinkgoWriter, "Patching the new kubernetes version to KCP\n")
patchHelper, err := patch.NewHelper(input.ControlPlane, client)
patchHelper, err := patch.NewHelper(input.ControlPlane, mgmtClient)
Expect(err).ToNot(HaveOccurred())

Expect(input.KubernetesUpgradeVersion).NotTo(BeNil())
input.ControlPlane.Spec.Version = input.KubernetesUpgradeVersion
input.ControlPlane.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd = v1beta1.Etcd{
Local: &v1beta1.LocalEtcd{
Expand All @@ -425,27 +425,37 @@ func UpgradeControlPlaneAndWaitForUpgrade(ctx context.Context, input UpgradeCont
Expect(patchHelper.Patch(ctx, input.ControlPlane)).To(Succeed())

fmt.Fprintf(GinkgoWriter, "Waiting for machines to have the upgraded kubernetes version\n")
machines := discovery.GetMachinesByCluster(context.TODO(), discovery.GetMachinesByClusterInput{
Lister: client,
ClusterName: input.Cluster.Name,
Namespace: input.Cluster.Namespace,
})
framework.WaitForMachinesToBeUpgraded(ctx, framework.WaitForMachinesToBeUpgradedInput{
Lister: client,
Lister: mgmtClient,
Cluster: input.Cluster,
Machines: machines,
MachineCount: int(*input.ControlPlane.Spec.Replicas),
KubernetesUpgradeVersion: input.KubernetesUpgradeVersion,
}, input.IntervalsGetter("wait-machine-upgrade")...)

//TODO: add etcd and DNS checks too
fmt.Fprintf(GinkgoWriter, "Waiting for kube-proxy to have the upgraded kubernetes version\n")
workloadCluster := input.ClusterProxy.GetWorkloadCluster(context.TODO(), input.Cluster.Namespace, input.Cluster.Name)
workloadClient := workloadCluster.GetClient()
framework.WaitForKubeProxyUpgrade(ctx, framework.WaitForKubeProxyUpgradeInput{
Getter: workloadCluster.GetClient(),
Getter: workloadClient,
KubernetesVersion: input.KubernetesUpgradeVersion,
}, input.IntervalsGetter("wait-machine-upgrade")...)

fmt.Fprintf(GinkgoWriter, "Waiting for CoreDNS to have the upgraded image tag\n")
framework.WaitForDNSUpgrade(ctx, framework.WaitForDNSUpgradeInput{
Getter: workloadClient,
DNSVersion: input.DNSImageTag,
})
return machines

fmt.Fprintf(GinkgoWriter, "Waiting for etcd to have the upgraded image tag\n")
lblSelector, err := labels.Parse("component=etcd")
Expect(err).ToNot(HaveOccurred())
opt := &client.ListOptions{LabelSelector: lblSelector}
waitForPodListConditionInput := framework.WaitForPodListConditionInput{
Lister: workloadClient,
ListOptions: opt,
Condition: framework.EtcdImageTagCondition(input.EtcdImageTag, int(*input.ControlPlane.Spec.Replicas)),
}
framework.WaitForPodListCondition(ctx, waitForPodListConditionInput, input.IntervalsGetter("wait-machine-upgrade")...)
}

func valueOrDefault(v string) string {
Expand Down
43 changes: 43 additions & 0 deletions test/e2e/e2e_suite_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -30,10 +30,15 @@ import (
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/reporters"
. "github.com/onsi/gomega"

corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
"sigs.k8s.io/cluster-api/test/framework"
"sigs.k8s.io/cluster-api/test/framework/bootstrap"
"sigs.k8s.io/cluster-api/test/framework/clusterctl"
"sigs.k8s.io/cluster-api/test/framework/discovery"
"sigs.k8s.io/cluster-api/util"
)

// Test suite flags
Expand Down Expand Up @@ -193,3 +198,41 @@ func tearDown(bootstrapClusterProvider bootstrap.ClusterProvider, bootstrapClust
bootstrapClusterProvider.Dispose(context.TODO())
}
}

// dumpSpecResourcesAndCleanup archives all the Cluster API resources in the
// spec's namespace to the artifact folder and, unless skipCleanup is set,
// deletes the workload cluster and the namespace before stopping the event
// watcher via cancelWatches.
func dumpSpecResourcesAndCleanup(specName string, clusterProxy framework.ClusterProxy, artifactFolder string, namespace *corev1.Namespace, cancelWatches context.CancelFunc, cluster *clusterv1.Cluster, intervalsGetter IntervalsGetter, skipCleanup bool) {
	Byf("Dumping all the Cluster API resources in the %q namespace", namespace.Name)
	// Dump all Cluster API related resources to artifacts before deleting them.
	discovery.DumpAllResources(context.TODO(), discovery.DumpAllResourcesInput{
		Lister:    clusterProxy.GetClient(),
		Namespace: namespace.Name,
		LogPath:   filepath.Join(artifactFolder, "clusters", clusterProxy.GetName(), "resources"),
	})

	// When cleanup is skipped we only stop watching events and leave the
	// cluster and namespace in place for inspection.
	if skipCleanup {
		cancelWatches()
		return
	}

	Byf("Deleting cluster %s/%s", cluster.Namespace, cluster.Name)
	DeleteClusterAndWait(context.TODO(), DeleteClusterAndWaitInput{
		Client:          clusterProxy.GetClient(),
		Cluster:         cluster,
		IntervalsGetter: intervalsGetter,
	})

	Byf("Deleting namespace used for hosting the %q test spec", specName)
	framework.DeleteNamespace(context.TODO(), framework.DeleteNamespaceInput{
		Deleter: clusterProxy.GetClient(),
		Name:    namespace.Name,
	}, "40s", "10s") //TODO: intervals

	cancelWatches()
}

// setupSpecNamespace creates a randomly suffixed namespace dedicated to the
// given test spec and starts streaming its events into the artifact folder.
// It returns the namespace together with the CancelFunc that stops the watch.
func setupSpecNamespace(specName string, clusterProxy framework.ClusterProxy, artifactFolder string) (*corev1.Namespace, context.CancelFunc) {
	Byf("Creating a namespace for hosting the %q test spec", specName)

	// The random suffix avoids name collisions across repeated runs of the same spec.
	namespaceName := fmt.Sprintf("%s-%s", specName, util.RandomString(6))
	logFolder := filepath.Join(artifactFolder, "clusters", clusterProxy.GetName())

	namespace, cancelWatches := CreateNamespaceAndWatchEvents(context.TODO(), CreateNamespaceAndWatchEventsInput{
		Creator:   clusterProxy.GetClient(),
		ClientSet: clusterProxy.GetClientSet(),
		Name:      namespaceName,
		LogFolder: logFolder,
	}) //TODO: intervals

	return namespace, cancelWatches
}
2 changes: 1 addition & 1 deletion test/e2e/kcp_upgrade_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,12 +21,12 @@ import (
"path/filepath"

. "github.com/onsi/ginkgo"
"sigs.k8s.io/cluster-api/util"

corev1 "k8s.io/api/core/v1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha3"
"sigs.k8s.io/cluster-api/test/framework/clusterctl"
"sigs.k8s.io/cluster-api/util"
)

var _ = Describe("KCP upgrade", func() {
Expand Down
40 changes: 0 additions & 40 deletions test/e2e/quick_start_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,12 +25,10 @@ import (

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"sigs.k8s.io/cluster-api/test/framework"

corev1 "k8s.io/api/core/v1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
"sigs.k8s.io/cluster-api/test/framework/clusterctl"
"sigs.k8s.io/cluster-api/test/framework/discovery"
"sigs.k8s.io/cluster-api/util"
)

Expand Down Expand Up @@ -85,41 +83,3 @@ var _ = Describe("When following the Cluster API quick-start", func() {
dumpSpecResourcesAndCleanup(specName, bootstrapClusterProxy, artifactFolder, namespace, cancelWatches, cluster, intervalsGetter, skipCleanup)
})
})

// setupSpecNamespace creates a namespace, with a random suffix, dedicated to
// the given test spec and begins watching its events into the artifact folder.
// It returns the namespace and the CancelFunc that stops the event watch.
func setupSpecNamespace(specName string, clusterProxy framework.ClusterProxy, artifactFolder string) (*corev1.Namespace, context.CancelFunc) {
	Byf("Creating a namespace for hosting the %q test spec", specName)
	// The random suffix avoids collisions between repeated runs of the same spec.
	namespace, cancelWatches := CreateNamespaceAndWatchEvents(context.TODO(), CreateNamespaceAndWatchEventsInput{
		Creator:   clusterProxy.GetClient(),
		ClientSet: clusterProxy.GetClientSet(),
		Name:      fmt.Sprintf("%s-%s", specName, util.RandomString(6)),
		LogFolder: filepath.Join(artifactFolder, "clusters", clusterProxy.GetName()),
	}) //TODO: intervals

	return namespace, cancelWatches
}

// dumpSpecResourcesAndCleanup dumps all the Cluster API resources in the
// spec's namespace to the artifact folder, then (unless skipCleanup is set)
// deletes the cluster and the namespace, and finally stops the event watch.
func dumpSpecResourcesAndCleanup(specName string, clusterProxy framework.ClusterProxy, artifactFolder string, namespace *corev1.Namespace, cancelWatches context.CancelFunc, cluster *clusterv1.Cluster, intervalsGetter IntervalsGetter, skipCleanup bool) {
	Byf("Dumping all the Cluster API resources in the %q namespace", namespace.Name)
	// Dump all Cluster API related resources to artifacts before deleting them.
	discovery.DumpAllResources(context.TODO(), discovery.DumpAllResourcesInput{
		Lister:    clusterProxy.GetClient(),
		Namespace: namespace.Name,
		LogPath:   filepath.Join(artifactFolder, "clusters", clusterProxy.GetName(), "resources"),
	})

	if !skipCleanup {
		Byf("Deleting cluster %s/%s", cluster.Namespace, cluster.Name)
		DeleteClusterAndWait(context.TODO(), DeleteClusterAndWaitInput{
			Client:          clusterProxy.GetClient(),
			Cluster:         cluster,
			IntervalsGetter: intervalsGetter,
		})

		Byf("Deleting namespace used for hosting the %q test spec", specName)
		framework.DeleteNamespace(context.TODO(), framework.DeleteNamespaceInput{
			Deleter: clusterProxy.GetClient(),
			Name:    namespace.Name,
		}, "40s", "10s") //TODO: intervals
	}
	// Always stop watching namespace events, even when cleanup is skipped.
	cancelWatches()
}
6 changes: 3 additions & 3 deletions test/e2e/self_hosted_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,14 +25,14 @@ import (

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"sigs.k8s.io/cluster-api/test/framework"
"sigs.k8s.io/cluster-api/test/framework/bootstrap"
"sigs.k8s.io/cluster-api/util"

corev1 "k8s.io/api/core/v1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
"sigs.k8s.io/cluster-api/test/framework"
"sigs.k8s.io/cluster-api/test/framework/bootstrap"
"sigs.k8s.io/cluster-api/test/framework/clusterctl"
"sigs.k8s.io/cluster-api/test/framework/discovery"
"sigs.k8s.io/cluster-api/util"
)

var _ = Describe("When testing Cluster API working on self-hosted clusters", func() {
Expand Down
2 changes: 1 addition & 1 deletion test/framework/clusterctl/e2e_config.go
Original file line number Diff line number Diff line change
Expand Up @@ -235,7 +235,7 @@ func (c *E2EConfig) Validate() error {
// If kubernetesVersion is nil or not valid, return error.
k8sVersion := c.GetKubernetesVersion()
if k8sVersion == "" {
return errEmptyArg(fmt.Sprintf("ClusterctlVariables[%s]", kubernetesVersion))
return errEmptyArg(fmt.Sprintf("Variables[%s]", kubernetesVersion))
} else if _, err := version.ParseSemantic(k8sVersion); err != nil {
return errInvalidArg("Variables[%s]=%q", kubernetesVersion, k8sVersion)
}
Expand Down
74 changes: 68 additions & 6 deletions test/framework/control_plane.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,6 @@ package framework
import (
"context"
"fmt"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"

Expand Down Expand Up @@ -391,7 +390,6 @@ func AssertAllClusterAPIResourcesAreGone(ctx context.Context, input AssertAllClu
type WaitForMachinesToBeUpgradedInput struct {
Lister Lister
Cluster *clusterv1.Cluster
Machines []clusterv1.Machine
KubernetesUpgradeVersion string
MachineCount int
}
Expand All @@ -400,13 +398,19 @@ type WaitForMachinesToBeUpgradedInput struct {
func WaitForMachinesToBeUpgraded(ctx context.Context, input WaitForMachinesToBeUpgradedInput, intervals ...interface{}) {
By("ensuring all machines have upgraded kubernetes version")
Eventually(func() (int, error) {
machines := GetControlPlaneMachinesByCluster(context.TODO(), GetMachinesByClusterInput{
Lister: input.Lister,
ClusterName: input.Cluster.Name,
Namespace: input.Cluster.Namespace,
})

upgraded := 0
for _, machine := range input.Machines {
for _, machine := range machines {
if *machine.Spec.Version == input.KubernetesUpgradeVersion {
upgraded++
}
}
if len(input.Machines) > upgraded {
if len(machines) > upgraded {
return 0, errors.New("old nodes remain")
}
return upgraded, nil
Expand All @@ -421,8 +425,6 @@ type WaitForKubeProxyUpgradeInput struct {

// WaitForKubeProxyUpgrade waits until kube-proxy version matches with the kubernetes version. This is called during KCP upgrade.
func WaitForKubeProxyUpgrade(ctx context.Context, input WaitForKubeProxyUpgradeInput, intervals ...interface{}) {
By("ensuring all machines have upgraded kubernetes version")

By("ensuring kube-proxy has the correct image")

Eventually(func() (bool, error) {
Expand All @@ -437,3 +439,63 @@ func WaitForKubeProxyUpgrade(ctx context.Context, input WaitForKubeProxyUpgradeI
return false, nil
}, intervals...).Should(BeTrue())
}

// WaitForDNSUpgradeInput is the input for WaitForDNSUpgrade.
type WaitForDNSUpgradeInput struct {
	// Getter is used to read the coredns Deployment from the workload cluster.
	Getter Getter
	// DNSVersion is the CoreDNS image tag expected once the upgrade is done.
	DNSVersion string
}

// WaitForDNSUpgrade waits until CoreDNS version matches with the CoreDNS upgrade version. This is called during KCP upgrade.
// WaitForDNSUpgrade waits until CoreDNS version matches with the CoreDNS
// upgrade version, i.e. until the first container of the "coredns" Deployment
// in kube-system runs the expected image tag. This is called during KCP upgrade.
func WaitForDNSUpgrade(ctx context.Context, input WaitForDNSUpgradeInput, intervals ...interface{}) {
	By("ensuring CoreDNS has the correct image")

	Eventually(func() (bool, error) {
		d := &appsv1.Deployment{}

		if err := input.Getter.Get(ctx, ctrlclient.ObjectKey{Name: "coredns", Namespace: metav1.NamespaceSystem}, d); err != nil {
			return false, err
		}

		// Guard against a Deployment whose pod template has no containers yet;
		// indexing Containers[0] directly would panic inside the polled
		// function instead of letting Eventually retry.
		if len(d.Spec.Template.Spec.Containers) == 0 {
			return false, nil
		}
		return d.Spec.Template.Spec.Containers[0].Image == "k8s.gcr.io/coredns:"+input.DNSVersion, nil
	}, intervals...).Should(BeTrue())
}

// TODO: The code below should go into discovery package but if we put it there, we cannot call it from here as discovery uses Lister
// TODO: that is defined here so cyclic dependency happening. We can move Lister, Getter, Deleter interfaces to another package???
// GetMachinesByClusterInput is the input for GetControlPlaneMachinesByCluster.
type GetMachinesByClusterInput struct {
	// Lister is used to list Machine objects in the management cluster.
	Lister Lister
	// ClusterName and Namespace identify the Cluster whose Machines are listed.
	ClusterName string
	Namespace string
}

// GetControlPlaneMachinesByCluster returns the control plane Machine objects
// for a cluster.
// Important! this method relies on labels that are created by the CAPI
// controllers during the first reconciliation, so it is necessary to ensure
// this is already happened before calling it.
func GetControlPlaneMachinesByCluster(ctx context.Context, input GetMachinesByClusterInput) []clusterv1.Machine {
	// Select machines that belong to the cluster AND carry the control plane label.
	opts := byClusterOptions(input.ClusterName, input.Namespace)
	opts = append(opts, controlPlaneMachineOptions()...)

	machines := &clusterv1.MachineList{}
	Expect(input.Lister.List(ctx, machines, opts...)).To(Succeed(), "Failed to list MachineList object for Cluster %s/%s", input.Namespace, input.ClusterName)

	return machines.Items
}

// byClusterOptions returns a set of ListOptions that allows to identify all the objects belonging to a Cluster.
func byClusterOptions(name, namespace string) []client.ListOption {
	// Objects belonging to a Cluster live in its namespace and carry the cluster label.
	inNamespace := client.InNamespace(namespace)
	withClusterLabel := client.MatchingLabels{clusterv1.ClusterLabelName: name}
	return []client.ListOption{inNamespace, withClusterLabel}
}

// controlPlaneMachineOptions returns a set of ListOptions that allows to get all machine objects belonging to control plane.
func controlPlaneMachineOptions() []client.ListOption {
	// Selecting on the presence of the control plane label filters out worker machines.
	hasControlPlaneLabel := client.HasLabels{clusterv1.MachineControlPlaneLabelName}
	return []client.ListOption{hasControlPlaneLabel}
}
27 changes: 1 addition & 26 deletions test/framework/discovery/discovery.go
Original file line number Diff line number Diff line change
Expand Up @@ -183,24 +183,6 @@ func GetMachineDeploymentsByCluster(ctx context.Context, input GetMachineDeploym
return deployments
}

// GetMachinesByClusterInput is the input for GetMachinesByCluster.
type GetMachinesByClusterInput struct {
	// Lister is used to list Machine objects in the management cluster.
	Lister framework.Lister
	// ClusterName and Namespace identify the Cluster whose Machines are listed.
	ClusterName string
	Namespace string
}

// GetMachinesByCluster returns the Machine objects for a cluster.
// Important! this method relies on labels that are created by the CAPI controllers during the first reconciliation, so
// it is necessary to ensure this is already happened before calling it.
func GetMachinesByCluster(ctx context.Context, input GetMachinesByClusterInput) []clusterv1.Machine {
	// NOTE(review): despite the generic name, controlPlaneMachineOptions is
	// appended here, so only control plane Machines are returned.
	options := append(byClusterOptions(input.ClusterName, input.Namespace), controlPlaneMachineOptions()...)

	machineList := &clusterv1.MachineList{}
	Expect(input.Lister.List(ctx, machineList, options...)).To(Succeed(), "Failed to list MachineList object for Cluster %s/%s", input.Namespace, input.ClusterName)

	return machineList.Items
}

// DumpAllResourcesInput is the input for DumpAllResources.
type DumpAllResourcesInput struct {
Lister framework.Lister
Expand Down Expand Up @@ -262,11 +244,4 @@ func byClusterOptions(name, namespace string) []client.ListOption {
clusterv1.ClusterLabelName: name,
},
}
}

// controlPlaneMachineOptions returns a set of ListOptions that allows to get all machine objects belonging to control plane.
func controlPlaneMachineOptions() []client.ListOption {
	// Selects machines carrying the control plane label; workers lack it.
	return []client.ListOption{
		client.HasLabels{clusterv1.MachineControlPlaneLabelName},
	}
}
}

0 comments on commit feff3db

Please sign in to comment.