Move etcd upgrade test into separate file
Also pass in prefix to cluster generator and improve test output
Warren Fernandes committed Mar 26, 2020
1 parent 314a00b commit e977a76
Showing 3 changed files with 242 additions and 140 deletions.
6 changes: 3 additions & 3 deletions test/framework/workload_cluster.go
@@ -52,7 +52,7 @@ func WaitForPodListCondition(ctx context.Context, input WaitForPodListConditionInput) {
         err := input.Condition(podList)
         if err != nil {
             // DEBUG:
-            fmt.Println("---", err.Error())
+            fmt.Println(err.Error())
             return false, err
         }
         return true, nil
@@ -71,7 +71,7 @@ func EtcdImageTagCondition(expectedTag string, expectedCount int) podListCondition {
             }
         }
         if countWithCorrectTag != expectedCount {
-            return errors.Errorf("expected %d pods to have image tag %q, got %d", expectedCount, expectedTag, countWithCorrectTag)
+            return errors.Errorf("etcdImageTagCondition: expected %d pods to have image tag %q, got %d", expectedCount, expectedTag, countWithCorrectTag)
         }

         // This check is to ensure that if there are three controlplane nodes,
@@ -80,7 +80,7 @@ func EtcdImageTagCondition(expectedTag string, expectedCount int) podListCondition {
         // case where there are three etcd pods with the correct tag and one
         // left over that has yet to be deleted.
         if len(pl.Items) != expectedCount {
-            return errors.Errorf("expected %d pods, got %d", expectedCount, len(pl.Items))
+            return errors.Errorf("etcdImageTagCondition: expected %d pods, got %d", expectedCount, len(pl.Items))
         }
         return nil
     }
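
For reference, EtcdImageTagCondition is consumed through framework.WaitForPodListCondition. Below is a minimal sketch of that pairing, mirroring the etcd-tag wait in the test being moved out of docker_test.go (ctx, workloadClient, and replicas are assumed to come from the surrounding test setup):

    // Wait until exactly `replicas` etcd pods carry the expected image tag.
    lblSelector, err := labels.Parse("component=etcd")
    Expect(err).ToNot(HaveOccurred())
    framework.WaitForPodListCondition(ctx, framework.WaitForPodListConditionInput{
        Lister:      workloadClient,
        ListOptions: &client.ListOptions{LabelSelector: lblSelector},
        Condition:   framework.EtcdImageTagCondition("3.4.3-0", replicas),
    })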
156 changes: 19 additions & 137 deletions test/infrastructure/docker/e2e/docker_test.go
@@ -26,7 +26,6 @@ import (
 
     . "github.com/onsi/ginkgo"
     . "github.com/onsi/gomega"
-    "k8s.io/apimachinery/pkg/labels"
     "k8s.io/apimachinery/pkg/runtime"
 
     appsv1 "k8s.io/api/apps/v1"
@@ -41,26 +40,21 @@ import (
     infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1alpha3"
     "sigs.k8s.io/cluster-api/util"
     "sigs.k8s.io/cluster-api/util/patch"
-    "sigs.k8s.io/controller-runtime/pkg/client"
     ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
 )
 
 var _ = Describe("Docker", func() {
     Describe("Cluster Creation", func() {
         var (
-            namespace      string
-            clusterGen     = &ClusterGenerator{}
+            namespace      = "default"
+            clusterGen     = newClusterGenerator("")
             workloadClient ctrlclient.Client
             mgmtClient     ctrlclient.Client
             cluster        *clusterv1.Cluster
         )
         SetDefaultEventuallyTimeout(10 * time.Minute)
         SetDefaultEventuallyPollingInterval(10 * time.Second)
 
-        BeforeEach(func() {
-            namespace = "default"
-        })
-
         AfterEach(func() {
             // Delete the workload cluster
             deleteClusterInput := framework.DeleteClusterInput{
@@ -210,132 +204,6 @@ var _ = Describe("Docker", func() {
             framework.AssertControlPlaneFailureDomains(ctx, assertControlPlaneFailureDomainInput)
         })
 
Specify("upgrades etcd", func() {
replicas := 3
var (
infraCluster *infrav1.DockerCluster
template *infrav1.DockerMachineTemplate
err error
)
cluster, infraCluster, controlPlane, template = clusterGen.GenerateCluster(namespace, int32(replicas))
md, infraTemplate, bootstrapTemplate := GenerateMachineDeployment(cluster, 1)

// Set up the client to the management cluster
mgmtClient, err = mgmt.GetClient()
Expect(err).NotTo(HaveOccurred())

// Set up the cluster object
createClusterInput := framework.CreateClusterInput{
Creator: mgmtClient,
Cluster: cluster,
InfraCluster: infraCluster,
}
framework.CreateCluster(ctx, createClusterInput)

// Set up the KubeadmControlPlane
createKubeadmControlPlaneInput := framework.CreateKubeadmControlPlaneInput{
Creator: mgmtClient,
ControlPlane: controlPlane,
MachineTemplate: template,
}
framework.CreateKubeadmControlPlane(ctx, createKubeadmControlPlaneInput)

// Wait for the cluster to provision.
assertClusterProvisionsInput := framework.WaitForClusterToProvisionInput{
Getter: mgmtClient,
Cluster: cluster,
}
framework.WaitForClusterToProvision(ctx, assertClusterProvisionsInput)

// Wait for at least one control plane node to be ready
waitForOneKubeadmControlPlaneMachineToExistInput := framework.WaitForOneKubeadmControlPlaneMachineToExistInput{
Lister: mgmtClient,
Cluster: cluster,
ControlPlane: controlPlane,
}
framework.WaitForOneKubeadmControlPlaneMachineToExist(ctx, waitForOneKubeadmControlPlaneMachineToExistInput, "5m")

// Insatll a networking solution on the workload cluster
workloadClient, err := mgmt.GetWorkloadClient(ctx, cluster.Namespace, cluster.Name)
Expect(err).ToNot(HaveOccurred())
applyYAMLURLInput := framework.ApplyYAMLURLInput{
Client: workloadClient,
HTTPGetter: http.DefaultClient,
NetworkingURL: "https://docs.projectcalico.org/manifests/calico.yaml",
Scheme: mgmt.Scheme,
}
framework.ApplyYAMLURL(ctx, applyYAMLURLInput)

// Wait for the controlplane nodes to exist
assertKubeadmControlPlaneNodesExistInput := framework.WaitForKubeadmControlPlaneMachinesToExistInput{
Lister: mgmtClient,
Cluster: cluster,
ControlPlane: controlPlane,
}
framework.WaitForKubeadmControlPlaneMachinesToExist(ctx, assertKubeadmControlPlaneNodesExistInput, "10m", "10s")

// Create the workload nodes
createMachineDeploymentinput := framework.CreateMachineDeploymentInput{
Creator: mgmtClient,
MachineDeployment: md,
BootstrapConfigTemplate: bootstrapTemplate,
InfraMachineTemplate: infraTemplate,
}
framework.CreateMachineDeployment(ctx, createMachineDeploymentinput)

// Wait for the workload nodes to exist
waitForMachineDeploymentNodesToExistInput := framework.WaitForMachineDeploymentNodesToExistInput{
Lister: mgmtClient,
Cluster: cluster,
MachineDeployment: md,
}
framework.WaitForMachineDeploymentNodesToExist(ctx, waitForMachineDeploymentNodesToExistInput)

// Wait for the control plane to be ready
waitForControlPlaneToBeReadyInput := framework.WaitForControlPlaneToBeReadyInput{
Getter: mgmtClient,
ControlPlane: controlPlane,
}
framework.WaitForControlPlaneToBeReady(ctx, waitForControlPlaneToBeReadyInput)

// Before patching ensure all pods are ready in workload
// cluster
By("waiting for workload cluster pods to be Running")
waitForPodListConditionInput := framework.WaitForPodListConditionInput{
Lister: workloadClient,
ListOptions: &client.ListOptions{Namespace: metav1.NamespaceSystem},
Condition: framework.PhasePodCondition(corev1.PodRunning),
}
framework.WaitForPodListCondition(ctx, waitForPodListConditionInput)

By("patching KubeadmConfigSpec etcd image tag in the kubeadmControlPlane")
patchHelper, err := patch.NewHelper(controlPlane, mgmtClient)
Expect(err).ToNot(HaveOccurred())
controlPlane.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd = v1beta1.Etcd{
Local: &v1beta1.LocalEtcd{
ImageMeta: v1beta1.ImageMeta{
// TODO: Ensure that the current version of etcd
// is not 3.4.3-0 or that k8s version is 1.16
// 3.4.3-0 is the etcd version meant for k8s 1.17.x
// k8s 1.16.x clusters ususally get deployed with etcd 3.3.x
ImageTag: "3.4.3-0",
},
},
}
Expect(patchHelper.Patch(ctx, controlPlane)).To(Succeed())

By("waiting for etcd pods to have the expected image tag")
lblSelector, err := labels.Parse("component=etcd")
Expect(err).ToNot(HaveOccurred())
opt := &client.ListOptions{LabelSelector: lblSelector}
waitForPodListConditionInput = framework.WaitForPodListConditionInput{
Lister: workloadClient,
ListOptions: opt,
Condition: framework.EtcdImageTagCondition("3.4.3-0", replicas),
}
framework.WaitForPodListCondition(ctx, waitForPodListConditionInput)
})

Specify("Full upgrade", func() {
By("upgrading the control plane object to a new version")
patchHelper, err := patch.NewHelper(controlPlane, mgmtClient)
@@ -449,12 +317,26 @@ func GenerateMachineDeployment(cluster *clusterv1.Cluster, replicas int32) (*clu
     return machineDeployment, infraTemplate, bootstrap
 }
 
-type ClusterGenerator struct {
+type clusterGenerator struct {
+    prefix  string
     counter int
 }
 
-func (c *ClusterGenerator) GenerateCluster(namespace string, replicas int32) (*clusterv1.Cluster, *infrav1.DockerCluster, *controlplanev1.KubeadmControlPlane, *infrav1.DockerMachineTemplate) {
-    generatedName := fmt.Sprintf("test-%d", c.counter)
+func newClusterGenerator(name string) *clusterGenerator {
+    var prefix string
+    if len(name) != 0 {
+        prefix = fmt.Sprintf("test-%s-", name)
+    } else {
+        prefix = "test-"
+    }
+
+    return &clusterGenerator{
+        prefix: prefix,
+    }
+}
+
+func (c *clusterGenerator) GenerateCluster(namespace string, replicas int32) (*clusterv1.Cluster, *infrav1.DockerCluster, *controlplanev1.KubeadmControlPlane, *infrav1.DockerMachineTemplate) {
+    generatedName := fmt.Sprintf("%s%d", c.prefix, c.counter)
     c.counter++
     version := "v1.16.3"
 
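
The newClusterGenerator constructor above makes the test-name prefix explicit instead of hard-coding "test-%d". Below is a self-contained sketch of just the naming behavior (generateName is a reduced stand-in for the naming step inside GenerateCluster, and the "etcd" argument is a hypothetical example, not taken from this commit):

    package main

    import "fmt"

    // clusterGenerator mirrors the fields added in this commit.
    type clusterGenerator struct {
        prefix  string
        counter int
    }

    func newClusterGenerator(name string) *clusterGenerator {
        prefix := "test-"
        if len(name) != 0 {
            prefix = fmt.Sprintf("test-%s-", name)
        }
        return &clusterGenerator{prefix: prefix}
    }

    // generateName reproduces only the naming step of GenerateCluster.
    func (c *clusterGenerator) generateName() string {
        name := fmt.Sprintf("%s%d", c.prefix, c.counter)
        c.counter++
        return name
    }

    func main() {
        gen := newClusterGenerator("")         // as instantiated in docker_test.go
        etcdGen := newClusterGenerator("etcd") // hypothetical named generator
        fmt.Println(gen.generateName(), gen.generateName()) // prints: test-0 test-1
        fmt.Println(etcdGen.generateName())                 // prints: test-etcd-0
    }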
[Diff for the third changed file, the new etcd upgrade test (220 additions, 0 deletions), is not shown.]
