MachineConfig E2E test refactored (openshift#1963)
* Initial approach, parallelism working fine, test still failing

Signed-off-by: Juan Manuel Parrilla Madrid <[email protected]>

* Fixing MC test and renaming the channel var that signals the end of the test

Signed-off-by: Juan Manuel Parrilla Madrid <[email protected]>

* Adding changes based on PR Reviews

Signed-off-by: Juan Manuel Parrilla Madrid <[email protected]>

* Changing the signaling approach to a testing-framework-native one

Signed-off-by: Juan Manuel Parrilla Madrid <[email protected]>

Signed-off-by: Juan Manuel Parrilla Madrid <[email protected]>
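
For reference, a minimal standalone sketch (not part of this commit) of the testing-framework-native signaling the last bullet describes: parallel subtests are wrapped in an enclosing t.Run group, and because that group only returns once every parallel child has finished, the parent's deferred cancel() fires at the right time without a dedicated signaling channel. The package, test, and subtest names below are illustrative; the same pattern appears in the TestNodePool hunk further down.

package example

import (
    "context"
    "testing"
    "time"
)

// Grouping parallel subtests under an enclosing t.Run: the "Group" call does
// not return until every parallel child has completed, so the parent's
// deferred cancel() runs only afterwards. Without the wrapper, the defer would
// fire before the parallel subtests start and hand them a cancelled context.
func TestGroupedParallelSubtests(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel() // runs once the "Group" subtree below has finished

    t.Run("Group", func(t *testing.T) {
        for _, name := range []string{"SubtestA", "SubtestB"} {
            t.Run(name, func(t *testing.T) {
                t.Parallel() // pauses until the Group body returns, then runs concurrently
                select {
                case <-ctx.Done():
                    t.Fatal("context was cancelled before the subtest finished")
                case <-time.After(10 * time.Millisecond):
                    // ctx is still live here: cancel() has not run yet
                }
            })
        }
    })
}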
jparrill authored Jan 4, 2023
1 parent 745b725 commit b5740e5
Showing 3 changed files with 160 additions and 128 deletions.
1 change: 1 addition & 0 deletions test/e2e/nodepool_autorepair_test.go
@@ -24,6 +24,7 @@ import (

func testNodePoolAutoRepair(parentCtx context.Context, mgmtClient crclient.Client, hostedCluster *hyperv1.HostedCluster, hostedClusterClient crclient.Client, clusterOpts core.CreateOptions) func(t *testing.T) {
return func(t *testing.T) {
t.Parallel()
g := NewWithT(t)

ctx, cancel := context.WithCancel(parentCtx)
276 changes: 152 additions & 124 deletions test/e2e/nodepool_machineconfig_test.go
@@ -7,18 +7,22 @@ import (
"context"
_ "embed"
"encoding/json"
"fmt"
"strings"
"testing"
"time"

. "github.com/onsi/gomega"

ignitionapi "github.com/coreos/ignition/v2/config/v3_2/types"
hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/cmd/cluster/core"
hyperapi "github.com/openshift/hypershift/support/api"
e2eutil "github.com/openshift/hypershift/test/e2e/util"
mcfgv1 "github.com/openshift/hypershift/thirdparty/machineconfigoperator/pkg/apis/machineconfiguration.openshift.io/v1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
@@ -29,147 +33,171 @@ import (
"sigs.k8s.io/yaml"
)

func TestNodepoolMachineconfigGetsRolledout(t *testing.T) {
t.Parallel()
g := NewWithT(t)

ctx, cancel := context.WithCancel(testContext)
defer cancel()

client, err := e2eutil.GetClient()
g.Expect(err).NotTo(HaveOccurred(), "failed to get k8s client")

clusterOpts := globalOpts.DefaultClusterOptions(t)
clusterOpts.ControlPlaneAvailabilityPolicy = string(hyperv1.SingleReplica)
clusterOpts.BeforeApply = func(o crclient.Object) {
nodePool, isNodepool := o.(*hyperv1.NodePool)
if !isNodepool {
return
func testNodepoolMachineconfigGetsRolledout(parentCtx context.Context, mgmtClient crclient.Client, hostedCluster *hyperv1.HostedCluster, hostedClusterClient crclient.Client, clusterOpts core.CreateOptions) func(t *testing.T) {
return func(t *testing.T) {
t.Parallel()
g := NewWithT(t)

ctx, cancel := context.WithCancel(parentCtx)
originalNP := hyperv1.NodePool{}
defer func() {
t.Log("Test: NodePool MachineConfig finished")
cancel()
}()

// List NodePools (there should be only one)
nodePools := &hyperv1.NodePoolList{}
err := mgmtClient.List(ctx, nodePools, &crclient.ListOptions{
Namespace: hostedCluster.Namespace,
})
g.Expect(err).NotTo(HaveOccurred(), "failed to list existing NodePools")
for _, nodePool := range nodePools.Items {
if !strings.Contains(nodePool.Name, "-test-") {
originalNP = nodePool
}
}
nodePool.Spec.Management.Replace = &hyperv1.ReplaceUpgrade{
Strategy: hyperv1.UpgradeStrategyRollingUpdate,
RollingUpdate: &hyperv1.RollingUpdate{
MaxUnavailable: func(v intstr.IntOrString) *intstr.IntOrString { return &v }(intstr.FromInt(0)),
MaxSurge: func(v intstr.IntOrString) *intstr.IntOrString { return &v }(intstr.FromInt(int(*nodePool.Spec.Replicas))),
g.Expect(originalNP.Name).NotTo(BeEmpty())
g.Expect(originalNP.Name).NotTo(ContainSubstring("test"))
awsNPInfo := originalNP.Spec.Platform.AWS

// Define a new Nodepool
nodePool := &hyperv1.NodePool{
TypeMeta: metav1.TypeMeta{
Kind: "NodePool",
APIVersion: hyperv1.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: hostedCluster.Name + "-" + "test-machineconfig",
Namespace: hostedCluster.Namespace,
},
Spec: hyperv1.NodePoolSpec{
Management: hyperv1.NodePoolManagement{
UpgradeType: hyperv1.UpgradeTypeReplace,
AutoRepair: true,
Replace: &hyperv1.ReplaceUpgrade{
Strategy: hyperv1.UpgradeStrategyRollingUpdate,
RollingUpdate: &hyperv1.RollingUpdate{
MaxUnavailable: func(v intstr.IntOrString) *intstr.IntOrString { return &v }(intstr.FromInt(0)),
MaxSurge: func(v intstr.IntOrString) *intstr.IntOrString { return &v }(intstr.FromInt(int(oneReplicas))),
},
},
},
ClusterName: hostedCluster.Name,
Replicas: &oneReplicas,
Release: hyperv1.Release{
Image: hostedCluster.Spec.Release.Image,
},
Platform: hyperv1.NodePoolPlatform{
Type: hostedCluster.Spec.Platform.Type,
AWS: awsNPInfo,
},
},
}
}

hostedCluster := e2eutil.CreateCluster(t, ctx, client, &clusterOpts, globalOpts.Platform, globalOpts.ArtifactDir)

// Sanity check the cluster by waiting for the nodes to report ready
t.Logf("Waiting for guest client to become available")
guestClient := e2eutil.WaitForGuestClient(t, testContext, client, hostedCluster)

// Wait for Nodes to be Ready
numNodes := int32(globalOpts.configurableClusterOptions.NodePoolReplicas * len(clusterOpts.AWSPlatform.Zones))
e2eutil.WaitForNReadyNodes(t, testContext, guestClient, numNodes, hostedCluster.Spec.Platform.Type)

// Wait for the rollout to be complete
t.Logf("Waiting for cluster rollout. Image: %s", globalOpts.LatestReleaseImage)
e2eutil.WaitForImageRollout(t, testContext, client, guestClient, hostedCluster, globalOpts.LatestReleaseImage)
err = client.Get(testContext, crclient.ObjectKeyFromObject(hostedCluster), hostedCluster)
g.Expect(err).NotTo(HaveOccurred(), "failed to get hostedcluster")

ignitionConfig := ignitionapi.Config{
Ignition: ignitionapi.Ignition{
Version: "3.2.0",
},
Storage: ignitionapi.Storage{
Files: []ignitionapi.File{{
Node: ignitionapi.Node{Path: "/etc/custom-config"},
FileEmbedded1: ignitionapi.FileEmbedded1{Contents: ignitionapi.Resource{Source: utilpointer.String("data:,content%0A")}},
}},
},
}
serializedIgnitionConfig, err := json.Marshal(ignitionConfig)
if err != nil {
t.Fatalf("failed to serialize ignition config: %v", err)
}
machineConfig := &mcfgv1.MachineConfig{
ObjectMeta: metav1.ObjectMeta{
Name: "custom",
Labels: map[string]string{"machineconfiguration.openshift.io/role": "worker"},
},
Spec: mcfgv1.MachineConfigSpec{Config: runtime.RawExtension{Raw: serializedIgnitionConfig}},
}
gvk, err := apiutil.GVKForObject(machineConfig, hyperapi.Scheme)
if err != nil {
t.Fatalf("failed to get typeinfo for %T from scheme: %v", machineConfig, err)
}
machineConfig.SetGroupVersionKind(gvk)
serializedMachineConfig, err := yaml.Marshal(machineConfig)
if err != nil {
t.Fatalf("failed to serialize machineConfig: %v", err)
}
machineConfigConfigMap := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "custom-machine-config",
Namespace: hostedCluster.Namespace,
},
Data: map[string]string{"config": string(serializedMachineConfig)},
}
if err := client.Create(ctx, machineConfigConfigMap); err != nil {
t.Fatalf("failed to create configmap for custom machineconfig: %v", err)
}
// Create NodePool for current test
err = mgmtClient.Create(ctx, nodePool)
if err != nil {
if !errors.IsAlreadyExists(err) {
t.Fatalf("failed to create nodePool %s with Autorepair function: %v", nodePool.Name, err)
}
err = nodePoolRecreate(t, ctx, nodePool, mgmtClient)
g.Expect(err).NotTo(HaveOccurred(), "failed to recreate the NodePool")
}
defer nodePoolScaleDownToZero(ctx, mgmtClient, *nodePool, t)

nodepools := &hyperv1.NodePoolList{}
if err := client.List(ctx, nodepools, crclient.InNamespace(hostedCluster.Namespace)); err != nil {
t.Fatalf("failed to list nodepools in namespace %s: %v", hostedCluster.Namespace, err)
}
numNodes := oneReplicas
t.Logf("Waiting for Nodes %d\n", numNodes)
nodes := e2eutil.WaitForNReadyNodesByNodePool(t, ctx, hostedClusterClient, numNodes, hostedCluster.Spec.Platform.Type, nodePool.Name)
t.Logf("Desired replicas available for nodePool: %v", nodePool.Name)

for _, nodepool := range nodepools.Items {
if nodepool.Spec.ClusterName != hostedCluster.Name {
continue
// Wait for the rollout to be complete
t.Logf("Waiting for cluster rollout. Image: %s", globalOpts.LatestReleaseImage)
e2eutil.WaitForImageRollout(t, ctx, mgmtClient, hostedClusterClient, hostedCluster, globalOpts.LatestReleaseImage)

// MachineConfig Actions
ignitionConfig := ignitionapi.Config{
Ignition: ignitionapi.Ignition{
Version: "3.2.0",
},
Storage: ignitionapi.Storage{
Files: []ignitionapi.File{{
Node: ignitionapi.Node{Path: "/etc/custom-config"},
FileEmbedded1: ignitionapi.FileEmbedded1{Contents: ignitionapi.Resource{Source: utilpointer.String("data:,content%0A")}},
}},
},
}
np := nodepool.DeepCopy()
nodepool.Spec.Config = append(nodepool.Spec.Config, corev1.LocalObjectReference{Name: machineConfigConfigMap.Name})
if err := client.Patch(ctx, &nodepool, crclient.MergeFrom(np)); err != nil {
t.Fatalf("failed to update nodepool %s after adding machineconfig: %v", nodepool.Name, err)
serializedIgnitionConfig, err := json.Marshal(ignitionConfig)
if err != nil {
t.Fatalf("failed to serialize ignition config: %v", err)
}
}

ds := machineConfigUpdatedVerificationDS.DeepCopy()
if err := guestClient.Create(ctx, ds); err != nil {
t.Fatalf("failed to create %s DaemonSet in guestcluster: %v", ds.Name, err)
}

t.Logf("waiting for rollout of updated nodepools")
err = wait.PollImmediateWithContext(ctx, 5*time.Second, 15*time.Minute, func(ctx context.Context) (bool, error) {
if ctx.Err() != nil {
return false, err
machineConfig := &mcfgv1.MachineConfig{
ObjectMeta: metav1.ObjectMeta{
Name: "custom",
Labels: map[string]string{"machineconfiguration.openshift.io/role": "worker"},
},
Spec: mcfgv1.MachineConfigSpec{Config: runtime.RawExtension{Raw: serializedIgnitionConfig}},
}
gvk, err := apiutil.GVKForObject(machineConfig, hyperapi.Scheme)
if err != nil {
t.Fatalf("failed to get typeinfo for %T from scheme: %v", machineConfig, err)
}
machineConfig.SetGroupVersionKind(gvk)
serializedMachineConfig, err := yaml.Marshal(machineConfig)
if err != nil {
t.Fatalf("failed to serialize machineConfig: %v", err)
}
machineConfigConfigMap := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "custom-machine-config",
Namespace: hostedCluster.Namespace,
},
Data: map[string]string{"config": string(serializedMachineConfig)},
}
pods := &corev1.PodList{}
if err := guestClient.List(ctx, pods, crclient.InNamespace(ds.Namespace), crclient.MatchingLabels(ds.Spec.Selector.MatchLabels)); err != nil {
t.Logf("WARNING: failed to list pods, will retry: %v", err)
return false, nil
if err := mgmtClient.Create(ctx, machineConfigConfigMap); err != nil {
t.Fatalf("failed to create configmap for custom machineconfig: %v", err)
}
nodes := &corev1.NodeList{}
if err := guestClient.List(ctx, nodes); err != nil {
t.Logf("WARNING: failed to list nodes, will retry: %v", err)
return false, nil

np := nodePool.DeepCopy()
nodePool.Spec.Config = append(nodePool.Spec.Config, corev1.LocalObjectReference{Name: machineConfigConfigMap.Name})
if err := mgmtClient.Patch(ctx, nodePool, crclient.MergeFrom(np)); err != nil {
t.Fatalf("failed to update nodepool %s after adding machineconfig: %v", nodePool.Name, err)
}
if len(pods.Items) != len(nodes.Items) {
return false, nil

ds := machineConfigUpdatedVerificationDS.DeepCopy()
if err := hostedClusterClient.Create(ctx, ds); err != nil {
t.Fatalf("failed to create %s DaemonSet in guestcluster: %v", ds.Name, err)
}

for _, pod := range pods.Items {
if !isPodReady(&pod) {
t.Logf("waiting for rollout of updated nodepools")
err = wait.PollImmediateWithContext(ctx, 5*time.Second, 15*time.Minute, func(ctx context.Context) (bool, error) {
if ctx.Err() != nil {
return false, err
}
pods := &corev1.PodList{}
if err := hostedClusterClient.List(ctx, pods, crclient.InNamespace(ds.Namespace), crclient.MatchingLabels(ds.Spec.Selector.MatchLabels)); err != nil {
t.Logf("WARNING: failed to list pods, will retry: %v", err)
return false, nil
}
}

return true, nil
})
if err != nil {
t.Fatalf("failed waiting for all pods in the machine config update verification DS to be ready: %v", err)
}
if len(pods.Items) != len(nodes) {
return false, nil
}

e2eutil.EnsureNodeCountMatchesNodePoolReplicas(t, testContext, client, guestClient, hostedCluster.Namespace)
e2eutil.EnsureNoCrashingPods(t, ctx, client, hostedCluster)
e2eutil.EnsureAllContainersHavePullPolicyIfNotPresent(t, ctx, client, hostedCluster)
e2eutil.EnsureHCPContainersHaveResourceRequests(t, ctx, client, hostedCluster)
e2eutil.EnsureNoPodsWithTooHighPriority(t, ctx, client, hostedCluster)
for _, pod := range pods.Items {
if !isPodReady(&pod) {
return false, nil
}
}

return true, nil
})
g.Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("failed waiting for all pods in the machine config update verification DS to be ready: %v", err))
g.Expect(nodePool.Status.Replicas).To(BeEquivalentTo(len(nodes)))
e2eutil.EnsureNoCrashingPods(t, ctx, mgmtClient, hostedCluster)
e2eutil.EnsureAllContainersHavePullPolicyIfNotPresent(t, ctx, mgmtClient, hostedCluster)
e2eutil.EnsureHCPContainersHaveResourceRequests(t, ctx, mgmtClient, hostedCluster)
e2eutil.EnsureNoPodsWithTooHighPriority(t, ctx, mgmtClient, hostedCluster)
}
}

//go:embed nodepool_machineconfig_verification_ds.yaml
11 changes: 7 additions & 4 deletions test/e2e/nodepool_test.go
@@ -26,13 +26,15 @@ var (
func TestNodePool(t *testing.T) {
t.Parallel()
g := NewWithT(t)

ctx, cancel := context.WithCancel(testContext)

defer func() {
t.Log("Test: NodePool finished")
cancel()
}()

// Set of tests
// Each test should have its own NodePool
clusterOpts := globalOpts.DefaultClusterOptions(t)

mgmtClient, err := e2eutil.GetClient()
@@ -44,9 +46,10 @@ func TestNodePool(t *testing.T) {
guestCluster := e2eutil.CreateCluster(t, ctx, mgmtClient, &clusterOpts, globalOpts.Platform, globalOpts.ArtifactDir)
guestClient := e2eutil.WaitForGuestClient(t, ctx, mgmtClient, guestCluster)

// Set of tests
// Each test should have its own NodePool
t.Run("TestNodePoolAutoRepair", testNodePoolAutoRepair(ctx, mgmtClient, guestCluster, guestClient, clusterOpts))
t.Run("Refactored", func(t *testing.T) {
t.Run("TestNodePoolAutoRepair", testNodePoolAutoRepair(ctx, mgmtClient, guestCluster, guestClient, clusterOpts))
t.Run("TestNodepoolMachineconfigGetsRolledout", testNodepoolMachineconfigGetsRolledout(ctx, mgmtClient, guestCluster, guestClient, clusterOpts))
})
}

// nodePoolScaleDownToZero function will scaleDown the nodePool created for the current tests
