Add blocking end-to-end tests for lifecycle hooks
Signed-off-by: killianmuldoon <[email protected]>
killianmuldoon committed Jun 29, 2022
1 parent 23cd3fe commit e6a5c4d
Showing 4 changed files with 176 additions and 52 deletions.
134 changes: 119 additions & 15 deletions test/e2e/cluster_upgrade_runtimesdk.go
@@ -29,13 +29,17 @@ import (
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1"
"sigs.k8s.io/cluster-api/test/framework"
"sigs.k8s.io/cluster-api/test/framework/clusterctl"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/conditions"
)

// clusterUpgradeWithRuntimeSDKSpecInput is the input for clusterUpgradeWithRuntimeSDKSpec.
@@ -113,7 +117,7 @@ func clusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() cl
workerMachineCount = *input.WorkerMachineCount
}

// Setup a Namespace where to host objects for this spec and create a watcher for the Namespace events.
// Set up a Namespace in which to host objects for this spec and create a watcher for the Namespace events.
namespace, cancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder)
clusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult)
})
@@ -156,6 +160,9 @@ func clusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() cl
ControlPlaneMachineCount: pointer.Int64Ptr(controlPlaneMachineCount),
WorkerMachineCount: pointer.Int64Ptr(workerMachineCount),
},
PreWaitForCluster: func() {
beforeClusterCreateTestHandler(ctx, input.BootstrapClusterProxy.GetClient(), namespace.Name, clusterName, input.E2EConfig.GetIntervals(specName, "wait-cluster"))
},
WaitForClusterIntervals: input.E2EConfig.GetIntervals(specName, "wait-cluster"),
WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, "wait-control-plane"),
WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
Expand All @@ -176,6 +183,12 @@ func clusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() cl
WaitForKubeProxyUpgrade: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"),
WaitForDNSUpgrade: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"),
WaitForEtcdUpgrade: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"),
PreWaitForControlPlaneToBeUpgraded: func() {
beforeClusterUpgradeTestHandler(ctx, input.BootstrapClusterProxy.GetClient(), namespace.Name, clusterName, input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo), input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"))
},
PreWaitForMachineDeploymentToBeUpgraded: func() {
afterControlPlaneUpgradeTestHandler(ctx, input.BootstrapClusterProxy.GetClient(), namespace.Name, clusterName, input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo), input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"))
},
})

// Only attempt to upgrade MachinePools if they were provided in the template.
Expand All @@ -201,13 +214,13 @@ func clusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() cl
})

By("Checking all lifecycle hooks have been called")
// Assert that each hook passed to this function is marked as "true" in the response configmap
// Assert that each hook has been called and returned "Success" during the test.
err = checkLifecycleHooks(ctx, input.BootstrapClusterProxy.GetClient(), namespace.Name, clusterName, map[string]string{
"BeforeClusterCreate": "",
"BeforeClusterUpgrade": "",
"AfterControlPlaneInitialized": "",
"AfterControlPlaneUpgrade": "",
"AfterClusterUpgrade": "",
"BeforeClusterCreate": "Success",
"BeforeClusterUpgrade": "Success",
"AfterControlPlaneInitialized": "Success",
"AfterControlPlaneUpgrade": "Success",
"AfterClusterUpgrade": "Success",
})
Expect(err).ToNot(HaveOccurred(), "Lifecycle hook calls were not as expected")

@@ -266,26 +279,117 @@ func responsesConfigMap(name string, namespace *corev1.Namespace) *corev1.Config
Name: fmt.Sprintf("%s-hookresponses", name),
Namespace: namespace.Name,
},
// Every response contains only Status:Success. The test checks whether each handler has been called at least once.
// Set the initial preloadedResponses for each of the tested hooks.
Data: map[string]string{
"BeforeClusterCreate-response": `{"Status": "Success"}`,
"BeforeClusterUpgrade-response": `{"Status": "Success"}`,
"AfterControlPlaneInitialized-response": `{"Status": "Success"}`,
"AfterControlPlaneUpgrade-response": `{"Status": "Success"}`,
"AfterClusterUpgrade-response": `{"Status": "Success"}`,
// Blocking hooks are set to Status:Failure initially. These will be changed during the test.
"BeforeClusterCreate-preloadedResponse": `{"Status": "Failure", "Message": "hook failed"}`,
"BeforeClusterUpgrade-preloadedResponse": `{"Status": "Failure", "Message": "hook failed"}`,
"AfterControlPlaneUpgrade-preloadedResponse": `{"Status": "Failure", "Message": "hook failed"}`,

// Non-blocking hooks are set to Status:Success.
"AfterControlPlaneInitialized-preloadedResponse": `{"Status": "Success"}`,
"AfterClusterUpgrade-preloadedResponse": `{"Status": "Success"}`,
},
}
}
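
For orientation, this is a sketch of what the ConfigMap Data should look like once the test has finished, assuming every hook was called and every blocking hook was unblocked during the run (the -actualResponseStatus keys are written by the test extension; see handlers.go below):

// expectedFinalData sketches the hypothetical final state of the
// <clusterName>-hookresponses ConfigMap: the blocking preloadedResponses have
// been patched to Success and the extension has recorded each call's status.
var expectedFinalData = map[string]string{
    "BeforeClusterCreate-preloadedResponse":          `{"Status": "Success"}`, // patched by the test
    "BeforeClusterUpgrade-preloadedResponse":         `{"Status": "Success"}`, // patched by the test
    "AfterControlPlaneUpgrade-preloadedResponse":     `{"Status": "Success"}`, // patched by the test
    "AfterControlPlaneInitialized-preloadedResponse": `{"Status": "Success"}`,
    "AfterClusterUpgrade-preloadedResponse":          `{"Status": "Success"}`,

    // Written by the test extension each time a hook handler is called.
    "BeforeClusterCreate-actualResponseStatus":          "Success",
    "BeforeClusterUpgrade-actualResponseStatus":         "Success",
    "AfterControlPlaneInitialized-actualResponseStatus": "Success",
    "AfterControlPlaneUpgrade-actualResponseStatus":     "Success",
    "AfterClusterUpgrade-actualResponseStatus":          "Success",
}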

// checkLifecycleHooks checks that each hook in hooks has been called at least once by verifying that its
// actualResponseStatus is recorded in the hook response ConfigMap. For each hook with a non-empty expected value,
// it also checks that the recorded value matches.
func checkLifecycleHooks(ctx context.Context, c client.Client, namespace string, clusterName string, hooks map[string]string) error {
configMap := &corev1.ConfigMap{}
configMapName := clusterName + "-hookresponses"
err := c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: configMapName}, configMap)
Expect(err).ToNot(HaveOccurred(), "Failed to get the hook response configmap")
for hook := range hooks {
if _, ok := configMap.Data[hook+"-called"]; !ok {
for hook, expected := range hooks {
v, ok := configMap.Data[hook+"-actualResponseStatus"]
if !ok {
return errors.Errorf("hook %s call not recorded in configMap %s/%s", hook, namespace, configMapName)
}
if expected != "" && expected != v {
return errors.Errorf("hook %s was expected to be %s in configMap got %s", expected, hook, v)
}
}
return nil
}
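
As a usage sketch (namespace and cluster names are hypothetical): an empty expected value asserts only that the hook was called, while a non-empty value additionally asserts the recorded status:

func exampleCheckLifecycleHooks(ctx context.Context, c client.Client) error {
    return checkLifecycleHooks(ctx, c, "test-ns", "my-cluster", map[string]string{
        "BeforeClusterCreate":  "",        // called at least once, any status
        "BeforeClusterUpgrade": "Success", // called and recorded Status:Success
    })
}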

// beforeClusterCreateTestHandler calls runtimeHookTestHandler with a blocking condition that returns true once the
// Cluster has entered ClusterPhaseProvisioned, i.e. once the BeforeClusterCreate hook has stopped blocking.
func beforeClusterCreateTestHandler(ctx context.Context, c client.Client, namespace, clusterName string, intervals []interface{}) {
runtimeHookTestHandler(ctx, c, namespace, clusterName, "BeforeClusterCreate", func() bool {
// This hook should block the Cluster from entering the "Provisioned" state.
cluster := &clusterv1.Cluster{}
Expect(c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: clusterName}, cluster)).To(Succeed())
return cluster.Status.Phase == string(clusterv1.ClusterPhaseProvisioned)
}, intervals)
}

// beforeClusterUpgradeTestHandler calls runtimeHookTestHandler with a blocking condition that returns true once the
// Cluster's ReadyCondition reports controlplanev1.RollingUpdateInProgressReason, i.e. once the upgrade has started.
func beforeClusterUpgradeTestHandler(ctx context.Context, c client.Client, namespace, clusterName, version string, intervals []interface{}) {
runtimeHookTestHandler(ctx, c, namespace, clusterName, "BeforeClusterUpgrade", func() bool {
cluster := &clusterv1.Cluster{}
var unblocked bool

// First ensure the Cluster topology has been updated to the target Kubernetes Version.
Eventually(func() bool {
Expect(c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: clusterName}, cluster)).To(Succeed())
return cluster.Spec.Topology.Version == version
}).Should(BeTrue(), "BeforeClusterUpgrade blocking condition false: Cluster topology has not been updated to the target Kubernetes Version")

// Check if the Cluster is showing the RollingUpdateInProgress condition reason. If it is, the upgrade process has been unblocked.
if conditions.IsFalse(cluster, clusterv1.ReadyCondition) &&
conditions.GetReason(cluster, clusterv1.ReadyCondition) == controlplanev1.RollingUpdateInProgressReason {
unblocked = true
}
return unblocked
}, intervals)
}

// afterControlPlaneUpgradeTestHandler calls runtimeHookTestHandler with a blocking condition that returns true once any
// MachineDeployment in the Cluster has picked up the target Kubernetes version.
func afterControlPlaneUpgradeTestHandler(ctx context.Context, c client.Client, namespace, clusterName, version string, intervals []interface{}) {
runtimeHookTestHandler(ctx, c, namespace, clusterName, "AfterControlPlaneUpgrade", func() bool {
var unblocked bool
mds := &clusterv1.MachineDeploymentList{}
Expect(c.List(ctx, mds, client.MatchingLabels{
clusterv1.ClusterLabelName: clusterName,
clusterv1.ClusterTopologyOwnedLabel: "",
})).To(Succeed())

// If any of the MachineDeployments have the target Kubernetes Version, the hook is unblocked.
for _, md := range mds.Items {
if *md.Spec.Template.Spec.Version == version {
unblocked = true
}
}
return unblocked
}, intervals)
}

func runtimeHookTestHandler(ctx context.Context, c client.Client, namespace, clusterName, hookName string, condition func() bool, intervals []interface{}) {
// First check that the LifecycleHook has been called at least once.
Eventually(func() bool {
err := checkLifecycleHooks(ctx, c, namespace, clusterName, map[string]string{hookName: ""})
return err == nil
}, intervals...).Should(BeTrue(), "%s has not been called", hookName)

// The condition should consistently be false while the runtime hook is returning "Failure".
Consistently(func() bool {
return condition()
}, intervals...).Should(BeFalse(), fmt.Sprintf("%s hook blocking condition succeeded before unblocking", hookName))

// Patch the ConfigMap to set the hook response to "Success".
By(fmt.Sprintf("Setting %s response to Status:Success", hookName))

configMap := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: clusterName + "-hookresponses", Namespace: namespace}}
Expect(c.Get(ctx, util.ObjectKey(configMap), configMap)).To(Succeed())
patch := client.RawPatch(types.MergePatchType,
[]byte(fmt.Sprintf(`{"data":{"%s-preloadedResponse":%s}}`, hookName, "\"{\\\"Status\\\": \\\"Success\\\"}\"")))
err := c.Patch(ctx, configMap, patch)
Expect(err).ToNot(HaveOccurred())

// Expect the Hook to pass, setting the condition to true before the timeout ends.
Eventually(func() bool {
return condition()
}, intervals...).Should(BeTrue(),
fmt.Sprintf("%s hook blocking condition did not succeed after unblocking", hookName))
}
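
For a concrete hook name, the merge patch body built above would look as follows; the inner quotes are escaped because the whole response is stored as a single string value (sketch for BeforeClusterCreate):

// JSON merge patch sent to unblock the BeforeClusterCreate hook.
const exampleUnblockPatch = `{"data":{"BeforeClusterCreate-preloadedResponse":"{\"Status\": \"Success\"}"}}`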
48 changes: 23 additions & 25 deletions test/extension/handlers/lifecycle/handlers.go
@@ -42,18 +42,15 @@ func (h *Handler) DoBeforeClusterCreate(ctx context.Context, request *runtimehoo
log := ctrl.LoggerFrom(ctx)
log.Info("BeforeClusterCreate is called")
cluster := request.Cluster
if err := h.recordCallInConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.BeforeClusterCreate); err != nil {

if err := h.readResponseFromConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.BeforeClusterCreate, response); err != nil {
response.Status = runtimehooksv1.ResponseStatusFailure
response.Message = err.Error()
return
}
log.Info("BeforeClusterCreate has been recorded in configmap", "cm", cluster.Name+"-hookresponses")

err := h.readResponseFromConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.BeforeClusterCreate, response)
if err != nil {
if err := h.recordCallInConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.BeforeClusterCreate, response); err != nil {
response.Status = runtimehooksv1.ResponseStatusFailure
response.Message = err.Error()
return
}
}

Expand All @@ -62,16 +59,16 @@ func (h *Handler) DoBeforeClusterUpgrade(ctx context.Context, request *runtimeho
log := ctrl.LoggerFrom(ctx)
log.Info("BeforeClusterUpgrade is called")
cluster := request.Cluster
if err := h.recordCallInConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.BeforeClusterUpgrade); err != nil {

if err := h.readResponseFromConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.BeforeClusterUpgrade, response); err != nil {
response.Status = runtimehooksv1.ResponseStatusFailure
response.Message = err.Error()
return
}
err := h.readResponseFromConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.BeforeClusterUpgrade, response)
if err != nil {

if err := h.recordCallInConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.BeforeClusterUpgrade, response); err != nil {
response.Status = runtimehooksv1.ResponseStatusFailure
response.Message = err.Error()
return
}
}

Expand All @@ -80,16 +77,16 @@ func (h *Handler) DoAfterControlPlaneInitialized(ctx context.Context, request *r
log := ctrl.LoggerFrom(ctx)
log.Info("AfterControlPlaneInitialized is called")
cluster := request.Cluster
if err := h.recordCallInConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.AfterControlPlaneInitialized); err != nil {

if err := h.readResponseFromConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.AfterControlPlaneInitialized, response); err != nil {
response.Status = runtimehooksv1.ResponseStatusFailure
response.Message = err.Error()
return
}
err := h.readResponseFromConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.AfterControlPlaneInitialized, response)
if err != nil {

if err := h.recordCallInConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.AfterControlPlaneInitialized, response); err != nil {
response.Status = runtimehooksv1.ResponseStatusFailure
response.Message = err.Error()
return
}
}

Expand All @@ -98,16 +95,16 @@ func (h *Handler) DoAfterControlPlaneUpgrade(ctx context.Context, request *runti
log := ctrl.LoggerFrom(ctx)
log.Info("AfterControlPlaneUpgrade is called")
cluster := request.Cluster
if err := h.recordCallInConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.AfterControlPlaneUpgrade); err != nil {

if err := h.readResponseFromConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.AfterControlPlaneUpgrade, response); err != nil {
response.Status = runtimehooksv1.ResponseStatusFailure
response.Message = err.Error()
return
}
err := h.readResponseFromConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.AfterControlPlaneUpgrade, response)
if err != nil {

if err := h.recordCallInConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.AfterControlPlaneUpgrade, response); err != nil {
response.Status = runtimehooksv1.ResponseStatusFailure
response.Message = err.Error()
return
}
}

Expand All @@ -116,16 +113,16 @@ func (h *Handler) DoAfterClusterUpgrade(ctx context.Context, request *runtimehoo
log := ctrl.LoggerFrom(ctx)
log.Info("AfterClusterUpgrade is called")
cluster := request.Cluster
if err := h.recordCallInConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.AfterClusterUpgrade); err != nil {

if err := h.readResponseFromConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.AfterClusterUpgrade, response); err != nil {
response.Status = runtimehooksv1.ResponseStatusFailure
response.Message = err.Error()
return
}
err := h.readResponseFromConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.AfterClusterUpgrade, response)
if err != nil {

if err := h.recordCallInConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.AfterClusterUpgrade, response); err != nil {
response.Status = runtimehooksv1.ResponseStatusFailure
response.Message = err.Error()
return
}
}

Expand All @@ -136,22 +133,23 @@ func (h *Handler) readResponseFromConfigMap(ctx context.Context, name, namespace
if err := h.Client.Get(ctx, client.ObjectKey{Namespace: namespace, Name: configMapName}, configMap); err != nil {
return errors.Wrapf(err, "failed to read the ConfigMap %s/%s", namespace, configMapName)
}
if err := yaml.Unmarshal([]byte(configMap.Data[hookName+"-response"]), response); err != nil {
if err := yaml.Unmarshal([]byte(configMap.Data[hookName+"-preloadedResponse"]), response); err != nil {
return errors.Wrapf(err, "failed to read %q response information from ConfigMap", hook)
}
return nil
}
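
To illustrate what readResponseFromConfigMap does with a preloaded entry, a minimal sketch (assuming the v1alpha1 hook response types; sigs.k8s.io/yaml converts the value to JSON first, and encoding/json matches the capitalised ConfigMap keys case-insensitively against the lower-case JSON tags):

func exampleReadPreloadedResponse() {
    resp := &runtimehooksv1.BeforeClusterCreateResponse{}
    if err := yaml.Unmarshal([]byte(`{"Status": "Failure", "Message": "hook failed"}`), resp); err != nil {
        panic(err)
    }
    // resp.Status == runtimehooksv1.ResponseStatusFailure
    // resp.Message == "hook failed"
}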

func (h *Handler) recordCallInConfigMap(ctx context.Context, name, namespace string, hook runtimecatalog.Hook) error {
func (h *Handler) recordCallInConfigMap(ctx context.Context, name, namespace string, hook runtimecatalog.Hook, response runtimehooksv1.ResponseObject) error {
hookName := runtimecatalog.HookName(hook)
configMap := &corev1.ConfigMap{}
configMapName := name + "-hookresponses"
if err := h.Client.Get(ctx, client.ObjectKey{Namespace: namespace, Name: configMapName}, configMap); err != nil {
return errors.Wrapf(err, "failed to read the ConfigMap %s/%s", namespace, configMapName)
}

// Patch the actualResponseStatus with the returned value
patch := client.RawPatch(types.MergePatchType,
[]byte(fmt.Sprintf(`{"data":{"%s-called":"true"}}`, hookName)))
[]byte(fmt.Sprintf(`{"data":{"%s-actualResponseStatus":"%s"}}`, hookName, response.GetStatus()))) //nolint:gocritic
if err := h.Client.Patch(ctx, configMap, patch); err != nil {
return errors.Wrapf(err, "failed to update the ConfigMap %s/%s", namespace, configMapName)
}
