From 2aabed531b4ed2567c5443498728add017f15ac8 Mon Sep 17 00:00:00 2001 From: Christian Schlotter Date: Mon, 6 May 2024 18:20:42 +0200 Subject: [PATCH] test: filter cluster-wide objects asserted in ResourceVersion tests to exclude objects of parallel tests --- cmd/clusterctl/client/cluster/ownergraph.go | 17 +++++++-- test/e2e/cluster_upgrade_runtimesdk.go | 9 +++++ test/e2e/cluster_upgrade_runtimesdk_test.go | 7 ++++ test/e2e/quick_start_test.go | 16 ++++---- test/framework/finalizers_helpers.go | 20 +++++----- test/framework/ownerreference_helpers.go | 42 ++++++++++++++------- test/framework/resourceversion_helpers.go | 10 ++--- 7 files changed, 82 insertions(+), 39 deletions(-) diff --git a/cmd/clusterctl/client/cluster/ownergraph.go b/cmd/clusterctl/client/cluster/ownergraph.go index e3e2553e96ce..d5f7286eca7b 100644 --- a/cmd/clusterctl/client/cluster/ownergraph.go +++ b/cmd/clusterctl/client/cluster/ownergraph.go @@ -39,11 +39,18 @@ type OwnerGraphNode struct { Owners []metav1.OwnerReference } +// GetOwnerGraphFilterFunction allows filtering the objects returned by GetOwnerGraph. +// The function has to return true for objects which should be kept. +// NOTE: this function signature is exposed to allow implementation of E2E tests; there is +// no guarantee about the stability of this API. Using this test with providers may require +// a custom implementation of this function, or the OwnerGraph it returns. +type GetOwnerGraphFilterFunction func(u unstructured.Unstructured) bool + // GetOwnerGraph returns a graph with all the objects considered by clusterctl move as nodes and the OwnerReference relationship between those objects as edges. // NOTE: this data structure is exposed to allow implementation of E2E tests verifying that CAPI can properly rebuild its // own owner references; there is no guarantee about the stability of this API. Using this test with providers may require // a custom implementation of this function, or the OwnerGraph it returns. -func GetOwnerGraph(ctx context.Context, namespace, kubeconfigPath string) (OwnerGraph, error) { +func GetOwnerGraph(ctx context.Context, namespace, kubeconfigPath string, filterFn GetOwnerGraphFilterFunction) (OwnerGraph, error) { p := newProxy(Kubeconfig{Path: kubeconfigPath, Context: ""}) invClient := newInventoryClient(p, nil) @@ -57,14 +64,14 @@ func GetOwnerGraph(ctx context.Context, namespace, kubeconfigPath string) (Owner // graph.Discovery can not be used here as it will use the latest APIVersion for ownerReferences - not those // present in the object `metadata.ownerReferences`. - owners, err := discoverOwnerGraph(ctx, namespace, graph) + owners, err := discoverOwnerGraph(ctx, namespace, graph, filterFn) if err != nil { return OwnerGraph{}, errors.Wrap(err, "failed to discovery ownerGraph types") } return owners, nil } -func discoverOwnerGraph(ctx context.Context, namespace string, o *objectGraph) (OwnerGraph, error) { +func discoverOwnerGraph(ctx context.Context, namespace string, o *objectGraph, filterFn GetOwnerGraphFilterFunction) (OwnerGraph, error) { selectors := []client.ListOption{} if namespace != "" { selectors = append(selectors, client.InNamespace(namespace)) @@ -102,6 +109,10 @@ func discoverOwnerGraph(ctx context.Context, namespace string, o *objectGraph) ( } } for _, obj := range objList.Items { + // Exclude the objects via the filter function. + if filterFn != nil && !filterFn(obj) { + continue + } // Exclude the kube-root-ca.crt ConfigMap from the owner graph. 
if obj.GetKind() == "ConfigMap" && obj.GetName() == "kube-root-ca.crt" { continue diff --git a/test/e2e/cluster_upgrade_runtimesdk.go b/test/e2e/cluster_upgrade_runtimesdk.go index a8290992e107..19c41ad797a6 100644 --- a/test/e2e/cluster_upgrade_runtimesdk.go +++ b/test/e2e/cluster_upgrade_runtimesdk.go @@ -85,6 +85,10 @@ type clusterUpgradeWithRuntimeSDKSpecInput struct { // Allows to inject a function to be run after test namespace is created. // If not specified, this is a no-op. PostNamespaceCreated func(managementClusterProxy framework.ClusterProxy, workloadClusterNamespace string) + + // Allows to inject a function to be run after the cluster is upgraded. + // If not specified, this is a no-op. + PostMachinesProvisioned func(managementClusterProxy framework.ClusterProxy, workloadClusterNamespace, workloadClusterName string) } // clusterUpgradeWithRuntimeSDKSpec implements a spec that upgrades a cluster and runs the Kubernetes conformance suite. @@ -260,6 +264,11 @@ func clusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() cl WaitForNodesReady: input.E2EConfig.GetIntervals(specName, "wait-nodes-ready"), }) + if input.PostMachinesProvisioned != nil { + log.Logf("Calling PostMachinesProvisioned for cluster %s", klog.KRef(namespace.Name, clusterResources.Cluster.Name)) + input.PostMachinesProvisioned(input.BootstrapClusterProxy, namespace.Name, clusterName) + } + By("Dumping resources and deleting the workload cluster; deletion waits for BeforeClusterDeleteHook to gate the operation") dumpAndDeleteCluster(ctx, input.BootstrapClusterProxy, namespace.Name, clusterName, input.ArtifactFolder) diff --git a/test/e2e/cluster_upgrade_runtimesdk_test.go b/test/e2e/cluster_upgrade_runtimesdk_test.go index 9f54da50b9b6..27c870ccce2f 100644 --- a/test/e2e/cluster_upgrade_runtimesdk_test.go +++ b/test/e2e/cluster_upgrade_runtimesdk_test.go @@ -24,6 +24,8 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "k8s.io/utils/ptr" + + "sigs.k8s.io/cluster-api/test/framework" ) var _ = Describe("When upgrading a workload cluster using ClusterClass with RuntimeSDK [ClusterClass]", func() { @@ -41,6 +43,11 @@ var _ = Describe("When upgrading a workload cluster using ClusterClass with Runt ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, InfrastructureProvider: ptr.To("docker"), + PostMachinesProvisioned: func(proxy framework.ClusterProxy, namespace, clusterName string) { + // This check ensures that the resourceVersions are stable, i.e. it verifies there are no + // continuous reconciles when everything should be stable. + framework.ValidateResourceVersionStable(ctx, proxy, namespace, framework.GetOwnerGraphFilterByClusterNameFunc(clusterName)) + }, // "upgrades" is the same as the "topology" flavor but with an additional MachinePool. Flavor: ptr.To("upgrades-runtimesdk"), } diff --git a/test/e2e/quick_start_test.go b/test/e2e/quick_start_test.go index d86179823c7a..7a51de0ba89a 100644 --- a/test/e2e/quick_start_test.go +++ b/test/e2e/quick_start_test.go @@ -39,7 +39,7 @@ var _ = Describe("When following the Cluster API quick-start", func() { InfrastructureProvider: ptr.To("docker"), PostMachinesProvisioned: func(proxy framework.ClusterProxy, namespace, clusterName string) { // This check ensures that owner references are resilient - i.e. correctly re-reconciled - when removed. 
- framework.ValidateOwnerReferencesResilience(ctx, proxy, namespace, clusterName, + framework.ValidateOwnerReferencesResilience(ctx, proxy, namespace, clusterName, framework.GetOwnerGraphFilterByClusterNameFunc(clusterName), framework.CoreOwnerReferenceAssertion, framework.ExpOwnerReferenceAssertions, framework.DockerInfraOwnerReferenceAssertions, @@ -48,7 +48,7 @@ var _ = Describe("When following the Cluster API quick-start", func() { framework.KubernetesReferenceAssertions, ) // This check ensures that owner references are correctly updated to the correct apiVersion. - framework.ValidateOwnerReferencesOnUpdate(ctx, proxy, namespace, clusterName, + framework.ValidateOwnerReferencesOnUpdate(ctx, proxy, namespace, clusterName, framework.GetOwnerGraphFilterByClusterNameFunc(clusterName), framework.CoreOwnerReferenceAssertion, framework.ExpOwnerReferenceAssertions, framework.DockerInfraOwnerReferenceAssertions, @@ -57,7 +57,7 @@ var _ = Describe("When following the Cluster API quick-start", func() { framework.KubernetesReferenceAssertions, ) // This check ensures that finalizers are resilient - i.e. correctly re-reconciled - when removed. - framework.ValidateFinalizersResilience(ctx, proxy, namespace, clusterName, + framework.ValidateFinalizersResilience(ctx, proxy, namespace, clusterName, framework.GetOwnerGraphFilterByClusterNameFunc(clusterName), framework.CoreFinalizersAssertion, framework.KubeadmControlPlaneFinalizersAssertion, framework.ExpFinalizersAssertion, @@ -65,7 +65,7 @@ var _ = Describe("When following the Cluster API quick-start", func() { ) // This check ensures that the resourceVersions are stable, i.e. it verifies there are no // continuous reconciles when everything should be stable. - framework.ValidateResourceVersionStable(ctx, proxy, namespace) + framework.ValidateResourceVersionStable(ctx, proxy, namespace, framework.GetOwnerGraphFilterByClusterNameFunc(clusterName)) }, } }) @@ -83,7 +83,7 @@ var _ = Describe("When following the Cluster API quick-start with ClusterClass [ InfrastructureProvider: ptr.To("docker"), // This check ensures that owner references are resilient - i.e. correctly re-reconciled - when removed. PostMachinesProvisioned: func(proxy framework.ClusterProxy, namespace, clusterName string) { - framework.ValidateOwnerReferencesResilience(ctx, proxy, namespace, clusterName, + framework.ValidateOwnerReferencesResilience(ctx, proxy, namespace, clusterName, framework.GetOwnerGraphFilterByClusterNameFunc(clusterName), framework.CoreOwnerReferenceAssertion, framework.ExpOwnerReferenceAssertions, framework.DockerInfraOwnerReferenceAssertions, @@ -92,7 +92,7 @@ var _ = Describe("When following the Cluster API quick-start with ClusterClass [ framework.KubernetesReferenceAssertions, ) // This check ensures that owner references are correctly updated to the correct apiVersion. - framework.ValidateOwnerReferencesOnUpdate(ctx, proxy, namespace, clusterName, + framework.ValidateOwnerReferencesOnUpdate(ctx, proxy, namespace, clusterName, framework.GetOwnerGraphFilterByClusterNameFunc(clusterName), framework.CoreOwnerReferenceAssertion, framework.ExpOwnerReferenceAssertions, framework.DockerInfraOwnerReferenceAssertions, @@ -101,7 +101,7 @@ var _ = Describe("When following the Cluster API quick-start with ClusterClass [ framework.KubernetesReferenceAssertions, ) // This check ensures that finalizers are resilient - i.e. correctly re-reconciled - when removed. 
- framework.ValidateFinalizersResilience(ctx, proxy, namespace, clusterName, + framework.ValidateFinalizersResilience(ctx, proxy, namespace, clusterName, framework.GetOwnerGraphFilterByClusterNameFunc(clusterName), framework.CoreFinalizersAssertion, framework.KubeadmControlPlaneFinalizersAssertion, framework.ExpFinalizersAssertion, @@ -109,7 +109,7 @@ var _ = Describe("When following the Cluster API quick-start with ClusterClass [ ) // This check ensures that the resourceVersions are stable, i.e. it verifies there are no // continuous reconciles when everything should be stable. - framework.ValidateResourceVersionStable(ctx, proxy, namespace) + framework.ValidateResourceVersionStable(ctx, proxy, namespace, framework.GetOwnerGraphFilterByClusterNameFunc(clusterName)) }, } }) diff --git a/test/framework/finalizers_helpers.go b/test/framework/finalizers_helpers.go index cdc544769a92..568853364ca3 100644 --- a/test/framework/finalizers_helpers.go +++ b/test/framework/finalizers_helpers.go @@ -64,13 +64,13 @@ var KubeadmControlPlaneFinalizersAssertion = map[string][]string{ } // ValidateFinalizersResilience checks that expected finalizers are in place, deletes them, and verifies that expected finalizers are properly added again. -func ValidateFinalizersResilience(ctx context.Context, proxy ClusterProxy, namespace, clusterName string, finalizerAssertions ...map[string][]string) { +func ValidateFinalizersResilience(ctx context.Context, proxy ClusterProxy, namespace, clusterName string, ownerGraphFilterFunction clusterctlcluster.GetOwnerGraphFilterFunction, finalizerAssertions ...map[string][]string) { clusterKey := client.ObjectKey{Namespace: namespace, Name: clusterName} allFinalizerAssertions, err := concatenateFinalizerAssertions(finalizerAssertions...) Expect(err).ToNot(HaveOccurred()) // Collect all objects where finalizers were initially set - objectsWithFinalizers := getObjectsWithFinalizers(ctx, proxy, namespace, allFinalizerAssertions) + objectsWithFinalizers := getObjectsWithFinalizers(ctx, proxy, namespace, allFinalizerAssertions, ownerGraphFilterFunction) // Setting the paused property on the Cluster resource will pause reconciliations, thereby having no effect on Finalizers. // This also makes debugging easier. @@ -79,18 +79,18 @@ func ValidateFinalizersResilience(ctx context.Context, proxy ClusterProxy, names // We are testing the worst-case scenario, i.e. all finalizers are deleted. // Once all Clusters are paused remove all the Finalizers from all objects in the graph. // The reconciliation loop should be able to recover from this, by adding the required Finalizers back. - removeFinalizers(ctx, proxy, namespace) + removeFinalizers(ctx, proxy, namespace, ownerGraphFilterFunction) // Unpause the cluster. setClusterPause(ctx, proxy.GetClient(), clusterKey, false) // Check that the Finalizers are as expected after further reconciliations. - assertFinalizersExist(ctx, proxy, namespace, objectsWithFinalizers, allFinalizerAssertions) + assertFinalizersExist(ctx, proxy, namespace, objectsWithFinalizers, allFinalizerAssertions, ownerGraphFilterFunction) } // removeFinalizers removes all Finalizers from objects in the owner graph. 
-func removeFinalizers(ctx context.Context, proxy ClusterProxy, namespace string) { - graph, err := clusterctlcluster.GetOwnerGraph(ctx, namespace, proxy.GetKubeconfigPath()) +func removeFinalizers(ctx context.Context, proxy ClusterProxy, namespace string, ownerGraphFilterFunction clusterctlcluster.GetOwnerGraphFilterFunction) { + graph, err := clusterctlcluster.GetOwnerGraph(ctx, namespace, proxy.GetKubeconfigPath(), ownerGraphFilterFunction) Expect(err).ToNot(HaveOccurred()) for _, object := range graph { ref := object.Object @@ -107,8 +107,8 @@ func removeFinalizers(ctx context.Context, proxy ClusterProxy, namespace string) } } -func getObjectsWithFinalizers(ctx context.Context, proxy ClusterProxy, namespace string, allFinalizerAssertions map[string][]string) map[string]*unstructured.Unstructured { - graph, err := clusterctlcluster.GetOwnerGraph(ctx, namespace, proxy.GetKubeconfigPath()) +func getObjectsWithFinalizers(ctx context.Context, proxy ClusterProxy, namespace string, allFinalizerAssertions map[string][]string, ownerGraphFilterFunction clusterctlcluster.GetOwnerGraphFilterFunction) map[string]*unstructured.Unstructured { + graph, err := clusterctlcluster.GetOwnerGraph(ctx, namespace, proxy.GetKubeconfigPath(), ownerGraphFilterFunction) Expect(err).ToNot(HaveOccurred()) objsWithFinalizers := map[string]*unstructured.Unstructured{} @@ -134,10 +134,10 @@ func getObjectsWithFinalizers(ctx context.Context, proxy ClusterProxy, namespace } // assertFinalizersExist ensures that current Finalizers match those in the initialObjectsWithFinalizers. -func assertFinalizersExist(ctx context.Context, proxy ClusterProxy, namespace string, initialObjsWithFinalizers map[string]*unstructured.Unstructured, allFinalizerAssertions map[string][]string) { +func assertFinalizersExist(ctx context.Context, proxy ClusterProxy, namespace string, initialObjsWithFinalizers map[string]*unstructured.Unstructured, allFinalizerAssertions map[string][]string, ownerGraphFilterFunction clusterctlcluster.GetOwnerGraphFilterFunction) { Eventually(func() error { var allErrs []error - finalObjsWithFinalizers := getObjectsWithFinalizers(ctx, proxy, namespace, allFinalizerAssertions) + finalObjsWithFinalizers := getObjectsWithFinalizers(ctx, proxy, namespace, allFinalizerAssertions, ownerGraphFilterFunction) for objKindNamespacedName, obj := range initialObjsWithFinalizers { // verify if finalizers for this resource were set on reconcile diff --git a/test/framework/ownerreference_helpers.go b/test/framework/ownerreference_helpers.go index a755ce6a9a0e..b81c90db3173 100644 --- a/test/framework/ownerreference_helpers.go +++ b/test/framework/ownerreference_helpers.go @@ -45,14 +45,14 @@ import ( ) // ValidateOwnerReferencesOnUpdate checks that expected owner references are updated to the correct apiVersion. -func ValidateOwnerReferencesOnUpdate(ctx context.Context, proxy ClusterProxy, namespace, clusterName string, assertFuncs ...map[string]func(reference []metav1.OwnerReference) error) { +func ValidateOwnerReferencesOnUpdate(ctx context.Context, proxy ClusterProxy, namespace, clusterName string, ownerGraphFilterFunction clusterctlcluster.GetOwnerGraphFilterFunction, assertFuncs ...map[string]func(reference []metav1.OwnerReference) error) { clusterKey := client.ObjectKey{Namespace: namespace, Name: clusterName} // Pause the cluster. 
setClusterPause(ctx, proxy.GetClient(), clusterKey, true) // Change the version of the OwnerReferences on each object in the Graph to "v1alpha1" - changeOwnerReferencesAPIVersion(ctx, proxy, namespace) + changeOwnerReferencesAPIVersion(ctx, proxy, namespace, ownerGraphFilterFunction) // Unpause the cluster. setClusterPause(ctx, proxy.GetClient(), clusterKey, false) @@ -66,13 +66,13 @@ func ValidateOwnerReferencesOnUpdate(ctx context.Context, proxy ClusterProxy, na forceClusterResourceSetReconcile(ctx, proxy.GetClient(), namespace) // Check that the ownerReferences have updated their apiVersions to current versions after reconciliation. - AssertOwnerReferences(namespace, proxy.GetKubeconfigPath(), assertFuncs...) + AssertOwnerReferences(namespace, proxy.GetKubeconfigPath(), ownerGraphFilterFunction, assertFuncs...) } // ValidateOwnerReferencesResilience checks that expected owner references are in place, deletes them, and verifies that expect owner references are properly rebuilt. -func ValidateOwnerReferencesResilience(ctx context.Context, proxy ClusterProxy, namespace, clusterName string, assertFuncs ...map[string]func(reference []metav1.OwnerReference) error) { +func ValidateOwnerReferencesResilience(ctx context.Context, proxy ClusterProxy, namespace, clusterName string, ownerGraphFilterFunction clusterctlcluster.GetOwnerGraphFilterFunction, assertFuncs ...map[string]func(reference []metav1.OwnerReference) error) { // Check that the ownerReferences are as expected on the first iteration. - AssertOwnerReferences(namespace, proxy.GetKubeconfigPath(), assertFuncs...) + AssertOwnerReferences(namespace, proxy.GetKubeconfigPath(), ownerGraphFilterFunction, assertFuncs...) clusterKey := client.ObjectKey{Namespace: namespace, Name: clusterName} @@ -88,7 +88,7 @@ func ValidateOwnerReferencesResilience(ctx context.Context, proxy ClusterProxy, setClusterPause(ctx, proxy.GetClient(), clusterKey, true) // Once all Clusters are paused remove the OwnerReference from all objects in the graph. - removeOwnerReferences(ctx, proxy, namespace) + removeOwnerReferences(ctx, proxy, namespace, ownerGraphFilterFunction) // Unpause the cluster. setClusterPause(ctx, proxy.GetClient(), clusterKey, false) @@ -98,10 +98,10 @@ func ValidateOwnerReferencesResilience(ctx context.Context, proxy ClusterProxy, forceClusterClassReconcile(ctx, proxy.GetClient(), clusterKey) // Check that the ownerReferences are as expected after additional reconciliations. - AssertOwnerReferences(namespace, proxy.GetKubeconfigPath(), assertFuncs...) + AssertOwnerReferences(namespace, proxy.GetKubeconfigPath(), ownerGraphFilterFunction, assertFuncs...) 
} -func AssertOwnerReferences(namespace, kubeconfigPath string, assertFuncs ...map[string]func(reference []metav1.OwnerReference) error) { +func AssertOwnerReferences(namespace, kubeconfigPath string, ownerGraphFilterFunction clusterctlcluster.GetOwnerGraphFilterFunction, assertFuncs ...map[string]func(reference []metav1.OwnerReference) error) { allAssertFuncs := map[string]func(reference []metav1.OwnerReference) error{} for _, m := range assertFuncs { for k, v := range m { @@ -112,7 +112,7 @@ func AssertOwnerReferences(namespace, kubeconfigPath string, assertFuncs ...map[ allErrs := []error{} ctx := context.Background() - graph, err := clusterctlcluster.GetOwnerGraph(ctx, namespace, kubeconfigPath) + graph, err := clusterctlcluster.GetOwnerGraph(ctx, namespace, kubeconfigPath, ownerGraphFilterFunction) // Sometimes the conversion-webhooks are not ready yet / cert-managers ca-injector // may not yet have injected the new ca bundle after the upgrade. // If this is the case we return an error to retry. @@ -382,6 +382,22 @@ func setClusterPause(ctx context.Context, cli client.Client, clusterKey types.Na Expect(cli.Patch(ctx, cluster, pausePatch)).To(Succeed()) } +// GetOwnerGraphFilterByClusterNameFunc is used in e2e tests where the owner graph gets queried +// to filter out cluster-wide objects which don't have the clusterName in their +// object name. This avoids assertions on objects which are part of in-parallel +// running tests like ExtensionConfig. +func GetOwnerGraphFilterByClusterNameFunc(clusterName string) func(u unstructured.Unstructured) bool { + return func(u unstructured.Unstructured) bool { + // Ignore cluster-wide objects which don't have the clusterName in their object + // name to avoid asserting on cluster-wide objects which get created or deleted + // by tests which run in-parallel (e.g. ExtensionConfig). + if u.GetNamespace() == "" && !strings.Contains(u.GetName(), clusterName) { + return false + } + return true + } +} + // forceClusterClassReconcile force reconciliation of the ClusterClass associated with the Cluster if one exists. If the // Cluster has no ClusterClass this is a no-op. 
func forceClusterClassReconcile(ctx context.Context, cli client.Client, clusterKey types.NamespacedName) { @@ -406,8 +422,8 @@ func forceClusterResourceSetReconcile(ctx context.Context, cli client.Client, na } } -func changeOwnerReferencesAPIVersion(ctx context.Context, proxy ClusterProxy, namespace string) { - graph, err := clusterctlcluster.GetOwnerGraph(ctx, namespace, proxy.GetKubeconfigPath()) +func changeOwnerReferencesAPIVersion(ctx context.Context, proxy ClusterProxy, namespace string, ownerGraphFilterFunction clusterctlcluster.GetOwnerGraphFilterFunction) { + graph, err := clusterctlcluster.GetOwnerGraph(ctx, namespace, proxy.GetKubeconfigPath(), ownerGraphFilterFunction) Expect(err).ToNot(HaveOccurred()) for _, object := range graph { ref := object.Object @@ -434,8 +450,8 @@ func changeOwnerReferencesAPIVersion(ctx context.Context, proxy ClusterProxy, na } } -func removeOwnerReferences(ctx context.Context, proxy ClusterProxy, namespace string) { - graph, err := clusterctlcluster.GetOwnerGraph(ctx, namespace, proxy.GetKubeconfigPath()) +func removeOwnerReferences(ctx context.Context, proxy ClusterProxy, namespace string, ownerGraphFilterFunction clusterctlcluster.GetOwnerGraphFilterFunction) { + graph, err := clusterctlcluster.GetOwnerGraph(ctx, namespace, proxy.GetKubeconfigPath(), ownerGraphFilterFunction) Expect(err).ToNot(HaveOccurred()) for _, object := range graph { ref := object.Object diff --git a/test/framework/resourceversion_helpers.go b/test/framework/resourceversion_helpers.go index 2cfda5cfa3bc..835298ab99e2 100644 --- a/test/framework/resourceversion_helpers.go +++ b/test/framework/resourceversion_helpers.go @@ -29,11 +29,11 @@ import ( ) // ValidateResourceVersionStable checks that resource versions are stable. -func ValidateResourceVersionStable(ctx context.Context, proxy ClusterProxy, namespace string) { +func ValidateResourceVersionStable(ctx context.Context, proxy ClusterProxy, namespace string, ownerGraphFilterFunction clusterctlcluster.GetOwnerGraphFilterFunction) { // Wait until resource versions are stable for a bit. var previousResourceVersions map[string]string Eventually(func(g Gomega) { - objectsWithResourceVersion, err := getObjectsWithResourceVersion(ctx, proxy, namespace) + objectsWithResourceVersion, err := getObjectsWithResourceVersion(ctx, proxy, namespace, ownerGraphFilterFunction) g.Expect(err).ToNot(HaveOccurred()) defer func() { @@ -46,14 +46,14 @@ func ValidateResourceVersionStable(ctx context.Context, proxy ClusterProxy, name // Verify resource versions are stable for a while. 
Consistently(func(g Gomega) { - objectsWithResourceVersion, err := getObjectsWithResourceVersion(ctx, proxy, namespace) + objectsWithResourceVersion, err := getObjectsWithResourceVersion(ctx, proxy, namespace, ownerGraphFilterFunction) g.Expect(err).ToNot(HaveOccurred()) g.Expect(objectsWithResourceVersion).To(Equal(previousResourceVersions)) }, 2*time.Minute, 15*time.Second).Should(Succeed(), "Resource versions didn't stay stable") } -func getObjectsWithResourceVersion(ctx context.Context, proxy ClusterProxy, namespace string) (map[string]string, error) { - graph, err := clusterctlcluster.GetOwnerGraph(ctx, namespace, proxy.GetKubeconfigPath()) +func getObjectsWithResourceVersion(ctx context.Context, proxy ClusterProxy, namespace string, ownerGraphFilterFunction clusterctlcluster.GetOwnerGraphFilterFunction) (map[string]string, error) { + graph, err := clusterctlcluster.GetOwnerGraph(ctx, namespace, proxy.GetKubeconfigPath(), ownerGraphFilterFunction) if err != nil { return nil, err }
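
Illustrative usage note (not part of the patch): the sketch below shows how the new GetOwnerGraphFilterFunction hook and the updated validator signatures introduced above could be consumed from a provider's E2E suite. This is a minimal sketch, assuming the cluster-api test framework and clusterctl client packages are imported as in this repository; filterByClusterName and validateStableResourceVersions are hypothetical names used only for illustration, while GetOwnerGraphFilterByClusterNameFunc is the helper actually added by this change.

package e2e

import (
	"context"
	"strings"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

	clusterctlcluster "sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster"
	"sigs.k8s.io/cluster-api/test/framework"
)

// filterByClusterName is a hypothetical provider-side filter: it keeps every namespaced
// object and drops cluster-wide objects whose name does not contain the cluster name,
// mirroring what GetOwnerGraphFilterByClusterNameFunc does in the framework. This keeps
// objects created by other, in-parallel tests (e.g. ExtensionConfig) out of the assertions.
func filterByClusterName(clusterName string) clusterctlcluster.GetOwnerGraphFilterFunction {
	return func(u unstructured.Unstructured) bool {
		if u.GetNamespace() == "" && !strings.Contains(u.GetName(), clusterName) {
			return false
		}
		return true
	}
}

// validateStableResourceVersions shows how a filter is threaded through the new
// GetOwnerGraph signature and through one of the updated framework validators.
func validateStableResourceVersions(ctx context.Context, proxy framework.ClusterProxy, namespace, clusterName string) {
	// Either build the owner graph directly with a custom filter ...
	_, _ = clusterctlcluster.GetOwnerGraph(ctx, namespace, proxy.GetKubeconfigPath(), filterByClusterName(clusterName))

	// ... or let the framework helpers do it, as the quick-start and RuntimeSDK specs now do.
	framework.ValidateResourceVersionStable(ctx, proxy, namespace, framework.GetOwnerGraphFilterByClusterNameFunc(clusterName))
}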