From 913ba7fcaf0c2002d2cf9db1d7005f4a86cac93d Mon Sep 17 00:00:00 2001
From: Christian Schlotter
Date: Tue, 7 May 2024 12:57:42 +0200
Subject: [PATCH] review fixes

---
 cmd/clusterctl/client/cluster/ownergraph.go | 21 +++++++++++++++++++--
 test/e2e/cluster_upgrade_runtimesdk.go      |  6 +++---
 test/e2e/cluster_upgrade_runtimesdk_test.go |  5 +++--
 test/e2e/quick_start_test.go                | 17 +++++++++--------
 test/framework/ownerreference_helpers.go    | 16 ----------------
 5 files changed, 34 insertions(+), 31 deletions(-)

diff --git a/cmd/clusterctl/client/cluster/ownergraph.go b/cmd/clusterctl/client/cluster/ownergraph.go
index d5f7286eca7b..2049e8743eac 100644
--- a/cmd/clusterctl/client/cluster/ownergraph.go
+++ b/cmd/clusterctl/client/cluster/ownergraph.go
@@ -42,10 +42,27 @@ type OwnerGraphNode struct {
 // GetOwnerGraphFilterFunction allows filtering the objects returned by GetOwnerGraph.
 // The function has to return true for objects which should be kept.
 // NOTE: this function signature is exposed to allow implementation of E2E tests; there is
-// no guarantee about the stability of this API. Using this test with providers may require
-// a custom implementation of this function, or the OwnerGraph it returns.
+// no guarantee about the stability of this API.
 type GetOwnerGraphFilterFunction func(u unstructured.Unstructured) bool
 
+// FilterClusterObjectsWithNameFilter is used in e2e tests where the owner graph
+// gets queried to filter out cluster-wide objects which don't have the given name
+// filter (s) in their object name. This avoids assertions on objects which belong
+// to tests running in parallel, like ExtensionConfig.
+// NOTE: this function signature is exposed to allow implementation of E2E tests; there is
+// no guarantee about the stability of this API.
+func FilterClusterObjectsWithNameFilter(s string) func(u unstructured.Unstructured) bool {
+	return func(u unstructured.Unstructured) bool {
+		// Ignore cluster-wide objects which don't have the clusterName in their object
+		// name to avoid asserting on cluster-wide objects which get created or deleted
+		// by tests which run in-parallel (e.g. ExtensionConfig).
+		if u.GetNamespace() == "" && !strings.Contains(u.GetName(), s) {
+			return false
+		}
+		return true
+	}
+}
+
 // GetOwnerGraph returns a graph with all the objects considered by clusterctl move as nodes and the OwnerReference relationship between those objects as edges.
 // NOTE: this data structure is exposed to allow implementation of E2E tests verifying that CAPI can properly rebuild its
 // own owner references; there is no guarantee about the stability of this API. Using this test with providers may require
diff --git a/test/e2e/cluster_upgrade_runtimesdk.go b/test/e2e/cluster_upgrade_runtimesdk.go
index 19c41ad797a6..a113cd1f6ec5 100644
--- a/test/e2e/cluster_upgrade_runtimesdk.go
+++ b/test/e2e/cluster_upgrade_runtimesdk.go
@@ -88,7 +88,7 @@ type clusterUpgradeWithRuntimeSDKSpecInput struct {
 
 	// Allows to inject a function to be run after the cluster is upgraded.
 	// If not specified, this is a no-op.
-	PostMachinesProvisioned func(managementClusterProxy framework.ClusterProxy, workloadClusterNamespace, workloadClusterName string)
+	PostUpgrade func(managementClusterProxy framework.ClusterProxy, workloadClusterNamespace, workloadClusterName string)
 }
 
 // clusterUpgradeWithRuntimeSDKSpec implements a spec that upgrades a cluster and runs the Kubernetes conformance suite.
@@ -264,9 +264,9 @@ func clusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() cl
 			WaitForNodesReady:        input.E2EConfig.GetIntervals(specName, "wait-nodes-ready"),
 		})
 
-		if input.PostMachinesProvisioned != nil {
+		if input.PostUpgrade != nil {
 			log.Logf("Calling PostMachinesProvisioned for cluster %s", klog.KRef(namespace.Name, clusterResources.Cluster.Name))
-			input.PostMachinesProvisioned(input.BootstrapClusterProxy, namespace.Name, clusterName)
+			input.PostUpgrade(input.BootstrapClusterProxy, namespace.Name, clusterResources.Cluster.Name)
 		}
 
 		By("Dumping resources and deleting the workload cluster; deletion waits for BeforeClusterDeleteHook to gate the operation")
diff --git a/test/e2e/cluster_upgrade_runtimesdk_test.go b/test/e2e/cluster_upgrade_runtimesdk_test.go
index 0eaaf939e612..c5c27ff92337 100644
--- a/test/e2e/cluster_upgrade_runtimesdk_test.go
+++ b/test/e2e/cluster_upgrade_runtimesdk_test.go
@@ -25,6 +25,7 @@ import (
 	. "github.com/onsi/gomega"
 	"k8s.io/utils/ptr"
 
+	clusterctlcluster "sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster"
 	"sigs.k8s.io/cluster-api/test/framework"
 )
 
@@ -43,10 +44,10 @@ var _ = Describe("When upgrading a workload cluster using ClusterClass with Runt
 			ArtifactFolder:         artifactFolder,
 			SkipCleanup:            skipCleanup,
 			InfrastructureProvider: ptr.To("docker"),
-			PostMachinesProvisioned: func(proxy framework.ClusterProxy, namespace, clusterName string) {
+			PostUpgrade: func(proxy framework.ClusterProxy, namespace, clusterName string) {
 				// This check ensures that the resourceVersions are stable, i.e. it verifies there are no
 				// continuous reconciles when everything should be stable.
-				framework.ValidateResourceVersionStable(ctx, proxy, namespace, framework.SkipClusterObjectsWithoutNameContains(clusterName))
+				framework.ValidateResourceVersionStable(ctx, proxy, namespace, clusterctlcluster.FilterClusterObjectsWithNameFilter(clusterName))
 			},
 			// "upgrades" is the same as the "topology" flavor but with an additional MachinePool.
 			Flavor: ptr.To("upgrades-runtimesdk"),
diff --git a/test/e2e/quick_start_test.go b/test/e2e/quick_start_test.go
index 423dc8e53a86..5d918afd56c9 100644
--- a/test/e2e/quick_start_test.go
+++ b/test/e2e/quick_start_test.go
@@ -24,6 +24,7 @@ import (
 	. "github.com/onsi/gomega"
 	"k8s.io/utils/ptr"
 
+	clusterctlcluster "sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster"
 	"sigs.k8s.io/cluster-api/test/framework"
 	"sigs.k8s.io/cluster-api/test/framework/kubetest"
 )
@@ -39,7 +40,7 @@ var _ = Describe("When following the Cluster API quick-start", func() {
 			InfrastructureProvider: ptr.To("docker"),
 			PostMachinesProvisioned: func(proxy framework.ClusterProxy, namespace, clusterName string) {
 				// This check ensures that owner references are resilient - i.e. correctly re-reconciled - when removed.
-				framework.ValidateOwnerReferencesResilience(ctx, proxy, namespace, clusterName, framework.SkipClusterObjectsWithoutNameContains(clusterName),
+				framework.ValidateOwnerReferencesResilience(ctx, proxy, namespace, clusterName, clusterctlcluster.FilterClusterObjectsWithNameFilter(clusterName),
 					framework.CoreOwnerReferenceAssertion,
 					framework.ExpOwnerReferenceAssertions,
 					framework.DockerInfraOwnerReferenceAssertions,
@@ -48,7 +49,7 @@ var _ = Describe("When following the Cluster API quick-start", func() {
 					framework.KubernetesReferenceAssertions,
 				)
 				// This check ensures that owner references are correctly updated to the correct apiVersion.
-				framework.ValidateOwnerReferencesOnUpdate(ctx, proxy, namespace, clusterName, framework.SkipClusterObjectsWithoutNameContains(clusterName),
+				framework.ValidateOwnerReferencesOnUpdate(ctx, proxy, namespace, clusterName, clusterctlcluster.FilterClusterObjectsWithNameFilter(clusterName),
 					framework.CoreOwnerReferenceAssertion,
 					framework.ExpOwnerReferenceAssertions,
 					framework.DockerInfraOwnerReferenceAssertions,
@@ -57,7 +58,7 @@ var _ = Describe("When following the Cluster API quick-start", func() {
 					framework.KubernetesReferenceAssertions,
 				)
 				// This check ensures that finalizers are resilient - i.e. correctly re-reconciled - when removed.
-				framework.ValidateFinalizersResilience(ctx, proxy, namespace, clusterName, framework.SkipClusterObjectsWithoutNameContains(clusterName),
+				framework.ValidateFinalizersResilience(ctx, proxy, namespace, clusterName, clusterctlcluster.FilterClusterObjectsWithNameFilter(clusterName),
 					framework.CoreFinalizersAssertion,
 					framework.KubeadmControlPlaneFinalizersAssertion,
 					framework.ExpFinalizersAssertion,
@@ -65,7 +66,7 @@ var _ = Describe("When following the Cluster API quick-start", func() {
 				)
 				// This check ensures that the resourceVersions are stable, i.e. it verifies there are no
 				// continuous reconciles when everything should be stable.
-				framework.ValidateResourceVersionStable(ctx, proxy, namespace, framework.SkipClusterObjectsWithoutNameContains(clusterName))
+				framework.ValidateResourceVersionStable(ctx, proxy, namespace, clusterctlcluster.FilterClusterObjectsWithNameFilter(clusterName))
 			},
 		}
 	})
@@ -83,7 +84,7 @@ var _ = Describe("When following the Cluster API quick-start with ClusterClass [
 			InfrastructureProvider: ptr.To("docker"),
 			// This check ensures that owner references are resilient - i.e. correctly re-reconciled - when removed.
 			PostMachinesProvisioned: func(proxy framework.ClusterProxy, namespace, clusterName string) {
-				framework.ValidateOwnerReferencesResilience(ctx, proxy, namespace, clusterName, framework.SkipClusterObjectsWithoutNameContains(clusterName),
+				framework.ValidateOwnerReferencesResilience(ctx, proxy, namespace, clusterName, clusterctlcluster.FilterClusterObjectsWithNameFilter(clusterName),
 					framework.CoreOwnerReferenceAssertion,
 					framework.ExpOwnerReferenceAssertions,
 					framework.DockerInfraOwnerReferenceAssertions,
@@ -92,7 +93,7 @@ var _ = Describe("When following the Cluster API quick-start with ClusterClass [
 					framework.KubernetesReferenceAssertions,
 				)
 				// This check ensures that owner references are correctly updated to the correct apiVersion.
-				framework.ValidateOwnerReferencesOnUpdate(ctx, proxy, namespace, clusterName, framework.SkipClusterObjectsWithoutNameContains(clusterName),
+				framework.ValidateOwnerReferencesOnUpdate(ctx, proxy, namespace, clusterName, clusterctlcluster.FilterClusterObjectsWithNameFilter(clusterName),
 					framework.CoreOwnerReferenceAssertion,
 					framework.ExpOwnerReferenceAssertions,
 					framework.DockerInfraOwnerReferenceAssertions,
@@ -101,7 +102,7 @@ var _ = Describe("When following the Cluster API quick-start with ClusterClass [
 					framework.KubernetesReferenceAssertions,
 				)
 				// This check ensures that finalizers are resilient - i.e. correctly re-reconciled - when removed.
-				framework.ValidateFinalizersResilience(ctx, proxy, namespace, clusterName, framework.SkipClusterObjectsWithoutNameContains(clusterName),
+				framework.ValidateFinalizersResilience(ctx, proxy, namespace, clusterName, clusterctlcluster.FilterClusterObjectsWithNameFilter(clusterName),
 					framework.CoreFinalizersAssertion,
 					framework.KubeadmControlPlaneFinalizersAssertion,
 					framework.ExpFinalizersAssertion,
@@ -109,7 +110,7 @@ var _ = Describe("When following the Cluster API quick-start with ClusterClass [
 				)
 				// This check ensures that the resourceVersions are stable, i.e. it verifies there are no
 				// continuous reconciles when everything should be stable.
-				framework.ValidateResourceVersionStable(ctx, proxy, namespace, framework.SkipClusterObjectsWithoutNameContains(clusterName))
+				framework.ValidateResourceVersionStable(ctx, proxy, namespace, clusterctlcluster.FilterClusterObjectsWithNameFilter(clusterName))
 			},
 		}
 	})
diff --git a/test/framework/ownerreference_helpers.go b/test/framework/ownerreference_helpers.go
index 68772e2306e4..9fb0c277b443 100644
--- a/test/framework/ownerreference_helpers.go
+++ b/test/framework/ownerreference_helpers.go
@@ -382,22 +382,6 @@ func setClusterPause(ctx context.Context, cli client.Client, clusterKey types.Na
 	Expect(cli.Patch(ctx, cluster, pausePatch)).To(Succeed())
 }
 
-// SkipClusterObjectsWithoutNameContains is used in e2e tests where the owner graph
-// gets queried to filter out cluster-wide objects which don't have the clusterName
-// in their object name. This avoids assertions on objects which are part of in-parallel
-// running tests like ExtensionConfig.
-func SkipClusterObjectsWithoutNameContains(clusterName string) func(u unstructured.Unstructured) bool {
-	return func(u unstructured.Unstructured) bool {
-		// Ignore cluster-wide objects which don't have the clusterName in their object
-		// name to avoid asserting on cluster-wide objects which get created or deleted
-		// by tests which run in-parallel (e.g. ExtensionConfig).
-		if u.GetNamespace() == "" && !strings.Contains(u.GetName(), clusterName) {
-			return false
-		}
-		return true
-	}
-}
-
 // forceClusterClassReconcile force reconciliation of the ClusterClass associated with the Cluster if one exists. If the
 // Cluster has no ClusterClass this is a no-op.
 func forceClusterClassReconcile(ctx context.Context, cli client.Client, clusterKey types.NamespacedName) {
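
Note for reviewers (not part of the patch): a minimal standalone sketch of how the moved filter behaves. The cluster name, namespace, and object names below are made up for illustration only.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

	clusterctlcluster "sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster"
)

func main() {
	// Keep every namespaced object, plus cluster-scoped objects whose name contains
	// the given string (here a hypothetical e2e cluster name).
	filter := clusterctlcluster.FilterClusterObjectsWithNameFilter("quick-start-abc123")

	// Cluster-scoped object created by an unrelated, parallel test: filtered out.
	ext := unstructured.Unstructured{}
	ext.SetKind("ExtensionConfig")
	ext.SetName("some-other-test-extension")
	fmt.Println(filter(ext)) // false

	// Namespaced object belonging to the cluster under test: kept.
	machine := unstructured.Unstructured{}
	machine.SetKind("Machine")
	machine.SetNamespace("quick-start-ns")
	machine.SetName("quick-start-abc123-md-0-abcde")
	fmt.Println(filter(machine)) // true
}

In the e2e specs above, the same filter is passed to framework.ValidateResourceVersionStable and the owner-reference/finalizer validators from the PostMachinesProvisioned hook (renamed to PostUpgrade in the Runtime SDK spec).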