[release-1.7] 🐛 test: filter cluster-wide objects asserted in ResourceVersion tests to exclude objects of parallel tests (#10570)

* test: filter cluster-wide objects asserted in ResourceVersion tests to exclude objects of parallel tests

* review fixes

* review fixes

---------

Co-authored-by: Christian Schlotter <[email protected]>
k8s-infra-cherrypick-robot and chrischdi authored May 7, 2024
1 parent c4fc57b commit 512dc36
Showing 7 changed files with 85 additions and 39 deletions.
34 changes: 31 additions & 3 deletions cmd/clusterctl/client/cluster/ownergraph.go
@@ -39,11 +39,35 @@ type OwnerGraphNode struct {
Owners []metav1.OwnerReference
}

+ // GetOwnerGraphFilterFunction allows filtering the objects returned by GetOwnerGraph.
+ // The function has to return true for objects which should be kept.
+ // NOTE: this function signature is exposed to allow implementation of E2E tests; there is
+ // no guarantee about the stability of this API.
+ type GetOwnerGraphFilterFunction func(u unstructured.Unstructured) bool
+
+ // FilterClusterObjectsWithNameFilter is used in e2e tests where the owner graph
+ // gets queried to filter out cluster-wide objects which don't contain the filter
+ // string s in their object name. This avoids assertions on objects which belong
+ // to tests running in parallel (e.g. ExtensionConfig).
+ // NOTE: this function signature is exposed to allow implementation of E2E tests; there is
+ // no guarantee about the stability of this API.
+ func FilterClusterObjectsWithNameFilter(s string) func(u unstructured.Unstructured) bool {
+ return func(u unstructured.Unstructured) bool {
+ // Ignore cluster-wide objects which don't have the clusterName in their object
+ // name to avoid asserting on cluster-wide objects which get created or deleted
+ // by tests which run in parallel (e.g. ExtensionConfig).
+ if u.GetNamespace() == "" && !strings.Contains(u.GetName(), s) {
+ return false
+ }
+ return true
+ }
+ }

// GetOwnerGraph returns a graph with all the objects considered by clusterctl move as nodes and the OwnerReference relationship between those objects as edges.
// NOTE: this data structure is exposed to allow implementation of E2E tests verifying that CAPI can properly rebuild its
// own owner references; there is no guarantee about the stability of this API. Using this test with providers may require
// a custom implementation of this function, or the OwnerGraph it returns.
- func GetOwnerGraph(ctx context.Context, namespace, kubeconfigPath string) (OwnerGraph, error) {
+ func GetOwnerGraph(ctx context.Context, namespace, kubeconfigPath string, filterFn GetOwnerGraphFilterFunction) (OwnerGraph, error) {
p := newProxy(Kubeconfig{Path: kubeconfigPath, Context: ""})
invClient := newInventoryClient(p, nil)

@@ -57,14 +81,14 @@ func GetOwnerGraph(ctx context.Context, namespace, kubeconfigPath string) (Owner

// graph.Discovery can not be used here as it will use the latest APIVersion for ownerReferences - not those
// present in the object `metadata.ownerReferences`.
- owners, err := discoverOwnerGraph(ctx, namespace, graph)
+ owners, err := discoverOwnerGraph(ctx, namespace, graph, filterFn)
if err != nil {
return OwnerGraph{}, errors.Wrap(err, "failed to discover ownerGraph types")
}
return owners, nil
}

- func discoverOwnerGraph(ctx context.Context, namespace string, o *objectGraph) (OwnerGraph, error) {
+ func discoverOwnerGraph(ctx context.Context, namespace string, o *objectGraph, filterFn GetOwnerGraphFilterFunction) (OwnerGraph, error) {
selectors := []client.ListOption{}
if namespace != "" {
selectors = append(selectors, client.InNamespace(namespace))
@@ -102,6 +126,10 @@ func discoverOwnerGraph(ctx context.Context, namespace string, o *objectGraph) (
}
}
for _, obj := range objList.Items {
+ // Exclude the objects via the filter function.
+ if filterFn != nil && !filterFn(obj) {
+ continue
+ }
// Exclude the kube-root-ca.crt ConfigMap from the owner graph.
if obj.GetKind() == "ConfigMap" && obj.GetName() == "kube-root-ca.crt" {
continue
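For illustration, a minimal sketch of the new API in use. The cluster name and kubeconfig path are placeholders; the snippet relies only on the signatures shown in this diff (plus `OwnerGraphNode.Object` being a `corev1.ObjectReference`):

```go
package main

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

	clusterctlcluster "sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster"
)

func main() {
	ctx := context.Background()

	// Cluster-wide objects pass only if their name contains the filter string;
	// namespaced objects always pass.
	filter := clusterctlcluster.FilterClusterObjectsWithNameFilter("quick-start-abc123")

	ext := unstructured.Unstructured{}
	ext.SetKind("ExtensionConfig")
	ext.SetName("other-test-extension") // cluster-wide: no namespace set
	fmt.Println(filter(ext))            // false: treated as a parallel test's object

	// "" means all namespaces; the kubeconfig path is a placeholder.
	graph, err := clusterctlcluster.GetOwnerGraph(ctx, "", "/tmp/mgmt.kubeconfig", filter)
	if err != nil {
		panic(err)
	}
	for _, node := range graph {
		fmt.Printf("%s %s has %d owner(s)\n", node.Object.Kind, node.Object.Name, len(node.Owners))
	}
}
```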
9 changes: 9 additions & 0 deletions test/e2e/cluster_upgrade_runtimesdk.go
@@ -85,6 +85,10 @@ type clusterUpgradeWithRuntimeSDKSpecInput struct {
// Allows injecting a function to be run after the test namespace is created.
// If not specified, this is a no-op.
PostNamespaceCreated func(managementClusterProxy framework.ClusterProxy, workloadClusterNamespace string)

+ // Allows injecting a function to be run after the cluster is upgraded.
+ // If not specified, this is a no-op.
+ PostUpgrade func(managementClusterProxy framework.ClusterProxy, workloadClusterNamespace, workloadClusterName string)
}

// clusterUpgradeWithRuntimeSDKSpec implements a spec that upgrades a cluster and runs the Kubernetes conformance suite.
@@ -260,6 +264,11 @@ func clusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() cl
WaitForNodesReady: input.E2EConfig.GetIntervals(specName, "wait-nodes-ready"),
})

+ if input.PostUpgrade != nil {
+ log.Logf("Calling PostUpgrade for cluster %s", klog.KRef(namespace.Name, clusterResources.Cluster.Name))
+ input.PostUpgrade(input.BootstrapClusterProxy, namespace.Name, clusterResources.Cluster.Name)
+ }

By("Dumping resources and deleting the workload cluster; deletion waits for BeforeClusterDeleteHook to gate the operation")
dumpAndDeleteCluster(ctx, input.BootstrapClusterProxy, namespace.Name, clusterName, input.ArtifactFolder)

8 changes: 8 additions & 0 deletions test/e2e/cluster_upgrade_runtimesdk_test.go
@@ -24,6 +24,9 @@ import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"k8s.io/utils/ptr"

+ clusterctlcluster "sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster"
+ "sigs.k8s.io/cluster-api/test/framework"
)

var _ = Describe("When upgrading a workload cluster using ClusterClass with RuntimeSDK [ClusterClass]", func() {
@@ -41,6 +44,11 @@ var _ = Describe("When upgrading a workload cluster using ClusterClass with Runt
ArtifactFolder: artifactFolder,
SkipCleanup: skipCleanup,
InfrastructureProvider: ptr.To("docker"),
+ PostUpgrade: func(proxy framework.ClusterProxy, namespace, clusterName string) {
+ // This check ensures that the resourceVersions are stable, i.e. it verifies there are no
+ // continuous reconciles when everything should be stable.
+ framework.ValidateResourceVersionStable(ctx, proxy, namespace, clusterctlcluster.FilterClusterObjectsWithNameFilter(clusterName))
+ },
// "upgrades" is the same as the "topology" flavor but with an additional MachinePool.
Flavor: ptr.To("upgrades-runtimesdk"),
}
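Conceptually, the stability check used in PostUpgrade above compares resourceVersions across the filtered owner graph at two points in time; if any differ, something is still reconciling. A rough sketch of the idea, not the framework's actual implementation (it assumes the graph's ObjectReferences carry a populated resourceVersion, and the settle duration is an arbitrary choice):

```go
package e2esketches

import (
	"context"
	"fmt"
	"reflect"
	"time"

	clusterctlcluster "sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster"
)

// resourceVersionsStable snapshots resourceVersions twice and compares them.
// A production check would poll repeatedly (e.g. with Gomega's Consistently)
// rather than taking a single pair of samples.
func resourceVersionsStable(ctx context.Context, namespace, kubeconfigPath string,
	filter clusterctlcluster.GetOwnerGraphFilterFunction) error {
	snapshot := func() (map[string]string, error) {
		graph, err := clusterctlcluster.GetOwnerGraph(ctx, namespace, kubeconfigPath, filter)
		if err != nil {
			return nil, err
		}
		rvs := make(map[string]string, len(graph))
		for uid, node := range graph {
			rvs[uid] = node.Object.ResourceVersion
		}
		return rvs, nil
	}

	before, err := snapshot()
	if err != nil {
		return err
	}
	time.Sleep(30 * time.Second) // settle period; assumed duration

	after, err := snapshot()
	if err != nil {
		return err
	}
	if !reflect.DeepEqual(before, after) {
		return fmt.Errorf("resourceVersions changed: objects are still being reconciled")
	}
	return nil
}
```

Filtering the graph by cluster name is what makes this check safe to run while other tests create or delete cluster-wide objects in parallel.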
17 changes: 9 additions & 8 deletions test/e2e/quick_start_test.go
@@ -24,6 +24,7 @@ import (
. "github.com/onsi/gomega"
"k8s.io/utils/ptr"

+ clusterctlcluster "sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster"
"sigs.k8s.io/cluster-api/test/framework"
"sigs.k8s.io/cluster-api/test/framework/kubetest"
)
@@ -39,7 +40,7 @@ var _ = Describe("When following the Cluster API quick-start", func() {
InfrastructureProvider: ptr.To("docker"),
PostMachinesProvisioned: func(proxy framework.ClusterProxy, namespace, clusterName string) {
// This check ensures that owner references are resilient - i.e. correctly re-reconciled - when removed.
- framework.ValidateOwnerReferencesResilience(ctx, proxy, namespace, clusterName,
+ framework.ValidateOwnerReferencesResilience(ctx, proxy, namespace, clusterName, clusterctlcluster.FilterClusterObjectsWithNameFilter(clusterName),
framework.CoreOwnerReferenceAssertion,
framework.ExpOwnerReferenceAssertions,
framework.DockerInfraOwnerReferenceAssertions,
@@ -48,7 +49,7 @@
framework.KubernetesReferenceAssertions,
)
// This check ensures that owner references are correctly updated to the correct apiVersion.
- framework.ValidateOwnerReferencesOnUpdate(ctx, proxy, namespace, clusterName,
+ framework.ValidateOwnerReferencesOnUpdate(ctx, proxy, namespace, clusterName, clusterctlcluster.FilterClusterObjectsWithNameFilter(clusterName),
framework.CoreOwnerReferenceAssertion,
framework.ExpOwnerReferenceAssertions,
framework.DockerInfraOwnerReferenceAssertions,
@@ -57,15 +58,15 @@
framework.KubernetesReferenceAssertions,
)
// This check ensures that finalizers are resilient - i.e. correctly re-reconciled - when removed.
- framework.ValidateFinalizersResilience(ctx, proxy, namespace, clusterName,
+ framework.ValidateFinalizersResilience(ctx, proxy, namespace, clusterName, clusterctlcluster.FilterClusterObjectsWithNameFilter(clusterName),
framework.CoreFinalizersAssertion,
framework.KubeadmControlPlaneFinalizersAssertion,
framework.ExpFinalizersAssertion,
framework.DockerInfraFinalizersAssertion,
)
// This check ensures that the resourceVersions are stable, i.e. it verifies there are no
// continuous reconciles when everything should be stable.
- framework.ValidateResourceVersionStable(ctx, proxy, namespace)
+ framework.ValidateResourceVersionStable(ctx, proxy, namespace, clusterctlcluster.FilterClusterObjectsWithNameFilter(clusterName))
},
}
})
@@ -83,7 +84,7 @@ var _ = Describe("When following the Cluster API quick-start with ClusterClass [
InfrastructureProvider: ptr.To("docker"),
// This check ensures that owner references are resilient - i.e. correctly re-reconciled - when removed.
PostMachinesProvisioned: func(proxy framework.ClusterProxy, namespace, clusterName string) {
- framework.ValidateOwnerReferencesResilience(ctx, proxy, namespace, clusterName,
+ framework.ValidateOwnerReferencesResilience(ctx, proxy, namespace, clusterName, clusterctlcluster.FilterClusterObjectsWithNameFilter(clusterName),
framework.CoreOwnerReferenceAssertion,
framework.ExpOwnerReferenceAssertions,
framework.DockerInfraOwnerReferenceAssertions,
@@ -92,7 +93,7 @@
framework.KubernetesReferenceAssertions,
)
// This check ensures that owner references are correctly updated to the correct apiVersion.
- framework.ValidateOwnerReferencesOnUpdate(ctx, proxy, namespace, clusterName,
+ framework.ValidateOwnerReferencesOnUpdate(ctx, proxy, namespace, clusterName, clusterctlcluster.FilterClusterObjectsWithNameFilter(clusterName),
framework.CoreOwnerReferenceAssertion,
framework.ExpOwnerReferenceAssertions,
framework.DockerInfraOwnerReferenceAssertions,
@@ -101,15 +102,15 @@
framework.KubernetesReferenceAssertions,
)
// This check ensures that finalizers are resilient - i.e. correctly re-reconciled - when removed.
- framework.ValidateFinalizersResilience(ctx, proxy, namespace, clusterName,
+ framework.ValidateFinalizersResilience(ctx, proxy, namespace, clusterName, clusterctlcluster.FilterClusterObjectsWithNameFilter(clusterName),
framework.CoreFinalizersAssertion,
framework.KubeadmControlPlaneFinalizersAssertion,
framework.ExpFinalizersAssertion,
framework.DockerInfraFinalizersAssertion,
)
// This check ensures that the resourceVersions are stable, i.e. it verifies there are no
// continuous reconciles when everything should be stable.
- framework.ValidateResourceVersionStable(ctx, proxy, namespace)
+ framework.ValidateResourceVersionStable(ctx, proxy, namespace, clusterctlcluster.FilterClusterObjectsWithNameFilter(clusterName))
},
}
})
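For reference, the assertion sets passed above (CoreOwnerReferenceAssertion and friends) map a Kind to a check over the ownerReferences found on objects of that Kind. An illustrative sketch of the shape such an assertion can take; the map type, Kind names, and error wording are assumptions, not the framework's exact definitions:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// exampleAssertions is illustrative only: for each Kind, validate the
// ownerReferences that objects of that Kind are expected to carry.
var exampleAssertions = map[string]func(refs []metav1.OwnerReference) error{
	"Machine": func(refs []metav1.OwnerReference) error {
		for _, ref := range refs {
			// A Machine is typically owned by a MachineSet, MachinePool, or control plane.
			if ref.Kind == "MachineSet" || ref.Kind == "MachinePool" || ref.Kind == "KubeadmControlPlane" {
				return nil
			}
		}
		return fmt.Errorf("unexpected ownerReferences on Machine: %v", refs)
	},
}

func main() {
	refs := []metav1.OwnerReference{{APIVersion: "cluster.x-k8s.io/v1beta1", Kind: "MachineSet", Name: "md-0-abc"}}
	fmt.Println(exampleAssertions["Machine"](refs)) // <nil>
}
```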
20 changes: 10 additions & 10 deletions test/framework/finalizers_helpers.go
@@ -64,13 +64,13 @@ var KubeadmControlPlaneFinalizersAssertion = map[string][]string{
}

// ValidateFinalizersResilience checks that expected finalizers are in place, deletes them, and verifies that expected finalizers are properly added again.
- func ValidateFinalizersResilience(ctx context.Context, proxy ClusterProxy, namespace, clusterName string, finalizerAssertions ...map[string][]string) {
+ func ValidateFinalizersResilience(ctx context.Context, proxy ClusterProxy, namespace, clusterName string, ownerGraphFilterFunction clusterctlcluster.GetOwnerGraphFilterFunction, finalizerAssertions ...map[string][]string) {
clusterKey := client.ObjectKey{Namespace: namespace, Name: clusterName}
allFinalizerAssertions, err := concatenateFinalizerAssertions(finalizerAssertions...)
Expect(err).ToNot(HaveOccurred())

// Collect all objects where finalizers were initially set
- objectsWithFinalizers := getObjectsWithFinalizers(ctx, proxy, namespace, allFinalizerAssertions)
+ objectsWithFinalizers := getObjectsWithFinalizers(ctx, proxy, namespace, allFinalizerAssertions, ownerGraphFilterFunction)

// Setting the paused property on the Cluster resource will pause reconciliations, thereby having no effect on Finalizers.
// This also makes debugging easier.
@@ -79,18 +79,18 @@ func ValidateFinalizersResilience(ctx context.Context, proxy ClusterProxy, names
// We are testing the worst-case scenario, i.e. all finalizers are deleted.
// Once all Clusters are paused remove all the Finalizers from all objects in the graph.
// The reconciliation loop should be able to recover from this, by adding the required Finalizers back.
- removeFinalizers(ctx, proxy, namespace)
+ removeFinalizers(ctx, proxy, namespace, ownerGraphFilterFunction)

// Unpause the cluster.
setClusterPause(ctx, proxy.GetClient(), clusterKey, false)

// Check that the Finalizers are as expected after further reconciliations.
- assertFinalizersExist(ctx, proxy, namespace, objectsWithFinalizers, allFinalizerAssertions)
+ assertFinalizersExist(ctx, proxy, namespace, objectsWithFinalizers, allFinalizerAssertions, ownerGraphFilterFunction)
}

// removeFinalizers removes all Finalizers from objects in the owner graph.
- func removeFinalizers(ctx context.Context, proxy ClusterProxy, namespace string) {
- graph, err := clusterctlcluster.GetOwnerGraph(ctx, namespace, proxy.GetKubeconfigPath())
+ func removeFinalizers(ctx context.Context, proxy ClusterProxy, namespace string, ownerGraphFilterFunction clusterctlcluster.GetOwnerGraphFilterFunction) {
+ graph, err := clusterctlcluster.GetOwnerGraph(ctx, namespace, proxy.GetKubeconfigPath(), ownerGraphFilterFunction)
Expect(err).ToNot(HaveOccurred())
for _, object := range graph {
ref := object.Object
@@ -107,8 +107,8 @@ func removeFinalizers(ctx context.Context, proxy ClusterProxy, namespace string)
}
}

- func getObjectsWithFinalizers(ctx context.Context, proxy ClusterProxy, namespace string, allFinalizerAssertions map[string][]string) map[string]*unstructured.Unstructured {
- graph, err := clusterctlcluster.GetOwnerGraph(ctx, namespace, proxy.GetKubeconfigPath())
+ func getObjectsWithFinalizers(ctx context.Context, proxy ClusterProxy, namespace string, allFinalizerAssertions map[string][]string, ownerGraphFilterFunction clusterctlcluster.GetOwnerGraphFilterFunction) map[string]*unstructured.Unstructured {
+ graph, err := clusterctlcluster.GetOwnerGraph(ctx, namespace, proxy.GetKubeconfigPath(), ownerGraphFilterFunction)
Expect(err).ToNot(HaveOccurred())

objsWithFinalizers := map[string]*unstructured.Unstructured{}
@@ -134,10 +134,10 @@ func getObjectsWithFinalizers(ctx context.Context, proxy ClusterProxy, namespace
}

// assertFinalizersExist ensures that current Finalizers match those in the initialObjectsWithFinalizers.
- func assertFinalizersExist(ctx context.Context, proxy ClusterProxy, namespace string, initialObjsWithFinalizers map[string]*unstructured.Unstructured, allFinalizerAssertions map[string][]string) {
+ func assertFinalizersExist(ctx context.Context, proxy ClusterProxy, namespace string, initialObjsWithFinalizers map[string]*unstructured.Unstructured, allFinalizerAssertions map[string][]string, ownerGraphFilterFunction clusterctlcluster.GetOwnerGraphFilterFunction) {
Eventually(func() error {
var allErrs []error
- finalObjsWithFinalizers := getObjectsWithFinalizers(ctx, proxy, namespace, allFinalizerAssertions)
+ finalObjsWithFinalizers := getObjectsWithFinalizers(ctx, proxy, namespace, allFinalizerAssertions, ownerGraphFilterFunction)

for objKindNamespacedName, obj := range initialObjsWithFinalizers {
// verify if finalizers for this resource were set on reconcile
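The elided removeFinalizers body iterates the (filtered) owner graph and clears finalizers on each object while the Cluster is paused. A sketch of that pattern under stated assumptions; stripFinalizers and the direct client.Update call are illustrative, not the framework's exact helpers:

```go
package e2esketches

import (
	"context"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"sigs.k8s.io/controller-runtime/pkg/client"

	clusterctlcluster "sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster"
)

// stripFinalizers clears finalizers on every object in the filtered owner graph.
// It assumes c targets the management cluster and that the Cluster is paused,
// so no controller re-adds finalizers while we update.
func stripFinalizers(ctx context.Context, c client.Client, namespace, kubeconfigPath string,
	filter clusterctlcluster.GetOwnerGraphFilterFunction) error {
	graph, err := clusterctlcluster.GetOwnerGraph(ctx, namespace, kubeconfigPath, filter)
	if err != nil {
		return err
	}
	for _, node := range graph {
		ref := node.Object
		obj := &unstructured.Unstructured{}
		obj.SetAPIVersion(ref.APIVersion)
		obj.SetKind(ref.Kind)
		if err := c.Get(ctx, client.ObjectKey{Namespace: ref.Namespace, Name: ref.Name}, obj); err != nil {
			return err
		}
		if len(obj.GetFinalizers()) == 0 {
			continue
		}
		obj.SetFinalizers(nil)
		if err := c.Update(ctx, obj); err != nil {
			return err
		}
	}
	return nil
}
```

Passing the name filter here is the point of this commit: without it, the worst-case finalizer removal would also touch cluster-wide objects owned by tests running in parallel.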