diff --git a/cmd/clusterctl/client/cluster/mover.go b/cmd/clusterctl/client/cluster/mover.go
index 6f3283182f8f..a59d0e8eccad 100644
--- a/cmd/clusterctl/client/cluster/mover.go
+++ b/cmd/clusterctl/client/cluster/mover.go
@@ -27,6 +27,7 @@ import (
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
 	kerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/apimachinery/pkg/util/sets"
@@ -332,11 +333,9 @@ func (o *objectMover) move(graph *objectGraph, toProxy Proxy, mutators ...Resour
 		return errors.Wrap(err, "error pausing ClusterClasses")
 	}
 
-	// Ensure all the expected target namespaces are in place before creating objects.
-	log.V(1).Info("Creating target namespaces, if missing")
-	if err := o.ensureNamespaces(graph, toProxy); err != nil {
-		return err
-	}
+	// Nb. DO NOT call ensureNamespaces at this point because:
+	// - namespace will be ensured to exist before creating the resource.
+	// - If it's done here, we might create a namespace that can end up unused on target cluster (due to mutators).
 
 	// Define the move sequence by processing the ownerReference chain, so we ensure that a Kubernetes object is moved only after its owners.
 	// The sequence is bases on object graph nodes, each one representing a Kubernetes object; nodes are grouped, so bulk of nodes can be moved in parallel. e.g.
@@ -353,6 +352,10 @@ func (o *objectMover) move(graph *objectGraph, toProxy Proxy, mutators ...Resour
 		}
 	}
 
+	// Nb. mutators used after this point (after creating the resources on target clusters) are mainly intended for
+	// using the right namespace to fetch the resource from the target cluster.
+	// mutators affecting non-metadata fields are no-op after this point.
+
 	// Delete all objects group by group in reverse order.
 	log.Info("Deleting objects from the source cluster")
 	for groupIndex := len(moveSequence.groups) - 1; groupIndex >= 0; groupIndex-- {
@@ -599,17 +602,20 @@ func patchCluster(proxy Proxy, n *node, patch client.Patch, mutators ...Resource
 		return err
 	}
 
-	// Get the ClusterClass from the server
-	clusterObj := &unstructured.Unstructured{}
-	clusterObj.SetAPIVersion(clusterv1.GroupVersion.String())
-	clusterObj.SetKind(clusterv1.ClusterKind)
-	clusterObj.SetName(n.identity.Name)
-	clusterObj.SetNamespace(n.identity.Namespace)
-	for _, mutator := range mutators {
-		if err = mutator(clusterObj); err != nil {
-			return errors.Wrapf(err, "error applying resource mutator to %q %s/%s",
-				clusterObj.GroupVersionKind(), clusterObj.GetNamespace(), clusterObj.GetName())
-		}
+	// Since the patch has been generated already in caller of this function, the ONLY effect that mutators can have
+	// here is on namespace of the resource.
+	clusterObj, err := applyMutators(&clusterv1.Cluster{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       clusterv1.ClusterKind,
+			APIVersion: clusterv1.GroupVersion.String(),
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      n.identity.Name,
+			Namespace: n.identity.Namespace,
+		},
+	}, mutators...)
+	if err != nil {
+		return err
 	}
 
 	if err := cFrom.Get(ctx, client.ObjectKeyFromObject(clusterObj), clusterObj); err != nil {
@@ -631,20 +637,20 @@ func pauseClusterClass(proxy Proxy, n *node, pause bool, mutators ...ResourceMut
 		return errors.Wrap(err, "error creating client")
 	}
 
-	// Get the ClusterClass from the server
-	clusterClass := &unstructured.Unstructured{}
-	clusterClass.SetAPIVersion(clusterv1.GroupVersion.String())
-	clusterClass.SetKind(clusterv1.ClusterClassKind)
-	clusterClass.SetName(n.identity.Name)
-	clusterClass.SetNamespace(n.identity.Namespace)
-	for _, mutator := range mutators {
-		if err = mutator(clusterClass); err != nil {
-			return errors.Wrapf(err, "error applying resource mutator to %q %s/%s",
-				clusterClass.GroupVersionKind(), clusterClass.GetNamespace(), clusterClass.GetName())
-		}
-	}
-	if err := cFrom.Get(ctx, client.ObjectKeyFromObject(clusterClass), clusterClass); err != nil {
-		return errors.Wrapf(err, "error reading ClusterClass %s/%s", n.identity.Namespace, n.identity.Name)
+	// Since the patch has been generated already in caller of this function, the ONLY effect that mutators can have
+	// here is on namespace of the resource.
+	clusterClass, err := applyMutators(&clusterv1.ClusterClass{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       clusterv1.ClusterClassKind,
+			APIVersion: clusterv1.GroupVersion.String(),
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      n.identity.Name,
+			Namespace: n.identity.Namespace,
+		},
+	}, mutators...)
+	if err != nil {
+		return err
 	}
 
 	patchHelper, err := patch.NewHelper(clusterClass, cFrom)
@@ -892,17 +898,16 @@ func (o *objectMover) createTargetObject(nodeToCreate *node, toProxy Proxy, muta
 		return err
 	}
 
-	for _, mutator := range mutators {
-		if err = mutator(obj); err != nil {
-			return errors.Wrapf(err, "error applying resource mutator to %q %s/%s",
-				obj.GroupVersionKind(), obj.GetNamespace(), obj.GetName())
-		}
+	obj, err = applyMutators(obj, mutators...)
+	if err != nil {
+		return err
 	}
 	// Applying mutators MAY change the namespace, so ensure the namespace exists before creating the resource.
 	if !nodeToCreate.isGlobal && !existingNamespaces.Has(obj.GetNamespace()) {
 		if err = o.ensureNamespace(toProxy, obj.GetNamespace()); err != nil {
 			return err
 		}
+		existingNamespaces.Insert(obj.GetNamespace())
 	}
 	oldManagedFields := obj.GetManagedFields()
 	if err := cTo.Create(ctx, obj); err != nil {
@@ -1224,3 +1229,22 @@ func patchTopologyManagedFields(ctx context.Context, oldManagedFields []metav1.M
 	}
 	return nil
 }
+
+func applyMutators(object client.Object, mutators ...ResourceMutatorFunc) (*unstructured.Unstructured, error) {
+	if object == nil {
+		return nil, nil
+	}
+	u := &unstructured.Unstructured{}
+	to, err := runtime.DefaultUnstructuredConverter.ToUnstructured(object)
+	if err != nil {
+		return nil, err
+	}
+	u.SetUnstructuredContent(to)
+	for _, mutator := range mutators {
+		if err := mutator(u); err != nil {
+			return nil, errors.Wrapf(err, "error applying resource mutator to %q %s/%s",
+				u.GroupVersionKind(), object.GetNamespace(), object.GetName())
+		}
+	}
+	return u, nil
+}
diff --git a/cmd/clusterctl/client/cluster/mover_test.go b/cmd/clusterctl/client/cluster/mover_test.go
index b45279d6cae0..ade86d12733c 100644
--- a/cmd/clusterctl/client/cluster/mover_test.go
+++ b/cmd/clusterctl/client/cluster/mover_test.go
@@ -18,7 +18,6 @@ package cluster
 
 import (
 	"fmt"
-	"math/rand"
 	"os"
 	"path/filepath"
 	"strings"
@@ -30,6 +29,7 @@ import (
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/utils/pointer"
 
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -50,6 +50,32 @@ var moveTests = []struct {
 	wantMoveGroups [][]string
 	wantErr        bool
 }{
+	{
+		name: "Cluster with ClusterClass",
+		fields: moveTestsFields{
+			objs: func() []client.Object {
+				objs := test.NewFakeClusterClass("ns1", "class1").Objs()
+				objs = append(objs, test.NewFakeCluster("ns1", "foo").WithTopologyClass("class1").Objs()...)
+				return deduplicateObjects(objs)
+			}(),
+		},
+		wantMoveGroups: [][]string{
+			{ // group 1
+				"cluster.x-k8s.io/v1beta1, Kind=ClusterClass, ns1/class1",
+			},
+			{ // group 2
+				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureClusterTemplate, ns1/class1",
+				"controlplane.cluster.x-k8s.io/v1beta1, Kind=GenericControlPlaneTemplate, ns1/class1",
+				"cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/foo",
+			},
+			{ // group 3
+				"/v1, Kind=Secret, ns1/foo-ca",
+				"/v1, Kind=Secret, ns1/foo-kubeconfig",
+				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureCluster, ns1/foo",
+			},
+		},
+		wantErr: false,
+	},
 	{
 		name: "Cluster",
 		fields: moveTestsFields{
@@ -1163,9 +1189,82 @@ func Test_objectMover_move_dryRun(t *testing.T) {
 }
 
 func Test_objectMover_move(t *testing.T) {
+	// NB. we are testing the move and move sequence using the same set of moveTests, but checking the results at different stages of the move process
+	for _, tt := range moveTests {
+		t.Run(tt.name, func(t *testing.T) {
+			g := NewWithT(t)
+
+			// Create an objectGraph bound a source cluster with all the CRDs for the types involved in the test.
+			graph := getObjectGraphWithObjs(tt.fields.objs)
+
+			// Get all the types to be considered for discovery
+			g.Expect(getFakeDiscoveryTypes(graph)).To(Succeed())
+
+			// trigger discovery the content of the source cluster
+			g.Expect(graph.Discovery("")).To(Succeed())
+
+			// gets a fakeProxy to an empty cluster with all the required CRDs
+			toProxy := getFakeProxyWithCRDs()
+
+			// Run move
+			mover := objectMover{
+				fromProxy: graph.proxy,
+			}
+			err := mover.move(graph, toProxy)
+
+			if tt.wantErr {
+				g.Expect(err).To(HaveOccurred())
+				return
+			}
+
+			g.Expect(err).NotTo(HaveOccurred())
+
+			// check that the objects are removed from the source cluster and are created in the target cluster
+			csFrom, err := graph.proxy.NewClient()
+			g.Expect(err).NotTo(HaveOccurred())
+
+			csTo, err := toProxy.NewClient()
+			g.Expect(err).NotTo(HaveOccurred())
+
+			for _, node := range graph.uidToNode {
+				key := client.ObjectKey{
+					Namespace: node.identity.Namespace,
+					Name:      node.identity.Name,
+				}
+
+				// objects are deleted from the source cluster
+				oFrom := &unstructured.Unstructured{}
+				oFrom.SetAPIVersion(node.identity.APIVersion)
+				oFrom.SetKind(node.identity.Kind)
+
+				err := csFrom.Get(ctx, key, oFrom)
+				if err == nil {
+					if !node.isGlobal && !node.isGlobalHierarchy {
+						t.Errorf("%v not deleted in source cluster", key)
+						continue
+					}
+				} else if !apierrors.IsNotFound(err) {
+					t.Errorf("error = %v when checking for %v deleted in source cluster", err, key)
+					continue
+				}
+
+				// objects are created in the target cluster
+				oTo := &unstructured.Unstructured{}
+				oTo.SetAPIVersion(node.identity.APIVersion)
+				oTo.SetKind(node.identity.Kind)
+
+				if err := csTo.Get(ctx, key, oTo); err != nil {
+					t.Errorf("error = %v when checking for %v created in target cluster", err, key)
+					continue
+				}
+			}
+		})
+	}
+}
+
+func Test_objectMover_move_with_Mutator(t *testing.T) {
 	// NB. we are testing the move and move sequence using the same set of moveTests, but checking the results at different stages of the move process
 	// we use same mutator function for all tests and validate outcome based on input.
-	randSource := rand.NewSource(time.Now().Unix())
 	for _, tt := range moveTests {
 		t.Run(tt.name, func(t *testing.T) {
 			g := NewWithT(t)
@@ -1216,20 +1315,12 @@ func Test_objectMover_move(t *testing.T) {
 			// gets a fakeProxy to an empty cluster with all the required CRDs
 			toProxy := getFakeProxyWithCRDs()
 
-			// Run move
+			// Run move with mutators
 			mover := objectMover{
				fromProxy: graph.proxy,
 			}
 
-			// choose to include/exclude mutator randomly
-			includeMutator := randSource.Int63()%2 == 0
-			var mutators []ResourceMutatorFunc
-			if includeMutator {
-				mutators = append(mutators, namespaceMutator)
-			}
-
-			err := mover.move(graph, toProxy, mutators...)
-
+			err := mover.move(graph, toProxy, namespaceMutator)
 			if tt.wantErr {
 				g.Expect(err).To(HaveOccurred())
 				return
@@ -1270,24 +1361,20 @@ func Test_objectMover_move(t *testing.T) {
 				oTo := &unstructured.Unstructured{}
 				oTo.SetAPIVersion(node.identity.APIVersion)
 				oTo.SetKind(node.identity.Kind)
-				if includeMutator {
-					if key.Namespace != "" {
-						key.Namespace = toNamespace
-					}
+				if !node.isGlobal {
+					key.Namespace = toNamespace
 				}
 
 				if err := csTo.Get(ctx, key, oTo); err != nil {
 					t.Errorf("error = %v when checking for %v created in target cluster", err, key)
 					continue
 				}
-				if includeMutator {
-					if fields, knownKind := updateKnownKinds[oTo.GetKind()]; knownKind {
-						for _, nsField := range fields {
-							value, exists, err := unstructured.NestedFieldNoCopy(oTo.Object, nsField...)
-							g.Expect(err).To(BeNil())
-							if exists {
-								g.Expect(value).To(Equal(toNamespace))
-							}
+				if fields, knownKind := updateKnownKinds[oTo.GetKind()]; knownKind {
+					for _, nsField := range fields {
+						value, exists, err := unstructured.NestedFieldNoCopy(oTo.Object, nsField...)
+						g.Expect(err).To(BeNil())
+						if exists {
+							g.Expect(value).To(Equal(toNamespace))
+						}
 					}
 				}
 			}
@@ -1998,7 +2085,7 @@ func Test_createTargetObject(t *testing.T) {
 				fromProxy: tt.args.fromProxy,
 			}
 
-			err := mover.createTargetObject(tt.args.node, tt.args.toProxy, nil, nil)
+			err := mover.createTargetObject(tt.args.node, tt.args.toProxy, nil, sets.New[string]())
 			if tt.wantErr {
 				g.Expect(err).To(HaveOccurred())
 				return