From 07b5e0a41b87e6a100461d6ac4dd46d1172254e7 Mon Sep 17 00:00:00 2001 From: Yuvaraj Kakaraparthi Date: Wed, 19 Jan 2022 08:33:27 -0800 Subject: [PATCH] rename topology-dry run to 'topology plan' and other improvements --- cmd/clusterctl/client/client.go | 4 +- cmd/clusterctl/client/client_test.go | 4 +- ...-cluster.yaml => existing-my-cluster.yaml} | 126 ------- .../existing-my-second-cluster.yaml | 142 ++++++++ .../assets/topology-test/mock-CRDs.yaml | 39 +++ .../modified-CP-dockermachinetemplate.yaml | 14 + ...-cluster.yaml => modified-my-cluster.yaml} | 2 +- .../topology-test/my-cluster-class.yaml | 85 +++++ .../objects-in-different-namespaces.yaml | 41 +++ cmd/clusterctl/client/cluster/topology.go | 151 ++++---- .../client/cluster/topology_test.go | 248 ++++++++++--- cmd/clusterctl/client/topology.go | 22 +- cmd/clusterctl/cmd/alpha.go | 2 +- cmd/clusterctl/cmd/topology.go | 295 +--------------- cmd/clusterctl/cmd/topology_plan.go | 331 ++++++++++++++++++ 15 files changed, 964 insertions(+), 542 deletions(-) rename cmd/clusterctl/client/cluster/assets/topology-test/{existing-clusterclass-and-cluster.yaml => existing-my-cluster.yaml} (50%) create mode 100644 cmd/clusterctl/client/cluster/assets/topology-test/existing-my-second-cluster.yaml create mode 100644 cmd/clusterctl/client/cluster/assets/topology-test/mock-CRDs.yaml create mode 100644 cmd/clusterctl/client/cluster/assets/topology-test/modified-CP-dockermachinetemplate.yaml rename cmd/clusterctl/client/cluster/assets/topology-test/{modified-cluster.yaml => modified-my-cluster.yaml} (95%) create mode 100644 cmd/clusterctl/client/cluster/assets/topology-test/my-cluster-class.yaml create mode 100644 cmd/clusterctl/client/cluster/assets/topology-test/objects-in-different-namespaces.yaml create mode 100644 cmd/clusterctl/cmd/topology_plan.go diff --git a/cmd/clusterctl/client/client.go b/cmd/clusterctl/client/client.go index a39c544863d0..ec32c11adc7a 100644 --- a/cmd/clusterctl/client/client.go +++ 
b/cmd/clusterctl/client/client.go @@ -89,8 +89,8 @@ type AlphaClient interface { RolloutResume(options RolloutOptions) error // RolloutUndo provides rollout rollback of cluster-api resources RolloutUndo(options RolloutOptions) error - // DryRunTopology dry runs the topology reconciler - DryRunTopology(options DryRunOptions) (*cluster.DryRunOutput, error) + // TopologyPlan dry runs the topology reconciler + TopologyPlan(options TopologyPlanOptions) (*TopologyPlanOutput, error) } // YamlPrinter exposes methods that prints the processed template and diff --git a/cmd/clusterctl/client/client_test.go b/cmd/clusterctl/client/client_test.go index 66bf1fb0a178..85342294263e 100644 --- a/cmd/clusterctl/client/client_test.go +++ b/cmd/clusterctl/client/client_test.go @@ -148,8 +148,8 @@ func (f fakeClient) RolloutUndo(options RolloutOptions) error { return f.internalClient.RolloutUndo(options) } -func (f fakeClient) DryRunTopology(options DryRunOptions) (*cluster.DryRunOutput, error) { - return f.internalClient.DryRunTopology(options) +func (f fakeClient) TopologyPlan(options TopologyPlanOptions) (*cluster.TopologyPlanOutput, error) { + return f.internalClient.TopologyPlan(options) } // newFakeClient returns a clusterctl client that allows to execute tests on a set of fake config, fake repositories and fake clusters. 
diff --git a/cmd/clusterctl/client/cluster/assets/topology-test/existing-clusterclass-and-cluster.yaml b/cmd/clusterctl/client/cluster/assets/topology-test/existing-my-cluster.yaml similarity index 50% rename from cmd/clusterctl/client/cluster/assets/topology-test/existing-clusterclass-and-cluster.yaml rename to cmd/clusterctl/client/cluster/assets/topology-test/existing-my-cluster.yaml index 23bcc5daecdc..d44fa0f34f6b 100644 --- a/cmd/clusterctl/client/cluster/assets/topology-test/existing-clusterclass-and-cluster.yaml +++ b/cmd/clusterctl/client/cluster/assets/topology-test/existing-my-cluster.yaml @@ -1,129 +1,3 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - labels: - cluster.x-k8s.io/provider: infrastructure-docker - cluster.x-k8s.io/v1beta1: v1beta1 - name: dockerclusters.infrastructure.cluster.x-k8s.io ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - labels: - cluster.x-k8s.io/provider: infrastructure-docker - cluster.x-k8s.io/v1beta1: v1beta1 - name: dockerclustertemplates.infrastructure.cluster.x-k8s.io ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - labels: - cluster.x-k8s.io/provider: control-plane-kubeadm - cluster.x-k8s.io/v1beta1: v1beta1 - name: kubeadmcontrolplanetemplates.controlplane.cluster.x-k8s.io ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - labels: - cluster.x-k8s.io/provider: control-plane-kubeadm - cluster.x-k8s.io/v1beta1: v1beta1 - name: kubeadmcontrolplanes.controlplane.cluster.x-k8s.io ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - labels: - cluster.x-k8s.io/provider: infrastructure-docker - cluster.x-k8s.io/v1beta1: v1beta1 - name: dockermachinetemplates.infrastructure.cluster.x-k8s.io ---- -apiVersion: cluster.x-k8s.io/v1beta1 -kind: ClusterClass -metadata: - name: my-cluster-class - namespace: default -spec: - controlPlane: - ref: - 
apiVersion: controlplane.cluster.x-k8s.io/v1beta1 - kind: KubeadmControlPlaneTemplate - name: control-plane - namespace: default - machineInfrastructure: - ref: - kind: DockerMachineTemplate - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 - name: "control-plane" - namespace: default - infrastructure: - ref: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 - kind: DockerClusterTemplate - name: my-cluster - namespace: default ---- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 -kind: DockerClusterTemplate -metadata: - name: my-cluster - namespace: default -spec: - template: - spec: {} ---- -kind: KubeadmControlPlaneTemplate -apiVersion: controlplane.cluster.x-k8s.io/v1beta1 -metadata: - name: "control-plane" - namespace: default -spec: - template: - spec: - replicas: 1 - machineTemplate: - nodeDrainTimeout: 1s - infrastructureRef: - kind: DockerMachineTemplate - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 - name: "control-plane" - namespace: default - kubeadmConfigSpec: - clusterConfiguration: - controllerManager: - extraArgs: { enable-hostpath-provisioner: 'true' } - apiServer: - certSANs: [ localhost, 127.0.0.1 ] - initConfiguration: - nodeRegistration: - criSocket: /var/run/containerd/containerd.sock - kubeletExtraArgs: - # We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd - # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726 - cgroup-driver: cgroupfs - eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' - joinConfiguration: - nodeRegistration: - criSocket: /var/run/containerd/containerd.sock - kubeletExtraArgs: - # We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd - # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726 - cgroup-driver: cgroupfs - eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' - version: v1.21.2 ---- -apiVersion: 
infrastructure.cluster.x-k8s.io/v1beta1 -kind: DockerMachineTemplate -metadata: - name: "control-plane" - namespace: default -spec: - template: - spec: - extraMounts: - - containerPath: "/var/run/docker.sock" - hostPath: "/var/run/docker.sock" ---- apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: diff --git a/cmd/clusterctl/client/cluster/assets/topology-test/existing-my-second-cluster.yaml b/cmd/clusterctl/client/cluster/assets/topology-test/existing-my-second-cluster.yaml new file mode 100644 index 000000000000..e2b21678e8b8 --- /dev/null +++ b/cmd/clusterctl/client/cluster/assets/topology-test/existing-my-second-cluster.yaml @@ -0,0 +1,142 @@ +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: "my-second-cluster" + namespace: default + labels: + cni: kindnet +spec: + clusterNetwork: + services: + cidrBlocks: ["10.128.0.0/12"] + pods: + cidrBlocks: ["192.168.0.0/16"] + serviceDomain: "cluster.local" + controlPlaneEndpoint: + host: 172.19.0.4 + port: 6443 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: my-second-cluster-fwbpf + namespace: default + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerCluster + name: my-second-cluster-zrq96 + namespace: default + topology: + class: my-cluster-class + version: v1.21.2 + controlPlane: + metadata: {} + replicas: 1 +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: DockerCluster +metadata: + annotations: + cluster.x-k8s.io/cloned-from-groupkind: DockerClusterTemplate.infrastructure.cluster.x-k8s.io + cluster.x-k8s.io/cloned-from-name: my-cluster + finalizers: + - dockercluster.infrastructure.cluster.x-k8s.io + labels: + cluster.x-k8s.io/cluster-name: my-cluster + topology.cluster.x-k8s.io/owned: "" + name: my-second-cluster-zrq96 + namespace: default +spec: + controlPlaneEndpoint: + host: 172.19.0.4 + port: 6443 + loadBalancer: {} +--- +apiVersion: 
controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + annotations: + cluster.x-k8s.io/cloned-from-groupkind: KubeadmControlPlaneTemplate.controlplane.cluster.x-k8s.io + cluster.x-k8s.io/cloned-from-name: control-plane + finalizers: + - kubeadm.controlplane.cluster.x-k8s.io + labels: + cluster.x-k8s.io/cluster-name: my-cluster + topology.cluster.x-k8s.io/owned: "" + name: my-second-cluster-fwbpf + namespace: default + ownerReferences: + - apiVersion: cluster.x-k8s.io/v1beta1 + blockOwnerDeletion: true + controller: true + kind: Cluster + name: my-second-cluster + uid: 3ba5ce4f-d279-4edb-8ade-62a2381d11a9 +spec: + kubeadmConfigSpec: + clusterConfiguration: + apiServer: + certSANs: + - localhost + - 127.0.0.1 + controllerManager: + extraArgs: + enable-hostpath-provisioner: "true" + dns: {} + etcd: {} + networking: {} + scheduler: {} + initConfiguration: + localAPIEndpoint: {} + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cgroup-driver: cgroupfs + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + joinConfiguration: + discovery: {} + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cgroup-driver: cgroupfs + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerMachineTemplate + name: my-cluster-control-plane-44cd4 + namespace: default + metadata: + labels: + cluster.x-k8s.io/cluster-name: my-cluster + topology.cluster.x-k8s.io/owned: "" + nodeDrainTimeout: 1s + replicas: 1 + rolloutStrategy: + rollingUpdate: + maxSurge: 1 + type: RollingUpdate + version: v1.21.2 +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: DockerMachineTemplate +metadata: + annotations: + cluster.x-k8s.io/cloned-from-groupkind: DockerMachineTemplate.infrastructure.cluster.x-k8s.io + cluster.x-k8s.io/cloned-from-name: 
control-plane + labels: + cluster.x-k8s.io/cluster-name: my-cluster + topology.cluster.x-k8s.io/owned: "" + name: my-second-cluster-control-plane-44cd4 + namespace: default + ownerReferences: + - apiVersion: cluster.x-k8s.io/v1beta1 + kind: Cluster + name: my-second-cluster + uid: 3ba5ce4f-d279-4edb-8ade-62a2381d11a9 +spec: + template: + spec: + extraMounts: + - containerPath: /var/run/docker.sock + hostPath: /var/run/docker.sock diff --git a/cmd/clusterctl/client/cluster/assets/topology-test/mock-CRDs.yaml b/cmd/clusterctl/client/cluster/assets/topology-test/mock-CRDs.yaml new file mode 100644 index 000000000000..20fdfc1aade8 --- /dev/null +++ b/cmd/clusterctl/client/cluster/assets/topology-test/mock-CRDs.yaml @@ -0,0 +1,39 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + labels: + cluster.x-k8s.io/provider: infrastructure-docker + cluster.x-k8s.io/v1beta1: v1beta1 + name: dockerclusters.infrastructure.cluster.x-k8s.io +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + labels: + cluster.x-k8s.io/provider: infrastructure-docker + cluster.x-k8s.io/v1beta1: v1beta1 + name: dockerclustertemplates.infrastructure.cluster.x-k8s.io +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + labels: + cluster.x-k8s.io/provider: control-plane-kubeadm + cluster.x-k8s.io/v1beta1: v1beta1 + name: kubeadmcontrolplanetemplates.controlplane.cluster.x-k8s.io +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + labels: + cluster.x-k8s.io/provider: control-plane-kubeadm + cluster.x-k8s.io/v1beta1: v1beta1 + name: kubeadmcontrolplanes.controlplane.cluster.x-k8s.io +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + labels: + cluster.x-k8s.io/provider: infrastructure-docker + cluster.x-k8s.io/v1beta1: v1beta1 + name: dockermachinetemplates.infrastructure.cluster.x-k8s.io \ No newline at end of file diff --git 
a/cmd/clusterctl/client/cluster/assets/topology-test/modified-CP-dockermachinetemplate.yaml b/cmd/clusterctl/client/cluster/assets/topology-test/modified-CP-dockermachinetemplate.yaml new file mode 100644 index 000000000000..18fa502056a3 --- /dev/null +++ b/cmd/clusterctl/client/cluster/assets/topology-test/modified-CP-dockermachinetemplate.yaml @@ -0,0 +1,14 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: DockerMachineTemplate +metadata: + name: "control-plane" + namespace: default +spec: + template: + metadata: + labels: + docker-machine-template: test-template-control-plane + spec: + extraMounts: + - containerPath: "/var/run/docker.sock" + hostPath: "/var/run/docker.sock" \ No newline at end of file diff --git a/cmd/clusterctl/client/cluster/assets/topology-test/modified-cluster.yaml b/cmd/clusterctl/client/cluster/assets/topology-test/modified-my-cluster.yaml similarity index 95% rename from cmd/clusterctl/client/cluster/assets/topology-test/modified-cluster.yaml rename to cmd/clusterctl/client/cluster/assets/topology-test/modified-my-cluster.yaml index de08d2ea8749..7d0d7db1809f 100644 --- a/cmd/clusterctl/client/cluster/assets/topology-test/modified-cluster.yaml +++ b/cmd/clusterctl/client/cluster/assets/topology-test/modified-my-cluster.yaml @@ -17,4 +17,4 @@ spec: version: v1.21.2 controlPlane: metadata: {} - replicas: 3 + replicas: 3 \ No newline at end of file diff --git a/cmd/clusterctl/client/cluster/assets/topology-test/my-cluster-class.yaml b/cmd/clusterctl/client/cluster/assets/topology-test/my-cluster-class.yaml new file mode 100644 index 000000000000..d16eb0cc114f --- /dev/null +++ b/cmd/clusterctl/client/cluster/assets/topology-test/my-cluster-class.yaml @@ -0,0 +1,85 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: ClusterClass +metadata: + name: my-cluster-class + namespace: default +spec: + controlPlane: + ref: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + name: control-plane + 
namespace: default + machineInfrastructure: + ref: + kind: DockerMachineTemplate + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + name: "control-plane" + namespace: default + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerClusterTemplate + name: my-cluster + namespace: default +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: DockerClusterTemplate +metadata: + name: my-cluster + namespace: default +spec: + template: + spec: {} +--- +kind: KubeadmControlPlaneTemplate +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +metadata: + name: "control-plane" + namespace: default +spec: + template: + spec: + replicas: 1 + machineTemplate: + nodeDrainTimeout: 1s + infrastructureRef: + kind: DockerMachineTemplate + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + name: "control-plane" + namespace: default + kubeadmConfigSpec: + clusterConfiguration: + controllerManager: + extraArgs: { enable-hostpath-provisioner: 'true' } + apiServer: + certSANs: [ localhost, 127.0.0.1 ] + initConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + # We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd + # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726 + cgroup-driver: cgroupfs + eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + # We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd + # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726 + cgroup-driver: cgroupfs + eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' + version: v1.21.2 +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: DockerMachineTemplate +metadata: + name: "control-plane" + namespace: 
default +spec: + template: + spec: + extraMounts: + - containerPath: "/var/run/docker.sock" + hostPath: "/var/run/docker.sock" diff --git a/cmd/clusterctl/client/cluster/assets/topology-test/objects-in-different-namespaces.yaml b/cmd/clusterctl/client/cluster/assets/topology-test/objects-in-different-namespaces.yaml new file mode 100644 index 000000000000..1be92eebacfc --- /dev/null +++ b/cmd/clusterctl/client/cluster/assets/topology-test/objects-in-different-namespaces.yaml @@ -0,0 +1,41 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: "my-cluster" + namespace: namespace-one + labels: + cni: kindnet +spec: + clusterNetwork: + services: + cidrBlocks: ["10.128.0.0/12"] + pods: + cidrBlocks: ["192.168.0.0/16"] + serviceDomain: "cluster.local" + topology: + class: my-cluster-class + version: v1.21.2 + controlPlane: + metadata: {} + replicas: 1 +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: "my-second-cluster" + namespace: namespace-two + labels: + cni: kindnet +spec: + clusterNetwork: + services: + cidrBlocks: ["10.128.0.0/12"] + pods: + cidrBlocks: ["192.168.0.0/16"] + serviceDomain: "cluster.local" + topology: + class: my-second-cluster-class + version: v1.21.2 + controlPlane: + metadata: {} + replicas: 1 \ No newline at end of file diff --git a/cmd/clusterctl/client/cluster/topology.go b/cmd/clusterctl/client/cluster/topology.go index 475281d7b8b4..40ca1fa1d230 100644 --- a/cmd/clusterctl/client/cluster/topology.go +++ b/cmd/clusterctl/client/cluster/topology.go @@ -53,7 +53,7 @@ const ( // TopologyClient has methods to work with ClusterClass and ManagedTopologies. type TopologyClient interface { - DryRun(in *DryRunInput) (*DryRunOutput, error) + Plan(in *TopologyPlanInput) (*TopologyPlanOutput, error) } // topologyClient implements TopologyClient. @@ -73,37 +73,38 @@ func newTopologyClient(proxy Proxy, inventoryClient InventoryClient) TopologyCli } } -// DryRunInput defines the input for DryRun function. 
-type DryRunInput struct { +// TopologyPlanInput defines the input for the Plan function. +type TopologyPlanInput struct { Objs []*unstructured.Unstructured TargetClusterName string + TargetNamespace string } // PatchSummary defined the patch observed on an object. type PatchSummary = dryrun.PatchSummary -// ChangeSummary defines all the changes detected by the Dryrun execution. +// ChangeSummary defines all the changes detected by the plan operation. type ChangeSummary = dryrun.ChangeSummary -// DryRunOutput defines the output of the DryRun function. -type DryRunOutput struct { +// TopologyPlanOutput defines the output of the Plan function. +type TopologyPlanOutput struct { // Clusters is the list clusters affected by the input. Clusters []client.ObjectKey // ClusterClasses is the list of clusters affected by the input. ClusterClasses []client.ObjectKey // ReconciledCluster is the cluster on which the topology reconciler loop is executed. // If there is only one affected cluster then it becomes the ReconciledCluster. If not, - // the ReconciledCluster is chosen using addition information in the DryRunInput. + // the ReconciledCluster is chosen using additional information in the TopologyPlanInput. // ReconciledCluster can be empty if no single target cluster is provided. - ReconciledCluster client.ObjectKey + ReconciledCluster *client.ObjectKey // ChangeSummary is the full list of changes (objects created, modified and deleted) observed // on the ReconciledCluster. ChangeSummary is empty if ReconciledCluster is empty. *ChangeSummary } -// DryRun performs a dry run execution of the topology reconciler using the given inputs. +// Plan performs a dry run execution of the topology reconciler using the given inputs. // It returns a summary of the changes observed during the execution. 
-func (t *topologyClient) DryRun(in *DryRunInput) (*DryRunOutput, error) { +func (t *topologyClient) Plan(in *TopologyPlanInput) (*TopologyPlanOutput, error) { ctx := context.TODO() log := logf.Log @@ -128,16 +129,16 @@ func (t *topologyClient) DryRun(in *DryRunInput) (*DryRunOutput, error) { } } - // Prepare the inputs for a dry run operation. This includes steps like setting missing namespaces on objects + // Prepare the inputs for dry running the reconciler. This includes steps like setting missing namespaces on objects // and adjusting cluster objects to reflect updated state. if err := t.prepareInput(ctx, in, c); err != nil { return nil, errors.Wrap(err, "failed preparing input") } // Run defaulting and validation on core CAPI objects - Cluster and ClusterClasses. // This mimics the defaulting and validation webhooks that will run on the objects during a real execution. - // Running defaulting and validation on these objects helps to improve the UX of using the dry run operation. + // Running defaulting and validation on these objects helps to improve the UX of using the plan operation. // This is especially important when working with Clusters and ClusterClasses that use variable and patches. - if err := t.defaultAndValidate(ctx, in, c); err != nil { + if err := t.runDefaultAndValidationWebhooks(ctx, in, c); err != nil { return nil, errors.Wrap(err, "failed defaulting and validation on input objects") } @@ -148,7 +149,7 @@ func (t *topologyClient) DryRun(in *DryRunInput) (*DryRunOutput, error) { } // Add mock CRDs of all the provider objects in the input to the list used when initializing the dry run client. // Adding these CRDs makes sure that UpdateReferenceAPIContract calls in the reconciler can work. 
- for _, o := range t.generateCRDs(in) { + for _, o := range t.generateCRDs(in.Objs) { objs = append(objs, o) } @@ -164,7 +165,7 @@ func (t *topologyClient) DryRun(in *DryRunInput) (*DryRunOutput, error) { return nil, errors.Wrap(err, "failed calculating affected Clusters") } - res := &DryRunOutput{ + res := &TopologyPlanOutput{ Clusters: affectedClusters, ClusterClasses: affectedClusterClasses, ChangeSummary: &dryrun.ChangeSummary{}, @@ -174,12 +175,15 @@ func (t *topologyClient) DryRun(in *DryRunInput) (*DryRunOutput, error) { // Full changeset is only generated for the target cluster. var targetCluster *client.ObjectKey if in.TargetClusterName != "" { - // Check if the target cluster is among the list of affected clusters and use that - // as the target cluster. - if res := matchCluster(affectedClusters, in.TargetClusterName); res != nil { - targetCluster = res + // Check if the target cluster is among the list of affected clusters and use that. + target := client.ObjectKey{ + Namespace: in.TargetNamespace, + Name: in.TargetClusterName, + } + if inList(affectedClusters, target) { + targetCluster = &target } else { - return nil, fmt.Errorf("target cluster %q is not among the list of affected clusters", in.TargetClusterName) + return nil, fmt.Errorf("target cluster %q is not among the list of affected clusters", target.String()) } } else if len(affectedClusters) == 1 { // If no target cluster is specified and if there is only one affected cluster, use that as the target cluster. @@ -187,12 +191,12 @@ func (t *topologyClient) DryRun(in *DryRunInput) (*DryRunOutput, error) { } if targetCluster == nil { - // If there is no target cluster then return here. We will + // There is no target cluster, return here. We will // not generate a full change summary. 
return res, nil } - res.ReconciledCluster = *targetCluster + res.ReconciledCluster = targetCluster reconciler := &clustertopologycontroller.Reconciler{ Client: dryRunClient, APIReader: dryRunClient, @@ -204,7 +208,7 @@ func (t *topologyClient) DryRun(in *DryRunInput) (*DryRunOutput, error) { if _, err := reconciler.Reconcile(ctx, request); err != nil { return nil, errors.Wrap(err, "failed to dry run the topology controller") } - + // Calculate changes observed by dry run client. changes, err := dryRunClient.Changes(ctx) if err != nil { return nil, errors.Wrap(err, "failed to get changes made by the topology controller") @@ -214,13 +218,25 @@ func (t *topologyClient) DryRun(in *DryRunInput) (*DryRunOutput, error) { return res, nil } -// validateInput checks that the dryrun input does not violate any of the below expectations: +// validateInput checks that the topology plan input does not violate any of the below expectations: // - no more than 1 cluster in the input. // - no more than 1 clusterclass in the input. -func (t *topologyClient) validateInput(in *DryRunInput) error { - if len(uniqueNamespaces(in.Objs)) != 1 { +func (t *topologyClient) validateInput(in *TopologyPlanInput) error { + // Check all the objects in the input belong to the same namespace. + // Note: It is okay if all the objects in the input do not have any namespace. + // In such case, the list of unique namespaces will be [""]. + namespaces := uniqueNamespaces(in.Objs) + if len(namespaces) != 1 { return fmt.Errorf("all the objects in the input should belong to the same namespace") } + + ns := namespaces[0] + // If the objects have a non empty namespace make sure that it matches the TargetNamespace. 
+ if ns != "" && in.TargetNamespace != "" && ns != in.TargetNamespace { + return fmt.Errorf("the namespace from the provided object(s) %q does not match the namespace %q", ns, in.TargetNamespace) + } + in.TargetNamespace = ns + clusterCount, clusterClassCount := len(getClusters(in.Objs)), len(getClusterClasses(in.Objs)) if clusterCount > maxClusterPerInput || clusterClassCount > maxClusterClassesPerInput { return fmt.Errorf( @@ -238,8 +254,8 @@ func (t *topologyClient) validateInput(in *DryRunInput) error { // - Set the target namespace on the objects if not set (this operation is generally done by kubectl) // - Prepare cluster objects so that the state of the cluster, if modified, correctly represents // the expected changes. -func (t *topologyClient) prepareInput(ctx context.Context, in *DryRunInput, apiReader client.Reader) error { - if err := t.setMissingNamespaces(in); err != nil { +func (t *topologyClient) prepareInput(ctx context.Context, in *TopologyPlanInput, apiReader client.Reader) error { + if err := t.setMissingNamespaces(in.TargetNamespace, in.Objs); err != nil { return errors.Wrap(err, "failed to set missing namespaces") } @@ -251,18 +267,22 @@ func (t *topologyClient) prepareInput(ctx context.Context, in *DryRunInput, apiR // setMissingNamespaces sets the object to the current namespace on objects // that are missing the namespace field. -func (t *topologyClient) setMissingNamespaces(in *DryRunInput) error { - currentNamespace := metav1.NamespaceDefault - // If a cluster is available use the current namespace as defined in its kubeconfig. - if err := t.proxy.CheckClusterAvailable(); err == nil { - currentNamespace, err = t.proxy.CurrentNamespace() - if err != nil { - return errors.Wrap(err, "failed to get current namespace") +func (t *topologyClient) setMissingNamespaces(currentNamespace string, objs []*unstructured.Unstructured) error { + if currentNamespace == "" { + // If TargetNamespace is not provided use "default" namespace. 
+ currentNamespace = metav1.NamespaceDefault + // If a cluster is available use the current namespace as defined in its kubeconfig. + if err := t.proxy.CheckClusterAvailable(); err == nil { + currentNamespace, err = t.proxy.CurrentNamespace() + if err != nil { + return errors.Wrap(err, "failed to get current namespace") + } } } - for i := range in.Objs { - if in.Objs[i].GetNamespace() == "" { - in.Objs[i].SetNamespace(currentNamespace) + // Set namespace on objects that do not have namespace value. + for i := range objs { + if objs[i].GetNamespace() == "" { + objs[i].SetNamespace(currentNamespace) } } return nil @@ -324,14 +344,14 @@ func (t *topologyClient) prepareClusters(ctx context.Context, clusters []*unstru return nil } -// defaultAndValidate runs the defaulting and validation webhooks on the +// runDefaultAndValidationWebhooks runs the defaulting and validation webhooks on the // ClusterClass and Cluster objects in the input thus replicating the real kube-apiserver flow // when applied. // Nb. Perform ValidateUpdate only if the object is already in the cluster. In all other cases, // ValidateCreate is performed. // *Important Note*: We cannot perform defaulting and validation on provider objects as we do not have access to // that code. -func (t *topologyClient) defaultAndValidate(ctx context.Context, in *DryRunInput, apiReader client.Reader) error { +func (t *topologyClient) runDefaultAndValidationWebhooks(ctx context.Context, in *TopologyPlanInput, apiReader client.Reader) error { // Enable the ClusterTopology feature gate so that the defaulter and validators do not complain. // Note: We don't need to disable it later because the CLI is short lived. 
if err := feature.Gates.(featuregate.MutableFeatureGate).Set(fmt.Sprintf("%s=%v", feature.ClusterTopology, true)); err != nil { @@ -475,11 +495,11 @@ func getTemplates(objs []*unstructured.Unstructured) []*unstructured.Unstructure // generateCRDs creates mock CRD objects for all the provider specific objects in the input. // These CRD objects will be added to the dry run client for UpdateReferenceAPIContract // to work as expected. -func (t *topologyClient) generateCRDs(in *DryRunInput) []*apiextensionsv1.CustomResourceDefinition { +func (t *topologyClient) generateCRDs(objs []*unstructured.Unstructured) []*apiextensionsv1.CustomResourceDefinition { crds := []*apiextensionsv1.CustomResourceDefinition{} crdMap := map[string]bool{} var gvk schema.GroupVersionKind - for _, obj := range in.Objs { + for _, obj := range objs { gvk = obj.GroupVersionKind() if strings.HasSuffix(gvk.Group, ".cluster.x-k8s.io") && !crdMap[gvk.String()] { crd := &apiextensionsv1.CustomResourceDefinition{ @@ -501,18 +521,19 @@ func (t *topologyClient) generateCRDs(in *DryRunInput) []*apiextensionsv1.Custom return crds } -func (t *topologyClient) affectedClusterClasses(ctx context.Context, in *DryRunInput, c client.Reader) ([]client.ObjectKey, error) { +func (t *topologyClient) affectedClusterClasses(ctx context.Context, in *TopologyPlanInput, c client.Reader) ([]client.ObjectKey, error) { affectedClusterClasses := map[client.ObjectKey]bool{} + ccList := &clusterv1.ClusterClassList{} + if err := c.List( + ctx, + ccList, + client.InNamespace(in.TargetNamespace), + ); err != nil { + return nil, errors.Wrapf(err, "failed to list ClusterClasses in namespace %s", in.TargetNamespace) + } + // Each of the ClusterClass that uses any of the Templates in the input is an affected ClusterClass. 
for _, template := range getTemplates(in.Objs) { - ccList := &clusterv1.ClusterClassList{} - if err := c.List( - ctx, - ccList, - client.InNamespace(template.GetNamespace()), - ); err != nil { - return nil, errors.Wrapf(err, "failed to list ClusterClasses using Template Kind=%s %s/%s", template.GetKind(), template.GetNamespace(), template.GetName()) - } for i := range ccList.Items { if clusterClassUsesTemplate(&ccList.Items[i], objToRef(template)) { affectedClusterClasses[client.ObjectKeyFromObject(&ccList.Items[i])] = true @@ -532,23 +553,23 @@ func (t *topologyClient) affectedClusterClasses(ctx context.Context, in *DryRunI return affectedClusterClassesList, nil } -func (t *topologyClient) affectedClusters(ctx context.Context, in *DryRunInput, c client.Reader) ([]client.ObjectKey, error) { +func (t *topologyClient) affectedClusters(ctx context.Context, in *TopologyPlanInput, c client.Reader) ([]client.ObjectKey, error) { affectedClusters := map[client.ObjectKey]bool{} - affectedClusterClasses, err := t.affectedClusterClasses(ctx, in, c) if err != nil { return nil, errors.Wrap(err, "failed to get list of affected ClusterClasses") } + clusterList := &clusterv1.ClusterList{} + if err := c.List( + ctx, + clusterList, + client.InNamespace(in.TargetNamespace), + ); err != nil { + return nil, errors.Wrapf(err, "failed to list Clusters in namespace %s", in.TargetNamespace) + } + // Each of the Cluster that uses the ClusterClass in the input is an affected cluster. 
for _, cc := range affectedClusterClasses { - clusterList := &clusterv1.ClusterList{} - if err := c.List( - ctx, - clusterList, - client.InNamespace(cc.Namespace), - ); err != nil { - return nil, errors.Wrapf(err, "failed to list Clusters using ClusterClass %s/%s", cc.Namespace, cc.Name) - } for i := range clusterList.Items { if clusterList.Items[i].Spec.Topology != nil && clusterList.Items[i].Spec.Topology.Class == cc.Name { affectedClusters[client.ObjectKeyFromObject(&clusterList.Items[i])] = true @@ -568,13 +589,13 @@ func (t *topologyClient) affectedClusters(ctx context.Context, in *DryRunInput, return affectedClustersList, nil } -func matchCluster(list []client.ObjectKey, name string) *client.ObjectKey { +func inList(list []client.ObjectKey, target client.ObjectKey) bool { for _, i := range list { - if i.Name == name { - return &i + if i == target { + return true } } - return nil + return false } // filterObjects returns a new list of objects after dropping all the objects that match any of the given GVKs. diff --git a/cmd/clusterctl/client/cluster/topology_test.go b/cmd/clusterctl/client/cluster/topology_test.go index dfd07b2b7c8a..e8a5e4a3809a 100644 --- a/cmd/clusterctl/client/cluster/topology_test.go +++ b/cmd/clusterctl/client/cluster/topology_test.go @@ -35,17 +35,33 @@ var ( //go:embed assets/topology-test/new-clusterclass-and-cluster.yaml newClusterClassAndClusterYAML []byte - //go:embed assets/topology-test/existing-clusterclass-and-cluster.yaml - existingClusterClassAndClusterYAML []byte + //go:embed assets/topology-test/mock-CRDs.yaml + mockCRDsYAML []byte + + //go:embed assets/topology-test/my-cluster-class.yaml + existingMyClusterClassYAML []byte + + //go:embed assets/topology-test/existing-my-cluster.yaml + existingMyClusterYAML []byte + + //go:embed assets/topology-test/existing-my-second-cluster.yaml + existingMySecondClusterYAML []byte // modifiedClusterYAML changes the control plane replicas from 1 to 3. 
- //go:embed assets/topology-test/modified-cluster.yaml - modifiedClusterYAML []byte + //go:embed assets/topology-test/modified-my-cluster.yaml + modifiedMyClusterYAML []byte + + // modifiedDockerMachineTemplateYAML adds metadata to the DockerMachineTemplate used by the control plane. + //go:embed assets/topology-test/modified-CP-dockermachinetemplate.yaml + modifiedDockerMachineTemplateYAML []byte + + //go:embed assets/topology-test/objects-in-different-namespaces.yaml + objsInDifferentNamespacesYAML []byte ) -func Test_topologyClient_DryRun(t *testing.T) { +func Test_topologyClient_Plan(t *testing.T) { type args struct { - in *DryRunInput + in *TopologyPlanInput } type item struct { kind string @@ -55,6 +71,7 @@ func Test_topologyClient_DryRun(t *testing.T) { type out struct { affectedClusters []client.ObjectKey affectedClusterClasses []client.ObjectKey + reconciledCluster *client.ObjectKey created []item modified []item deleted []item @@ -69,15 +86,9 @@ func Test_topologyClient_DryRun(t *testing.T) { { name: "Input with new ClusterClass and new Cluster", args: args{ - in: func() *DryRunInput { - objs, err := utilyaml.ToUnstructured(newClusterClassAndClusterYAML) - if err != nil { - panic(err) - } - return &DryRunInput{ - Objs: convertToPtrSlice(objs), - } - }(), + in: &TopologyPlanInput{ + Objs: mustToUnstructured(newClusterClassAndClusterYAML), + }, }, want: out{ created: []item{ @@ -95,47 +106,159 @@ func Test_topologyClient_DryRun(t *testing.T) { {kind: "Cluster", namespace: "default", namePrefix: "my-cluster"}, }, affectedClusters: func() []client.ObjectKey { - cluster := client.ObjectKey{} - cluster.Namespace = "default" - cluster.Name = "my-cluster" + cluster := client.ObjectKey{Namespace: "default", Name: "my-cluster"} return []client.ObjectKey{cluster} }(), affectedClusterClasses: func() []client.ObjectKey { - cc := client.ObjectKey{} - cc.Namespace = "default" - cc.Name = "my-cluster-class" + cc := client.ObjectKey{Namespace: "default", Name: 
"my-cluster-class"} return []client.ObjectKey{cc} }(), + reconciledCluster: &client.ObjectKey{Namespace: "default", Name: "my-cluster"}, }, wantErr: false, }, { name: "Modifying an existing Cluster", - existingObjects: func() []*unstructured.Unstructured { - objs, err := utilyaml.ToUnstructured(existingClusterClassAndClusterYAML) - if err != nil { - panic(err) - } - return convertToPtrSlice(objs) - }(), + existingObjects: mustToUnstructured( + mockCRDsYAML, + existingMyClusterClassYAML, + existingMyClusterYAML, + ), + args: args{ + in: &TopologyPlanInput{ + Objs: mustToUnstructured(modifiedMyClusterYAML), + }, + }, + want: out{ + affectedClusters: func() []client.ObjectKey { + cluster := client.ObjectKey{Namespace: "default", Name: "my-cluster"} + return []client.ObjectKey{cluster} + }(), + affectedClusterClasses: []client.ObjectKey{}, + modified: []item{ + {kind: "KubeadmControlPlane", namespace: "default", namePrefix: "my-cluster-"}, + }, + reconciledCluster: &client.ObjectKey{Namespace: "default", Name: "my-cluster"}, + }, + wantErr: false, + }, + { + name: "Modifying an existing DockerMachineTemplate. Template used by Control Plane of an existing Cluster.", + existingObjects: mustToUnstructured( + mockCRDsYAML, + existingMyClusterClassYAML, + existingMyClusterYAML, + ), + args: args{ + in: &TopologyPlanInput{ + Objs: mustToUnstructured(modifiedDockerMachineTemplateYAML), + }, + }, + want: out{ + affectedClusters: func() []client.ObjectKey { + cluster := client.ObjectKey{Namespace: "default", Name: "my-cluster"} + return []client.ObjectKey{cluster} + }(), + affectedClusterClasses: func() []client.ObjectKey { + cc := client.ObjectKey{Namespace: "default", Name: "my-cluster-class"} + return []client.ObjectKey{cc} + }(), + modified: []item{ + {kind: "KubeadmControlPlane", namespace: "default", namePrefix: "my-cluster-"}, + }, + created: []item{ + // Modifying the DockerClusterTemplate will result in template rotation. 
A new template will be created + // and used by KCP. + {kind: "DockerMachineTemplate", namespace: "default", namePrefix: "my-cluster-control-plane-"}, + }, + reconciledCluster: &client.ObjectKey{Namespace: "default", Name: "my-cluster"}, + }, + wantErr: false, + }, + { + name: "Modifying an existing DockerMachineTemplate. Affects multiple clusters. Target Cluster not specified.", + existingObjects: mustToUnstructured( + mockCRDsYAML, + existingMyClusterClassYAML, + existingMyClusterYAML, + existingMySecondClusterYAML, + ), args: args{ - in: func() *DryRunInput { - objs, err := utilyaml.ToUnstructured(modifiedClusterYAML) - if err != nil { - panic(err) - } - return &DryRunInput{ - Objs: convertToPtrSlice(objs), - } + in: &TopologyPlanInput{ + Objs: mustToUnstructured(modifiedDockerMachineTemplateYAML), + }, + }, + want: out{ + affectedClusters: func() []client.ObjectKey { + cluster := client.ObjectKey{Namespace: "default", Name: "my-cluster"} + cluster2 := client.ObjectKey{Namespace: "default", Name: "my-second-cluster"} + return []client.ObjectKey{cluster, cluster2} }(), + affectedClusterClasses: func() []client.ObjectKey { + cc := client.ObjectKey{Namespace: "default", Name: "my-cluster-class"} + return []client.ObjectKey{cc} + }(), + modified: []item{}, + created: []item{}, + reconciledCluster: nil, + }, + wantErr: false, + }, + { + name: "Modifying an existing DockerMachineTemplate. Affects multiple clusters. 
Target Cluster specified.", + existingObjects: mustToUnstructured( + mockCRDsYAML, + existingMyClusterClassYAML, + existingMyClusterYAML, + existingMySecondClusterYAML, + ), + args: args{ + in: &TopologyPlanInput{ + Objs: mustToUnstructured(modifiedDockerMachineTemplateYAML), + TargetClusterName: "my-cluster", + }, }, want: out{ + affectedClusters: func() []client.ObjectKey { + cluster := client.ObjectKey{Namespace: "default", Name: "my-cluster"} + cluster2 := client.ObjectKey{Namespace: "default", Name: "my-second-cluster"} + return []client.ObjectKey{cluster, cluster2} + }(), + affectedClusterClasses: func() []client.ObjectKey { + cc := client.ObjectKey{Namespace: "default", Name: "my-cluster-class"} + return []client.ObjectKey{cc} + }(), modified: []item{ {kind: "KubeadmControlPlane", namespace: "default", namePrefix: "my-cluster-"}, }, + created: []item{ + // Modifying the DockerMachineTemplate will result in template rotation. A new template will be created + // and used by KCP. + {kind: "DockerMachineTemplate", namespace: "default", namePrefix: "my-cluster-control-plane-"}, + }, + reconciledCluster: &client.ObjectKey{Namespace: "default", Name: "my-cluster"}, }, wantErr: false, }, + { + name: "Input with objects in different namespaces should return error", + args: args{ + in: &TopologyPlanInput{ + Objs: mustToUnstructured(objsInDifferentNamespacesYAML), + }, + }, + wantErr: true, + }, + { + name: "Input with TargetNamespace different from objects in input should return error", + args: args{ + in: &TopologyPlanInput{ + Objs: mustToUnstructured(newClusterClassAndClusterYAML), + TargetNamespace: "different-namespace", + }, + }, + wantErr: true, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -152,46 +275,67 @@ func Test_topologyClient_DryRun(t *testing.T) { inventoryClient, ) - res, err := tc.DryRun(tt.args.in) + res, err := tc.Plan(tt.args.in) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } + // The plan function 
should not return any error. g.Expect(err).NotTo(HaveOccurred()) + + // Check affected ClusterClasses. + g.Expect(len(res.ClusterClasses)).To(Equal(len(tt.want.affectedClusterClasses))) for _, cc := range tt.want.affectedClusterClasses { g.Expect(res.ClusterClasses).To(ContainElement(cc)) } + + // Check affected Clusters. + g.Expect(len(res.Clusters)).To(Equal(len(tt.want.affectedClusters))) for _, cluster := range tt.want.affectedClusters { g.Expect(res.Clusters).To(ContainElement(cluster)) } + + // Check the reconciled cluster. + if tt.want.reconciledCluster == nil { + g.Expect(res.ReconciledCluster).To(BeNil()) + } else { + g.Expect(res.ReconciledCluster).NotTo(BeNil()) + g.Expect(*res.ReconciledCluster).To(Equal(*tt.want.reconciledCluster)) + } + + // Check the created objects. for _, created := range tt.want.created { - g.Expect(res.Created).To(ContainElement(MatchDryRunOutputItem(created.kind, created.namespace, created.namePrefix))) + g.Expect(res.Created).To(ContainElement(MatchTopologyPlanOutputItem(created.kind, created.namespace, created.namePrefix))) } + + // Check the modified objects. actualModifiedObjs := []*unstructured.Unstructured{} for _, m := range res.Modified { actualModifiedObjs = append(actualModifiedObjs, m.After) } for _, modified := range tt.want.modified { - g.Expect(actualModifiedObjs).To(ContainElement(MatchDryRunOutputItem(modified.kind, modified.namespace, modified.namePrefix))) + g.Expect(actualModifiedObjs).To(ContainElement(MatchTopologyPlanOutputItem(modified.kind, modified.namespace, modified.namePrefix))) } + + // Check the deleted objects. 
for _, deleted := range tt.want.deleted { - g.Expect(res.Deleted).To(ContainElement(MatchDryRunOutputItem(deleted.kind, deleted.namespace, deleted.namePrefix))) + g.Expect(res.Deleted).To(ContainElement(MatchTopologyPlanOutputItem(deleted.kind, deleted.namespace, deleted.namePrefix))) } }) } } -func MatchDryRunOutputItem(kind, namespace, namePrefix string) types.GomegaMatcher { - return &dryRunOutputItemMatcher{kind, namespace, namePrefix} +func MatchTopologyPlanOutputItem(kind, namespace, namePrefix string) types.GomegaMatcher { + return &topologyPlanOutputItemMatcher{kind, namespace, namePrefix} } -type dryRunOutputItemMatcher struct { +type topologyPlanOutputItemMatcher struct { kind string namespace string namePrefix string } -func (m *dryRunOutputItemMatcher) Match(actual interface{}) (bool, error) { +func (m *topologyPlanOutputItemMatcher) Match(actual interface{}) (bool, error) { obj := actual.(*unstructured.Unstructured) if obj.GetKind() != m.kind { return false, nil @@ -205,11 +349,11 @@ func (m *dryRunOutputItemMatcher) Match(actual interface{}) (bool, error) { return true, nil } -func (m *dryRunOutputItemMatcher) FailureMessage(actual interface{}) string { +func (m *topologyPlanOutputItemMatcher) FailureMessage(actual interface{}) string { return fmt.Sprintf("Expected item Kind=%s, Namespace=%s, Name(prefix)=%s to be present", m.kind, m.namespace, m.namePrefix) } -func (m *dryRunOutputItemMatcher) NegatedFailureMessage(actual interface{}) string { +func (m *topologyPlanOutputItemMatcher) NegatedFailureMessage(actual interface{}) string { return fmt.Sprintf("Expected item Kind=%s, Namespace=%s, Name(prefix)=%s not to be present", m.kind, m.namespace, m.namePrefix) } @@ -220,3 +364,15 @@ func convertToPtrSlice(objs []unstructured.Unstructured) []*unstructured.Unstruc } return res } + +func mustToUnstructured(rawyamls ...[]byte) []*unstructured.Unstructured { + objects := []unstructured.Unstructured{} + for _, raw := range rawyamls { + objs, err := 
utilyaml.ToUnstructured(raw) + if err != nil { + panic(err) + } + objects = append(objects, objs...) + } + return convertToPtrSlice(objects) +} diff --git a/cmd/clusterctl/client/topology.go b/cmd/clusterctl/client/topology.go index 08104eaba865..8e65bb3547db 100644 --- a/cmd/clusterctl/client/topology.go +++ b/cmd/clusterctl/client/topology.go @@ -22,34 +22,40 @@ import ( "sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster" ) -// DryRunOptions define options for DryRunTopology. -type DryRunOptions struct { +// TopologyPlanOptions define options for TopologyPlan. +type TopologyPlanOptions struct { // Kubeconfig defines the kubeconfig to use for accessing the management cluster. If empty, // default rules for kubeconfig discovery will be used. Kubeconfig Kubeconfig - // Objs is the list of objects that are input to the dryrun operation. + // Objs is the list of objects that are input to the topology plan (dry run) operation. // The objects can be among new/modified clusters, new/modifed ClusterClasses and new/modified templates. Objs []*unstructured.Unstructured // Cluster is the name of the cluster to dryrun reconcile if multiple clusters are affected by the input. Cluster string + + // Namespace is the target namespace for the operation. + // This namespace is used as default for objects with missing namespaces. + // If the namespace of any of the input objects conflicts with Namespace an error is returned. + Namespace string } -// DryRunOutput defines the output of the dry run execution. -type DryRunOutput = cluster.DryRunOutput +// TopologyPlanOutput defines the output of the topology plan operation. +type TopologyPlanOutput = cluster.TopologyPlanOutput -// DryRunTopology performs a dry run execution of the topology reconciler using the given inputs. +// TopologyPlan performs a dry run execution of the topology reconciler using the given inputs. // It returns a summary of the changes observed during the execution. 
-func (c *clusterctlClient) DryRunTopology(options DryRunOptions) (*DryRunOutput, error) { +func (c *clusterctlClient) TopologyPlan(options TopologyPlanOptions) (*TopologyPlanOutput, error) { clusterClient, err := c.clusterClientFactory(ClusterClientFactoryInput{Kubeconfig: options.Kubeconfig}) if err != nil { return nil, err } - out, err := clusterClient.Topology().DryRun(&cluster.DryRunInput{ + out, err := clusterClient.Topology().Plan(&cluster.TopologyPlanInput{ Objs: options.Objs, TargetClusterName: options.Cluster, + TargetNamespace: options.Namespace, }) return out, err diff --git a/cmd/clusterctl/cmd/alpha.go b/cmd/clusterctl/cmd/alpha.go index f47012c3a83d..a975a128e49b 100644 --- a/cmd/clusterctl/cmd/alpha.go +++ b/cmd/clusterctl/cmd/alpha.go @@ -29,7 +29,7 @@ var alphaCmd = &cobra.Command{ func init() { // Alpha commands should be added here. alphaCmd.AddCommand(rolloutCmd) - alphaCmd.AddCommand(topologyDryRunCmd) + alphaCmd.AddCommand(topologyCmd) RootCmd.AddCommand(alphaCmd) } diff --git a/cmd/clusterctl/cmd/topology.go b/cmd/clusterctl/cmd/topology.go index df9b533d05c9..5920e16261a3 100644 --- a/cmd/clusterctl/cmd/topology.go +++ b/cmd/clusterctl/cmd/topology.go @@ -17,298 +17,11 @@ limitations under the License. 
package cmd import ( - "fmt" - "os" - "path" - "sort" - - "github.com/olekukonko/tablewriter" - "github.com/pkg/errors" "github.com/spf13/cobra" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - crclient "sigs.k8s.io/controller-runtime/pkg/client" - - "sigs.k8s.io/cluster-api/cmd/clusterctl/client" - "sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster" - utilyaml "sigs.k8s.io/cluster-api/util/yaml" ) -type topologyDryRunOptions struct { - kubeconfig string - kubeconfigContext string - file string - cluster string - outDir string -} - -var dr = &topologyDryRunOptions{} - -var topologyDryRunCmd = &cobra.Command{ - Use: "topology-dryrun", - Short: "Dry run changes to clusters that use managed topologies.", - Long: LongDesc(` - Provide the list of objects that would be created, modified and deleted when an input file is applied. - The input can be a file with a new/modified cluster, new/modified ClusterClass, new/modified templates. - Details about the objects that will be created and modified will be stored in a path passed using --output-directory. - - This command can also be run without a real cluster. In such cases, the input should contain all the objects needed - to perform a dry run. - - Note: Among all the objects in the input defaulting and validation will be performed only for Cluster - and ClusterClasses. All other objects in the input are expected to be valid and have default values. - `), - Example: Examples(` - # List all the objects that will be created and modified when creating a new cluster. - clusterctl alpha topology-dryrun -f new-cluster.yaml -o output/ - - # List the changes when modifying a cluster. - clusterctl alpha topology-dryrun -f modified-cluster.yaml -o output/ - - # List all the objects that will be created and modified when creating a new cluster along with a new ClusterClass. - clusterctl alpha topology-dryrun -f new-cluster-and-cluster-class.yaml -o output/ - - # List the clusters impacted by a ClusterClass change. 
- clusterctl alpha topology-dryrun -f modified-cluster-class.yaml -o output/ - - # List the changes to "cluster1" when a ClusterClass is changed. - clusterctl alpha topology-dryrun -f modified-cluster-class.yaml --cluster "cluster1" -o output/ - - # List the clusters and ClusterClasses impacted by a template change. - clusterctl alpha topology-dryrun -f modified-template.yaml -o output/ - `), - Args: cobra.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runTopologyDryRun() - }, -} - -func init() { - topologyDryRunCmd.Flags().StringVar(&initOpts.kubeconfig, "kubeconfig", "", - "Path to the kubeconfig for the management cluster. If unspecified, default discovery rules apply.") - topologyDryRunCmd.Flags().StringVar(&initOpts.kubeconfigContext, "kubeconfig-context", "", - "Context to be used within the kubeconfig file. If empty, current context will be used.") - - topologyDryRunCmd.Flags().StringVarP(&dr.file, "file", "f", "", "path to the file with new or modified resources to be applied; the file should not contain more than one Cluster or more than one ClusterClass") - topologyDryRunCmd.Flags().StringVarP(&dr.cluster, "cluster", "c", "", "name of the target cluster; this parameter is required when more than one cluster is affected") - topologyDryRunCmd.Flags().StringVarP(&dr.outDir, "output-directory", "o", "", "output directory to write details about created/modified objects") - - if err := topologyDryRunCmd.MarkFlagRequired("file"); err != nil { - panic(err) - } - if err := topologyDryRunCmd.MarkFlagRequired("output-directory"); err != nil { - panic(err) - } -} - -func runTopologyDryRun() error { - c, err := client.New(cfgFile) - if err != nil { - return err - } - - raw, err := os.ReadFile(dr.file) - if err != nil { - return errors.Wrap(err, "failed to read input file") - } - - objs, err := utilyaml.ToUnstructured(raw) - if err != nil { - return errors.Wrap(err, "failed to convert file to list of objects") - } - - out, err := 
c.DryRunTopology(client.DryRunOptions{ - Kubeconfig: client.Kubeconfig{Path: dr.kubeconfig, Context: dr.kubeconfigContext}, - Objs: convertToPtrSlice(objs), - Cluster: dr.cluster, - }) - if err != nil { - return err - } - return printDryRunOutput(out, dr.outDir) -} - -func printDryRunOutput(out *cluster.DryRunOutput, outdir string) error { - printAffectedClusterClasses(out) - printAffectedClusters(out) - if len(out.Clusters) == 0 { - // No affected clusters. Return early. - return nil - } - if out.ReconciledCluster.Name == "" { - fmt.Printf("No target cluster identified. Use --cluster to specify a target cluster to get detailed changes.") - } else { - printChangeSummary(out) - if err := writeOutputFiles(out, outdir); err != nil { - return errors.Wrap(err, "failed to write output files of target cluster changes") - } - } - fmt.Printf("\n") - return nil -} - -func printAffectedClusterClasses(out *cluster.DryRunOutput) { - if len(out.ClusterClasses) == 0 { - // If there are no affected ClusterClasses return early. Nothing more to do here. - fmt.Printf("No ClusterClasses will be affected by the changes.\n") - return - } - fmt.Printf("The following ClusterClasses will be affected by the changes:\n") - for _, cc := range out.ClusterClasses { - fmt.Printf(" * %s/%s\n", cc.Namespace, cc.Name) - } - fmt.Printf("\n") -} - -func printAffectedClusters(out *cluster.DryRunOutput) { - if len(out.Clusters) == 0 { - // if there are not affected Clusters return early. Nothing more to do here. 
- fmt.Printf("No Clusters will be affected by the changes.\n") - return - } - fmt.Printf("The following Clusters will be affected by the changes:\n") - for _, cluster := range out.Clusters { - fmt.Printf(" * %s/%s\n", cluster.Namespace, cluster.Name) - } - fmt.Printf("\n") -} - -func printChangeSummary(out *cluster.DryRunOutput) { - if len(out.Created) == 0 && len(out.Modified) == 0 && len(out.Deleted) == 0 { - fmt.Printf("No changes detected for Cluster %q.\n", fmt.Sprintf("%s/%s", out.ReconciledCluster.Namespace, out.ReconciledCluster.Name)) - return - } - - fmt.Printf("Changes for Cluster %q: \n", fmt.Sprintf("%s/%s", out.ReconciledCluster.Namespace, out.ReconciledCluster.Name)) - table := tablewriter.NewWriter(os.Stdout) - table.SetHeader([]string{"Namespace", "Kind", "Name", "Action"}) - table.SetCenterSeparator("") - table.SetColumnSeparator("") - table.SetRowSeparator("") - table.SetHeaderLine(false) - table.SetBorder(false) - - // Add the created rows. - sort.Slice(out.Created, func(i, j int) bool { return lessByKindAndName(out.Created[i], out.Created[j]) }) - for _, c := range out.Created { - addRow(table, c, "created", tablewriter.FgGreenColor) - } - - // Add the modified rows. - sort.Slice(out.Modified, func(i, j int) bool { return lessByKindAndName(out.Modified[i].After, out.Modified[j].After) }) - for _, m := range out.Modified { - addRow(table, m.After, "modified", tablewriter.FgYellowColor) - } - - // Add the deleted rows. 
- sort.Slice(out.Deleted, func(i, j int) bool { return lessByKindAndName(out.Deleted[i], out.Deleted[j]) }) - for _, d := range out.Deleted { - addRow(table, d, "deleted", tablewriter.FgRedColor) - } - fmt.Printf("\n") - table.Render() - fmt.Printf("\n") -} - -func writeOutputFiles(out *cluster.DryRunOutput, outDir string) error { - if _, err := os.Stat(outDir); os.IsNotExist(err) { - return fmt.Errorf("output directory %q does not exist", outDir) - } - - // Write created files - createdDir := path.Join(outDir, "created") - if err := os.MkdirAll(createdDir, 0750); err != nil { - return errors.Wrapf(err, "failed to create %q directory", createdDir) - } - for _, c := range out.Created { - yaml, err := utilyaml.FromUnstructured([]unstructured.Unstructured{*c}) - if err != nil { - return errors.Wrap(err, "failed to convert object to yaml") - } - fileName := fmt.Sprintf("%s_%s_%s.yaml", c.GetKind(), c.GetNamespace(), c.GetName()) - filePath := path.Join(createdDir, fileName) - if err := os.WriteFile(filePath, yaml, 0600); err != nil { - return errors.Wrapf(err, "failed to write yaml to file %q", filePath) - } - } - if len(out.Created) != 0 { - fmt.Printf("Created objects are written to directory %q\n", createdDir) - } - - // Write modified files - modifiedDir := path.Join(outDir, "modified") - if err := os.MkdirAll(modifiedDir, 0750); err != nil { - return errors.Wrapf(err, "failed to create %q directory", modifiedDir) - } - for _, m := range out.Modified { - // Write the modified object to file. - fileNameModified := fmt.Sprintf("%s_%s_%s.modified.yaml", m.After.GetKind(), m.After.GetNamespace(), m.After.GetName()) - filePathModified := path.Join(modifiedDir, fileNameModified) - if err := writeObjectToFile(filePathModified, m.After); err != nil { - return errors.Wrap(err, "failed to write modified object to file") - } - - // Write the original object to file. 
- fileNameOriginal := fmt.Sprintf("%s_%s_%s.original.yaml", m.Before.GetKind(), m.Before.GetNamespace(), m.Before.GetName()) - filePathOriginal := path.Join(modifiedDir, fileNameOriginal) - if err := writeObjectToFile(filePathOriginal, m.Before); err != nil { - return errors.Wrap(err, "failed to write original object to file") - } - - // Calculate the diff and write to a file. - patch := crclient.MergeFrom(m.Before) - diff, err := patch.Data(m.After) - if err != nil { - return errors.Wrapf(err, "failed to calculate diff of modified object %s/%s", m.After.GetNamespace(), m.After.GetName()) - } - patchFileName := fmt.Sprintf("%s_%s_%s.patch", m.After.GetKind(), m.After.GetNamespace(), m.After.GetName()) - patchFilePath := path.Join(modifiedDir, patchFileName) - if err := os.WriteFile(patchFilePath, diff, 0600); err != nil { - return errors.Wrapf(err, "failed to write diff to file %q", patchFilePath) - } - } - if len(out.Modified) != 0 { - fmt.Printf("Modified objects are written to directory %q\n", modifiedDir) - } - - return nil -} - -func writeObjectToFile(filePath string, obj *unstructured.Unstructured) error { - yaml, err := utilyaml.FromUnstructured([]unstructured.Unstructured{*obj}) - if err != nil { - return errors.Wrap(err, "failed to convert object to yaml") - } - if err := os.WriteFile(filePath, yaml, 0600); err != nil { - return errors.Wrapf(err, "failed to write yaml to file %q", filePath) - } - return nil -} - -func convertToPtrSlice(objs []unstructured.Unstructured) []*unstructured.Unstructured { - res := []*unstructured.Unstructured{} - for i := range objs { - res = append(res, &objs[i]) - } - return res -} - -func lessByKindAndName(a, b *unstructured.Unstructured) bool { - if a.GetKind() == b.GetKind() { - return a.GetName() < b.GetName() - } - return a.GetKind() < b.GetKind() -} - -func addRow(table *tablewriter.Table, o *unstructured.Unstructured, action string, actionColor int) { - table.Rich( - []string{ - o.GetNamespace(), - o.GetKind(), - 
o.GetName(), - action, - }, - []tablewriter.Colors{ - {}, {}, {}, {actionColor}, - }, - ) +var topologyCmd = &cobra.Command{ + Use: "topology", + Short: "Commands for ClusterClass based clusters.", + Long: `Commands for ClusterClass based clusters.`, } diff --git a/cmd/clusterctl/cmd/topology_plan.go b/cmd/clusterctl/cmd/topology_plan.go new file mode 100644 index 000000000000..22e39c612e37 --- /dev/null +++ b/cmd/clusterctl/cmd/topology_plan.go @@ -0,0 +1,331 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cmd + +import ( + "fmt" + "os" + "path" + "sort" + + "github.com/google/go-cmp/cmp" + "github.com/olekukonko/tablewriter" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + crclient "sigs.k8s.io/controller-runtime/pkg/client" + + "sigs.k8s.io/cluster-api/cmd/clusterctl/client" + "sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster" + utilyaml "sigs.k8s.io/cluster-api/util/yaml" +) + +type topologyPlanOptions struct { + kubeconfig string + kubeconfigContext string + files []string + cluster string + namespace string + outDir string +} + +var tp = &topologyPlanOptions{} + +var topologyPlanCmd = &cobra.Command{ + Use: "plan", + Short: "List the changes to clusters that use managed topologies for a given input.", + Long: LongDesc(` + Provide the list of objects that would be created, modified and deleted when an input file is applied. 
+ The input can be a file with a new/modified cluster, new/modified ClusterClass, new/modified templates. + Details about the objects that will be created and modified will be stored in a path passed using --output-directory. + + This command can also be run without a real cluster. In such cases, the input should contain all the objects needed. + + Note: Among all the objects in the input defaulting and validation will be performed only for Cluster + and ClusterClasses. All other objects in the input are expected to be valid and have default values. + `), + Example: Examples(` + # List all the objects that will be created and modified when creating a new cluster. + clusterctl alpha topology plan -f new-cluster.yaml -o output/ + + # List the changes when modifying a cluster. + clusterctl alpha topology plan -f modified-cluster.yaml -o output/ + + # List all the objects that will be created and modified when creating a new cluster along with a new ClusterClass. + clusterctl alpha topology plan -f new-cluster-and-cluster-class.yaml -o output/ + + # List the clusters impacted by a ClusterClass change. + clusterctl alpha topology plan -f modified-cluster-class.yaml -o output/ + + # List the changes to "cluster1" when a ClusterClass is changed. + clusterctl alpha topology plan -f modified-cluster-class.yaml --cluster "cluster1" -o output/ + + # List the clusters and ClusterClasses impacted by a template change. + clusterctl alpha topology plan -f modified-template.yaml -o output/ + `), + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runTopologyPlan() + }, +} + +func init() { + topologyPlanCmd.Flags().StringVar(&initOpts.kubeconfig, "kubeconfig", "", + "Path to the kubeconfig for the management cluster. If unspecified, default discovery rules apply.") + topologyPlanCmd.Flags().StringVar(&initOpts.kubeconfigContext, "kubeconfig-context", "", + "Context to be used within the kubeconfig file. 
If empty, current context will be used.") + + topologyPlanCmd.Flags().StringArrayVarP(&tp.files, "file", "f", nil, "path to the file with new or modified resources to be applied; the file should not contain more than one Cluster or more than one ClusterClass") + topologyPlanCmd.Flags().StringVarP(&tp.cluster, "cluster", "c", "", "name of the target cluster; this parameter is required when more than one cluster is affected") + topologyPlanCmd.Flags().StringVarP(&tp.namespace, "namespace", "n", "", "target namespace for the operation. If specified, it is used as default namespace for objects with missing namespace") + topologyPlanCmd.Flags().StringVarP(&tp.outDir, "output-directory", "o", "", "output directory to write details about created/modified objects") + + if err := topologyPlanCmd.MarkFlagRequired("file"); err != nil { + panic(err) + } + if err := topologyPlanCmd.MarkFlagRequired("output-directory"); err != nil { + panic(err) + } + + topologyCmd.AddCommand(topologyPlanCmd) +} + +func runTopologyPlan() error { + c, err := client.New(cfgFile) + if err != nil { + return err + } + + objs := []unstructured.Unstructured{} + for _, f := range tp.files { + raw, err := os.ReadFile(f) //nolint:gosec + if err != nil { + return errors.Wrapf(err, "failed to read input file %q", f) + } + objects, err := utilyaml.ToUnstructured(raw) + if err != nil { + return errors.Wrapf(err, "failed to convert file %q to list of objects", f) + } + objs = append(objs, objects...) 
+	}
+
+	out, err := c.TopologyPlan(client.TopologyPlanOptions{
+		Kubeconfig: client.Kubeconfig{Path: tp.kubeconfig, Context: tp.kubeconfigContext},
+		Objs:       convertToPtrSlice(objs),
+		Cluster:    tp.cluster,
+		Namespace:  tp.namespace,
+	})
+	if err != nil {
+		return err
+	}
+	return printTopologyPlanOutput(out, tp.outDir)
+}
+
+func printTopologyPlanOutput(out *cluster.TopologyPlanOutput, outdir string) error { // prints affected objects and, for a single target cluster, writes detailed changes to outdir
+	printAffectedClusterClasses(out)
+	printAffectedClusters(out)
+	if len(out.Clusters) == 0 {
+		// No affected clusters. Return early.
+		return nil
+	}
+	if out.ReconciledCluster == nil {
+		fmt.Printf("No target cluster identified. Use --cluster to specify a target cluster to get detailed changes.") // closing newline is printed below
+	} else {
+		printChangeSummary(out)
+		if err := writeOutputFiles(out, outdir); err != nil {
+			return errors.Wrap(err, "failed to write output files of target cluster changes")
+		}
+	}
+	fmt.Printf("\n")
+	return nil
+}
+
+func printAffectedClusterClasses(out *cluster.TopologyPlanOutput) { // lists the ClusterClasses affected by the planned changes, one per line as namespace/name
+	if len(out.ClusterClasses) == 0 {
+		// If there are no affected ClusterClasses return early. Nothing more to do here.
+		fmt.Printf("No ClusterClasses will be affected by the changes.\n")
+		return
+	}
+	fmt.Printf("The following ClusterClasses will be affected by the changes:\n")
+	for _, cc := range out.ClusterClasses {
+		fmt.Printf(" * %s/%s\n", cc.Namespace, cc.Name)
+	}
+	fmt.Printf("\n")
+}
+
+func printAffectedClusters(out *cluster.TopologyPlanOutput) { // lists the Clusters affected by the planned changes, one per line as namespace/name
+	if len(out.Clusters) == 0 {
+		// If there are no affected Clusters return early. Nothing more to do here.
+		fmt.Printf("No Clusters will be affected by the changes.\n")
+		return
+	}
+	fmt.Printf("The following Clusters will be affected by the changes:\n")
+	for _, cluster := range out.Clusters {
+		fmt.Printf(" * %s/%s\n", cluster.Namespace, cluster.Name)
+	}
+	fmt.Printf("\n")
+}
+
+func printChangeSummary(out *cluster.TopologyPlanOutput) { // renders a created/modified/deleted table for the target cluster; caller guarantees ReconciledCluster != nil
+	if len(out.Created) == 0 && len(out.Modified) == 0 && len(out.Deleted) == 0 {
+		fmt.Printf("No changes detected for Cluster %q.\n", fmt.Sprintf("%s/%s", out.ReconciledCluster.Namespace, out.ReconciledCluster.Name))
+		return
+	}
+
+	fmt.Printf("Changes for Cluster %q: \n", fmt.Sprintf("%s/%s", out.ReconciledCluster.Namespace, out.ReconciledCluster.Name))
+	table := tablewriter.NewWriter(os.Stdout) // borderless, separator-free table on stdout
+	table.SetHeader([]string{"Namespace", "Kind", "Name", "Action"})
+	table.SetHeaderAlignment(tablewriter.ALIGN_LEFT)
+	table.SetCenterSeparator("")
+	table.SetColumnSeparator("")
+	table.SetRowSeparator("")
+	table.SetHeaderLine(false)
+	table.SetBorder(false)
+
+	// Add the created rows, sorted by kind then name for deterministic output.
+	sort.Slice(out.Created, func(i, j int) bool { return lessByKindAndName(out.Created[i], out.Created[j]) })
+	for _, c := range out.Created {
+		addRow(table, c, "created", tablewriter.FgGreenColor)
+	}
+
+	// Add the modified rows, sorted (by the post-change object) by kind then name.
+	sort.Slice(out.Modified, func(i, j int) bool { return lessByKindAndName(out.Modified[i].After, out.Modified[j].After) })
+	for _, m := range out.Modified {
+		addRow(table, m.After, "modified", tablewriter.FgYellowColor)
+	}
+
+	// Add the deleted rows, sorted by kind then name.
+	sort.Slice(out.Deleted, func(i, j int) bool { return lessByKindAndName(out.Deleted[i], out.Deleted[j]) })
+	for _, d := range out.Deleted {
+		addRow(table, d, "deleted", tablewriter.FgRedColor)
+	}
+	fmt.Printf("\n")
+	table.Render()
+	fmt.Printf("\n")
+}
+
+func writeOutputFiles(out *cluster.TopologyPlanOutput, outDir string) error { // writes created/modified objects (yaml, jsonpatch, diff) under outDir; outDir must already exist
+	if _, err := os.Stat(outDir); os.IsNotExist(err) {
+		return fmt.Errorf("output directory %q does not exist", outDir)
+	}
+
+	// Write created objects, one yaml file per object, into <outDir>/created.
+	createdDir := path.Join(outDir, "created")
+	if err := os.MkdirAll(createdDir, 0750); err != nil {
+		return errors.Wrapf(err, "failed to create %q directory", createdDir)
+	}
+	for _, c := range out.Created {
+		yaml, err := utilyaml.FromUnstructured([]unstructured.Unstructured{*c})
+		if err != nil {
+			return errors.Wrap(err, "failed to convert object to yaml")
+		}
+		fileName := fmt.Sprintf("%s_%s_%s.yaml", c.GetKind(), c.GetNamespace(), c.GetName()) // file name: <Kind>_<namespace>_<name>.yaml
+		filePath := path.Join(createdDir, fileName)
+		if err := os.WriteFile(filePath, yaml, 0600); err != nil {
+			return errors.Wrapf(err, "failed to write yaml to file %q", filePath)
+		}
+	}
+	if len(out.Created) != 0 {
+		fmt.Printf("Created objects are written to directory %q\n", createdDir)
+	}
+
+	// Write modified objects (before/after yaml plus patch and diff) into <outDir>/modified.
+	modifiedDir := path.Join(outDir, "modified")
+	if err := os.MkdirAll(modifiedDir, 0750); err != nil {
+		return errors.Wrapf(err, "failed to create %q directory", modifiedDir)
+	}
+	for _, m := range out.Modified {
+		// Write the modified object to file.
+		fileNameModified := fmt.Sprintf("%s_%s_%s.modified.yaml", m.After.GetKind(), m.After.GetNamespace(), m.After.GetName())
+		filePathModified := path.Join(modifiedDir, fileNameModified)
+		if err := writeObjectToFile(filePathModified, m.After); err != nil {
+			return errors.Wrap(err, "failed to write modified object to file")
+		}
+
+		// Write the original object to file.
+		fileNameOriginal := fmt.Sprintf("%s_%s_%s.original.yaml", m.Before.GetKind(), m.Before.GetNamespace(), m.Before.GetName())
+		filePathOriginal := path.Join(modifiedDir, fileNameOriginal)
+		if err := writeObjectToFile(filePathOriginal, m.Before); err != nil {
+			return errors.Wrap(err, "failed to write original object to file")
+		}
+
+		// Calculate the jsonpatch (merge patch from Before to After) and write to a file.
+		patch := crclient.MergeFrom(m.Before)
+		jsonPatch, err := patch.Data(m.After)
+		if err != nil {
+			return errors.Wrapf(err, "failed to calculate jsonpatch of modified object %s/%s", m.After.GetNamespace(), m.After.GetName())
+		}
+		patchFileName := fmt.Sprintf("%s_%s_%s.jsonpatch", m.After.GetKind(), m.After.GetNamespace(), m.After.GetName())
+		patchFilePath := path.Join(modifiedDir, patchFileName)
+		if err := os.WriteFile(patchFilePath, jsonPatch, 0600); err != nil {
+			return errors.Wrapf(err, "failed to write jsonpatch to file %q", patchFilePath)
+		}
+
+		// Calculate the human-readable diff and write to a file.
+		diff := cmp.Diff(m.Before, m.After)
+		diffFileName := fmt.Sprintf("%s_%s_%s.diff", m.After.GetKind(), m.After.GetNamespace(), m.After.GetName())
+		diffFilePath := path.Join(modifiedDir, diffFileName)
+		if err := os.WriteFile(diffFilePath, []byte(diff), 0600); err != nil {
+			return errors.Wrapf(err, "failed to write diff to file %q", diffFilePath)
+		}
+	}
+	if len(out.Modified) != 0 {
+		fmt.Printf("Modified objects are written to directory %q\n", modifiedDir)
+	}
+
+	return nil
+}
+
+func writeObjectToFile(filePath string, obj *unstructured.Unstructured) error { // serializes a single object to yaml and writes it at filePath (mode 0600)
+	yaml, err := utilyaml.FromUnstructured([]unstructured.Unstructured{*obj})
+	if err != nil {
+		return errors.Wrap(err, "failed to convert object to yaml")
+	}
+	if err := os.WriteFile(filePath, yaml, 0600); err != nil {
+		return errors.Wrapf(err, "failed to write yaml to file %q", filePath)
+	}
+	return nil
+}
+
+func convertToPtrSlice(objs []unstructured.Unstructured) []*unstructured.Unstructured { // returns pointers to the elements of objs (no copies)
+	res := []*unstructured.Unstructured{}
+	for i := range objs {
+		res = append(res, &objs[i]) // index, not range value, so pointers reference the original elements
+	}
+	return res
+}
+
+func lessByKindAndName(a, b *unstructured.Unstructured) bool { // sort order: Kind first, Name as tie-breaker
+	if a.GetKind() == b.GetKind() {
+		return a.GetName() < b.GetName()
+	}
+	return a.GetKind() < b.GetKind()
+}
+
+func addRow(table *tablewriter.Table, o *unstructured.Unstructured, action string, actionColor int) { // appends one row, coloring only the action column
+	table.Rich(
+		[]string{
+			o.GetNamespace(),
+			o.GetKind(),
+			o.GetName(),
+			action,
+		},
+		[]tablewriter.Colors{
+			{}, {}, {}, {actionColor},
+		},
+	)
+}