From 43e9c56cf13472c581ca4ebe48c35cfe5b5dd6b6 Mon Sep 17 00:00:00 2001
From: mkatari
Date: Fri, 6 Dec 2024 03:46:12 +0530
Subject: [PATCH] [Doc] Correct Ceph migration steps

Update and correct a few Ceph RBD migration steps.
---
 ...proc_migrating-mgr-from-controller-nodes.adoc |  4 ++--
 ...ing-mon-from-controller-nodes-drain-host.adoc | 16 ++++++++--------
 ...rating-mon-from-controller-nodes-network.adoc | 10 +++++++++-
 ...g-mon-from-controller-nodes-redeploy-mon.adoc |  6 +++---
 4 files changed, 22 insertions(+), 14 deletions(-)

diff --git a/docs_user/modules/proc_migrating-mgr-from-controller-nodes.adoc b/docs_user/modules/proc_migrating-mgr-from-controller-nodes.adoc
index d0b9a947a..912f7988f 100644
--- a/docs_user/modules/proc_migrating-mgr-from-controller-nodes.adoc
+++ b/docs_user/modules/proc_migrating-mgr-from-controller-nodes.adoc
@@ -53,10 +53,10 @@ $ sudo nft list ruleset | grep ceph_mgr
 label to the target node:
 +
 ----
-$ ceph orch host label add <target_node> mgr; done
+$ sudo cephadm shell -- ceph orch host label add <target_node> mgr
 ----
 
-. Repeat steps 1-3 for each target node that hosts a Ceph Manager daemon.
 . Repeat steps 1-7 for each target node that hosts a Ceph Manager daemon.
 . Get the Ceph Manager spec:
 +
diff --git a/docs_user/modules/proc_migrating-mon-from-controller-nodes-drain-host.adoc b/docs_user/modules/proc_migrating-mon-from-controller-nodes-drain-host.adoc
index 20d17e05c..dd1ccf5dc 100644
--- a/docs_user/modules/proc_migrating-mon-from-controller-nodes-drain-host.adoc
+++ b/docs_user/modules/proc_migrating-mon-from-controller-nodes-drain-host.adoc
@@ -2,7 +2,7 @@
 
 = Draining the source node
 
-Drain the existing Controller nodes and remove the source node host from the {CephCluster} cluster.
+Drain the source node and remove the source node host from the {CephCluster} cluster.
 
 .Procedure
 
@@ -19,7 +19,7 @@ $ sudo cp -R /etc/ceph $HOME/ceph_client_backup
 $ sudo cephadm shell -- ceph mgr stat
 ----
 
-. Fail the `ceph-mgr` if it is active on the source node or target node:
+. Fail the `ceph-mgr` if it is active on the source node:
 +
 ----
 $ sudo cephadm shell -- ceph mgr fail
@@ -31,28 +31,28 @@ $ sudo cephadm shell -- ceph mgr fail
 +
 ----
 $ for label in mon mgr _admin; do
-    sudo cephadm shell -- ceph orch host rm label <source_node> $label;
+    sudo cephadm shell -- ceph orch host label rm <source_node> $label;
 done
 ----
 +
 * Replace `<source_node>` with the hostname of the source node.
 
-. Remove the running Ceph Monitor daemon from the source node:
+. (Optional) Remove the Ceph Monitor daemon from the source node if it is still running:
 +
 ----
-$ sudo cephadm shell -- ceph orch daemon rm mon.<source_node> --force"
+$ sudo cephadm shell -- ceph orch daemon rm mon.<source_node> --force
 ----
 
-. Drain the source node:
+. Drain the source node to remove any leftover daemons:
 +
 ----
-$ sudo cephadm shell -- ceph drain <source_node>
+$ sudo cephadm shell -- ceph orch host drain <source_node>
 ----
 
 . Remove the source node host from the {CephCluster} cluster:
 +
 ----
-$ sudo cephadm shell -- ceph orch host rm <source_node> --force"
+$ sudo cephadm shell -- ceph orch host rm <source_node> --force
 ----
 +
 [NOTE]
diff --git a/docs_user/modules/proc_migrating-mon-from-controller-nodes-network.adoc b/docs_user/modules/proc_migrating-mon-from-controller-nodes-network.adoc
index d6877938b..bd02d50a6 100644
--- a/docs_user/modules/proc_migrating-mon-from-controller-nodes-network.adoc
+++ b/docs_user/modules/proc_migrating-mon-from-controller-nodes-network.adoc
@@ -11,12 +11,20 @@ IP address migration assumes that the target nodes are originally deployed by
 // w/ an EDPM node that has already been adopted.
 
 .Procedure
-. Get the original Ceph Monitor IP address from the existing `/etc/ceph/ceph.conf` file on the `mon_host` line, for example:
+. Get the original Ceph Monitor IP addresses from the `$HOME/ceph_client_backup/ceph.conf` file on the `mon_host` line, for example:
 +
 ----
 mon_host = [v2:172.17.3.60:3300/0,v1:172.17.3.60:6789/0] [v2:172.17.3.29:3300/0,v1:172.17.3.29:6789/0] [v2:172.17.3.53:3300/0,v1:172.17.3.53:6789/0]
 ----
 
+. Match the IP addresses retrieved in the previous step with the storage network IP addresses on the source node, and find the Ceph Monitor IP address:
++
+----
+[tripleo-admin@controller-0 ~]$ ip -o -4 a | grep 172.17.3
+9: vlan30 inet 172.17.3.60/24 brd 172.17.3.255 scope global vlan30\ valid_lft forever preferred_lft forever
+9: vlan30 inet 172.17.3.13/32 brd 172.17.3.255 scope global vlan30\ valid_lft forever preferred_lft forever
+----
+
 . Confirm that the Ceph Monitor IP address is present in the `os-net-config` configuration that is located in the `/etc/os-net-config` directory on the source node:
 +
 ----
diff --git a/docs_user/modules/proc_migrating-mon-from-controller-nodes-redeploy-mon.adoc b/docs_user/modules/proc_migrating-mon-from-controller-nodes-redeploy-mon.adoc
index d5da10db5..d3dd69cbb 100644
--- a/docs_user/modules/proc_migrating-mon-from-controller-nodes-redeploy-mon.adoc
+++ b/docs_user/modules/proc_migrating-mon-from-controller-nodes-redeploy-mon.adoc
@@ -39,7 +39,7 @@ The Ceph Monitor daemons are marked as `unmanaged`, and you can now redeploy the
 . Delete the existing Ceph Monitor on the target node:
 +
 ----
-$ sudo cephadm shell -- ceph orch daemon add rm mon.<target_node> --force
+$ sudo cephadm shell -- ceph orch daemon rm mon.<target_node> --force
 ----
 +
 * Replace `<target_node>` with the hostname of the target node that is included in the {Ceph} cluster.
@@ -84,7 +84,7 @@ The new Ceph Monitor runs on the target node with the original IP address.
 . Identify the running `mgr`:
 +
 ----
-$ sudo cephadm shell -- mgr stat
+$ sudo cephadm shell -- ceph mgr stat
 ----
 +
 . Refresh the Ceph Manager information by force-failing it:
@@ -101,5 +101,5 @@ $ sudo cephadm shell -- ceph orch reconfig osd.default_drive_group
 
 .Next steps
 
-Repeat the procedure for each node that you want to decommission.
+Repeat the procedure, starting from xref:draining-the-source-node_{context}[Draining the source node], for each node that you want to decommission. Then proceed to xref:verifying-the-cluster-after-ceph-mon-migration_{context}[Verifying the {CephCluster} cluster after Ceph Monitor migration].