Set PeerReady condition status to false upon relocation initiation
Previously, the PeerReady condition status was set to false only after the relocation setup between
the source and target clusters had completed, just before entering the clean-up phase. Until then,
the condition still read true, misleadingly indicating that the peer was ready for a new action.
Consequently, users could initiate another action, assuming that a true PeerReady condition meant
the peer was ready for the next step.

To rectify this issue, this commit ensures that as soon as the relocation action is initiated,
the PeerReady condition status is immediately switched to false. This change accurately reflects
the actual readiness of the peer and prevents additional actions from being initiated prematurely.

Signed-off-by: Benamar Mekhissi <[email protected]>
Benamar Mekhissi committed Aug 3, 2023
1 parent e9a78fc commit edc6685
Showing 1 changed file with 4 additions and 4 deletions.
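
Editor's note: to see why the stale status mattered, consider a caller that gates a new DR action
on the PeerReady condition. The sketch below is illustrative, not code from this repository;
canInitiateNewAction is a hypothetical helper, and the literal "PeerReady" condition type stands
in for rmn.ConditionPeerReady.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// canInitiateNewAction is a hypothetical gate: a new failover/relocate is
// allowed only while the PeerReady condition is true.
func canInitiateNewAction(conditions []metav1.Condition) bool {
	// IsStatusConditionTrue returns false when the condition is absent,
	// which is the safe default for an unknown peer state.
	return meta.IsStatusConditionTrue(conditions, "PeerReady")
}

func main() {
	// Before this commit: a relocation already in flight could still carry
	// PeerReady=True from a prior state, a misleading green light.
	stale := []metav1.Condition{{Type: "PeerReady", Status: metav1.ConditionTrue}}
	fmt.Println(canInitiateNewAction(stale)) // true

	// After this commit: PeerReady flips to False the moment relocation starts.
	fresh := []metav1.Condition{{Type: "PeerReady", Status: metav1.ConditionFalse}}
	fmt.Println(canInitiateNewAction(fresh)) // false
}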
8 changes: 4 additions & 4 deletions controllers/drplacementcontrol.go
@@ -741,7 +741,7 @@ func (d *DRPCInstance) RunRelocate() (bool, error) {
 		return !done, fmt.Errorf(errMsg)
 	}
 
-	if !d.validatePeerReady() {
+	if d.getLastDRState() != rmn.Relocating && !d.validatePeerReady() {
 		return !done, fmt.Errorf("clean up on secondaries pending (%+v)", d.instance)
 	}
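
Editor's note on the hunk above: since PeerReady now goes false as soon as relocation starts,
re-running validatePeerReady on every reconcile of an in-flight relocation would wedge it on its
own condition; the added getLastDRState() != rmn.Relocating term skips the check once relocation
is underway. A self-contained sketch of that logic (simplified stand-in types, not repo code):

package main

import "fmt"

type DRState string

const Relocating DRState = "Relocating"

// shouldBlock mirrors the reworked guard: block a relocate request only when
// we are NOT already relocating and the peer is not ready for a fresh action.
func shouldBlock(lastState DRState, peerReady bool) bool {
	return lastState != Relocating && !peerReady
}

func main() {
	// In-flight relocation: this controller itself just set PeerReady=false,
	// so reconciliation must proceed rather than block on its own condition.
	fmt.Println(shouldBlock(Relocating, false)) // false

	// Fresh relocate request while secondary cleanup is still pending: blocked.
	fmt.Println(shouldBlock("Deployed", false)) // true
}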

@@ -1048,6 +1048,9 @@ func (d *DRPCInstance) relocate(preferredCluster, preferredClusterNamespace stri
 	d.setDRState(drState)
 	addOrUpdateCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
 		d.getConditionStatusForTypeAvailable(), string(d.instance.Status.Phase), "Starting relocation")
+	addOrUpdateCondition(&d.instance.Status.Conditions, rmn.ConditionPeerReady, d.instance.Generation,
+		metav1.ConditionFalse, rmn.ReasonNotStarted,
+		fmt.Sprintf("Started relocation to cluster %q", preferredCluster))
 
 	// Setting up relocation ensures that all VRGs in all managed cluster are secondaries
 	err := d.setupRelocation(preferredCluster)
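
Editor's note: addOrUpdateCondition is internal to this repository. Assuming it behaves like
apimachinery's meta.SetStatusCondition (an assumption, not verified against the repo), the three
added lines above are roughly equivalent to the following; the literal strings and values stand
in for the rmn constants and DRPC fields named in the diff.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	var conditions []metav1.Condition  // stands in for d.instance.Status.Conditions
	preferredCluster := "cluster-east" // placeholder value

	// SetStatusCondition appends the condition if absent, otherwise updates its
	// fields in place (refreshing LastTransitionTime when the status changes).
	meta.SetStatusCondition(&conditions, metav1.Condition{
		Type:               "PeerReady", // stands in for rmn.ConditionPeerReady
		Status:             metav1.ConditionFalse,
		ObservedGeneration: 1,            // stands in for d.instance.Generation
		Reason:             "NotStarted", // stands in for rmn.ReasonNotStarted
		Message:            fmt.Sprintf("Started relocation to cluster %q", preferredCluster),
	})

	fmt.Printf("%+v\n", conditions[0])
}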
@@ -1069,9 +1072,6 @@ func (d *DRPCInstance) relocate(preferredCluster, preferredClusterNamespace stri
 	d.setDRState(rmn.Relocated)
 	addOrUpdateCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
 		d.getConditionStatusForTypeAvailable(), string(d.instance.Status.Phase), "Completed")
-	addOrUpdateCondition(&d.instance.Status.Conditions, rmn.ConditionPeerReady, d.instance.Generation,
-		metav1.ConditionFalse, rmn.ReasonNotStarted,
-		fmt.Sprintf("Started relocation to cluster %q", preferredCluster))
 
 	d.log.Info("Relocation completed", "State", d.getLastDRState())

