Skip to content

Commit

Permalink
drpc: set ProgressionWaitOnUserToCleanUp along with placement update
Browse files Browse the repository at this point in the history
For discovered apps, we want the user to perform the cleanup of the
workload. We should advertise that progression to them at the same time
that we ask OCM/ACM to perform the cleanup.

Signed-off-by: Raghavendra Talur <[email protected]>
  • Loading branch information
raghavendra-talur committed Nov 11, 2024
1 parent e0e3ab3 commit 520eabb
Showing 1 changed file with 48 additions and 19 deletions.
67 changes: 48 additions & 19 deletions internal/controller/drplacementcontrol.go
Original file line number Diff line number Diff line change
Expand Up @@ -373,6 +373,13 @@ func (d *DRPCInstance) RunFailover() (bool, error) {
return !done, nil
}

err := d.ensurePlacementAndUserCleanup(failoverCluster)
if err != nil {
d.log.Info("Failed to ensure placement and user cleanup")

return !done, err
}

return d.ensureActionCompleted(failoverCluster)
} else if yes, err := d.mwExistsAndPlacementUpdated(failoverCluster); yes || err != nil {
// We have to wait for the VRG to appear on the failoverCluster or
Expand Down Expand Up @@ -863,6 +870,11 @@ func (d *DRPCInstance) RunRelocate() (bool, error) {
addOrUpdateCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
metav1.ConditionTrue, string(d.instance.Status.Phase), "Completed")

err := d.ensurePlacement(preferredCluster)
if err != nil {
return !done, err
}

return d.ensureActionCompleted(preferredCluster)
}

Expand Down Expand Up @@ -896,6 +908,28 @@ func (d *DRPCInstance) RunRelocate() (bool, error) {
return d.relocate(preferredCluster, preferredClusterNamespace, rmn.Relocating)
}

// ensurePlacementAndUserCleanup moves the user placement to srcCluster and
// then advertises the matching cleanup progression on the DRPC status.
//
// Updating the placement is the point at which ACM starts rolling the
// workload out to the target cluster and deleting it from the original
// cluster. Discovered apps (signalled by a non-empty ProtectedNamespaces
// list) must be cleaned up by the user, so for them the progression is set
// to ProgressionWaitOnUserToCleanUp; for managed apps Ramen performs the
// cleanup itself and the progression is set to ProgressionCleaningUp.
func (d *DRPCInstance) ensurePlacementAndUserCleanup(srcCluster string) error {
	if err := d.ensurePlacement(srcCluster); err != nil {
		return err
	}

	// Default to the managed-app path; switch to the user-driven
	// progression only when protected namespaces are present.
	progression := rmn.ProgressionCleaningUp

	if ns := d.instance.Spec.ProtectedNamespaces; ns != nil && len(*ns) > 0 {
		progression = rmn.ProgressionWaitOnUserToCleanUp
	}

	d.setProgression(progression)

	return nil
}

func (d *DRPCInstance) ensureActionCompleted(srcCluster string) (bool, error) {
const done = true

Expand All @@ -904,13 +938,6 @@ func (d *DRPCInstance) ensureActionCompleted(srcCluster string) (bool, error) {
return !done, err
}

err = d.ensurePlacement(srcCluster)
if err != nil {
return !done, err
}

d.setProgression(rmn.ProgressionCleaningUp)

// Cleanup and setup VolSync if enabled
err = d.ensureCleanupAndVolSyncReplicationSetup(srcCluster)
if err != nil {
Expand Down Expand Up @@ -974,13 +1001,25 @@ func (d *DRPCInstance) quiesceAndRunFinalSync(homeCluster string) (bool, error)
addOrUpdateCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
d.getConditionStatusForTypeAvailable(), string(d.instance.Status.Phase), "Starting quiescing for relocation")

// clear current user PlacementRule's decision
d.setProgression(rmn.ProgressionClearingPlacement)
// We are going to clear the placement, this is when ACM will start
// deleting the workloads from the current cluster. In case of
// discovered apps, we have to let the user know that they need to
// clean up the apps from the current cluster. So set the progression
// to wait on user to clean up. For non-discovered apps, we can set the
// progression to clearing placement.
if d.instance.Spec.ProtectedNamespaces != nil &&
len(*d.instance.Spec.ProtectedNamespaces) > 0 {
d.setProgression(rmn.ProgressionWaitOnUserToCleanUp)
} else {
// clear current user PlacementRule's decision
d.setProgression(rmn.ProgressionClearingPlacement)
}

err := d.clearUserPlacementRuleStatus()
if err != nil {
return !done, err
}

}

Check failure on line 1023 in internal/controller/drplacementcontrol.go

View workflow job for this annotation

GitHub Actions / Golangci Lint (.)

block should not end with a whitespace (or comment) (wsl)

// Ensure final sync has been taken
Expand All @@ -990,8 +1029,6 @@ func (d *DRPCInstance) quiesceAndRunFinalSync(homeCluster string) (bool, error)
}

if !result {
d.setProgression(rmn.ProgressionRunningFinalSync)

return !done, nil
}

Expand Down Expand Up @@ -2048,10 +2085,6 @@ func (d *DRPCInstance) ensureVRGManifestWorkOnClusterDeleted(clusterName string)

d.log.Info("Request not complete yet", "cluster", clusterName)

if d.instance.Spec.ProtectedNamespaces != nil && len(*d.instance.Spec.ProtectedNamespaces) > 0 {
d.setProgression(rmn.ProgressionWaitOnUserToCleanUp)
}

// IF we get here, either the VRG has not transitioned to secondary (yet) or delete didn't succeed. In either cases,
// we need to make sure that the VRG object is deleted. IOW, we still have to wait
return !done, nil
Expand All @@ -2067,10 +2100,6 @@ func (d *DRPCInstance) ensureVRGIsSecondaryEverywhere(clusterToSkip string) bool
continue
}

if d.instance.Spec.ProtectedNamespaces != nil && len(*d.instance.Spec.ProtectedNamespaces) > 0 {
d.setProgression(rmn.ProgressionWaitOnUserToCleanUp)
}

if !d.ensureVRGIsSecondaryOnCluster(clusterName) {
d.log.Info("Still waiting for VRG to transition to secondary", "cluster", clusterName)

Expand Down

0 comments on commit 520eabb

Please sign in to comment.