Rename function for improved clarity
Signed-off-by: Benamar Mekhissi <[email protected]>
Benamar Mekhissi authored and ShyamsundarR committed Aug 3, 2023
1 parent 572c9dc commit 5ee0ddb
Showing 2 changed files with 60 additions and 66 deletions.
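The gist of the change: the DRPCInstance method wrapper d.setDRPCCondition and the exported helper SetDRPCStatusCondition are folded into a single package-level helper, addOrUpdateCondition, with the same parameters and the same change-detection behavior. A minimal before/after sketch of one representative call site, mirroring the diff below (the condition arguments shown are just one example):

// Before: method wrapper on DRPCInstance forwarding to the exported helper.
d.setDRPCCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
	d.getConditionStatusForTypeAvailable(), string(d.instance.Status.Phase), "Completed")

// After: every caller invokes the package-level helper directly.
addOrUpdateCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
	d.getConditionStatusForTypeAvailable(), string(d.instance.Status.Phase), "Completed")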
66 changes: 30 additions & 36 deletions controllers/drplacementcontrol.go
@@ -110,7 +110,7 @@ func (d *DRPCInstance) RunInitialDeployment() (bool, error) {

if homeCluster == "" {
err := fmt.Errorf("PreferredCluster not set. Placement (%v)", d.userPlacement)
d.setDRPCCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
addOrUpdateCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
d.getConditionStatusForTypeAvailable(), string(d.instance.Status.Phase), err.Error())
// needStatusUpdate is not set. Still better to capture the event to report later
rmnutil.ReportIfNotPresent(d.reconciler.eventRecorder, d.instance, corev1.EventTypeWarning,
@@ -140,7 +140,7 @@ func (d *DRPCInstance) RunInitialDeployment() (bool, error) {

_, err := d.startDeploying(homeCluster, homeClusterNamespace)
if err != nil {
d.setDRPCCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
addOrUpdateCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
d.getConditionStatusForTypeAvailable(), string(d.instance.Status.Phase), err.Error())

return !done, err
@@ -327,7 +327,7 @@ func (d *DRPCInstance) RunFailover() (bool, error) {
// We are done if empty
if d.instance.Spec.FailoverCluster == "" {
msg := "failover cluster not set. FailoverCluster is a mandatory field"
d.setDRPCCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
addOrUpdateCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
d.getConditionStatusForTypeAvailable(), string(d.instance.Status.Phase), msg)

return done, fmt.Errorf(msg)
@@ -338,7 +338,7 @@ func (d *DRPCInstance) RunFailover() (bool, error) {
// IFF VRG exists and it is primary in the failoverCluster, then clean up and set up VolSync if needed.
if d.vrgExistsAndPrimary(failoverCluster) {
d.setDRState(rmn.FailedOver)
d.setDRPCCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
addOrUpdateCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
metav1.ConditionTrue, string(d.instance.Status.Phase), "Completed")

// Make sure VolRep 'Data' and VolSync 'setup' conditions are ready
@@ -392,9 +392,9 @@ func (d *DRPCInstance) switchToFailoverCluster() (bool, error) {
const done = true
// Make sure we record the state that we are failing over
d.setDRState(rmn.FailingOver)
d.setDRPCCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
addOrUpdateCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
d.getConditionStatusForTypeAvailable(), string(d.instance.Status.Phase), "Starting failover")
d.setDRPCCondition(&d.instance.Status.Conditions, rmn.ConditionPeerReady, d.instance.Generation,
addOrUpdateCondition(&d.instance.Status.Conditions, rmn.ConditionPeerReady, d.instance.Generation,
metav1.ConditionFalse, rmn.ReasonNotStarted,
fmt.Sprintf("Started failover to cluster %q", d.instance.Spec.FailoverCluster))
d.setProgression(rmn.ProgressionCheckingFailoverPrequisites)
@@ -404,7 +404,7 @@ func (d *DRPCInstance) switchToFailoverCluster() (bool, error) {
if curHomeCluster == "" {
msg := "Invalid Failover request. Current home cluster does not exists"
d.log.Info(msg)
d.setDRPCCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
addOrUpdateCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
d.getConditionStatusForTypeAvailable(), string(d.instance.Status.Phase), msg)

err := fmt.Errorf("failover requested on invalid state %v", d.instance.Status)
@@ -424,7 +424,7 @@ func (d *DRPCInstance) switchToFailoverCluster() (bool, error) {

err := d.switchToCluster(newHomeCluster, "")
if err != nil {
d.setDRPCCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
addOrUpdateCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
d.getConditionStatusForTypeAvailable(), string(d.instance.Status.Phase), err.Error())
rmnutil.ReportIfNotPresent(d.reconciler.eventRecorder, d.instance, corev1.EventTypeWarning,
rmnutil.EventReasonSwitchFailed, err.Error())
@@ -433,7 +433,7 @@ func (d *DRPCInstance) switchToFailoverCluster() (bool, error) {
}

d.setDRState(rmn.FailedOver)
d.setDRPCCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
addOrUpdateCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
d.getConditionStatusForTypeAvailable(), string(d.instance.Status.Phase), "Completed")
d.log.Info("Failover completed", "state", d.getLastDRState())

@@ -493,7 +493,7 @@ func (d *DRPCInstance) checkFailoverPrerequisites(curHomeCluster string) (bool,
rmnutil.EventReasonSwitchFailed, err.Error())
}

d.setDRPCCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
addOrUpdateCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
d.getConditionStatusForTypeAvailable(), string(d.instance.Status.Phase), msg)

return met, err
@@ -714,7 +714,7 @@ func (d *DRPCInstance) RunRelocate() (bool, error) {
// Before relocating to the preferredCluster, do a quick validation and select the current preferred cluster.
curHomeCluster, err := d.validateAndSelectCurrentPrimary(preferredCluster)
if err != nil {
d.setDRPCCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
addOrUpdateCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
d.getConditionStatusForTypeAvailable(), string(d.instance.Status.Phase), err.Error())

return !done, err
@@ -723,7 +723,7 @@ func (d *DRPCInstance) RunRelocate() (bool, error) {
// We are done if already relocated; if there were secondaries they are cleaned up above
if curHomeCluster != "" && d.vrgExistsAndPrimary(preferredCluster) {
d.setDRState(rmn.Relocated)
d.setDRPCCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
addOrUpdateCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
metav1.ConditionTrue, string(d.instance.Status.Phase), "Completed")

return d.ensureActionCompleted(preferredCluster)
@@ -735,7 +735,7 @@ func (d *DRPCInstance) RunRelocate() (bool, error) {
if curHomeCluster != "" && curHomeCluster != preferredCluster &&
!d.readyToSwitchOver(curHomeCluster, preferredCluster) {
errMsg := fmt.Sprintf("current cluster (%s) has not completed protection actions", curHomeCluster)
d.setDRPCCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
addOrUpdateCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
d.getConditionStatusForTypeAvailable(), string(d.instance.Status.Phase), errMsg)

return !done, fmt.Errorf(errMsg)
@@ -832,7 +832,7 @@ func (d *DRPCInstance) quiesceAndRunFinalSync(homeCluster string) (bool, error)
clusterDecision := d.reconciler.getClusterDecision(d.userPlacement)
if clusterDecision.ClusterName != "" {
d.setDRState(rmn.Relocating)
d.setDRPCCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
addOrUpdateCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
d.getConditionStatusForTypeAvailable(), string(d.instance.Status.Phase), "Starting quiescing for relocation")

// clear current user PlacementRule's decision
@@ -1046,30 +1046,30 @@ func (d *DRPCInstance) relocate(preferredCluster, preferredClusterNamespace stri

// Make sure we record the state that we are relocating
d.setDRState(drState)
d.setDRPCCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
addOrUpdateCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
d.getConditionStatusForTypeAvailable(), string(d.instance.Status.Phase), "Starting relocation")

// Setting up relocation ensures that all VRGs in all managed cluster are secondaries
err := d.setupRelocation(preferredCluster)
if err != nil {
d.setDRPCCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
addOrUpdateCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
d.getConditionStatusForTypeAvailable(), string(d.instance.Status.Phase), err.Error())

return !done, err
}

err = d.switchToCluster(preferredCluster, preferredClusterNamespace)
if err != nil {
d.setDRPCCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
addOrUpdateCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
d.getConditionStatusForTypeAvailable(), string(d.instance.Status.Phase), err.Error())

return !done, err
}

d.setDRState(rmn.Relocated)
d.setDRPCCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
addOrUpdateCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
d.getConditionStatusForTypeAvailable(), string(d.instance.Status.Phase), "Completed")
d.setDRPCCondition(&d.instance.Status.Conditions, rmn.ConditionPeerReady, d.instance.Generation,
addOrUpdateCondition(&d.instance.Status.Conditions, rmn.ConditionPeerReady, d.instance.Generation,
metav1.ConditionFalse, rmn.ReasonNotStarted,
fmt.Sprintf("Started relocation to cluster %q", preferredCluster))

@@ -1609,7 +1609,7 @@ func (d *DRPCInstance) EnsureCleanup(clusterToSkip string) error {
if condition == nil {
msg := "Starting cleanup check"
d.log.Info(msg)
d.setDRPCCondition(&d.instance.Status.Conditions, rmn.ConditionPeerReady, d.instance.Generation,
addOrUpdateCondition(&d.instance.Status.Conditions, rmn.ConditionPeerReady, d.instance.Generation,
metav1.ConditionFalse, rmn.ReasonProgressing, msg)

condition = findCondition(d.instance.Status.Conditions, rmn.ConditionPeerReady)
@@ -1639,21 +1639,21 @@ func (d *DRPCInstance) EnsureCleanup(clusterToSkip string) error {

clean, err := d.cleanupSecondaries(clusterToSkip)
if err != nil {
d.setDRPCCondition(&d.instance.Status.Conditions, rmn.ConditionPeerReady, d.instance.Generation,
addOrUpdateCondition(&d.instance.Status.Conditions, rmn.ConditionPeerReady, d.instance.Generation,
metav1.ConditionFalse, rmn.ReasonCleaning, err.Error())

return err
}

if !clean {
msg := "cleaning secondaries"
d.setDRPCCondition(&d.instance.Status.Conditions, rmn.ConditionPeerReady, d.instance.Generation,
addOrUpdateCondition(&d.instance.Status.Conditions, rmn.ConditionPeerReady, d.instance.Generation,
metav1.ConditionFalse, rmn.ReasonCleaning, msg)

return fmt.Errorf("waiting to clean secondaries")
}

d.setDRPCCondition(&d.instance.Status.Conditions, rmn.ConditionPeerReady, d.instance.Generation,
addOrUpdateCondition(&d.instance.Status.Conditions, rmn.ConditionPeerReady, d.instance.Generation,
metav1.ConditionTrue, rmn.ReasonSuccess, "Cleaned")

return nil
@@ -1691,7 +1691,7 @@ func (d *DRPCInstance) cleanupForVolSync(clusterToSkip string) error {
return fmt.Errorf("still waiting for peer to be ready")
}

d.setDRPCCondition(&d.instance.Status.Conditions, rmn.ConditionPeerReady, d.instance.Generation,
addOrUpdateCondition(&d.instance.Status.Conditions, rmn.ConditionPeerReady, d.instance.Generation,
metav1.ConditionTrue, rmn.ReasonSuccess, "Ready")

return nil
@@ -2217,19 +2217,13 @@ func (d *DRPCInstance) getRequeueDuration() time.Duration {
}

func (d *DRPCInstance) setConditionOnInitialDeploymentCompletion() {
d.setDRPCCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
addOrUpdateCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
d.getConditionStatusForTypeAvailable(), string(d.instance.Status.Phase), "Initial deployment completed")

d.setDRPCCondition(&d.instance.Status.Conditions, rmn.ConditionPeerReady, d.instance.Generation,
addOrUpdateCondition(&d.instance.Status.Conditions, rmn.ConditionPeerReady, d.instance.Generation,
metav1.ConditionTrue, rmn.ReasonSuccess, "Ready")
}

func (d *DRPCInstance) setDRPCCondition(conditions *[]metav1.Condition, conditionType string,
observedGeneration int64, status metav1.ConditionStatus, reason, msg string,
) {
SetDRPCStatusCondition(conditions, conditionType, observedGeneration, status, reason, msg)
}

func (d *DRPCInstance) isPlacementNeedsFixing() bool {
// Needs fixing if and only if the DRPC Status is empty, the Placement decision is empty, and
// we have VRG(s) in the managed clusters
@@ -2305,10 +2299,10 @@ func (d *DRPCInstance) fixupPlacementForFailover() error {
peerReadyMsg = "NotReady"
}

d.setDRPCCondition(&d.instance.Status.Conditions, rmn.ConditionPeerReady, d.instance.Generation,
addOrUpdateCondition(&d.instance.Status.Conditions, rmn.ConditionPeerReady, d.instance.Generation,
peerReadyConditionStatus, rmn.ReasonSuccess, peerReadyMsg)

d.setDRPCCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
addOrUpdateCondition(&d.instance.Status.Conditions, rmn.ConditionAvailable, d.instance.Generation,
metav1.ConditionTrue, string(d.instance.Status.Phase), "Available")

return nil
@@ -2335,11 +2329,11 @@ func (d *DRPCInstance) fixupPlacementForRelocate() error {
}

// Assume that it is not clean
d.setDRPCCondition(&d.instance.Status.Conditions, rmn.ConditionPeerReady, d.instance.Generation,
addOrUpdateCondition(&d.instance.Status.Conditions, rmn.ConditionPeerReady, d.instance.Generation,
metav1.ConditionFalse, rmn.ReasonNotStarted, "NotReady")
} else if len(secondaries) > 1 {
// Use case 3: After Hub Recovery, the DRPC Action found to be 'Relocate', and ALL VRGs are secondary
d.setDRPCCondition(&d.instance.Status.Conditions, rmn.ConditionPeerReady, d.instance.Generation,
addOrUpdateCondition(&d.instance.Status.Conditions, rmn.ConditionPeerReady, d.instance.Generation,
metav1.ConditionTrue, rmn.ReasonSuccess,
fmt.Sprintf("Fixed for relocation to %q", d.instance.Spec.PreferredCluster))
}
60 changes: 30 additions & 30 deletions controllers/drplacementcontrol_controller.go
@@ -467,32 +467,6 @@ func DRPCsFailingOverToClusterForPolicy(
return filteredDRPCs, nil
}

func SetDRPCStatusCondition(conditions *[]metav1.Condition, conditionType string,
observedGeneration int64, status metav1.ConditionStatus, reason, msg string,
) bool {
newCondition := metav1.Condition{
Type: conditionType,
Status: status,
ObservedGeneration: observedGeneration,
LastTransitionTime: metav1.Now(),
Reason: reason,
Message: msg,
}

existingCondition := findCondition(*conditions, conditionType)
if existingCondition == nil ||
existingCondition.Status != newCondition.Status ||
existingCondition.ObservedGeneration != newCondition.ObservedGeneration ||
existingCondition.Reason != newCondition.Reason ||
existingCondition.Message != newCondition.Message {
setStatusCondition(conditions, newCondition)

return true
}

return false
}

// SetupWithManager sets up the controller with the Manager.
//
//nolint:funlen
@@ -662,7 +636,7 @@ func (r *DRPlacementControlReconciler) Reconcile(ctx context.Context, req ctrl.R
func (r *DRPlacementControlReconciler) recordFailure(drpc *rmn.DRPlacementControl,
placementObj client.Object, reason, msg string, drpcMetrics *DRPCMetrics, log logr.Logger,
) {
needsUpdate := SetDRPCStatusCondition(&drpc.Status.Conditions, rmn.ConditionAvailable,
needsUpdate := addOrUpdateCondition(&drpc.Status.Conditions, rmn.ConditionAvailable,
drpc.Generation, metav1.ConditionFalse, reason, msg)
if needsUpdate {
err := r.updateDRPCStatus(drpc, placementObj, drpcMetrics, log)
@@ -1368,10 +1342,10 @@ func updateVRGsFromManagedClusters(mcvGetter rmnutil.ManagedClusterViewGetter, d
if len(vrgs) == 0 && failedClusterToQuery != drpc.Spec.FailoverCluster {
condition := findCondition(drpc.Status.Conditions, rmn.ConditionPeerReady)
if condition == nil {
SetDRPCStatusCondition(&drpc.Status.Conditions, rmn.ConditionPeerReady, drpc.Generation,
metav1.ConditionTrue, rmn.ReasonSuccess, "Forcing Ready")
SetDRPCStatusCondition(&drpc.Status.Conditions, rmn.ConditionAvailable,
addOrUpdateCondition(&drpc.Status.Conditions, rmn.ConditionAvailable,
drpc.Generation, metav1.ConditionTrue, rmn.ReasonSuccess, "Forcing Available")
addOrUpdateCondition(&drpc.Status.Conditions, rmn.ConditionPeerReady, drpc.Generation,
metav1.ConditionTrue, rmn.ReasonSuccess, "Forcing Ready")
}
}

@@ -1823,3 +1797,29 @@ func selectVRGNamespace(
return drpc.Namespace, nil
}
}

func addOrUpdateCondition(conditions *[]metav1.Condition, conditionType string,
observedGeneration int64, status metav1.ConditionStatus, reason, msg string,
) bool {
newCondition := metav1.Condition{
Type: conditionType,
Status: status,
ObservedGeneration: observedGeneration,
LastTransitionTime: metav1.Now(),
Reason: reason,
Message: msg,
}

existingCondition := findCondition(*conditions, conditionType)
if existingCondition == nil ||
existingCondition.Status != newCondition.Status ||
existingCondition.ObservedGeneration != newCondition.ObservedGeneration ||
existingCondition.Reason != newCondition.Reason ||
existingCondition.Message != newCondition.Message {
setStatusCondition(conditions, newCondition)

return true
}

return false
}
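For context, addOrUpdateCondition returns true only when the condition it writes differs from what is already stored, so callers can skip a status write when nothing changed; recordFailure earlier in this file uses the return value exactly that way. A small illustrative sketch under that assumption (the reason and message strings here are placeholders, and the reconciler fields are those shown above):

// Sketch: push a status update only when the condition actually changed.
if addOrUpdateCondition(&drpc.Status.Conditions, rmn.ConditionAvailable,
	drpc.Generation, metav1.ConditionFalse, "ExampleReason", "example message") {
	if err := r.updateDRPCStatus(drpc, placementObj, drpcMetrics, log); err != nil {
		log.Info("Failed to update DRPC status")
	}
}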
