From 0d3dcece058702b2071858e3a21a511b049182ef Mon Sep 17 00:00:00 2001 From: Ryan Zhang Date: Wed, 28 Feb 2024 14:26:57 -0800 Subject: [PATCH 1/5] allow work agent track resource availability --- pkg/controllers/work/apply_controller.go | 400 +++++++-- .../work/apply_controller_helper_test.go | 22 + .../work/apply_controller_integration_test.go | 38 +- pkg/controllers/work/apply_controller_test.go | 838 ++++++++++++++++-- pkg/controllers/workgenerator/controller.go | 10 +- pkg/utils/common.go | 26 + pkg/utils/condition/condition.go | 17 + test/e2e/actuals_test.go | 9 +- test/e2e/enveloped_object_placement_test.go | 24 +- .../e2e/placement_selecting_resources_test.go | 8 +- test/e2e/resources/test-deployment.yaml | 34 + .../resources/test-envelop-deployment.yaml | 11 + test/e2e/resources_test.go | 3 +- test/e2e/rollout_test.go | 217 +++++ test/e2e/setup_test.go | 7 +- test/e2e/utils_test.go | 14 +- 16 files changed, 1495 insertions(+), 183 deletions(-) create mode 100644 test/e2e/resources/test-deployment.yaml create mode 100644 test/e2e/resources/test-envelop-deployment.yaml create mode 100644 test/e2e/rollout_test.go diff --git a/pkg/controllers/work/apply_controller.go b/pkg/controllers/work/apply_controller.go index 1bb6c9636..281a83d7d 100644 --- a/pkg/controllers/work/apply_controller.go +++ b/pkg/controllers/work/apply_controller.go @@ -27,11 +27,14 @@ import ( "time" "go.uber.org/atomic" + appv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" @@ -42,13 +45,15 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" + ctrloption "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/predicate" fleetv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" "go.goms.io/fleet/pkg/metrics" "go.goms.io/fleet/pkg/utils" + "go.goms.io/fleet/pkg/utils/condition" + "go.goms.io/fleet/pkg/utils/controller" "go.goms.io/fleet/pkg/utils/resource" ) @@ -58,12 +63,18 @@ const ( // WorkCondition condition reasons const ( - // AppliedWorkFailedReason is the reason string of work condition when it failed to apply the work. - AppliedWorkFailedReason = "AppliedWorkFailed" - // AppliedWorkCompleteReason is the reason string of work condition when it finished applying work. - AppliedWorkCompleteReason = "AppliedWorkComplete" - // AppliedManifestFailedReason is the reason string of condition when it failed to apply manifest. - AppliedManifestFailedReason = "AppliedManifestFailedReason" + workAppliedFailedReason = "WorkAppliedFailed" + workAppliedCompleteReason = "WorkAppliedComplete" + workNotAvailableYetReason = "WorkNotAvailableYet" + workAvailabilityUnknownReason = "WorkAvailabilityUnknown" + workAvailableReason = "WorkAvailable" + workNotTrackableReason = "WorkNotTrackable" + // ManifestApplyFailedReason is the reason string of condition when it failed to apply manifest. 
+ ManifestApplyFailedReason = "ManifestApplyFailedReason" + // ManifestAlreadyUpToDateReason is the reason string of condition when the manifest is already up to date. + ManifestAlreadyUpToDateReason = "ManifestAlreadyUpToDate" + // ManifestNeedsUpdateReason is the reason string of condition when the manifest needs to be updated. + ManifestNeedsUpdateReason = "ManifestNeedsUpdate" ) // ApplyWorkReconciler reconciles a Work object @@ -92,22 +103,32 @@ func NewApplyWorkReconciler(hubClient client.Client, spokeDynamicClient dynamic. } } -// applyAction represents the action we take to apply the manifest +// applyAction represents the action we take to apply the manifest. +// It is used only internally to track the result of the apply function. // +enum type applyAction string const ( - // ManifestCreatedAction indicates that we created the manifest for the first time. - ManifestCreatedAction applyAction = "ManifestCreated" + // manifestCreatedAction indicates that we created the manifest for the first time. + manifestCreatedAction applyAction = "ManifestCreated" - // ManifestThreeWayMergePatchAction indicates that we updated the manifest using three-way merge patch. - ManifestThreeWayMergePatchAction applyAction = "ManifestThreeWayMergePatched" + // manifestThreeWayMergePatchAction indicates that we updated the manifest using three-way merge patch. + manifestThreeWayMergePatchAction applyAction = "ManifestThreeWayMergePatched" - // ManifestServerSideAppliedAction indicates that we updated the manifest using server side apply. - ManifestServerSideAppliedAction applyAction = "ManifestServerSideApplied" + // manifestServerSideAppliedAction indicates that we updated the manifest using server side apply. + manifestServerSideAppliedAction applyAction = "ManifestServerSideApplied" - // ManifestNoChangeAction indicates that we don't need to change the manifest. - ManifestNoChangeAction applyAction = "ManifestNoChange" + // errorApplyAction indicates that there was an error during the apply action. + errorApplyAction applyAction = "ErrorApply" + + // manifestNotAvailableYetAction indicates that we still need to wait for the manifest to be available. + manifestNotAvailableYetAction applyAction = "ManifestNotAvailableYet" + + // manifestNotTrackableAction indicates that the manifest is already up to date but we don't have a way to track its availabilities. + manifestNotTrackableAction applyAction = "ManifestNotTrackable" + + // manifestAvailableAction indicates that the manifest is available. + manifestAvailableAction applyAction = "ManifestAvailable" ) // applyResult contains the result of a manifest being applied. @@ -115,7 +136,7 @@ type applyResult struct { identifier fleetv1beta1.WorkResourceIdentifier generation int64 action applyAction - err error + applyErr error } // Reconcile implement the control loop logic for Work object. 
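For illustration, a minimal hedged sketch of how a consumer of the Work API might interpret the two condition types and reason strings introduced above. The helper name describeWorkStatus and the standalone package are assumptions made for this example; only meta.FindStatusCondition, the fleet v1beta1 types, and the reason value "WorkNotTrackable" come from this patch and its dependencies.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	fleetv1beta1 "go.goms.io/fleet/apis/placement/v1beta1"
)

// describeWorkStatus is a hypothetical helper that summarizes a Work object's
// Applied and Available conditions using the reasons defined in this patch.
func describeWorkStatus(work *fleetv1beta1.Work) string {
	applied := meta.FindStatusCondition(work.Status.Conditions, fleetv1beta1.WorkConditionTypeApplied)
	available := meta.FindStatusCondition(work.Status.Conditions, fleetv1beta1.WorkConditionTypeAvailable)
	switch {
	case applied == nil || applied.Status != metav1.ConditionTrue:
		return "work has not been fully applied yet"
	case available == nil || available.Status == metav1.ConditionUnknown:
		return "work is applied; availability is not known yet"
	case available.Status == metav1.ConditionTrue && available.Reason == "WorkNotTrackable":
		// not trackable resources are reported as available so callers do not block forever
		return "work is applied; availability cannot be tracked and is assumed"
	case available.Status == metav1.ConditionTrue:
		return "work is applied and all manifests are available"
	default:
		return "work is applied but some manifests are not available yet"
	}
}

func main() {
	// An empty Work has no conditions, so this prints the "not fully applied" branch.
	fmt.Println(describeWorkStatus(&fleetv1beta1.Work{}))
}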
@@ -140,7 +161,7 @@ func (r *ApplyWorkReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( return ctrl.Result{}, nil case err != nil: klog.ErrorS(err, "failed to retrieve the work", "work", req.NamespacedName) - return ctrl.Result{}, err + return ctrl.Result{}, controller.NewAPIServerError(true, err) } logObjRef := klog.KObj(work) @@ -164,7 +185,7 @@ func (r *ApplyWorkReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( } // apply the manifests to the member cluster - results := r.applyManifests(ctx, work.Spec.Workload.Manifests, owner) + results := r.applyManifests(ctx, work.Spec.Workload.Manifests, owner, work.Spec.ApplyStrategy) // collect the latency from the work update time to now. lastUpdateTime, ok := work.GetAnnotations()[utils.LastWorkUpdateTimeAnnotationKey] @@ -182,7 +203,7 @@ func (r *ApplyWorkReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( } // generate the work condition based on the manifest apply result - errs := r.generateWorkCondition(results, work) + errs := constructWorkCondition(results, work) // update the work status if err = r.client.Status().Update(ctx, work, &client.SubResourceUpdateOptions{}); err != nil { @@ -224,8 +245,13 @@ func (r *ApplyWorkReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( "work", logObjRef) } - // we periodically reconcile the work to make sure the member cluster state is in sync with the work - // even if the reconciling succeeds in case the resources on the member cluster is removed/changed. + availableCond := meta.FindStatusCondition(work.Status.Conditions, fleetv1beta1.WorkConditionTypeAvailable) + if !condition.IsConditionStatusTrue(availableCond, work.Generation) { + klog.V(2).InfoS("work is not available yet, check again", "work", logObjRef, "availableCond", availableCond) + return ctrl.Result{RequeueAfter: time.Second * 3}, err + } + // the work is available (possibly because its resources are not trackable), but we still reconcile periodically to make sure the + // member cluster state stays in sync with the work in case the resources on the member cluster are removed or changed. return ctrl.Result{RequeueAfter: time.Minute * 5}, err } @@ -267,7 +293,7 @@ func (r *ApplyWorkReconciler) ensureAppliedWork(ctx context.Context, work *fleet klog.ErrorS(err, "appliedWork finalizer resource does not exist even with the finalizer, it will be recreated", "appliedWork", workRef.Name) case err != nil: klog.ErrorS(err, "failed to retrieve the appliedWork ", "appliedWork", workRef.Name) - return nil, err + return nil, controller.NewAPIServerError(true, err) default: return appliedWork, nil } @@ -297,7 +323,7 @@ func (r *ApplyWorkReconciler) ensureAppliedWork(ctx context.Context, work *fleet } // applyManifests processes a given set of Manifests by: setting ownership, validating the manifest, and passing it on for application to the cluster.
-func (r *ApplyWorkReconciler) applyManifests(ctx context.Context, manifests []fleetv1beta1.Manifest, owner metav1.OwnerReference) []applyResult { +func (r *ApplyWorkReconciler) applyManifests(ctx context.Context, manifests []fleetv1beta1.Manifest, owner metav1.OwnerReference, applyStrategy *fleetv1beta1.ApplyStrategy) []applyResult { var appliedObj *unstructured.Unstructured results := make([]applyResult, len(manifests)) @@ -306,7 +332,7 @@ func (r *ApplyWorkReconciler) applyManifests(ctx context.Context, manifests []fl gvr, rawObj, err := r.decodeManifest(manifest) switch { case err != nil: - result.err = err + result.applyErr = err result.identifier = fleetv1beta1.WorkResourceIdentifier{ Ordinal: index, } @@ -320,18 +346,18 @@ func (r *ApplyWorkReconciler) applyManifests(ctx context.Context, manifests []fl default: addOwnerRef(owner, rawObj) - appliedObj, result.action, result.err = r.applyUnstructured(ctx, gvr, rawObj) + appliedObj, result.action, result.applyErr = r.applyUnstructured(ctx, gvr, rawObj) result.identifier = buildResourceIdentifier(index, rawObj, gvr) logObjRef := klog.ObjectRef{ Name: result.identifier.Name, Namespace: result.identifier.Namespace, } - if result.err == nil { + if result.applyErr == nil { result.generation = appliedObj.GetGeneration() klog.V(2).InfoS("apply manifest succeeded", "gvr", gvr, "manifest", logObjRef, - "apply action", result.action, "new ObservedGeneration", result.generation) + "action", result.action, "applyStrategy", applyStrategy, "new ObservedGeneration", result.generation) } else { - klog.ErrorS(result.err, "manifest upsert failed", "gvr", gvr, "manifest", logObjRef) + klog.ErrorS(result.applyErr, "manifest upsert failed", "gvr", gvr, "manifest", logObjRef) } } results[index] = result @@ -367,22 +393,22 @@ func (r *ApplyWorkReconciler) applyUnstructured(ctx context.Context, gvr schema. // compute the hash without taking into consider the last applied annotation if err := setManifestHashAnnotation(manifestObj); err != nil { - return nil, ManifestNoChangeAction, err + return nil, errorApplyAction, err } // extract the common create procedure to reuse var createFunc = func() (*unstructured.Unstructured, applyAction, error) { // record the raw manifest with the hash annotation in the manifest if _, err := setModifiedConfigurationAnnotation(manifestObj); err != nil { - return nil, ManifestNoChangeAction, err + return nil, errorApplyAction, err } actual, err := r.spokeDynamicClient.Resource(gvr).Namespace(manifestObj.GetNamespace()).Create( ctx, manifestObj, metav1.CreateOptions{FieldManager: workFieldManagerName}) if err == nil { klog.V(2).InfoS("successfully created the manifest", "gvr", gvr, "manifest", manifestRef) - return actual, ManifestCreatedAction, nil + return actual, manifestCreatedAction, nil } - return nil, ManifestNoChangeAction, err + return nil, errorApplyAction, err } // support resources with generated name @@ -397,14 +423,15 @@ func (r *ApplyWorkReconciler) applyUnstructured(ctx context.Context, gvr schema. 
case apierrors.IsNotFound(err): return createFunc() case err != nil: - return nil, ManifestNoChangeAction, err + return nil, errorApplyAction, controller.NewAPIServerError(false, err) } // check if the existing manifest is managed by the work if !isManifestManagedByWork(curObj.GetOwnerReferences()) { + // TODO: honer the applyStrategy to decide if we should overwrite the existing resource err = fmt.Errorf("resource is not managed by the work controller") klog.ErrorS(err, "skip applying a not managed manifest", "gvr", gvr, "obj", manifestRef) - return nil, ManifestNoChangeAction, err + return nil, errorApplyAction, controller.NewExpectedBehaviorError(err) } // We only try to update the object if its spec hash value has changed. @@ -415,21 +442,121 @@ func (r *ApplyWorkReconciler) applyUnstructured(ctx context.Context, gvr schema. // record the raw manifest with the hash annotation in the manifest. isModifiedConfigAnnotationNotEmpty, err := setModifiedConfigurationAnnotation(manifestObj) if err != nil { - return nil, ManifestNoChangeAction, err + return nil, errorApplyAction, err } if !isModifiedConfigAnnotationNotEmpty { klog.V(2).InfoS("using server side apply for manifest", "gvr", gvr, "manifest", manifestRef) - return r.applyObject(ctx, gvr, manifestObj) + return r.serversideApplyObject(ctx, gvr, manifestObj) } klog.V(2).InfoS("using three way merge for manifest", "gvr", gvr, "manifest", manifestRef) return r.patchCurrentResource(ctx, gvr, manifestObj, curObj) } + // the manifest is already up to date, we just need to track its availability + applyAction, err := trackResourceAvailability(gvr, curObj) + return curObj, applyAction, err +} + +func trackResourceAvailability(gvr schema.GroupVersionResource, curObj *unstructured.Unstructured) (applyAction, error) { + switch gvr { + case utils.DeploymentGVR: + return trackDeploymentAvailability(curObj) + + case utils.StatefulSettGVR: + return trackStatefulSetAvailability(curObj) + + case utils.DaemonSettGVR: + return trackDaemonSetAvailability(curObj) - return curObj, ManifestNoChangeAction, nil + case utils.JobGVR: + return trackJobAvailability(curObj) + + default: + klog.V(2).InfoS("we don't know how to track the availability of the resource", "gvr", gvr, "resource", klog.KObj(curObj)) + return manifestNotTrackableAction, nil + } +} + +func trackDeploymentAvailability(curObj *unstructured.Unstructured) (applyAction, error) { + var deployment appv1.Deployment + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(curObj.Object, &deployment); err != nil { + return errorApplyAction, controller.NewUnexpectedBehaviorError(err) + } + // see if the DeploymentAvailable condition is true + var depCond *appv1.DeploymentCondition + for i := range deployment.Status.Conditions { + if deployment.Status.Conditions[i].Type == appv1.DeploymentAvailable { + depCond = &deployment.Status.Conditions[i] + break + } + } + // a deployment is available if the observedGeneration is equal to the generation and the Available condition is true + if deployment.Status.ObservedGeneration == deployment.Generation && depCond != nil && depCond.Status == v1.ConditionTrue { + klog.V(2).InfoS("deployment is available", "deployment", klog.KObj(curObj)) + return manifestAvailableAction, nil + } + klog.V(2).InfoS("still need to wait for deployment to be available", "deployment", klog.KObj(curObj)) + return manifestNotAvailableYetAction, nil } -// applyObject uses server side apply to apply the manifest. 
-func (r *ApplyWorkReconciler) applyObject(ctx context.Context, gvr schema.GroupVersionResource, +func trackStatefulSetAvailability(curObj *unstructured.Unstructured) (applyAction, error) { + var statefulSet appv1.StatefulSet + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(curObj.Object, &statefulSet); err != nil { + return errorApplyAction, controller.NewUnexpectedBehaviorError(err) + } + // a statefulSet is available if all of its replicas are available and the currentReplicas equals the updatedReplicas, + // which means there is no update still in progress. + if statefulSet.Status.ObservedGeneration == statefulSet.Generation && + statefulSet.Status.AvailableReplicas == *statefulSet.Spec.Replicas && + statefulSet.Status.CurrentReplicas == statefulSet.Status.UpdatedReplicas { + klog.V(2).InfoS("statefulSet is available", "statefulSet", klog.KObj(curObj)) + return manifestAvailableAction, nil + } + klog.V(2).InfoS("still need to wait for statefulSet to be available", "statefulSet", klog.KObj(curObj)) + return manifestNotAvailableYetAction, nil +} + +func trackDaemonSetAvailability(curObj *unstructured.Unstructured) (applyAction, error) { + var daemonSet appv1.DaemonSet + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(curObj.Object, &daemonSet); err != nil { + return errorApplyAction, controller.NewUnexpectedBehaviorError(err) + } + // a daemonSet is available if all the desired replicas (equal to the number of nodes that should run this DaemonSet) + // are available and the currentNumberScheduled equals the updatedNumberScheduled, which means there is no update still in progress. + if daemonSet.Status.ObservedGeneration == daemonSet.Generation && + daemonSet.Status.NumberAvailable == daemonSet.Status.DesiredNumberScheduled && + daemonSet.Status.CurrentNumberScheduled == daemonSet.Status.UpdatedNumberScheduled { + klog.V(2).InfoS("daemonSet is available", "daemonSet", klog.KObj(curObj)) + return manifestAvailableAction, nil + } + klog.V(2).InfoS("still need to wait for daemonSet to be available", "daemonSet", klog.KObj(curObj)) + return manifestNotAvailableYetAction, nil +} + +func trackJobAvailability(curObj *unstructured.Unstructured) (applyAction, error) { + var job batchv1.Job + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(curObj.Object, &job); err != nil { + return errorApplyAction, controller.NewUnexpectedBehaviorError(err) + } + if job.Status.Succeeded > 0 { + klog.V(2).InfoS("job is available with at least one succeeded pod", "job", klog.KObj(curObj)) + return manifestAvailableAction, nil + } + if job.Status.Ready != nil { + // we consider a job available if there is at least one pod ready + if *job.Status.Ready > 0 { + klog.V(2).InfoS("job is available with at least one ready pod", "job", klog.KObj(curObj)) + return manifestAvailableAction, nil + } + klog.V(2).InfoS("still need to wait for job to be available", "job", klog.KObj(curObj)) + return manifestNotAvailableYetAction, nil + } + // the ready field only exists in k8s 1.24+ by default, so we can't track the availability of the job without it + klog.V(2).InfoS("job does not have ready status, we can't track its availability", "job", klog.KObj(curObj)) + return manifestNotTrackableAction, nil +} + +// serversideApplyObject uses server side apply to apply the manifest.
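As a companion to the tracking helpers above, a self-contained sketch of the Deployment availability check (the controller must have observed the latest generation and the Available condition must be true). The standalone package, function name deploymentIsAvailable, and the hand-built fixture are assumptions for this example; the conversion via runtime.DefaultUnstructuredConverter and the condition check mirror trackDeploymentAvailability.

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
)

// deploymentIsAvailable mirrors the check in trackDeploymentAvailability.
func deploymentIsAvailable(u *unstructured.Unstructured) (bool, error) {
	var deployment appsv1.Deployment
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, &deployment); err != nil {
		return false, err
	}
	// the status must reflect the latest generation of the spec
	if deployment.Status.ObservedGeneration != deployment.Generation {
		return false, nil
	}
	// the Available condition must be true
	for _, cond := range deployment.Status.Conditions {
		if cond.Type == appsv1.DeploymentAvailable && cond.Status == corev1.ConditionTrue {
			return true, nil
		}
	}
	return false, nil
}

func main() {
	// A hand-built unstructured Deployment, similar to the fixtures used in the unit tests below.
	u := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "apps/v1",
		"kind":       "Deployment",
		"metadata":   map[string]interface{}{"name": "demo", "generation": int64(1)},
		"status": map[string]interface{}{
			"observedGeneration": int64(1),
			"conditions": []interface{}{
				map[string]interface{}{"type": "Available", "status": "True"},
			},
		},
	}}
	ok, err := deploymentIsAvailable(u)
	fmt.Println(ok, err) // prints: true <nil>
}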
+func (r *ApplyWorkReconciler) serversideApplyObject(ctx context.Context, gvr schema.GroupVersionResource, manifestObj *unstructured.Unstructured) (*unstructured.Unstructured, applyAction, error) { manifestRef := klog.ObjectRef{ Name: manifestObj.GetName(), @@ -442,10 +569,10 @@ func (r *ApplyWorkReconciler) applyObject(ctx context.Context, gvr schema.GroupV manifestObj, err := r.spokeDynamicClient.Resource(gvr).Namespace(manifestObj.GetNamespace()).Apply(ctx, manifestObj.GetName(), manifestObj, options) if err != nil { klog.ErrorS(err, "failed to apply object", "gvr", gvr, "manifest", manifestRef) - return nil, ManifestNoChangeAction, err + return nil, errorApplyAction, err } klog.V(2).InfoS("manifest apply succeeded", "gvr", gvr, "manifest", manifestRef) - return manifestObj, ManifestServerSideAppliedAction, nil + return manifestObj, manifestServerSideAppliedAction, nil } // patchCurrentResource uses three-way merge to patch the current resource with the new manifest we get from the work. @@ -462,49 +589,56 @@ func (r *ApplyWorkReconciler) patchCurrentResource(ctx context.Context, gvr sche patch, err := threeWayMergePatch(curObj, manifestObj) if err != nil { klog.ErrorS(err, "failed to generate the three way patch", "gvr", gvr, "manifest", manifestRef) - return nil, ManifestNoChangeAction, err + return nil, errorApplyAction, err } data, err := patch.Data(manifestObj) if err != nil { klog.ErrorS(err, "failed to generate the three way patch", "gvr", gvr, "manifest", manifestRef) - return nil, ManifestNoChangeAction, err + return nil, errorApplyAction, err } // Use client side apply the patch to the member cluster manifestObj, patchErr := r.spokeDynamicClient.Resource(gvr).Namespace(manifestObj.GetNamespace()). Patch(ctx, manifestObj.GetName(), patch.Type(), data, metav1.PatchOptions{FieldManager: workFieldManagerName}) if patchErr != nil { klog.ErrorS(patchErr, "failed to patch the manifest", "gvr", gvr, "manifest", manifestRef) - return nil, ManifestNoChangeAction, patchErr + return nil, errorApplyAction, patchErr } klog.V(2).InfoS("manifest patch succeeded", "gvr", gvr, "manifest", manifestRef) - return manifestObj, ManifestThreeWayMergePatchAction, nil + return manifestObj, manifestThreeWayMergePatchAction, nil } -// generateWorkCondition constructs the work condition based on the apply result -func (r *ApplyWorkReconciler) generateWorkCondition(results []applyResult, work *fleetv1beta1.Work) []error { +// constructWorkCondition constructs the work condition based on the apply result +func constructWorkCondition(results []applyResult, work *fleetv1beta1.Work) []error { var errs []error // Update manifestCondition based on the results. 
manifestConditions := make([]fleetv1beta1.ManifestCondition, len(results)) for index, result := range results { - if result.err != nil { - errs = append(errs, result.err) + if result.applyErr != nil { + errs = append(errs, result.applyErr) } - appliedCondition := buildManifestAppliedCondition(result.err, result.action, result.generation) + newConditions := buildManifestCondition(result.applyErr, result.action, result.generation) manifestCondition := fleetv1beta1.ManifestCondition{ Identifier: result.identifier, - Conditions: []metav1.Condition{appliedCondition}, } - foundmanifestCondition := findManifestConditionByIdentifier(result.identifier, work.Status.ManifestConditions) - if foundmanifestCondition != nil { - manifestCondition.Conditions = foundmanifestCondition.Conditions - meta.SetStatusCondition(&manifestCondition.Conditions, appliedCondition) + existingManifestCondition := findManifestConditionByIdentifier(result.identifier, work.Status.ManifestConditions) + if existingManifestCondition != nil { + // merge the status of the manifest condition + manifestCondition.Conditions = existingManifestCondition.Conditions + for _, condition := range newConditions { + meta.SetStatusCondition(&manifestCondition.Conditions, condition) + } + } else { + manifestCondition.Conditions = newConditions } manifestConditions[index] = manifestCondition } work.Status.ManifestConditions = manifestConditions - workCond := generateWorkAppliedCondition(manifestConditions, work.Generation) - work.Status.Conditions = []metav1.Condition{workCond} + // merge the status of the work condition + newWorkConditions := buildWorkCondition(manifestConditions, work.Generation) + for _, condition := range newWorkConditions { + meta.SetStatusCondition(&work.Status.Conditions, condition) + } return errs } @@ -552,7 +686,7 @@ func (r *ApplyWorkReconciler) Leave(ctx context.Context) error { // SetupWithManager wires up the controller. func (r *ApplyWorkReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - WithOptions(controller.Options{ + WithOptions(ctrloption.Options{ MaxConcurrentReconciles: r.concurrency, }). For(&fleetv1beta1.Work{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})). @@ -653,50 +787,132 @@ func buildResourceIdentifier(index int, object *unstructured.Unstructured, gvr s } } -func buildManifestAppliedCondition(err error, action applyAction, observedGeneration int64) metav1.Condition { - if err != nil { - return metav1.Condition{ - Type: fleetv1beta1.WorkConditionTypeApplied, - Status: metav1.ConditionFalse, - ObservedGeneration: observedGeneration, - LastTransitionTime: metav1.Now(), - Reason: AppliedManifestFailedReason, - Message: fmt.Sprintf("Failed to apply manifest: %v", err), - } +func buildManifestCondition(err error, action applyAction, observedGeneration int64) []metav1.Condition { + applyCondition := metav1.Condition{ + Type: fleetv1beta1.WorkConditionTypeApplied, + LastTransitionTime: metav1.Now(), + ObservedGeneration: observedGeneration, } - return metav1.Condition{ - Type: fleetv1beta1.WorkConditionTypeApplied, - Status: metav1.ConditionTrue, + availableCondition := metav1.Condition{ + Type: fleetv1beta1.WorkConditionTypeAvailable, LastTransitionTime: metav1.Now(), ObservedGeneration: observedGeneration, - Reason: string(action), - Message: string(action), } -} -// generateWorkAppliedCondition generate applied status condition for work. 
-// If one of the manifests is applied failed on the spoke, the applied status condition of the work is false. -func generateWorkAppliedCondition(manifestConditions []fleetv1beta1.ManifestCondition, observedGeneration int64) metav1.Condition { - for _, manifestCond := range manifestConditions { - if meta.IsStatusConditionFalse(manifestCond.Conditions, fleetv1beta1.WorkConditionTypeApplied) { - return metav1.Condition{ - Type: fleetv1beta1.WorkConditionTypeApplied, - Status: metav1.ConditionFalse, - LastTransitionTime: metav1.Now(), - Reason: AppliedWorkFailedReason, - Message: "Failed to apply work", - ObservedGeneration: observedGeneration, - } + if err != nil { + applyCondition.Status = metav1.ConditionFalse + applyCondition.Reason = ManifestApplyFailedReason + applyCondition.Message = fmt.Sprintf("Failed to apply manifest: %v", err) + availableCondition.Status = metav1.ConditionUnknown + availableCondition.Reason = ManifestApplyFailedReason + } else { + applyCondition.Status = metav1.ConditionTrue + // the first three action types mean we wrote to the cluster, so the availability is unknown + // the last three action types mean we have started to track the availability of the resources + switch action { + case manifestCreatedAction: + applyCondition.Reason = string(manifestCreatedAction) + availableCondition.Status = metav1.ConditionUnknown + availableCondition.Reason = ManifestNeedsUpdateReason + + case manifestThreeWayMergePatchAction: + applyCondition.Reason = string(manifestThreeWayMergePatchAction) + availableCondition.Status = metav1.ConditionUnknown + availableCondition.Reason = ManifestNeedsUpdateReason + + case manifestServerSideAppliedAction: + applyCondition.Reason = string(manifestServerSideAppliedAction) + availableCondition.Status = metav1.ConditionUnknown + availableCondition.Reason = ManifestNeedsUpdateReason + + case manifestAvailableAction: + applyCondition.Reason = ManifestAlreadyUpToDateReason + availableCondition.Status = metav1.ConditionTrue + availableCondition.Reason = string(manifestAvailableAction) + + case manifestNotAvailableYetAction: + applyCondition.Reason = ManifestAlreadyUpToDateReason + availableCondition.Status = metav1.ConditionFalse + availableCondition.Reason = string(manifestNotAvailableYetAction) + // we cannot leave the condition stuck at unknown, so we have to mark a not trackable manifest as available + case manifestNotTrackableAction: + applyCondition.Reason = ManifestAlreadyUpToDateReason + availableCondition.Status = metav1.ConditionTrue + availableCondition.Reason = string(manifestNotTrackableAction) + + default: } } - return metav1.Condition{ + return []metav1.Condition{applyCondition, availableCondition} +} + +// buildWorkCondition generates the applied and available status conditions for the work. +// If one of the manifests fails to apply on the spoke, the applied status condition of the work is false. +// If one of the manifests is not available yet on the spoke, the available status condition of the work is false. +// If all the manifests are available, the available status condition of the work is true. +// Otherwise, the available status condition of the work is unknown as we can't track some of them.
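To make the aggregation rules described in the comment above concrete, here is a hedged, standalone restatement of the availability roll-up only; it assumes every manifest already applied successfully, whereas the real buildWorkCondition below also short-circuits on apply failures. The function aggregateAvailability and its package are illustrative; the reason strings match the constants defined earlier in this file.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// aggregateAvailability restates the precedence used when rolling per-manifest
// Available conditions up into the work-level condition: Unknown beats False,
// False beats True, and a True result is reported with the "not trackable"
// reason if any manifest could not be tracked. This is an illustrative
// re-implementation, not the controller code itself.
func aggregateAvailability(manifestAvailable []metav1.Condition) (metav1.ConditionStatus, string) {
	for _, c := range manifestAvailable {
		if c.Status == metav1.ConditionUnknown {
			return metav1.ConditionUnknown, "WorkAvailabilityUnknown"
		}
	}
	for _, c := range manifestAvailable {
		if c.Status == metav1.ConditionFalse {
			return metav1.ConditionFalse, "WorkNotAvailableYet"
		}
	}
	for _, c := range manifestAvailable {
		if c.Reason == "ManifestNotTrackable" {
			return metav1.ConditionTrue, "WorkNotTrackable"
		}
	}
	return metav1.ConditionTrue, "WorkAvailable"
}

func main() {
	conds := []metav1.Condition{
		{Status: metav1.ConditionTrue, Reason: "ManifestAvailable"},
		{Status: metav1.ConditionFalse, Reason: "ManifestNotAvailableYet"},
	}
	status, reason := aggregateAvailability(conds)
	fmt.Println(status, reason) // prints: False WorkNotAvailableYet
}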
+func buildWorkCondition(manifestConditions []fleetv1beta1.ManifestCondition, observedGeneration int64) []metav1.Condition { + applyCondition := metav1.Condition{ Type: fleetv1beta1.WorkConditionTypeApplied, - Status: metav1.ConditionTrue, LastTransitionTime: metav1.Now(), - Reason: AppliedWorkCompleteReason, - Message: "Apply work complete", ObservedGeneration: observedGeneration, } + availableCondition := metav1.Condition{ + Type: fleetv1beta1.WorkConditionTypeAvailable, + LastTransitionTime: metav1.Now(), + ObservedGeneration: observedGeneration, + } + // the manifest condition should not be an empty list + for _, manifestCond := range manifestConditions { + if meta.IsStatusConditionFalse(manifestCond.Conditions, fleetv1beta1.WorkConditionTypeApplied) { + // we mark the entire work applied condition to false if one of the manifests is applied failed + applyCondition.Status = metav1.ConditionFalse + applyCondition.Reason = workAppliedFailedReason + applyCondition.Message = fmt.Sprintf("Apply manifest %+v failed", manifestCond.Identifier) + availableCondition.Status = metav1.ConditionUnknown + availableCondition.Reason = workAppliedFailedReason + return []metav1.Condition{applyCondition, availableCondition} + } + } + applyCondition.Status = metav1.ConditionTrue + applyCondition.Reason = workAppliedCompleteReason + applyCondition.Message = "Apply work complete" + // we mark the entire work available condition to unknown if one of the manifests is not known yet + for _, manifestCond := range manifestConditions { + cond := meta.FindStatusCondition(manifestCond.Conditions, fleetv1beta1.WorkConditionTypeAvailable) + if cond.Status == metav1.ConditionUnknown { + availableCondition.Status = metav1.ConditionUnknown + availableCondition.Reason = workAvailabilityUnknownReason + availableCondition.Message = fmt.Sprintf("Manifest %+v availability is not known yet", manifestCond.Identifier) + return []metav1.Condition{applyCondition, availableCondition} + } + } + // now that there is no unknown, we mark the entire work available condition to false if one of the manifests is not applied yet + for _, manifestCond := range manifestConditions { + cond := meta.FindStatusCondition(manifestCond.Conditions, fleetv1beta1.WorkConditionTypeAvailable) + if cond.Status == metav1.ConditionFalse { + availableCondition.Status = metav1.ConditionFalse + availableCondition.Reason = workNotAvailableYetReason + availableCondition.Message = fmt.Sprintf("Manifest %+v is not available yet", manifestCond.Identifier) + return []metav1.Condition{applyCondition, availableCondition} + } + } + // now that all the conditions are true, we mark the entire work available condition reason to be not trackable if one of the manifests is not trackable + trackable := true + for _, manifestCond := range manifestConditions { + cond := meta.FindStatusCondition(manifestCond.Conditions, fleetv1beta1.WorkConditionTypeAvailable) + if cond.Reason == string(manifestNotTrackableAction) { + trackable = false + break + } + } + availableCondition.Status = metav1.ConditionTrue + if trackable { + availableCondition.Reason = workAvailableReason + } else { + availableCondition.Reason = workNotTrackableReason + } + return []metav1.Condition{applyCondition, availableCondition} } diff --git a/pkg/controllers/work/apply_controller_helper_test.go b/pkg/controllers/work/apply_controller_helper_test.go index fda6a91c7..914276c01 100644 --- a/pkg/controllers/work/apply_controller_helper_test.go +++ b/pkg/controllers/work/apply_controller_helper_test.go @@ -86,6 
+86,28 @@ func waitForWorkToApply(workName, workNS string) *fleetv1beta1.Work { return &resultWork } +// waitForWorkToAvailable waits for a work to have an available condition to be true +func waitForWorkToBeAvailable(workName, workNS string) *fleetv1beta1.Work { + var resultWork fleetv1beta1.Work + Eventually(func() bool { + err := k8sClient.Get(context.Background(), types.NamespacedName{Name: workName, Namespace: workNS}, &resultWork) + if err != nil { + return false + } + applyCond := meta.FindStatusCondition(resultWork.Status.Conditions, fleetv1beta1.WorkConditionTypeAvailable) + if applyCond == nil || applyCond.Status != metav1.ConditionTrue || applyCond.ObservedGeneration != resultWork.Generation { + return false + } + for _, manifestCondition := range resultWork.Status.ManifestConditions { + if !meta.IsStatusConditionTrue(manifestCondition.Conditions, fleetv1beta1.WorkConditionTypeAvailable) { + return false + } + } + return true + }, timeout, interval).Should(BeTrue()) + return &resultWork +} + // waitForWorkToBeHandled waits for a work to have a finalizer func waitForWorkToBeHandled(workName, workNS string) *fleetv1beta1.Work { var resultWork fleetv1beta1.Work diff --git a/pkg/controllers/work/apply_controller_integration_test.go b/pkg/controllers/work/apply_controller_integration_test.go index 2de2cdd44..43d621bef 100644 --- a/pkg/controllers/work/apply_controller_integration_test.go +++ b/pkg/controllers/work/apply_controller_integration_test.go @@ -42,6 +42,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" fleetv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" + "go.goms.io/fleet/pkg/utils/condition" ) const timeout = time.Second * 10 @@ -95,10 +96,8 @@ var _ = Describe("Work Controller", func() { err := k8sClient.Create(context.Background(), work) Expect(err).ToNot(HaveOccurred()) - resultWork := waitForWorkToApply(work.GetName(), work.GetNamespace()) + resultWork := waitForWorkToBeAvailable(work.GetName(), work.GetNamespace()) Expect(len(resultWork.Status.ManifestConditions)).Should(Equal(1)) - Expect(meta.IsStatusConditionTrue(resultWork.Status.Conditions, fleetv1beta1.WorkConditionTypeApplied)).Should(BeTrue()) - Expect(meta.IsStatusConditionTrue(resultWork.Status.ManifestConditions[0].Conditions, fleetv1beta1.WorkConditionTypeApplied)).Should(BeTrue()) expectedResourceID := fleetv1beta1.WorkResourceIdentifier{ Ordinal: 0, Group: "", @@ -109,13 +108,38 @@ var _ = Describe("Work Controller", func() { Name: cm.Name, } Expect(cmp.Diff(resultWork.Status.ManifestConditions[0].Identifier, expectedResourceID)).Should(BeEmpty()) + expected := []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: ManifestAlreadyUpToDateReason, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + Reason: string(manifestNotTrackableAction), + }, + } + Expect(condition.CompareConditions(expected, resultWork.Status.ManifestConditions[0].Conditions)).Should(BeEmpty()) + expected = []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: workAppliedCompleteReason, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + Reason: workNotTrackableReason, + }, + } + Expect(condition.CompareConditions(expected, resultWork.Status.Conditions)).Should(BeEmpty()) By("Check applied config map") var configMap corev1.ConfigMap Expect(k8sClient.Get(context.Background(), types.NamespacedName{Name: cmName, 
Namespace: cmNamespace}, &configMap)).Should(Succeed()) Expect(cmp.Diff(configMap.Labels, cm.Labels)).Should(BeEmpty()) Expect(cmp.Diff(configMap.Data, cm.Data)).Should(BeEmpty()) - }) It("Should apply the same manifest in two work properly", func() { @@ -208,8 +232,8 @@ var _ = Describe("Work Controller", func() { work = createWorkWithManifest(workNamespace, cm) Expect(k8sClient.Create(context.Background(), work)).ToNot(HaveOccurred()) - By("wait for the work to be applied") - waitForWorkToApply(work.GetName(), work.GetNamespace()) + By("wait for the work to be available") + waitForWorkToBeAvailable(work.GetName(), work.GetNamespace()) By("Check applied config map") verifyAppliedConfigMap(cm) @@ -371,7 +395,7 @@ var _ = Describe("Work Controller", func() { Expect(err).ToNot(HaveOccurred()) By("wait for the work to be applied") - waitForWorkToApply(work.GetName(), work.GetNamespace()) + waitForWorkToBeAvailable(work.GetName(), work.GetNamespace()) By("Check applied CloneSet") var appliedCloneSet kruisev1alpha1.CloneSet diff --git a/pkg/controllers/work/apply_controller_test.go b/pkg/controllers/work/apply_controller_test.go index 126c826db..a17c549b5 100644 --- a/pkg/controllers/work/apply_controller_test.go +++ b/pkg/controllers/work/apply_controller_test.go @@ -53,6 +53,7 @@ import ( fleetv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" "go.goms.io/fleet/pkg/utils" + "go.goms.io/fleet/pkg/utils/condition" ) var ( @@ -61,11 +62,6 @@ var ( APIVersion: fleetv1beta1.GroupVersion.String(), Kind: "AppliedWork", } - testGvr = schema.GroupVersionResource{ - Group: "apps", - Version: "v1", - Resource: "Deployment", - } testDeployment = appsv1.Deployment{ TypeMeta: metav1.TypeMeta{ Kind: "Deployment", @@ -95,7 +91,7 @@ type testMapper struct { func (m testMapper) RESTMapping(gk schema.GroupKind, _ ...string) (*meta.RESTMapping, error) { if gk.Kind == "Deployment" { return &meta.RESTMapping{ - Resource: testGvr, + Resource: utils.DeploymentGVR, GroupVersionKind: testDeployment.GroupVersionKind(), Scope: nil, }, nil @@ -288,6 +284,747 @@ func TestIsManifestManagedByWork(t *testing.T) { } } +func TestBuildManifestCondition(t *testing.T) { + tests := map[string]struct { + err error + action applyAction + expected []metav1.Condition + }{ + "TestNoErrorManifestCreated": { + err: nil, + action: manifestCreatedAction, + expected: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: string(manifestCreatedAction), + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionUnknown, + Reason: ManifestNeedsUpdateReason, + }, + }, + }, + "TestNoErrorManifestServerSideApplied": { + err: nil, + action: manifestServerSideAppliedAction, + expected: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: string(manifestServerSideAppliedAction), + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionUnknown, + Reason: ManifestNeedsUpdateReason, + }, + }, + }, + "TestNoErrorManifestThreeWayMergePatch": { + err: nil, + action: manifestThreeWayMergePatchAction, + expected: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: string(manifestThreeWayMergePatchAction), + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionUnknown, + Reason: ManifestNeedsUpdateReason, + }, + }, + }, + "TestNoErrorManifestNotAvailable": { + err: nil, + action: 
manifestNotAvailableYetAction, + expected: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: ManifestAlreadyUpToDateReason, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionFalse, + Reason: string(manifestNotAvailableYetAction), + }, + }, + }, + "TestNoErrorManifestNotTrackableAction": { + err: nil, + action: manifestNotTrackableAction, + expected: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: ManifestAlreadyUpToDateReason, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + Reason: string(manifestNotTrackableAction), + }, + }, + }, + "TestNoErrorManifestAvailableAction": { + err: nil, + action: manifestAvailableAction, + expected: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: ManifestAlreadyUpToDateReason, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + Reason: string(manifestAvailableAction), + }, + }, + }, + "TestWithError": { + err: errors.New("test error"), + action: errorApplyAction, + expected: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionFalse, + Reason: ManifestApplyFailedReason, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionUnknown, + Reason: ManifestApplyFailedReason, + }, + }, + }, + } + + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + conditions := buildManifestCondition(tt.err, tt.action, 1) + diff := condition.CompareConditions(tt.expected, conditions) + assert.Empty(t, diff, "buildManifestCondition() test %v failed, (-want +got):\n%s", name, diff) + }) + } +} + +func TestGenerateWorkCondition(t *testing.T) { + tests := map[string]struct { + manifestConditions []fleetv1beta1.ManifestCondition + expected []metav1.Condition + }{ + "Test applied one failed": { + manifestConditions: []fleetv1beta1.ManifestCondition{ + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 1, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionFalse, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionUnknown, + }, + }, + }, + }, + expected: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionFalse, + Reason: workAppliedFailedReason, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionUnknown, + Reason: workAppliedFailedReason, + }, + }, + }, + "Test applied one of the two failed": { + manifestConditions: []fleetv1beta1.ManifestCondition{ + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 1, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + }, + }, + }, + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 2, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionFalse, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionUnknown, + }, + }, + }, + }, + expected: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionFalse, + Reason: workAppliedFailedReason, + }, + { + Type: 
fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionUnknown, + Reason: workAppliedFailedReason, + }, + }, + }, + "Test applied one succeed but available unknown yet": { + manifestConditions: []fleetv1beta1.ManifestCondition{ + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 1, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: string(manifestNotTrackableAction), + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionUnknown, + Reason: ManifestNeedsUpdateReason, + }, + }, + }, + }, + expected: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: workAppliedCompleteReason, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionUnknown, + Reason: workAvailabilityUnknownReason, + }, + }, + }, + "Test applied one succeed but not available yet": { + manifestConditions: []fleetv1beta1.ManifestCondition{ + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 1, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: string(manifestNotTrackableAction), + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionFalse, + }, + }, + }, + }, + expected: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: workAppliedCompleteReason, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionFalse, + Reason: workNotAvailableYetReason, + }, + }, + }, + "Test applied all succeeded but one of two not available yet": { + manifestConditions: []fleetv1beta1.ManifestCondition{ + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 1, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionFalse, + Reason: string(manifestNotAvailableYetAction), + }, + }, + }, + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 2, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + Reason: string(manifestNotTrackableAction), + }, + }, + }, + }, + expected: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: workAppliedCompleteReason, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionFalse, + Reason: workNotAvailableYetReason, + }, + }, + }, + "Test applied all succeeded but one unknown, one unavailable, one available": { + manifestConditions: []fleetv1beta1.ManifestCondition{ + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 1, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionFalse, + Reason: string(manifestNotAvailableYetAction), + }, + }, + }, + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 2, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: 
metav1.ConditionUnknown, + Reason: ManifestNeedsUpdateReason, + }, + }, + }, + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 3, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + Reason: string(manifestNotTrackableAction), + }, + }, + }, + }, + expected: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: workAppliedCompleteReason, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionUnknown, + Reason: workAvailabilityUnknownReason, + }, + }, + }, + "Test applied all succeeded but one of two not trackable": { + manifestConditions: []fleetv1beta1.ManifestCondition{ + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 1, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + Reason: string(manifestAvailableAction), + }, + }, + }, + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 2, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + Reason: string(manifestNotTrackableAction), + }, + }, + }, + }, + expected: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: workAppliedCompleteReason, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + Reason: workNotTrackableReason, + }, + }, + }, + "Test applied all available": { + manifestConditions: []fleetv1beta1.ManifestCondition{ + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 1, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + Reason: string(manifestAvailableAction), + }, + }, + }, + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 2, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + Reason: string(manifestAvailableAction), + }, + }, + }, + }, + expected: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: workAppliedCompleteReason, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + Reason: workAvailableReason, + }, + }, + }, + } + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + conditions := buildWorkCondition(tt.manifestConditions, 1) + diff := condition.CompareConditions(tt.expected, conditions) + assert.Empty(t, diff, "buildWorkCondition() test %v failed, (-want +got):\n%s", name, diff) + }) + } +} + +func TestTrackResourceAvailability(t *testing.T) { + tests := map[string]struct { + gvr schema.GroupVersionResource + obj *unstructured.Unstructured + expected applyAction + err error + }{ + "Test Deployment available": { + gvr: utils.DeploymentGVR, + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": 
"Deployment", + "metadata": map[string]interface{}{ + "generation": 1, + "name": "test-deployment", + }, + "status": map[string]interface{}{ + "observedGeneration": 1, + "conditions": []interface{}{ + map[string]interface{}{ + "type": "Available", + "status": "True", + }, + }, + }, + }, + }, + expected: manifestAvailableAction, + err: nil, + }, + "Test Deployment not observe the latest generation": { + gvr: utils.DeploymentGVR, + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "generation": 2, + "name": "test-deployment", + }, + "status": map[string]interface{}{ + "observedGeneration": 1, + "conditions": []interface{}{ + map[string]interface{}{ + "type": "Available", + "status": "True", + }, + }, + }, + }, + }, + expected: manifestNotAvailableYetAction, + err: nil, + }, + "Test Deployment not available": { + gvr: utils.DeploymentGVR, + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "generation": 1, + "name": "test-deployment", + }, + "status": map[string]interface{}{ + "observedGeneration": 1, + "conditions": []interface{}{ + map[string]interface{}{ + "type": "Available", + "status": "False", + }, + }, + }, + }, + }, + expected: manifestNotAvailableYetAction, + err: nil, + }, + "Test StatefulSet available": { + gvr: utils.StatefulSettGVR, + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "StatefulSet", + "metadata": map[string]interface{}{ + "generation": 5, + "name": "test-statefulset", + }, + "spec": map[string]interface{}{ + "replicas": 3, + }, + "status": map[string]interface{}{ + "observedGeneration": 5, + "availableReplicas": 3, + "currentReplicas": 3, + "updatedReplicas": 3, + }, + }, + }, + expected: manifestAvailableAction, + err: nil, + }, + "Test StatefulSet not available": { + gvr: utils.StatefulSettGVR, + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "StatefulSet", + "metadata": map[string]interface{}{ + "generation": 3, + "name": "test-statefulset", + }, + "spec": map[string]interface{}{ + "replicas": 3, + }, + "status": map[string]interface{}{ + "observedGeneration": 3, + "availableReplicas": 2, + "currentReplicas": 3, + "updatedReplicas": 3, + }, + }, + }, + expected: manifestNotAvailableYetAction, + err: nil, + }, + "Test DaemonSet Available": { + gvr: utils.DaemonSettGVR, + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "DaemonSet", + "metadata": map[string]interface{}{ + "generation": 1, + }, + "status": map[string]interface{}{ + "observedGeneration": 1, + "numberAvailable": 1, + "desiredNumberScheduled": 1, + "currentNumberScheduled": 1, + "updatedNumberScheduled": 1, + }, + }, + }, + expected: manifestAvailableAction, + err: nil, + }, + "Test DaemonSet not available": { + gvr: utils.DaemonSettGVR, + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "DaemonSet", + "metadata": map[string]interface{}{ + "generation": 1, + }, + "status": map[string]interface{}{ + "observedGeneration": 1, + "numberAvailable": 0, + "desiredNumberScheduled": 1, + "currentNumberScheduled": 1, + "updatedNumberScheduled": 1, + }, + }, + }, + expected: manifestNotAvailableYetAction, + err: nil, + }, + "Test Job available with succeeded pod": { + gvr: 
utils.JobGVR, + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "batch/v1", + "kind": "Job", + "status": map[string]interface{}{ + "succeeded": 1, + "ready": 0, + }, + }, + }, + expected: manifestAvailableAction, + err: nil, + }, + "Test Job available with ready pod": { + gvr: utils.JobGVR, + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "batch/v1", + "kind": "Job", + "status": map[string]interface{}{ + "ready": 4, + }, + }, + }, + expected: manifestAvailableAction, + err: nil, + }, + "TestJob not available": { + gvr: utils.JobGVR, + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "batch/v1", + "kind": "Job", + "status": map[string]interface{}{ + "ready": 0, + }, + }, + }, + expected: manifestNotAvailableYetAction, + err: nil, + }, + "Test Job not trackable": { + gvr: utils.JobGVR, + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "batch/v1", + "kind": "Job", + "status": map[string]interface{}{ + "Succeeded": 2, + }, + }, + }, + expected: manifestNotTrackableAction, + err: nil, + }, + "Test UnknownResource": { + gvr: schema.GroupVersionResource{ + Group: "unknown", + Version: "v1", + Resource: "unknown", + }, + obj: &unstructured.Unstructured{}, + expected: manifestNotTrackableAction, + err: nil, + }, + } + + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + action, err := trackResourceAvailability(tt.gvr, tt.obj) + assert.Equal(t, tt.expected, action, "action not matching in test %s", name) + assert.Equal(t, tt.err, err, "applyErr not matching in test %s", name) + }) + } +} + func TestApplyUnstructured(t *testing.T) { correctObj, correctDynamicClient, correctSpecHash, err := createObjAndDynamicClient(testManifest.Raw) if err != nil { @@ -444,7 +1181,7 @@ func TestApplyUnstructured(t *testing.T) { }, workObj: correctObj.DeepCopy(), resultSpecHash: correctSpecHash, - resultAction: ManifestCreatedAction, + resultAction: manifestCreatedAction, resultErr: nil, }, "test creation succeeds when the object has a generated name": { @@ -457,7 +1194,7 @@ func TestApplyUnstructured(t *testing.T) { }, workObj: generatedSpecObj.DeepCopy(), resultSpecHash: generatedSpecHash, - resultAction: ManifestCreatedAction, + resultAction: manifestCreatedAction, resultErr: nil, }, "client error looking for object / fail": { @@ -469,7 +1206,7 @@ func TestApplyUnstructured(t *testing.T) { recorder: utils.NewFakeRecorder(1), }, workObj: correctObj.DeepCopy(), - resultAction: ManifestNoChangeAction, + resultAction: errorApplyAction, resultErr: errors.New("client error"), }, "owner reference comparison failure / fail": { @@ -481,17 +1218,17 @@ func TestApplyUnstructured(t *testing.T) { recorder: utils.NewFakeRecorder(1), }, workObj: correctObj.DeepCopy(), - resultAction: ManifestNoChangeAction, + resultAction: errorApplyAction, resultErr: errors.New("resource is not managed by the work controller"), }, - "equal spec hash of current vs work object / succeed without updates": { + "equal spec hash of current vs work object / not available yet": { reconciler: ApplyWorkReconciler{ spokeDynamicClient: correctDynamicClient, recorder: utils.NewFakeRecorder(1), }, workObj: correctObj.DeepCopy(), resultSpecHash: correctSpecHash, - resultAction: ManifestNoChangeAction, + resultAction: manifestNotAvailableYetAction, resultErr: nil, }, "unequal spec hash of current vs work object / client patch fail": { @@ -500,7 +1237,7 @@ func TestApplyUnstructured(t *testing.T) { recorder: 
utils.NewFakeRecorder(1), }, workObj: correctObj.DeepCopy(), - resultAction: ManifestNoChangeAction, + resultAction: errorApplyAction, resultErr: errors.New("patch failed"), }, "happy path - with updates": { @@ -511,7 +1248,7 @@ func TestApplyUnstructured(t *testing.T) { }, workObj: correctObj, resultSpecHash: diffSpecHash, - resultAction: ManifestThreeWayMergePatchAction, + resultAction: manifestThreeWayMergePatchAction, resultErr: nil, }, "test create succeeds for large manifest when object does not exist": { @@ -522,7 +1259,7 @@ func TestApplyUnstructured(t *testing.T) { }, workObj: largeObj, resultSpecHash: largeObjSpecHash, - resultAction: ManifestCreatedAction, + resultAction: manifestCreatedAction, resultErr: nil, }, "test apply succeeds on update for large manifest when object exists": { @@ -533,7 +1270,7 @@ func TestApplyUnstructured(t *testing.T) { }, workObj: updatedLargeObj, resultSpecHash: updatedLargeObjSpecHash, - resultAction: ManifestServerSideAppliedAction, + resultAction: manifestServerSideAppliedAction, resultErr: nil, }, "test create fails for large manifest when object does not exist": { @@ -543,7 +1280,7 @@ func TestApplyUnstructured(t *testing.T) { recorder: utils.NewFakeRecorder(1), }, workObj: largeObj, - resultAction: ManifestNoChangeAction, + resultAction: errorApplyAction, resultErr: errors.New("create error"), }, "test apply fails for large manifest when object exists": { @@ -553,19 +1290,19 @@ func TestApplyUnstructured(t *testing.T) { recorder: utils.NewFakeRecorder(1), }, workObj: updatedLargeObj, - resultAction: ManifestNoChangeAction, + resultAction: errorApplyAction, resultErr: errors.New("apply error"), }, } for testName, testCase := range testCases { t.Run(testName, func(t *testing.T) { - applyResult, applyAction, err := testCase.reconciler.applyUnstructured(context.Background(), testGvr, testCase.workObj) + applyResult, applyAction, err := testCase.reconciler.applyUnstructured(context.Background(), utils.DeploymentGVR, testCase.workObj) assert.Equalf(t, testCase.resultAction, applyAction, "updated boolean not matching for Testcase %s", testName) if testCase.resultErr != nil { assert.Containsf(t, err.Error(), testCase.resultErr.Error(), "error not matching for Testcase %s", testName) } else { - assert.Truef(t, err == nil, "err is not nil for Testcase %s", testName) + assert.Truef(t, err == nil, "applyErr is not nil for Testcase %s", testName) assert.Truef(t, applyResult != nil, "applyResult is not nil for Testcase %s", testName) // Not checking last applied config because it has live fields. 
assert.Equalf(t, testCase.resultSpecHash, applyResult.GetAnnotations()[fleetv1beta1.ManifestHashAnnotation], @@ -609,12 +1346,13 @@ func TestApplyManifest(t *testing.T) { }) testCases := map[string]struct { - reconciler ApplyWorkReconciler - manifestList []fleetv1beta1.Manifest - generation int64 - action applyAction - wantGvr schema.GroupVersionResource - wantErr error + reconciler ApplyWorkReconciler + manifestList []fleetv1beta1.Manifest + applyStrategy *fleetv1beta1.ApplyStrategy + wantGeneration int64 + wantAction applyAction + wantGvr schema.GroupVersionResource + wantErr error }{ "manifest is in proper format/ happy path": { reconciler: ApplyWorkReconciler{ @@ -625,11 +1363,11 @@ func TestApplyManifest(t *testing.T) { recorder: utils.NewFakeRecorder(1), joined: atomic.NewBool(true), }, - manifestList: []fleetv1beta1.Manifest{testManifest}, - generation: 0, - action: ManifestCreatedAction, - wantGvr: expectedGvr, - wantErr: nil, + manifestList: []fleetv1beta1.Manifest{testManifest}, + wantGeneration: 0, + wantAction: manifestCreatedAction, + wantGvr: expectedGvr, + wantErr: nil, }, "manifest has incorrect syntax/ decode fail": { reconciler: ApplyWorkReconciler{ @@ -640,10 +1378,10 @@ func TestApplyManifest(t *testing.T) { recorder: utils.NewFakeRecorder(1), joined: atomic.NewBool(true), }, - manifestList: append([]fleetv1beta1.Manifest{}, InvalidManifest), - generation: 0, - action: ManifestNoChangeAction, - wantGvr: emptyGvr, + manifestList: append([]fleetv1beta1.Manifest{}, InvalidManifest), + wantGeneration: 0, + wantAction: errorApplyAction, + wantGvr: emptyGvr, wantErr: &json.UnmarshalTypeError{ Value: "string", Type: reflect.TypeOf(map[string]interface{}{}), @@ -658,11 +1396,11 @@ func TestApplyManifest(t *testing.T) { recorder: utils.NewFakeRecorder(1), joined: atomic.NewBool(true), }, - manifestList: append([]fleetv1beta1.Manifest{}, MissingManifest), - generation: 0, - action: ManifestNoChangeAction, - wantGvr: emptyGvr, - wantErr: errors.New("failed to find group/version/resource from restmapping: test error: mapping does not exist"), + manifestList: append([]fleetv1beta1.Manifest{}, MissingManifest), + wantGeneration: 0, + wantAction: errorApplyAction, + wantGvr: emptyGvr, + wantErr: errors.New("failed to find group/version/resource from restmapping: test error: mapping does not exist"), }, "manifest is in proper format/ should fail applyUnstructured": { reconciler: ApplyWorkReconciler{ @@ -673,23 +1411,23 @@ func TestApplyManifest(t *testing.T) { recorder: utils.NewFakeRecorder(1), joined: atomic.NewBool(true), }, - manifestList: append([]fleetv1beta1.Manifest{}, testManifest), - generation: 0, - action: ManifestNoChangeAction, - wantGvr: expectedGvr, - wantErr: errors.New(failMsg), + manifestList: append([]fleetv1beta1.Manifest{}, testManifest), + wantGeneration: 0, + wantAction: errorApplyAction, + wantGvr: expectedGvr, + wantErr: errors.New(failMsg), }, } for testName, testCase := range testCases { t.Run(testName, func(t *testing.T) { - resultList := testCase.reconciler.applyManifests(context.Background(), testCase.manifestList, ownerRef) + resultList := testCase.reconciler.applyManifests(context.Background(), testCase.manifestList, ownerRef, testCase.applyStrategy) for _, result := range resultList { if testCase.wantErr != nil { - assert.Containsf(t, result.err.Error(), testCase.wantErr.Error(), "Incorrect error for Testcase %s", testName) + assert.Containsf(t, result.applyErr.Error(), testCase.wantErr.Error(), "Incorrect error for Testcase %s", testName) } else { - 
assert.Equalf(t, testCase.generation, result.generation, "Testcase %s: generation incorrect", testName) - assert.Equalf(t, testCase.action, result.action, "Testcase %s: Updated action incorrect", testName) + assert.Equalf(t, testCase.wantGeneration, result.generation, "Testcase %s: wantGeneration incorrect", testName) + assert.Equalf(t, testCase.wantAction, result.action, "Testcase %s: Updated wantAction incorrect", testName) } } }) @@ -1008,7 +1746,7 @@ func TestReconcile(t *testing.T) { } else { if testCase.requeue { if testCase.reconciler.joined.Load() { - assert.Equal(t, ctrl.Result{RequeueAfter: time.Minute * 5}, ctrlResult, "incorrect ctrlResult for Testcase %s", testName) + assert.Equal(t, ctrl.Result{RequeueAfter: time.Second * 3}, ctrlResult, "incorrect ctrlResult for Testcase %s", testName) } else { assert.Equal(t, ctrl.Result{RequeueAfter: time.Second * 5}, ctrlResult, "incorrect ctrlResult for Testcase %s", testName) } diff --git a/pkg/controllers/workgenerator/controller.go b/pkg/controllers/workgenerator/controller.go index 040f266e0..a2bae09d0 100644 --- a/pkg/controllers/workgenerator/controller.go +++ b/pkg/controllers/workgenerator/controller.go @@ -659,10 +659,14 @@ func (r *Reconciler) SetupWithManager(mgr controllerruntime.Manager) error { } oldAppliedStatus := meta.FindStatusCondition(oldWork.Status.Conditions, fleetv1beta1.WorkConditionTypeApplied) newAppliedStatus := meta.FindStatusCondition(newWork.Status.Conditions, fleetv1beta1.WorkConditionTypeApplied) - // we only need to handle the case the applied condition is flipped between true and NOT true between the + oldAvailableStatus := meta.FindStatusCondition(oldWork.Status.Conditions, fleetv1beta1.WorkConditionTypeAvailable) + newAvailableStatus := meta.FindStatusCondition(newWork.Status.Conditions, fleetv1beta1.WorkConditionTypeAvailable) + + // we only need to handle the case the applied or available conditions is flipped between true and NOT true between the // new and old work objects. 
Otherwise, it won't affect the binding applied condition - if condition.IsConditionStatusTrue(oldAppliedStatus, oldWork.GetGeneration()) == condition.IsConditionStatusTrue(newAppliedStatus, newWork.GetGeneration()) { - klog.V(2).InfoS("The work applied condition didn't flip between true and false, no need to reconcile", "oldWork", klog.KObj(oldWork), "newWork", klog.KObj(newWork)) + if condition.IsConditionStatusTrue(oldAppliedStatus, oldWork.GetGeneration()) == condition.IsConditionStatusTrue(newAppliedStatus, newWork.GetGeneration()) && + condition.IsConditionStatusTrue(oldAvailableStatus, oldWork.GetGeneration()) == condition.IsConditionStatusTrue(newAvailableStatus, newWork.GetGeneration()) { + klog.V(2).InfoS("The work applied or available condition didn't flip between true and false, no need to reconcile", "oldWork", klog.KObj(oldWork), "newWork", klog.KObj(newWork)) return } klog.V(2).InfoS("Received a work update event", "work", klog.KObj(newWork), "parentBindingName", parentBindingName) diff --git a/pkg/utils/common.go b/pkg/utils/common.go index 3ab06df0a..f75de980a 100644 --- a/pkg/utils/common.go +++ b/pkg/utils/common.go @@ -12,6 +12,8 @@ import ( "strings" "time" + appv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" discoveryv1 "k8s.io/api/discovery/v1" rbacv1 "k8s.io/api/rbac/v1" @@ -281,6 +283,30 @@ var ( Version: placementv1alpha1.GroupVersion.Version, Kind: placementv1alpha1.ResourceOverrideSnapshotKind, } + + DeploymentGVR = schema.GroupVersionResource{ + Group: appv1.GroupName, + Version: appv1.SchemeGroupVersion.Version, + Resource: "deployments", + } + + DaemonSettGVR = schema.GroupVersionResource{ + Group: appv1.GroupName, + Version: appv1.SchemeGroupVersion.Version, + Resource: "daemonsets", + } + + StatefulSettGVR = schema.GroupVersionResource{ + Group: appv1.GroupName, + Version: appv1.SchemeGroupVersion.Version, + Resource: "statefulsets", + } + + JobGVR = schema.GroupVersionResource{ + Group: batchv1.GroupName, + Version: batchv1.SchemeGroupVersion.Version, + Resource: "jobs", + } ) // RandSecureInt returns a uniform random value in [1, max] or panic. diff --git a/pkg/utils/condition/condition.go b/pkg/utils/condition/condition.go index 10364cce5..b3e6acda7 100644 --- a/pkg/utils/condition/condition.go +++ b/pkg/utils/condition/condition.go @@ -7,6 +7,10 @@ Licensed under the MIT license. package condition import ( + "sort" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -101,3 +105,16 @@ func IsConditionStatusTrue(cond *metav1.Condition, latestGeneration int64) bool func IsConditionStatusFalse(cond *metav1.Condition, latestGeneration int64) bool { return cond != nil && cond.Status == metav1.ConditionFalse && cond.ObservedGeneration == latestGeneration } + +// CompareConditions compares two condition slices and returns a string with the differences. 
+func CompareConditions(wantConditions, gotConditions []metav1.Condition) string { + ignoreOption := cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime", "ObservedGeneration", "Message") + // we need to sort each condition slice by type before comparing + sort.SliceStable(wantConditions, func(i, j int) bool { + return wantConditions[i].Type < wantConditions[j].Type + }) + sort.SliceStable(gotConditions, func(i, j int) bool { + return gotConditions[i].Type < gotConditions[j].Type + }) + return cmp.Diff(wantConditions, gotConditions, ignoreOption) +} diff --git a/test/e2e/actuals_test.go b/test/e2e/actuals_test.go index 3412ae4e2..17b50d086 100644 --- a/test/e2e/actuals_test.go +++ b/test/e2e/actuals_test.go @@ -309,10 +309,10 @@ func crpStatusUpdatedActual( func workNamespaceRemovedFromClusterActual(cluster *framework.Cluster) func() error { client := cluster.KubeClient - workNamespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + ns := appNamespace() return func() error { - if err := client.Get(ctx, types.NamespacedName{Name: workNamespaceName}, &corev1.Namespace{}); !errors.IsNotFound(err) { - return fmt.Errorf("work namespace %s still exists or an unexpected error occurred: %w", workNamespaceName, err) + if err := client.Get(ctx, types.NamespacedName{Name: ns.Name}, &corev1.Namespace{}); !errors.IsNotFound(err) { + return fmt.Errorf("work namespace %s still exists or an unexpected error occurred: %w", ns.Name, err) } return nil } @@ -324,6 +324,9 @@ func allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual() func() er return func() error { crp := &placementv1beta1.ClusterResourcePlacement{} if err := hubClient.Get(ctx, types.NamespacedName{Name: crpName}, crp); err != nil { + if errors.IsNotFound(err) { + return nil + } return err } diff --git a/test/e2e/enveloped_object_placement_test.go b/test/e2e/enveloped_object_placement_test.go index 9285d6fc9..95f78890b 100644 --- a/test/e2e/enveloped_object_placement_test.go +++ b/test/e2e/enveloped_object_placement_test.go @@ -42,7 +42,7 @@ var _ = Describe("placing wrapped resources using a CRP", Ordered, func() { var wantSelectedResources []placementv1beta1.ResourceIdentifier BeforeAll(func() { // Create the test resources. 
- readTestManifests() + readEnvelopTestManifests() wantSelectedResources = []placementv1beta1.ResourceIdentifier{ { Kind: "Namespace", @@ -65,7 +65,7 @@ var _ = Describe("placing wrapped resources using a CRP", Ordered, func() { }) Context("Test a CRP place enveloped objects successfully", Ordered, func() { - It("Create the test resources in the namespace", createWrappedWorkResources) + It("Create the test resources in the namespace", createWrappedResourcesForEnvelopTest) It("Create the CRP that select the name space", func() { crp := &placementv1beta1.ClusterResourcePlacement{ @@ -96,7 +96,7 @@ var _ = Describe("placing wrapped resources using a CRP", Ordered, func() { It("should place the resources on all member clusters", func() { for idx := range allMemberClusters { memberCluster := allMemberClusters[idx] - workResourcesPlacedActual := checkEnvelopResourcePlacement(memberCluster) + workResourcesPlacedActual := checkEnvelopQuotaAndMutationWebhookPlacement(memberCluster) Eventually(workResourcesPlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster.ClusterName) } }) @@ -138,7 +138,7 @@ var _ = Describe("placing wrapped resources using a CRP", Ordered, func() { It("should place the resources on all member clusters again", func() { for idx := range allMemberClusters { memberCluster := allMemberClusters[idx] - workResourcesPlacedActual := checkEnvelopResourcePlacement(memberCluster) + workResourcesPlacedActual := checkEnvelopQuotaAndMutationWebhookPlacement(memberCluster) Eventually(workResourcesPlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster.ClusterName) } }) @@ -169,8 +169,8 @@ var _ = Describe("placing wrapped resources using a CRP", Ordered, func() { }) }) -func checkEnvelopResourcePlacement(memberCluster *framework.Cluster) func() error { - workNamespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) +func checkEnvelopQuotaAndMutationWebhookPlacement(memberCluster *framework.Cluster) func() error { + workNamespaceName := appNamespace().Name return func() error { if err := validateWorkNamespaceOnCluster(memberCluster, types.NamespacedName{Name: workNamespaceName}); err != nil { return err @@ -238,9 +238,9 @@ func checkForOneClusterFailedToApplyStatus(wantSelectedResources []placementv1be }, }, Condition: metav1.Condition{ - Type: string(placementv1beta1.WorkConditionTypeApplied), + Type: placementv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: work.AppliedManifestFailedReason, + Reason: work.ManifestApplyFailedReason, }, }, } @@ -292,7 +292,7 @@ func checkForOneClusterFailedToApplyStatus(wantSelectedResources []placementv1be } } -func readTestManifests() { +func readEnvelopTestManifests() { By("Read the testConfigMap resources") err := utils.GetObjectFromManifest("resources/test-configmap.yaml", &testConfigMap) Expect(err).Should(Succeed()) @@ -314,9 +314,9 @@ func readTestManifests() { Expect(err).Should(Succeed()) } -// createWrappedWorkResources creates some enveloped resources on the hub cluster for testing purposes. -func createWrappedWorkResources() { - ns := workNamespace() +// createWrappedResourcesForEnvelopTest creates some enveloped resources on the hub cluster for testing purposes. 
+func createWrappedResourcesForEnvelopTest() { + ns := appNamespace() Expect(hubClient.Create(ctx, &ns)).To(Succeed(), "Failed to create namespace %s", ns.Namespace) // modify the configMap according to the namespace testConfigMap.Namespace = ns.Name diff --git a/test/e2e/placement_selecting_resources_test.go b/test/e2e/placement_selecting_resources_test.go index 1ede49224..5705b6efe 100644 --- a/test/e2e/placement_selecting_resources_test.go +++ b/test/e2e/placement_selecting_resources_test.go @@ -431,7 +431,7 @@ var _ = Describe("validating CRP when adding resources in a matching namespace", BeforeAll(func() { By("creating namespace") - ns := workNamespace() + ns := appNamespace() Expect(hubClient.Create(ctx, &ns)).To(Succeed(), "Failed to create namespace %s", ns.Name) // Create the CRP. @@ -770,7 +770,7 @@ var _ = Describe("validating CRP when failed to apply resources", Ordered, func( createWorkResources() By("creating work namespace on member cluster") - ns := workNamespace() + ns := appNamespace() Expect(memberCluster1EastProdClient.Create(ctx, &ns)).Should(Succeed(), "Failed to create namespace %s", ns.Name) @@ -844,7 +844,7 @@ var _ = Describe("validating CRP when failed to apply resources", Ordered, func( Condition: metav1.Condition{ Type: placementv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, - Reason: work.AppliedManifestFailedReason, + Reason: work.ManifestApplyFailedReason, ObservedGeneration: 0, }, }, @@ -1321,7 +1321,7 @@ func createResourcesForMultipleResourceSnapshots() { for i := 0; i < 3; i++ { var secret corev1.Secret Expect(utils.GetObjectFromManifest("../integration/manifests/resources/test-large-secret.yaml", &secret)).Should(Succeed(), "Failed to read large secret from file") - secret.Namespace = workNamespace().Name + secret.Namespace = appNamespace().Name secret.Name = fmt.Sprintf(appSecretNameTemplate, i) Expect(hubClient.Create(ctx, &secret)).To(Succeed(), "Failed to create secret %s/%s", secret.Name, secret.Namespace) } diff --git a/test/e2e/resources/test-deployment.yaml b/test/e2e/resources/test-deployment.yaml new file mode 100644 index 000000000..24af05c07 --- /dev/null +++ b/test/e2e/resources/test-deployment.yaml @@ -0,0 +1,34 @@ +# Copyright 2021 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-nginx + namespace: default +spec: + selector: + matchLabels: + app: test-nginx + replicas: 2 + template: + metadata: + labels: + app: test-nginx + spec: + containers: + - name: nginx + image: nginx:1.14.2 + ports: + - containerPort: 80 diff --git a/test/e2e/resources/test-envelop-deployment.yaml b/test/e2e/resources/test-envelop-deployment.yaml new file mode 100644 index 000000000..9ef229dbb --- /dev/null +++ b/test/e2e/resources/test-envelop-deployment.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: envelop-deployment + namespace: app + annotations: + kubernetes-fleet.io/envelope-configmap: "true" +data: + deployment.yaml: | + apiVersion: apps/v1 + kind: Deployment \ No newline at end of file diff --git a/test/e2e/resources_test.go b/test/e2e/resources_test.go index 3e7d36b54..02b9fc731 100644 --- a/test/e2e/resources_test.go +++ b/test/e2e/resources_test.go @@ -17,6 +17,7 @@ import ( "k8s.io/utils/ptr" fleetnetworkingv1alpha1 "go.goms.io/fleet-networking/api/v1alpha1" + placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" ) @@ -59,7 +60,7 @@ func invalidWorkResourceSelector() []placementv1beta1.ClusterResourceSelector { } } -func workNamespace() corev1.Namespace { +func appNamespace() corev1.Namespace { return corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()), diff --git a/test/e2e/rollout_test.go b/test/e2e/rollout_test.go new file mode 100644 index 000000000..461e21f67 --- /dev/null +++ b/test/e2e/rollout_test.go @@ -0,0 +1,217 @@ +/* +Copyright (c) Microsoft Corporation. +Licensed under the MIT license. +*/ + +package e2e + +import ( + "encoding/json" + "fmt" + "strings" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + appv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + + placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" + "go.goms.io/fleet/pkg/utils" + "go.goms.io/fleet/pkg/utils/condition" + "go.goms.io/fleet/test/e2e/framework" + testutils "go.goms.io/fleet/test/e2e/v1alpha1/utils" +) + +var ( + // pre loaded test manifests + testDeployment appv1.Deployment + testEnvelopDeployment corev1.ConfigMap +) + +// Note that this container will run in parallel with other containers. +var _ = Describe("placing wrapped resources using a CRP", Ordered, func() { + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + workNamespaceName := appNamespace().Name + var wantSelectedResources []placementv1beta1.ResourceIdentifier + BeforeAll(func() { + // Create the test resources. + readRolloutTestManifests() + wantSelectedResources = []placementv1beta1.ResourceIdentifier{ + { + Kind: "Namespace", + Name: workNamespaceName, + Version: "v1", + }, + { + Kind: "ConfigMap", + Name: testEnvelopDeployment.Name, + Version: "v1", + Namespace: workNamespaceName, + }, + } + }) + + Context("Test a CRP place enveloped objects successfully", Ordered, func() { + It("Create the wrapped deployment resources in the namespace", createWrappedResourcesForRollout) + + It("Create the CRP that select the name space", func() { + crp := &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. 
+ Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.ClusterResourcePlacementSpec{ + ResourceSelectors: workResourceSelector(), + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP") + }) + + It("should update CRP status as expected", func() { + crpStatusUpdatedActual := crpStatusUpdatedActual(wantSelectedResources, allMemberClusterNames, nil, "0") + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + }) + + It("should place the resources on all member clusters", func() { + for idx := range allMemberClusters { + memberCluster := allMemberClusters[idx] + workResourcesPlacedActual := waitForDeploymentPlacementToReady(memberCluster) + Eventually(workResourcesPlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster.ClusterName) + } + }) + + It("should mark the work as available", func() { + for idx := range allMemberClusters { + memberCluster := allMemberClusters[idx] + var works placementv1beta1.WorkList + listOpts := []client.ListOption{ + client.InNamespace(fmt.Sprintf(utils.NamespaceNameFormat, memberCluster.ClusterName)), + } + Eventually(func() string { + if err := hubClient.List(ctx, &works, listOpts...); err != nil { + return err.Error() + } + workAvailable := false + for i := range works.Items { + work := works.Items[i] + if strings.Contains(work.Name, "configmap") { + // this is the wrapped work + wantConditions := []metav1.Condition{ + { + Type: placementv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: "WorkAppliedComplete", + ObservedGeneration: 1, + }, + { + Type: placementv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + Reason: "WorkAvailable", + ObservedGeneration: 1, + }, + } + diff := condition.CompareConditions(wantConditions, work.Status.Conditions) + if len(diff) != 0 { + return diff + } + workAvailable = true + } else { + // this is the plain work which contains only the namespace which is not trackable + wantConditions := []metav1.Condition{ + { + Type: placementv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: "WorkAppliedComplete", + ObservedGeneration: 1, + }, + { + Type: placementv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + Reason: "WorkNotTrackable", + ObservedGeneration: 1, + }, + } + diff := condition.CompareConditions(wantConditions, work.Status.Conditions) + if len(diff) != 0 { + return diff + } + } + } + if workAvailable { + return "" + } + return "no available work found" + }, testutils.PollTimeout, testutils.PollInterval).Should(BeEmpty(), + "work condition mismatch for work %s (-want, +got):", memberCluster.ClusterName) + } + }) + }) + + AfterAll(func() { + // Remove the custom deletion blocker finalizer from the CRP. 
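// A hypothetical helper (not part of this change) condensing the polling pattern used
// above: return the CompareConditions diff from the polled function and let Gomega assert
// that it is eventually empty. CompareConditions ignores LastTransitionTime,
// ObservedGeneration and Message, so only Type/Status/Reason need to line up.
func workConditionsDiff(work *placementv1beta1.Work, want []metav1.Condition) string {
	return condition.CompareConditions(want, work.Status.Conditions)
}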
+ ensureCRPAndRelatedResourcesDeletion(crpName, allMemberClusters) + }) +}) + +func readRolloutTestManifests() { + By("Read the testConfigMap resources") + err := utils.GetObjectFromManifest("resources/test-deployment.yaml", &testDeployment) + Expect(err).Should(Succeed()) + + By("Read testEnvelopConfigMap resource") + err = utils.GetObjectFromManifest("resources/test-envelop-deployment.yaml", &testEnvelopDeployment) + Expect(err).Should(Succeed()) +} + +// createWrappedResourcesForRollout creates some enveloped resources on the hub cluster with a deployment for testing purposes. +func createWrappedResourcesForRollout() { + ns := appNamespace() + Expect(hubClient.Create(ctx, &ns)).To(Succeed(), "Failed to create namespace %s", ns.Namespace) + // modify the enveloped configMap according to the namespace + testEnvelopDeployment.Namespace = ns.Name + + // modify the embedded namespaced resource according to the namespace + testDeployment.Namespace = ns.Name + resourceDeploymentByte, err := json.Marshal(testDeployment) + Expect(err).Should(Succeed()) + testEnvelopDeployment.Data["deployment.yaml"] = string(resourceDeploymentByte) + Expect(hubClient.Create(ctx, &testEnvelopDeployment)).To(Succeed(), "Failed to create testEnvelop deployment %s", testEnvelopDeployment.Name) +} + +func waitForDeploymentPlacementToReady(memberCluster *framework.Cluster) func() error { + workNamespaceName := appNamespace().Name + return func() error { + if err := validateWorkNamespaceOnCluster(memberCluster, types.NamespacedName{Name: workNamespaceName}); err != nil { + return err + } + By("check the placedDeployment") + placedDeployment := &appv1.Deployment{} + if err := memberCluster.KubeClient.Get(ctx, types.NamespacedName{Namespace: workNamespaceName, Name: testDeployment.Name}, placedDeployment); err != nil { + return err + } + By("check the placedDeployment is ready") + var depCond *appv1.DeploymentCondition + for i := range placedDeployment.Status.Conditions { + if placedDeployment.Status.Conditions[i].Type == appv1.DeploymentAvailable { + depCond = &placedDeployment.Status.Conditions[i] + break + } + } + if placedDeployment.Status.ObservedGeneration == placedDeployment.Generation && depCond != nil && depCond.Status == corev1.ConditionTrue { + return nil + } + return nil + } +} diff --git a/test/e2e/setup_test.go b/test/e2e/setup_test.go index a30ba8dce..87cd0c57d 100644 --- a/test/e2e/setup_test.go +++ b/test/e2e/setup_test.go @@ -28,6 +28,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log/zap" fleetnetworkingv1alpha1 "go.goms.io/fleet-networking/api/v1alpha1" + clusterv1beta1 "go.goms.io/fleet/apis/cluster/v1beta1" placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" "go.goms.io/fleet/test/e2e/framework" @@ -59,10 +60,10 @@ const ( ) const ( - eventuallyDuration = time.Minute * 2 - eventuallyInterval = time.Second * 5 + eventuallyDuration = time.Minute * 3 + eventuallyInterval = time.Millisecond * 250 consistentlyDuration = time.Second * 10 - consistentlyInterval = time.Second * 2 + consistentlyInterval = time.Millisecond * 250 ) var ( diff --git a/test/e2e/utils_test.go b/test/e2e/utils_test.go index 96c8b6589..48fe03a73 100644 --- a/test/e2e/utils_test.go +++ b/test/e2e/utils_test.go @@ -267,12 +267,10 @@ func cleanupInvalidClusters() { Name: name, }, } - Expect(hubClient.Delete(ctx, mcObj)).To(Succeed(), "Failed to delete member cluster object") - Expect(hubClient.Get(ctx, types.NamespacedName{Name: name}, mcObj)).To(Succeed(), "Failed to get member cluster object") mcObj.Finalizers = []string{} 
Expect(hubClient.Update(ctx, mcObj)).To(Succeed(), "Failed to update member cluster object") - + Expect(hubClient.Delete(ctx, mcObj)).Should(SatisfyAny(Succeed(), utils.NotFoundMatcher{}), "Failed to delete member cluster object") Eventually(func() error { mcObj := &clusterv1beta1.MemberCluster{} if err := hubClient.Get(ctx, types.NamespacedName{Name: name}, mcObj); !apierrors.IsNotFound(err) { @@ -332,14 +330,14 @@ func deleteResourcesForFleetGuardRail() { Name: "test-cluster-role-binding", }, } - Expect(hubClient.Delete(ctx, &crb)).Should(Succeed()) + Expect(hubClient.Delete(ctx, &crb)).Should(SatisfyAny(Succeed(), utils.NotFoundMatcher{})) cr := rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{ Name: "test-cluster-role", }, } - Expect(hubClient.Delete(ctx, &cr)).Should(Succeed()) + Expect(hubClient.Delete(ctx, &cr)).Should(SatisfyAny(Succeed(), utils.NotFoundMatcher{})) } // cleanupMemberCluster removes finalizers (if any) from the member cluster, and @@ -447,7 +445,7 @@ func createWorkResource(name, namespace string) { // createWorkResources creates some resources on the hub cluster for testing purposes. func createWorkResources() { - ns := workNamespace() + ns := appNamespace() Expect(hubClient.Create(ctx, &ns)).To(Succeed(), "Failed to create namespace %s", ns.Namespace) configMap := appConfigMap() @@ -460,7 +458,7 @@ func cleanupWorkResources() { } func cleanWorkResourcesOnCluster(cluster *framework.Cluster) { - ns := workNamespace() + ns := appNamespace() Expect(client.IgnoreNotFound(cluster.KubeClient.Delete(ctx, &ns))).To(Succeed(), "Failed to delete namespace %s", ns.Namespace) workResourcesRemovedActual := workNamespaceRemovedFromClusterActual(cluster) @@ -559,7 +557,7 @@ func ensureCRPAndRelatedResourcesDeletion(crpName string, memberClusters []*fram Name: crpName, }, } - Expect(hubClient.Delete(ctx, crp)).To(Succeed(), "Failed to delete CRP") + Expect(hubClient.Delete(ctx, crp)).Should(SatisfyAny(Succeed(), utils.NotFoundMatcher{}), "Failed to delete CRP") // Verify that all resources placed have been removed from specified member clusters. 
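// The SatisfyAny(Succeed(), utils.NotFoundMatcher{}) assertions above make cleanup
// idempotent. A minimal sketch of what such a matcher looks like (the real one lives in
// the fleet utils package; this illustrative version simply satisfies gomega's
// types.GomegaMatcher interface by reporting NotFound errors as a match):
type notFoundMatcher struct{}

func (notFoundMatcher) Match(actual interface{}) (bool, error) {
	err, ok := actual.(error)
	return ok && apierrors.IsNotFound(err), nil
}

func (notFoundMatcher) FailureMessage(actual interface{}) string {
	return fmt.Sprintf("expected a NotFound error, got %v", actual)
}

func (notFoundMatcher) NegatedFailureMessage(actual interface{}) string {
	return fmt.Sprintf("expected anything but a NotFound error, got %v", actual)
}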
for idx := range memberClusters { From 79b4d4f18f7b816cab8464f6f527d9fa305191ec Mon Sep 17 00:00:00 2001 From: Ryan Zhang Date: Thu, 7 Mar 2024 22:36:11 -0800 Subject: [PATCH 2/5] address comments --- .../work/applied_work_syncer_test.go | 8 +- pkg/controllers/work/apply_controller.go | 140 +++++++++--------- .../work/apply_controller_helper_test.go | 6 +- .../work/apply_controller_integration_test.go | 6 +- pkg/controllers/work/apply_controller_test.go | 99 +++++++++++-- pkg/controllers/workgenerator/controller.go | 5 +- pkg/utils/condition/condition.go | 17 --- test/e2e/rollout_test.go | 6 +- test/utils/controller/controller.go | 18 +++ 9 files changed, 191 insertions(+), 114 deletions(-) diff --git a/pkg/controllers/work/applied_work_syncer_test.go b/pkg/controllers/work/applied_work_syncer_test.go index f71bbe499..879d95027 100644 --- a/pkg/controllers/work/applied_work_syncer_test.go +++ b/pkg/controllers/work/applied_work_syncer_test.go @@ -185,21 +185,21 @@ func TestCalculateNewAppliedWork(t *testing.T) { } newRes, staleRes, err := r.generateDiff(context.Background(), &tt.inputWork, &tt.inputAppliedWork) if len(tt.expectedNewRes) != len(newRes) { - t.Errorf("Testcase %s: get newRes contains different number of elements than the expected newRes.", testName) + t.Errorf("Testcase %s: get newRes contains different number of elements than the want newRes.", testName) } for i := 0; i < len(newRes); i++ { diff := cmp.Diff(tt.expectedNewRes[i].WorkResourceIdentifier, newRes[i].WorkResourceIdentifier) if len(diff) != 0 { - t.Errorf("Testcase %s: get newRes is different from the expected newRes, diff = %s", testName, diff) + t.Errorf("Testcase %s: get newRes is different from the want newRes, diff = %s", testName, diff) } } if len(tt.expectedStaleRes) != len(staleRes) { - t.Errorf("Testcase %s: get staleRes contains different number of elements than the expected staleRes.", testName) + t.Errorf("Testcase %s: get staleRes contains different number of elements than the want staleRes.", testName) } for i := 0; i < len(staleRes); i++ { diff := cmp.Diff(tt.expectedStaleRes[i].WorkResourceIdentifier, staleRes[i].WorkResourceIdentifier) if len(diff) != 0 { - t.Errorf("Testcase %s: get staleRes is different from the expected staleRes, diff = %s", testName, diff) + t.Errorf("Testcase %s: get staleRes is different from the want staleRes, diff = %s", testName, diff) } } if tt.hasErr { diff --git a/pkg/controllers/work/apply_controller.go b/pkg/controllers/work/apply_controller.go index 281a83d7d..1b079e680 100644 --- a/pkg/controllers/work/apply_controller.go +++ b/pkg/controllers/work/apply_controller.go @@ -70,7 +70,7 @@ const ( workAvailableReason = "WorkAvailable" workNotTrackableReason = "WorkNotTrackable" // ManifestApplyFailedReason is the reason string of condition when it failed to apply manifest. - ManifestApplyFailedReason = "ManifestApplyFailedReason" + ManifestApplyFailedReason = "ManifestApplyFailed" // ManifestAlreadyUpToDateReason is the reason string of condition when the manifest is already up to date. ManifestAlreadyUpToDateReason = "ManifestAlreadyUpToDate" // ManifestNeedsUpdateReason is the reason string of condition when the manifest needs to be updated. @@ -142,7 +142,7 @@ type applyResult struct { // Reconcile implement the control loop logic for Work object. 
func (r *ApplyWorkReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { if !r.joined.Load() { - klog.V(2).InfoS("work controller is not started yet, requeue the request", "work", req.NamespacedName) + klog.V(2).InfoS("Work controller is not started yet, requeue the request", "work", req.NamespacedName) return ctrl.Result{RequeueAfter: time.Second * 5}, nil } startTime := time.Now() @@ -157,17 +157,17 @@ func (r *ApplyWorkReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( err := r.client.Get(ctx, req.NamespacedName, work) switch { case apierrors.IsNotFound(err): - klog.V(2).InfoS("the work resource is deleted", "work", req.NamespacedName) + klog.V(2).InfoS("The work resource is deleted", "work", req.NamespacedName) return ctrl.Result{}, nil case err != nil: - klog.ErrorS(err, "failed to retrieve the work", "work", req.NamespacedName) + klog.ErrorS(err, "Failed to retrieve the work", "work", req.NamespacedName) return ctrl.Result{}, controller.NewAPIServerError(true, err) } logObjRef := klog.KObj(work) // Handle deleting work, garbage collect the resources if !work.DeletionTimestamp.IsZero() { - klog.V(2).InfoS("resource is in the process of being deleted", work.Kind, logObjRef) + klog.V(2).InfoS("Resource is in the process of being deleted", work.Kind, logObjRef) return r.garbageCollectAppliedWork(ctx, work) } @@ -192,14 +192,14 @@ func (r *ApplyWorkReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( if ok { workUpdateTime, parseErr := time.Parse(time.RFC3339, lastUpdateTime) if parseErr != nil { - klog.ErrorS(parseErr, "failed to parse the last work update time", "work", logObjRef) + klog.ErrorS(parseErr, "Failed to parse the last work update time", "work", logObjRef) } else { latency := time.Since(workUpdateTime) metrics.WorkApplyTime.WithLabelValues(work.GetName()).Observe(latency.Seconds()) - klog.V(2).InfoS("work is applied", "work", work.GetName(), "latency", latency.Milliseconds()) + klog.V(2).InfoS("Work is applied", "work", work.GetName(), "latency", latency.Milliseconds()) } } else { - klog.V(2).InfoS("work has no last update time", "work", work.GetName()) + klog.V(2).InfoS("Work has no last update time", "work", work.GetName()) } // generate the work condition based on the manifest apply result @@ -207,52 +207,52 @@ func (r *ApplyWorkReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( // update the work status if err = r.client.Status().Update(ctx, work, &client.SubResourceUpdateOptions{}); err != nil { - klog.ErrorS(err, "failed to update work status", "work", logObjRef) + klog.ErrorS(err, "Failed to update work status", "work", logObjRef) return ctrl.Result{}, err } if len(errs) == 0 { - klog.InfoS("successfully applied the work to the cluster", "work", logObjRef) + klog.InfoS("Successfully applied the work to the cluster", "work", logObjRef) r.recorder.Event(work, v1.EventTypeNormal, "ApplyWorkSucceed", "apply the work successfully") } // now we sync the status from work to appliedWork no matter if apply succeeds or not newRes, staleRes, genErr := r.generateDiff(ctx, work, appliedWork) if genErr != nil { - klog.ErrorS(err, "failed to generate the diff between work status and appliedWork status", work.Kind, logObjRef) + klog.ErrorS(err, "Failed to generate the diff between work status and appliedWork status", work.Kind, logObjRef) return ctrl.Result{}, err } // delete all the manifests that should not be in the cluster. 
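// Sketch of the new-vs-stale split that generateDiff produces (assumed shape, identifiers
// only): anything recorded in the AppliedWork whose identifier is no longer present in the
// Work's status is stale and gets garbage collected below.
func staleIdentifiers(applied, current []fleetv1beta1.WorkResourceIdentifier) []fleetv1beta1.WorkResourceIdentifier {
	keep := make(map[fleetv1beta1.WorkResourceIdentifier]bool, len(current))
	for _, id := range current {
		keep[id] = true
	}
	var stale []fleetv1beta1.WorkResourceIdentifier
	for _, id := range applied {
		if !keep[id] {
			stale = append(stale, id)
		}
	}
	return stale
}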
if err = r.deleteStaleManifest(ctx, staleRes, owner); err != nil { - klog.ErrorS(err, "resource garbage-collection incomplete; some Work owned resources could not be deleted", work.Kind, logObjRef) + klog.ErrorS(err, "Resource garbage-collection incomplete; some Work owned resources could not be deleted", work.Kind, logObjRef) // we can't proceed to update the applied return ctrl.Result{}, err } else if len(staleRes) > 0 { - klog.V(2).InfoS("successfully garbage-collected all stale manifests", work.Kind, logObjRef, "number of GCed res", len(staleRes)) + klog.V(2).InfoS("Successfully garbage-collected all stale manifests", work.Kind, logObjRef, "number of GCed res", len(staleRes)) for _, res := range staleRes { - klog.V(2).InfoS("successfully garbage-collected a stale manifest", work.Kind, logObjRef, "res", res) + klog.V(2).InfoS("Successfully garbage-collected a stale manifest", work.Kind, logObjRef, "res", res) } } - // update the appliedWork with the new work after the stales are deleted appliedWork.Status.AppliedResources = newRes if err = r.spokeClient.Status().Update(ctx, appliedWork, &client.SubResourceUpdateOptions{}); err != nil { - klog.ErrorS(err, "failed to update appliedWork status", appliedWork.Kind, appliedWork.GetName()) + klog.ErrorS(err, "Failed to update appliedWork status", appliedWork.Kind, appliedWork.GetName()) return ctrl.Result{}, err } - err = utilerrors.NewAggregate(errs) - if err != nil { - klog.ErrorS(err, "manifest apply incomplete; the message is queued again for reconciliation", + + if err = utilerrors.NewAggregate(errs); err != nil { + klog.ErrorS(err, "Manifest apply incomplete; the message is queued again for reconciliation", "work", logObjRef) + return ctrl.Result{}, err } - + // check if the work is available, if not, we will requeue the work for reconciliation availableCond := meta.FindStatusCondition(work.Status.Conditions, fleetv1beta1.WorkConditionTypeAvailable) if !condition.IsConditionStatusTrue(availableCond, work.Generation) { - klog.V(2).InfoS("work is not available yet, check again", "work", logObjRef, "availableCond", availableCond) - return ctrl.Result{RequeueAfter: time.Second * 3}, err + klog.V(2).InfoS("Work is not available yet, check again", "work", logObjRef, "availableCond", availableCond) + return ctrl.Result{RequeueAfter: time.Second * 3}, nil } // the work is available (might due to not trackable) but we still periodically reconcile to make sure the // member cluster state is in sync with the work in case the resources on the member cluster is removed/changed. - return ctrl.Result{RequeueAfter: time.Minute * 5}, err + return ctrl.Result{RequeueAfter: time.Minute * 5}, nil } // garbageCollectAppliedWork deletes the appliedWork and all the manifests associated with it from the cluster. 
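// Condensed sketch of the requeue policy applied above (same intervals, simplified flow):
// poll again quickly while the Work is not yet available, and fall back to a slow periodic
// resync once it is available (or not trackable) to catch drift on the member cluster.
func requeueResult(work *fleetv1beta1.Work) ctrl.Result {
	availableCond := meta.FindStatusCondition(work.Status.Conditions, fleetv1beta1.WorkConditionTypeAvailable)
	if !condition.IsConditionStatusTrue(availableCond, work.Generation) {
		return ctrl.Result{RequeueAfter: 3 * time.Second}
	}
	return ctrl.Result{RequeueAfter: 5 * time.Minute}
}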
@@ -269,12 +269,12 @@ func (r *ApplyWorkReconciler) garbageCollectAppliedWork(ctx context.Context, wor err := r.spokeClient.Delete(ctx, &appliedWork, &client.DeleteOptions{PropagationPolicy: &deletePolicy}) switch { case apierrors.IsNotFound(err): - klog.V(2).InfoS("the appliedWork is already deleted", "appliedWork", work.Name) + klog.V(2).InfoS("The appliedWork is already deleted", "appliedWork", work.Name) case err != nil: - klog.ErrorS(err, "failed to delete the appliedWork", "appliedWork", work.Name) + klog.ErrorS(err, "Failed to delete the appliedWork", "appliedWork", work.Name) return ctrl.Result{}, err default: - klog.InfoS("successfully deleted the appliedWork", "appliedWork", work.Name) + klog.InfoS("Successfully deleted the appliedWork", "appliedWork", work.Name) } controllerutil.RemoveFinalizer(work, fleetv1beta1.WorkFinalizer) return ctrl.Result{}, r.client.Update(ctx, work, &client.UpdateOptions{}) @@ -290,9 +290,9 @@ func (r *ApplyWorkReconciler) ensureAppliedWork(ctx context.Context, work *fleet err := r.spokeClient.Get(ctx, types.NamespacedName{Name: work.Name}, appliedWork) switch { case apierrors.IsNotFound(err): - klog.ErrorS(err, "appliedWork finalizer resource does not exist even with the finalizer, it will be recreated", "appliedWork", workRef.Name) + klog.ErrorS(err, "AppliedWork finalizer resource does not exist even with the finalizer, it will be recreated", "appliedWork", workRef.Name) case err != nil: - klog.ErrorS(err, "failed to retrieve the appliedWork ", "appliedWork", workRef.Name) + klog.ErrorS(err, "Failed to retrieve the appliedWork ", "appliedWork", workRef.Name) return nil, controller.NewAPIServerError(true, err) default: return appliedWork, nil @@ -310,15 +310,15 @@ func (r *ApplyWorkReconciler) ensureAppliedWork(ctx context.Context, work *fleet }, } if err := r.spokeClient.Create(ctx, appliedWork); err != nil && !apierrors.IsAlreadyExists(err) { - klog.ErrorS(err, "appliedWork create failed", "appliedWork", workRef.Name) + klog.ErrorS(err, "AppliedWork create failed", "appliedWork", workRef.Name) return nil, err } if !hasFinalizer { - klog.InfoS("add the finalizer to the work", "work", workRef) + klog.InfoS("Add the finalizer to the work", "work", workRef) work.Finalizers = append(work.Finalizers, fleetv1beta1.WorkFinalizer) return appliedWork, r.client.Update(ctx, work, &client.UpdateOptions{}) } - klog.InfoS("recreated the appliedWork resource", "appliedWork", workRef.Name) + klog.InfoS("Recreated the appliedWork resource", "appliedWork", workRef.Name) return appliedWork, nil } @@ -354,7 +354,7 @@ func (r *ApplyWorkReconciler) applyManifests(ctx context.Context, manifests []fl } if result.applyErr == nil { result.generation = appliedObj.GetGeneration() - klog.V(2).InfoS("apply manifest succeeded", "gvr", gvr, "manifest", logObjRef, + klog.V(2).InfoS("Apply manifest succeeded", "gvr", gvr, "manifest", logObjRef, "action", result.action, "applyStrategy", applyStrategy, "new ObservedGeneration", result.generation) } else { klog.ErrorS(result.applyErr, "manifest upsert failed", "gvr", gvr, "manifest", logObjRef) @@ -405,7 +405,7 @@ func (r *ApplyWorkReconciler) applyUnstructured(ctx context.Context, gvr schema. 
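// The availability trackers touched in the hunks below share one pattern; the Job rule,
// condensed (assumed shape): available once a pod has succeeded or at least one pod is
// ready, and not trackable when status.ready is absent (pre-1.24 API servers).
func jobAvailability(job *batchv1.Job) applyAction {
	if job.Status.Succeeded > 0 {
		return manifestAvailableAction
	}
	if job.Status.Ready != nil {
		if *job.Status.Ready > 0 {
			return manifestAvailableAction
		}
		return manifestNotAvailableYetAction
	}
	return manifestNotTrackableAction
}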
actual, err := r.spokeDynamicClient.Resource(gvr).Namespace(manifestObj.GetNamespace()).Create( ctx, manifestObj, metav1.CreateOptions{FieldManager: workFieldManagerName}) if err == nil { - klog.V(2).InfoS("successfully created the manifest", "gvr", gvr, "manifest", manifestRef) + klog.V(2).InfoS("Successfully created the manifest", "gvr", gvr, "manifest", manifestRef) return actual, manifestCreatedAction, nil } return nil, errorApplyAction, err @@ -413,7 +413,7 @@ func (r *ApplyWorkReconciler) applyUnstructured(ctx context.Context, gvr schema. // support resources with generated name if manifestObj.GetName() == "" && manifestObj.GetGenerateName() != "" { - klog.InfoS("create the resource with generated name regardless", "gvr", gvr, "manifest", manifestRef) + klog.InfoS("Create the resource with generated name regardless", "gvr", gvr, "manifest", manifestRef) return createFunc() } @@ -430,7 +430,7 @@ func (r *ApplyWorkReconciler) applyUnstructured(ctx context.Context, gvr schema. if !isManifestManagedByWork(curObj.GetOwnerReferences()) { // TODO: honer the applyStrategy to decide if we should overwrite the existing resource err = fmt.Errorf("resource is not managed by the work controller") - klog.ErrorS(err, "skip applying a not managed manifest", "gvr", gvr, "obj", manifestRef) + klog.ErrorS(err, "Skip applying a not managed manifest", "gvr", gvr, "obj", manifestRef) return nil, errorApplyAction, controller.NewExpectedBehaviorError(err) } @@ -445,10 +445,10 @@ func (r *ApplyWorkReconciler) applyUnstructured(ctx context.Context, gvr schema. return nil, errorApplyAction, err } if !isModifiedConfigAnnotationNotEmpty { - klog.V(2).InfoS("using server side apply for manifest", "gvr", gvr, "manifest", manifestRef) + klog.V(2).InfoS("Using server side apply for manifest", "gvr", gvr, "manifest", manifestRef) return r.serversideApplyObject(ctx, gvr, manifestObj) } - klog.V(2).InfoS("using three way merge for manifest", "gvr", gvr, "manifest", manifestRef) + klog.V(2).InfoS("Using three way merge for manifest", "gvr", gvr, "manifest", manifestRef) return r.patchCurrentResource(ctx, gvr, manifestObj, curObj) } // the manifest is already up to date, we just need to track its availability @@ -471,7 +471,7 @@ func trackResourceAvailability(gvr schema.GroupVersionResource, curObj *unstruct return trackJobAvailability(curObj) default: - klog.V(2).InfoS("we don't know how to track the availability of the resource", "gvr", gvr, "resource", klog.KObj(curObj)) + klog.V(2).InfoS("We don't know how to track the availability of the resource", "gvr", gvr, "resource", klog.KObj(curObj)) return manifestNotTrackableAction, nil } } @@ -491,10 +491,10 @@ func trackDeploymentAvailability(curObj *unstructured.Unstructured) (applyAction } // a deployment is available if the observedGeneration is equal to the generation and the Available condition is true if deployment.Status.ObservedGeneration == deployment.Generation && depCond != nil && depCond.Status == v1.ConditionTrue { - klog.V(2).InfoS("deployment is available", "deployment", klog.KObj(curObj)) + klog.V(2).InfoS("Deployment is available", "deployment", klog.KObj(curObj)) return manifestAvailableAction, nil } - klog.V(2).InfoS("still need to wait for deployment to be available", "deployment", klog.KObj(curObj)) + klog.V(2).InfoS("Still need to wait for deployment to be available", "deployment", klog.KObj(curObj)) return manifestNotAvailableYetAction, nil } @@ -505,13 +505,18 @@ func trackStatefulSetAvailability(curObj *unstructured.Unstructured) (applyActio } // 
a statefulSet is available if all the replicas are available and the currentReplicas is equal to the updatedReplicas // which means there is no more update in progress. + requiredReplicas := int32(1) + if statefulSet.Spec.Replicas != nil { + requiredReplicas = *statefulSet.Spec.Replicas + } if statefulSet.Status.ObservedGeneration == statefulSet.Generation && - statefulSet.Status.AvailableReplicas == *statefulSet.Spec.Replicas && - statefulSet.Status.CurrentReplicas == statefulSet.Status.UpdatedReplicas { - klog.V(2).InfoS("statefulSet is available", "statefulSet", klog.KObj(curObj)) + statefulSet.Status.AvailableReplicas == requiredReplicas && + statefulSet.Status.CurrentReplicas == statefulSet.Status.UpdatedReplicas && + statefulSet.Status.CurrentRevision == statefulSet.Status.UpdateRevision { + klog.V(2).InfoS("StatefulSet is available", "statefulSet", klog.KObj(curObj)) return manifestAvailableAction, nil } - klog.V(2).InfoS("still need to wait for statefulSet to be available", "statefulSet", klog.KObj(curObj)) + klog.V(2).InfoS("Still need to wait for statefulSet to be available", "statefulSet", klog.KObj(curObj)) return manifestNotAvailableYetAction, nil } @@ -525,10 +530,10 @@ func trackDaemonSetAvailability(curObj *unstructured.Unstructured) (applyAction, if daemonSet.Status.ObservedGeneration == daemonSet.Generation && daemonSet.Status.NumberAvailable == daemonSet.Status.DesiredNumberScheduled && daemonSet.Status.CurrentNumberScheduled == daemonSet.Status.UpdatedNumberScheduled { - klog.V(2).InfoS("daemonSet is available", "daemonSet", klog.KObj(curObj)) + klog.V(2).InfoS("DaemonSet is available", "daemonSet", klog.KObj(curObj)) return manifestAvailableAction, nil } - klog.V(2).InfoS("still need to wait for daemonSet to be available", "daemonSet", klog.KObj(curObj)) + klog.V(2).InfoS("Still need to wait for daemonSet to be available", "daemonSet", klog.KObj(curObj)) return manifestNotAvailableYetAction, nil } @@ -538,20 +543,20 @@ func trackJobAvailability(curObj *unstructured.Unstructured) (applyAction, error return errorApplyAction, controller.NewUnexpectedBehaviorError(err) } if job.Status.Succeeded > 0 { - klog.V(2).InfoS("job is available with at least one succeeded pod", "job", klog.KObj(curObj)) + klog.V(2).InfoS("Job is available with at least one succeeded pod", "job", klog.KObj(curObj)) return manifestAvailableAction, nil } if job.Status.Ready != nil { // we consider a job available if there is at least one pod ready if *job.Status.Ready > 0 { - klog.V(2).InfoS("job is available with at least one ready pod", "job", klog.KObj(curObj)) + klog.V(2).InfoS("Job is available with at least one ready pod", "job", klog.KObj(curObj)) return manifestAvailableAction, nil } - klog.V(2).InfoS("still need to wait for job to be available", "job", klog.KObj(curObj)) + klog.V(2).InfoS("Still need to wait for job to be available", "job", klog.KObj(curObj)) return manifestNotAvailableYetAction, nil } // this field only exists in k8s 1.24+ by default, so we can't track the availability of the job without it - klog.V(2).InfoS("job does not have ready status, we can't track its availability", "job", klog.KObj(curObj)) + klog.V(2).InfoS("Job does not have ready status, we can't track its availability", "job", klog.KObj(curObj)) return manifestNotTrackableAction, nil } @@ -568,10 +573,10 @@ func (r *ApplyWorkReconciler) serversideApplyObject(ctx context.Context, gvr sch } manifestObj, err := r.spokeDynamicClient.Resource(gvr).Namespace(manifestObj.GetNamespace()).Apply(ctx, 
manifestObj.GetName(), manifestObj, options) if err != nil { - klog.ErrorS(err, "failed to apply object", "gvr", gvr, "manifest", manifestRef) + klog.ErrorS(err, "Failed to apply object", "gvr", gvr, "manifest", manifestRef) return nil, errorApplyAction, err } - klog.V(2).InfoS("manifest apply succeeded", "gvr", gvr, "manifest", manifestRef) + klog.V(2).InfoS("Manifest apply succeeded", "gvr", gvr, "manifest", manifestRef) return manifestObj, manifestServerSideAppliedAction, nil } @@ -582,28 +587,28 @@ func (r *ApplyWorkReconciler) patchCurrentResource(ctx context.Context, gvr sche Name: manifestObj.GetName(), Namespace: manifestObj.GetNamespace(), } - klog.V(2).InfoS("manifest is modified", "gvr", gvr, "manifest", manifestRef, + klog.V(2).InfoS("Manifest is modified", "gvr", gvr, "manifest", manifestRef, "new hash", manifestObj.GetAnnotations()[fleetv1beta1.ManifestHashAnnotation], "existing hash", curObj.GetAnnotations()[fleetv1beta1.ManifestHashAnnotation]) // create the three-way merge patch between the current, original and manifest similar to how kubectl apply does patch, err := threeWayMergePatch(curObj, manifestObj) if err != nil { - klog.ErrorS(err, "failed to generate the three way patch", "gvr", gvr, "manifest", manifestRef) + klog.ErrorS(err, "Failed to generate the three way patch", "gvr", gvr, "manifest", manifestRef) return nil, errorApplyAction, err } data, err := patch.Data(manifestObj) if err != nil { - klog.ErrorS(err, "failed to generate the three way patch", "gvr", gvr, "manifest", manifestRef) + klog.ErrorS(err, "Failed to generate the three way patch", "gvr", gvr, "manifest", manifestRef) return nil, errorApplyAction, err } // Use client side apply the patch to the member cluster manifestObj, patchErr := r.spokeDynamicClient.Resource(gvr).Namespace(manifestObj.GetNamespace()). 
Patch(ctx, manifestObj.GetName(), patch.Type(), data, metav1.PatchOptions{FieldManager: workFieldManagerName}) if patchErr != nil { - klog.ErrorS(patchErr, "failed to patch the manifest", "gvr", gvr, "manifest", manifestRef) + klog.ErrorS(patchErr, "Failed to patch the manifest", "gvr", gvr, "manifest", manifestRef) return nil, errorApplyAction, patchErr } - klog.V(2).InfoS("manifest patch succeeded", "gvr", gvr, "manifest", manifestRef) + klog.V(2).InfoS("Manifest patch succeeded", "gvr", gvr, "manifest", manifestRef) return manifestObj, manifestThreeWayMergePatchAction, nil } @@ -622,13 +627,11 @@ func constructWorkCondition(results []applyResult, work *fleetv1beta1.Work) []er } existingManifestCondition := findManifestConditionByIdentifier(result.identifier, work.Status.ManifestConditions) if existingManifestCondition != nil { - // merge the status of the manifest condition manifestCondition.Conditions = existingManifestCondition.Conditions - for _, condition := range newConditions { - meta.SetStatusCondition(&manifestCondition.Conditions, condition) - } - } else { - manifestCondition.Conditions = newConditions + } + // merge the status of the manifest condition + for _, condition := range newConditions { + meta.SetStatusCondition(&manifestCondition.Conditions, condition) } manifestConditions[index] = manifestCondition } @@ -645,7 +648,7 @@ func constructWorkCondition(results []applyResult, work *fleetv1beta1.Work) []er // Join starts to reconcile func (r *ApplyWorkReconciler) Join(_ context.Context) error { if !r.joined.Load() { - klog.InfoS("mark the apply work reconciler joined") + klog.InfoS("Mark the apply work reconciler joined") } r.joined.Store(true) return nil @@ -655,7 +658,7 @@ func (r *ApplyWorkReconciler) Join(_ context.Context) error { func (r *ApplyWorkReconciler) Leave(ctx context.Context) error { var works fleetv1beta1.WorkList if r.joined.Load() { - klog.InfoS("mark the apply work reconciler left") + klog.InfoS("Mark the apply work reconciler left") } r.joined.Store(false) // list all the work object we created in the member cluster namespace @@ -663,7 +666,7 @@ func (r *ApplyWorkReconciler) Leave(ctx context.Context) error { client.InNamespace(r.workNameSpace), } if err := r.client.List(ctx, &works, listOpts...); err != nil { - klog.ErrorS(err, "failed to list all the work object", "clusterNS", r.workNameSpace) + klog.ErrorS(err, "Failed to list all the work object", "clusterNS", r.workNameSpace) return client.IgnoreNotFound(err) } // we leave the resources on the member cluster for now @@ -672,13 +675,13 @@ func (r *ApplyWorkReconciler) Leave(ctx context.Context) error { if controllerutil.ContainsFinalizer(staleWork, fleetv1beta1.WorkFinalizer) { controllerutil.RemoveFinalizer(staleWork, fleetv1beta1.WorkFinalizer) if updateErr := r.client.Update(ctx, staleWork, &client.UpdateOptions{}); updateErr != nil { - klog.ErrorS(updateErr, "failed to remove the work finalizer from the work", + klog.ErrorS(updateErr, "Failed to remove the work finalizer from the work", "clusterNS", r.workNameSpace, "work", klog.KObj(staleWork)) return updateErr } } } - klog.V(2).InfoS("successfully removed all the work finalizers in the cluster namespace", + klog.V(2).InfoS("Successfully removed all the work finalizers in the cluster namespace", "clusterNS", r.workNameSpace, "number of work", len(works.Items)) return nil } @@ -842,6 +845,7 @@ func buildManifestCondition(err error, action applyAction, observedGeneration in availableCondition.Reason = string(manifestNotTrackableAction) default: 
+ klog.ErrorS(controller.ErrUnexpectedBehavior, "Unknown apply action result", "applyResult", action) } } diff --git a/pkg/controllers/work/apply_controller_helper_test.go b/pkg/controllers/work/apply_controller_helper_test.go index 914276c01..c5a6e67b7 100644 --- a/pkg/controllers/work/apply_controller_helper_test.go +++ b/pkg/controllers/work/apply_controller_helper_test.go @@ -20,6 +20,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" fleetv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" + "go.goms.io/fleet/pkg/utils/condition" ) // createWorkWithManifest creates a work given a manifest @@ -95,11 +96,12 @@ func waitForWorkToBeAvailable(workName, workNS string) *fleetv1beta1.Work { return false } applyCond := meta.FindStatusCondition(resultWork.Status.Conditions, fleetv1beta1.WorkConditionTypeAvailable) - if applyCond == nil || applyCond.Status != metav1.ConditionTrue || applyCond.ObservedGeneration != resultWork.Generation { + if !condition.IsConditionStatusTrue(applyCond, resultWork.Generation) { return false } for _, manifestCondition := range resultWork.Status.ManifestConditions { - if !meta.IsStatusConditionTrue(manifestCondition.Conditions, fleetv1beta1.WorkConditionTypeAvailable) { + applyCond := meta.FindStatusCondition(manifestCondition.Conditions, fleetv1beta1.WorkConditionTypeAvailable) + if !condition.IsConditionStatusTrue(applyCond, resultWork.Generation) { return false } } diff --git a/pkg/controllers/work/apply_controller_integration_test.go b/pkg/controllers/work/apply_controller_integration_test.go index 43d621bef..8efab38d8 100644 --- a/pkg/controllers/work/apply_controller_integration_test.go +++ b/pkg/controllers/work/apply_controller_integration_test.go @@ -42,7 +42,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" fleetv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" - "go.goms.io/fleet/pkg/utils/condition" + "go.goms.io/fleet/test/utils/controller" ) const timeout = time.Second * 10 @@ -120,7 +120,7 @@ var _ = Describe("Work Controller", func() { Reason: string(manifestNotTrackableAction), }, } - Expect(condition.CompareConditions(expected, resultWork.Status.ManifestConditions[0].Conditions)).Should(BeEmpty()) + Expect(controller.CompareConditions(expected, resultWork.Status.ManifestConditions[0].Conditions)).Should(BeEmpty()) expected = []metav1.Condition{ { Type: fleetv1beta1.WorkConditionTypeApplied, @@ -133,7 +133,7 @@ var _ = Describe("Work Controller", func() { Reason: workNotTrackableReason, }, } - Expect(condition.CompareConditions(expected, resultWork.Status.Conditions)).Should(BeEmpty()) + Expect(controller.CompareConditions(expected, resultWork.Status.Conditions)).Should(BeEmpty()) By("Check applied config map") var configMap corev1.ConfigMap diff --git a/pkg/controllers/work/apply_controller_test.go b/pkg/controllers/work/apply_controller_test.go index a17c549b5..d9d19d1cb 100644 --- a/pkg/controllers/work/apply_controller_test.go +++ b/pkg/controllers/work/apply_controller_test.go @@ -53,7 +53,8 @@ import ( fleetv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" "go.goms.io/fleet/pkg/utils" - "go.goms.io/fleet/pkg/utils/condition" + "go.goms.io/fleet/pkg/utils/controller" + controller2 "go.goms.io/fleet/test/utils/controller" ) var ( @@ -286,14 +287,14 @@ func TestIsManifestManagedByWork(t *testing.T) { func TestBuildManifestCondition(t *testing.T) { tests := map[string]struct { - err error - action applyAction - expected []metav1.Condition + err error + action applyAction + want []metav1.Condition 
}{ "TestNoErrorManifestCreated": { err: nil, action: manifestCreatedAction, - expected: []metav1.Condition{ + want: []metav1.Condition{ { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, @@ -309,7 +310,7 @@ func TestBuildManifestCondition(t *testing.T) { "TestNoErrorManifestServerSideApplied": { err: nil, action: manifestServerSideAppliedAction, - expected: []metav1.Condition{ + want: []metav1.Condition{ { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, @@ -325,7 +326,7 @@ func TestBuildManifestCondition(t *testing.T) { "TestNoErrorManifestThreeWayMergePatch": { err: nil, action: manifestThreeWayMergePatchAction, - expected: []metav1.Condition{ + want: []metav1.Condition{ { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, @@ -341,7 +342,7 @@ func TestBuildManifestCondition(t *testing.T) { "TestNoErrorManifestNotAvailable": { err: nil, action: manifestNotAvailableYetAction, - expected: []metav1.Condition{ + want: []metav1.Condition{ { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, @@ -357,7 +358,7 @@ func TestBuildManifestCondition(t *testing.T) { "TestNoErrorManifestNotTrackableAction": { err: nil, action: manifestNotTrackableAction, - expected: []metav1.Condition{ + want: []metav1.Condition{ { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, @@ -373,7 +374,7 @@ func TestBuildManifestCondition(t *testing.T) { "TestNoErrorManifestAvailableAction": { err: nil, action: manifestAvailableAction, - expected: []metav1.Condition{ + want: []metav1.Condition{ { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, @@ -389,7 +390,7 @@ func TestBuildManifestCondition(t *testing.T) { "TestWithError": { err: errors.New("test error"), action: errorApplyAction, - expected: []metav1.Condition{ + want: []metav1.Condition{ { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionFalse, @@ -407,7 +408,7 @@ func TestBuildManifestCondition(t *testing.T) { for name, tt := range tests { t.Run(name, func(t *testing.T) { conditions := buildManifestCondition(tt.err, tt.action, 1) - diff := condition.CompareConditions(tt.expected, conditions) + diff := controller2.CompareConditions(tt.want, conditions) assert.Empty(t, diff, "buildManifestCondition() test %v failed, (-want +got):\n%s", name, diff) }) } @@ -772,7 +773,7 @@ func TestGenerateWorkCondition(t *testing.T) { for name, tt := range tests { t.Run(name, func(t *testing.T) { conditions := buildWorkCondition(tt.manifestConditions, 1) - diff := condition.CompareConditions(tt.expected, conditions) + diff := controller2.CompareConditions(tt.expected, conditions) assert.Empty(t, diff, "buildWorkCondition() test %v failed, (-want +got):\n%s", name, diff) }) } @@ -785,6 +786,31 @@ func TestTrackResourceAvailability(t *testing.T) { expected applyAction err error }{ + "Test a mal-formated object": { + gvr: utils.DeploymentGVR, + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "generation": 1, + "name": "test-deployment", + }, + "spec": "wrongspec", + "status": map[string]interface{}{ + "observedGeneration": 1, + "conditions": []interface{}{ + map[string]interface{}{ + "type": "Available", + "status": "True", + }, + }, + }, + }, + }, + expected: errorApplyAction, + err: controller.ErrUnexpectedBehavior, + }, "Test Deployment available": { gvr: utils.DeploymentGVR, obj: &unstructured.Unstructured{ @@ 
-905,6 +931,30 @@ func TestTrackResourceAvailability(t *testing.T) {
 			expected: manifestNotAvailableYetAction,
 			err:      nil,
 		},
+		"Test StatefulSet observed old generation": {
+			gvr: utils.StatefulSettGVR,
+			obj: &unstructured.Unstructured{
+				Object: map[string]interface{}{
+					"apiVersion": "apps/v1",
+					"kind":       "StatefulSet",
+					"metadata": map[string]interface{}{
+						"generation": 3,
+						"name":       "test-statefulset",
+					},
+					"spec": map[string]interface{}{
+						"replicas": 3,
+					},
+					"status": map[string]interface{}{
+						"observedGeneration": 2,
+						"availableReplicas":  2,
+						"currentReplicas":    3,
+						"updatedReplicas":    3,
+					},
+				},
+			},
+			expected: manifestNotAvailableYetAction,
+			err:      nil,
+		},
 		"Test DaemonSet Available": {
 			gvr: utils.DaemonSettGVR,
 			obj: &unstructured.Unstructured{
@@ -947,6 +997,27 @@ func TestTrackResourceAvailability(t *testing.T) {
 			expected: manifestNotAvailableYetAction,
 			err:      nil,
 		},
+		"Test DaemonSet not observe current generation": {
+			gvr: utils.DaemonSettGVR,
+			obj: &unstructured.Unstructured{
+				Object: map[string]interface{}{
+					"apiVersion": "apps/v1",
+					"kind":       "DaemonSet",
+					"metadata": map[string]interface{}{
+						"generation": 2,
+					},
+					"status": map[string]interface{}{
+						"observedGeneration":     1,
+						"numberAvailable":        0,
+						"desiredNumberScheduled": 1,
+						"currentNumberScheduled": 1,
+						"updatedNumberScheduled": 1,
+					},
+				},
+			},
+			expected: manifestNotAvailableYetAction,
+			err:      nil,
+		},
 		"Test Job available with succeeded pod": {
 			gvr: utils.JobGVR,
 			obj: &unstructured.Unstructured{
@@ -1020,7 +1091,7 @@ func TestTrackResourceAvailability(t *testing.T) {
 		t.Run(name, func(t *testing.T) {
 			action, err := trackResourceAvailability(tt.gvr, tt.obj)
 			assert.Equal(t, tt.expected, action, "action not matching in test %s", name)
-			assert.Equal(t, tt.err, err, "applyErr not matching in test %s", name)
+			assert.Equal(t, errors.Is(err, tt.err), true, "applyErr not matching in test %s", name)
 		})
 	}
 }
diff --git a/pkg/controllers/workgenerator/controller.go b/pkg/controllers/workgenerator/controller.go
index a2bae09d0..3599bc930 100644
--- a/pkg/controllers/workgenerator/controller.go
+++ b/pkg/controllers/workgenerator/controller.go
@@ -662,10 +662,9 @@ func (r *Reconciler) SetupWithManager(mgr controllerruntime.Manager) error {
 			oldAvailableStatus := meta.FindStatusCondition(oldWork.Status.Conditions, fleetv1beta1.WorkConditionTypeAvailable)
 			newAvailableStatus := meta.FindStatusCondition(newWork.Status.Conditions, fleetv1beta1.WorkConditionTypeAvailable)
-			// we only need to handle the case the applied or available conditions is flipped between true and NOT true between the
+			// we only need to handle the case where the applied or available condition has changed between the
 			// new and old work objects.
Otherwise, it won't affect the binding applied condition - if condition.IsConditionStatusTrue(oldAppliedStatus, oldWork.GetGeneration()) == condition.IsConditionStatusTrue(newAppliedStatus, newWork.GetGeneration()) && - condition.IsConditionStatusTrue(oldAvailableStatus, oldWork.GetGeneration()) == condition.IsConditionStatusTrue(newAvailableStatus, newWork.GetGeneration()) { + if condition.EqualCondition(oldAppliedStatus, newAppliedStatus) && condition.EqualCondition(oldAvailableStatus, newAvailableStatus) { klog.V(2).InfoS("The work applied or available condition didn't flip between true and false, no need to reconcile", "oldWork", klog.KObj(oldWork), "newWork", klog.KObj(newWork)) return } diff --git a/pkg/utils/condition/condition.go b/pkg/utils/condition/condition.go index b3e6acda7..10364cce5 100644 --- a/pkg/utils/condition/condition.go +++ b/pkg/utils/condition/condition.go @@ -7,10 +7,6 @@ Licensed under the MIT license. package condition import ( - "sort" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -105,16 +101,3 @@ func IsConditionStatusTrue(cond *metav1.Condition, latestGeneration int64) bool func IsConditionStatusFalse(cond *metav1.Condition, latestGeneration int64) bool { return cond != nil && cond.Status == metav1.ConditionFalse && cond.ObservedGeneration == latestGeneration } - -// CompareConditions compares two condition slices and returns a string with the differences. -func CompareConditions(wantConditions, gotConditions []metav1.Condition) string { - ignoreOption := cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime", "ObservedGeneration", "Message") - // we need to sort each condition slice by type before comparing - sort.SliceStable(wantConditions, func(i, j int) bool { - return wantConditions[i].Type < wantConditions[j].Type - }) - sort.SliceStable(gotConditions, func(i, j int) bool { - return gotConditions[i].Type < gotConditions[j].Type - }) - return cmp.Diff(wantConditions, gotConditions, ignoreOption) -} diff --git a/test/e2e/rollout_test.go b/test/e2e/rollout_test.go index 461e21f67..d07a96d04 100644 --- a/test/e2e/rollout_test.go +++ b/test/e2e/rollout_test.go @@ -21,9 +21,9 @@ import ( placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" "go.goms.io/fleet/pkg/utils" - "go.goms.io/fleet/pkg/utils/condition" "go.goms.io/fleet/test/e2e/framework" testutils "go.goms.io/fleet/test/e2e/v1alpha1/utils" + "go.goms.io/fleet/test/utils/controller" ) var ( @@ -122,7 +122,7 @@ var _ = Describe("placing wrapped resources using a CRP", Ordered, func() { ObservedGeneration: 1, }, } - diff := condition.CompareConditions(wantConditions, work.Status.Conditions) + diff := controller.CompareConditions(wantConditions, work.Status.Conditions) if len(diff) != 0 { return diff } @@ -143,7 +143,7 @@ var _ = Describe("placing wrapped resources using a CRP", Ordered, func() { ObservedGeneration: 1, }, } - diff := condition.CompareConditions(wantConditions, work.Status.Conditions) + diff := controller.CompareConditions(wantConditions, work.Status.Conditions) if len(diff) != 0 { return diff } diff --git a/test/utils/controller/controller.go b/test/utils/controller/controller.go index 4540280e4..6e3f2a369 100644 --- a/test/utils/controller/controller.go +++ b/test/utils/controller/controller.go @@ -8,7 +8,12 @@ package controller import ( "context" + "sort" "sync" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // 
FakeController is a fake controller which only stores one key. @@ -46,3 +51,16 @@ func (f *FakeController) Key() string { defer f.mu.RUnlock() return f.key } + +// CompareConditions compares two condition slices and returns a string with the differences. +func CompareConditions(wantConditions, gotConditions []v1.Condition) string { + ignoreOption := cmpopts.IgnoreFields(v1.Condition{}, "LastTransitionTime", "ObservedGeneration", "Message") + // we need to sort each condition slice by type before comparing + sort.SliceStable(wantConditions, func(i, j int) bool { + return wantConditions[i].Type < wantConditions[j].Type + }) + sort.SliceStable(gotConditions, func(i, j int) bool { + return gotConditions[i].Type < gotConditions[j].Type + }) + return cmp.Diff(wantConditions, gotConditions, ignoreOption) +} From ce4d9aa62c07da038ef76be820e10bbcf3fceb3a Mon Sep 17 00:00:00 2001 From: Ryan Zhang Date: Thu, 7 Mar 2024 23:56:33 -0800 Subject: [PATCH 3/5] fix the test bug --- pkg/controllers/work/apply_controller_helper_test.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/pkg/controllers/work/apply_controller_helper_test.go b/pkg/controllers/work/apply_controller_helper_test.go index c5a6e67b7..ae917c165 100644 --- a/pkg/controllers/work/apply_controller_helper_test.go +++ b/pkg/controllers/work/apply_controller_helper_test.go @@ -7,6 +7,7 @@ package work import ( "context" + "fmt" "github.com/google/go-cmp/cmp" . "github.com/onsi/ginkgo/v2" @@ -75,10 +76,12 @@ func waitForWorkToApply(workName, workNS string) *fleetv1beta1.Work { } applyCond := meta.FindStatusCondition(resultWork.Status.Conditions, fleetv1beta1.WorkConditionTypeApplied) if applyCond == nil || applyCond.Status != metav1.ConditionTrue || applyCond.ObservedGeneration != resultWork.Generation { + By(fmt.Sprintf("applyCond not true: %v", applyCond)) return false } for _, manifestCondition := range resultWork.Status.ManifestConditions { if !meta.IsStatusConditionTrue(manifestCondition.Conditions, fleetv1beta1.WorkConditionTypeApplied) { + By(fmt.Sprintf("manifest applyCond not true %v : %v", manifestCondition.Identifier, manifestCondition.Conditions)) return false } } @@ -95,13 +98,14 @@ func waitForWorkToBeAvailable(workName, workNS string) *fleetv1beta1.Work { if err != nil { return false } - applyCond := meta.FindStatusCondition(resultWork.Status.Conditions, fleetv1beta1.WorkConditionTypeAvailable) - if !condition.IsConditionStatusTrue(applyCond, resultWork.Generation) { + availCond := meta.FindStatusCondition(resultWork.Status.Conditions, fleetv1beta1.WorkConditionTypeAvailable) + if !condition.IsConditionStatusTrue(availCond, resultWork.Generation) { + By(fmt.Sprintf("availCond not true: %v", availCond)) return false } for _, manifestCondition := range resultWork.Status.ManifestConditions { - applyCond := meta.FindStatusCondition(manifestCondition.Conditions, fleetv1beta1.WorkConditionTypeAvailable) - if !condition.IsConditionStatusTrue(applyCond, resultWork.Generation) { + if !meta.IsStatusConditionTrue(manifestCondition.Conditions, fleetv1beta1.WorkConditionTypeAvailable) { + By(fmt.Sprintf("manifest availCond not true %v : %v", manifestCondition.Identifier, manifestCondition.Conditions)) return false } } From 8011c10c8a8ac7517631dffc137f04698c6d2995 Mon Sep 17 00:00:00 2001 From: Ryan Zhang Date: Tue, 12 Mar 2024 18:43:44 -0700 Subject: [PATCH 4/5] address comment --- pkg/controllers/work/apply_controller.go | 37 ++++++++++++++----- .../work/apply_controller_integration_test.go | 2 +- 
 pkg/controllers/work/apply_controller_test.go |  12 +++---
 test/e2e/rollout_test.go                      |   2 +-
 4 files changed, 36 insertions(+), 17 deletions(-)

diff --git a/pkg/controllers/work/apply_controller.go b/pkg/controllers/work/apply_controller.go
index 1b079e680..710df2bb6 100644
--- a/pkg/controllers/work/apply_controller.go
+++ b/pkg/controllers/work/apply_controller.go
@@ -64,7 +64,7 @@ const (
 // WorkCondition condition reasons
 const (
 	workAppliedFailedReason = "WorkAppliedFailed"
-	workAppliedCompleteReason = "WorkAppliedComplete"
+	workAppliedCompletedReason = "WorkAppliedCompleted"
 	workNotAvailableYetReason = "WorkNotAvailableYet"
 	workAvailabilityUnknownReason = "WorkAvailabilityUnknown"
 	workAvailableReason = "WorkAvailable"
@@ -72,9 +72,11 @@ const (
 	// ManifestApplyFailedReason is the reason string of condition when it failed to apply manifest.
 	ManifestApplyFailedReason = "ManifestApplyFailed"
 	// ManifestAlreadyUpToDateReason is the reason string of condition when the manifest is already up to date.
-	ManifestAlreadyUpToDateReason = "ManifestAlreadyUpToDate"
+	ManifestAlreadyUpToDateReason  = "ManifestAlreadyUpToDate"
+	manifestAlreadyUpToDateMessage = "Manifest is already up to date"
 	// ManifestNeedsUpdateReason is the reason string of condition when the manifest needs to be updated.
-	ManifestNeedsUpdateReason = "ManifestNeedsUpdate"
+	ManifestNeedsUpdateReason  = "ManifestNeedsUpdate"
+	manifestNeedsUpdateMessage = "Manifest has just been updated and its availability is being checked"
 )
 
 // ApplyWorkReconciler reconciles a Work object
@@ -613,6 +615,7 @@ func (r *ApplyWorkReconciler) patchCurrentResource(ctx context.Context, gvr sche
 }
 
 // constructWorkCondition constructs the work condition based on the apply result
+// TODO: handle the special case where there are no results
 func constructWorkCondition(results []applyResult, work *fleetv1beta1.Work) []error {
 	var errs []error
 	// Update manifestCondition based on the results.
@@ -809,6 +812,7 @@ func buildManifestCondition(err error, action applyAction, observedGeneration in
 		applyCondition.Message = fmt.Sprintf("Failed to apply manifest: %v", err)
 		availableCondition.Status = metav1.ConditionUnknown
 		availableCondition.Reason = ManifestApplyFailedReason
+		availableCondition.Message = "Manifest is not applied yet"
 	} else {
 		applyCondition.Status = metav1.ConditionTrue
 		// the first three actions types means we did write to the cluster thus the availability is unknown
@@ -816,33 +820,46 @@ func buildManifestCondition(err error, action applyAction, observedGeneration in
 		switch action {
 		case manifestCreatedAction:
 			applyCondition.Reason = string(manifestCreatedAction)
+			applyCondition.Message = "Manifest is created successfully"
 			availableCondition.Status = metav1.ConditionUnknown
 			availableCondition.Reason = ManifestNeedsUpdateReason
+			availableCondition.Message = manifestNeedsUpdateMessage
 		case manifestThreeWayMergePatchAction:
 			applyCondition.Reason = string(manifestThreeWayMergePatchAction)
+			applyCondition.Message = "Manifest is patched successfully"
 			availableCondition.Status = metav1.ConditionUnknown
 			availableCondition.Reason = ManifestNeedsUpdateReason
+			availableCondition.Message = manifestNeedsUpdateMessage
 		case manifestServerSideAppliedAction:
 			applyCondition.Reason = string(manifestServerSideAppliedAction)
+			applyCondition.Message = "Manifest is patched successfully"
 			availableCondition.Status = metav1.ConditionUnknown
 			availableCondition.Reason = ManifestNeedsUpdateReason
+			availableCondition.Message = manifestNeedsUpdateMessage
 		case manifestAvailableAction:
 			applyCondition.Reason = ManifestAlreadyUpToDateReason
+			applyCondition.Message = manifestAlreadyUpToDateMessage
 			availableCondition.Status = metav1.ConditionTrue
 			availableCondition.Reason = string(manifestAvailableAction)
+			availableCondition.Message = "Manifest is trackable and available now"
 		case manifestNotAvailableYetAction:
 			applyCondition.Reason = ManifestAlreadyUpToDateReason
+			applyCondition.Message = manifestAlreadyUpToDateMessage
 			availableCondition.Status = metav1.ConditionFalse
 			availableCondition.Reason = string(manifestNotAvailableYetAction)
+			availableCondition.Message = "Manifest is trackable but not available yet"
+		// we cannot stay stuck at unknown, so we have to mark it as true
 		case manifestNotTrackableAction:
 			applyCondition.Reason = ManifestAlreadyUpToDateReason
+			applyCondition.Message = manifestAlreadyUpToDateMessage
 			availableCondition.Status = metav1.ConditionTrue
 			availableCondition.Reason = string(manifestNotTrackableAction)
+			availableCondition.Message = "Manifest is not trackable"
 		default:
 			klog.ErrorS(controller.ErrUnexpectedBehavior, "Unknown apply action result", "applyResult", action)
@@ -852,11 +869,11 @@ func buildManifestCondition(err error, action applyAction, observedGeneration in
 	return []metav1.Condition{applyCondition, availableCondition}
 }
 
-// buildWorkCondition generate applied and available status condition for work.
-// If one of the manifests is applied failed on the spoke, the applied status condition of the work is false.
-// If one of the manifests is not available yet on the spoke, the available status condition of the work is false.
+// buildWorkCondition generates the overall applied and available status conditions for the work.
+// If one of the manifests failed to apply on the member cluster, the applied status condition of the work is false.
+// If one of the manifests is not available yet on the member cluster, the available status condition of the work is false.
// If all the manifests are available, the available status condition of the work is true. -// Otherwise, the available status condition of the work is unknown as we can't track some of them. +// Otherwise, the available status condition of the work is unknown. func buildWorkCondition(manifestConditions []fleetv1beta1.ManifestCondition, observedGeneration int64) []metav1.Condition { applyCondition := metav1.Condition{ Type: fleetv1beta1.WorkConditionTypeApplied, @@ -881,8 +898,8 @@ func buildWorkCondition(manifestConditions []fleetv1beta1.ManifestCondition, obs } } applyCondition.Status = metav1.ConditionTrue - applyCondition.Reason = workAppliedCompleteReason - applyCondition.Message = "Apply work complete" + applyCondition.Reason = workAppliedCompletedReason + applyCondition.Message = "Work is applied successfully" // we mark the entire work available condition to unknown if one of the manifests is not known yet for _, manifestCond := range manifestConditions { cond := meta.FindStatusCondition(manifestCond.Conditions, fleetv1beta1.WorkConditionTypeAvailable) @@ -915,8 +932,10 @@ func buildWorkCondition(manifestConditions []fleetv1beta1.ManifestCondition, obs availableCondition.Status = metav1.ConditionTrue if trackable { availableCondition.Reason = workAvailableReason + availableCondition.Message = "Work is available now" } else { availableCondition.Reason = workNotTrackableReason + availableCondition.Message = "Work's availability is not trackable" } return []metav1.Condition{applyCondition, availableCondition} } diff --git a/pkg/controllers/work/apply_controller_integration_test.go b/pkg/controllers/work/apply_controller_integration_test.go index 8efab38d8..0510f5c69 100644 --- a/pkg/controllers/work/apply_controller_integration_test.go +++ b/pkg/controllers/work/apply_controller_integration_test.go @@ -125,7 +125,7 @@ var _ = Describe("Work Controller", func() { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: workAppliedCompleteReason, + Reason: workAppliedCompletedReason, }, { Type: fleetv1beta1.WorkConditionTypeAvailable, diff --git a/pkg/controllers/work/apply_controller_test.go b/pkg/controllers/work/apply_controller_test.go index d9d19d1cb..f0e1b225c 100644 --- a/pkg/controllers/work/apply_controller_test.go +++ b/pkg/controllers/work/apply_controller_test.go @@ -520,7 +520,7 @@ func TestGenerateWorkCondition(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: workAppliedCompleteReason, + Reason: workAppliedCompletedReason, }, { Type: fleetv1beta1.WorkConditionTypeAvailable, @@ -552,7 +552,7 @@ func TestGenerateWorkCondition(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: workAppliedCompleteReason, + Reason: workAppliedCompletedReason, }, { Type: fleetv1beta1.WorkConditionTypeAvailable, @@ -600,7 +600,7 @@ func TestGenerateWorkCondition(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: workAppliedCompleteReason, + Reason: workAppliedCompletedReason, }, { Type: fleetv1beta1.WorkConditionTypeAvailable, @@ -664,7 +664,7 @@ func TestGenerateWorkCondition(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: workAppliedCompleteReason, + Reason: workAppliedCompletedReason, }, { Type: fleetv1beta1.WorkConditionTypeAvailable, @@ -712,7 +712,7 @@ func TestGenerateWorkCondition(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: 
metav1.ConditionTrue, - Reason: workAppliedCompleteReason, + Reason: workAppliedCompletedReason, }, { Type: fleetv1beta1.WorkConditionTypeAvailable, @@ -760,7 +760,7 @@ func TestGenerateWorkCondition(t *testing.T) { { Type: fleetv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: workAppliedCompleteReason, + Reason: workAppliedCompletedReason, }, { Type: fleetv1beta1.WorkConditionTypeAvailable, diff --git a/test/e2e/rollout_test.go b/test/e2e/rollout_test.go index d07a96d04..cafad67c6 100644 --- a/test/e2e/rollout_test.go +++ b/test/e2e/rollout_test.go @@ -133,7 +133,7 @@ var _ = Describe("placing wrapped resources using a CRP", Ordered, func() { { Type: placementv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: "WorkAppliedComplete", + Reason: "WorkAppliedCompleted", ObservedGeneration: 1, }, { From 417da610b81f1d7341232d839fb66aa9cfc94e5d Mon Sep 17 00:00:00 2001 From: Ryan Zhang Date: Tue, 12 Mar 2024 21:34:36 -0700 Subject: [PATCH 5/5] fix e2e --- test/e2e/rollout_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/rollout_test.go b/test/e2e/rollout_test.go index cafad67c6..06342d613 100644 --- a/test/e2e/rollout_test.go +++ b/test/e2e/rollout_test.go @@ -112,7 +112,7 @@ var _ = Describe("placing wrapped resources using a CRP", Ordered, func() { { Type: placementv1beta1.WorkConditionTypeApplied, Status: metav1.ConditionTrue, - Reason: "WorkAppliedComplete", + Reason: "WorkAppliedCompleted", ObservedGeneration: 1, }, {