AUTH-543: Add optional operand deletion condition #1902

Open · wants to merge 1 commit into base: master
42 changes: 25 additions & 17 deletions pkg/operator/apiserver/controller/workload/workload.go

Would it be beneficial to add some unit tests for the new deletion behavior?

Member Author: The unit tests would end up using mocks for most of the stuff that the deletion does; but on second thought it might be beneficial to test the operator status, so I'll add some 👍

Member: It would be good to import and use this change in one of the consumers (e.g. cluster-authentication-operator) and show that the CI is passing there.
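For a sense of what that consumer change would look like, here is a minimal sketch of a delegate adopting the new `Sync` signature. All names below (`oauthAPIServerWorkload`, `shouldDeleteOperand`, `deleteOperand`, `applyDeployment`) are hypothetical stand-ins, not actual cluster-authentication-operator code:

```go
// Hypothetical consumer delegate adopting the new Sync signature; the type
// and helper names are illustrative only.
package consumer

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"

	"github.com/openshift/library-go/pkg/controller/factory"
)

type oauthAPIServerWorkload struct {
	shouldDeleteOperand func() bool
	deleteOperand       func(ctx context.Context) error
	applyDeployment     func(ctx context.Context, syncCtx factory.SyncContext) (*appsv1.Deployment, bool, []error)
}

// Sync returns (workload, atHighestGeneration, removeConditions, errs); the new
// third value asks the workload controller to drop its status conditions after
// the operand has been deleted.
func (w *oauthAPIServerWorkload) Sync(ctx context.Context, syncCtx factory.SyncContext) (*appsv1.Deployment, bool, bool, []error) {
	if w.shouldDeleteOperand() {
		if err := w.deleteOperand(ctx); err != nil {
			return nil, false, false, []error{err}
		}
		// No workload anymore: report removeConditions=true so the controller
		// removes its Deployment{Available,Degraded,Progressing} and
		// WorkloadDegraded conditions instead of reporting them as failing.
		return nil, false, true, nil
	}

	deployment, atHighestGeneration, errs := w.applyDeployment(ctx, syncCtx)
	return deployment, atHighestGeneration, false, errs
}
```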

@@ -35,7 +35,7 @@ const (
 // Delegate captures a set of methods that hold a custom logic
 type Delegate interface {
 	// Sync a method that will be used for delegation. It should bring the desired workload into operation.
-	Sync(ctx context.Context, controllerContext factory.SyncContext) (*appsv1.Deployment, bool, []error)
+	Sync(ctx context.Context, controllerContext factory.SyncContext) (*appsv1.Deployment, bool, bool, []error)

 	// PreconditionFulfilled a method that indicates whether all prerequisites are met and we can Sync.
 	//
@@ -83,7 +83,7 @@ func NewController(instanceName, operatorNamespace, targetNamespace, targetOpera
 	kubeClient kubernetes.Interface,
 	podLister corev1listers.PodLister,
 	informers []factory.Informer,
-	tagetNamespaceInformers []factory.Informer,
+	targetNamespaceInformers []factory.Informer,
 	delegate Delegate,
 	openshiftClusterConfigClient openshiftconfigclientv1.ClusterOperatorInterface,
 	eventRecorder events.Recorder,
@@ -102,11 +102,11 @@ func NewController(instanceName, operatorNamespace, targetNamespace, targetOpera
 		delegate:                     delegate,
 		openshiftClusterConfigClient: openshiftClusterConfigClient,
 		versionRecorder:              versionRecorder,
-		queue:                        workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), instanceName),
+		queue:                        workqueue.NewNamedRateLimitingQueue(workqueue.DefaultTypedControllerRateLimiter[any](), instanceName),
 	}

 	c := factory.New()
-	for _, nsi := range tagetNamespaceInformers {
+	for _, nsi := range targetNamespaceInformers {
 		c.WithNamespaceInformer(nsi, targetNamespace)
 	}

@@ -130,14 +130,14 @@ func (c *Controller) sync(ctx context.Context, controllerContext factory.SyncCon
 	}

 	if fulfilled, err := c.delegate.PreconditionFulfilled(ctx); err != nil {
Member: Wouldn't it be better to let the delegate communicate everything as we do with the preconditions?

-		return c.updateOperatorStatus(ctx, operatorStatus, nil, false, false, []error{err})
+		return c.updateOperatorStatus(ctx, operatorStatus, nil, false, false, false, []error{err})
 	} else if !fulfilled {
-		return c.updateOperatorStatus(ctx, operatorStatus, nil, false, false, nil)
+		return c.updateOperatorStatus(ctx, operatorStatus, nil, false, false, false, nil)
 	}

-	workload, operatorConfigAtHighestGeneration, errs := c.delegate.Sync(ctx, controllerContext)
+	workload, operatorConfigAtHighestGeneration, removeConditions, errs := c.delegate.Sync(ctx, controllerContext)

-	return c.updateOperatorStatus(ctx, operatorStatus, workload, operatorConfigAtHighestGeneration, true, errs)
+	return c.updateOperatorStatus(ctx, operatorStatus, workload, operatorConfigAtHighestGeneration, true, removeConditions, errs)
 }

 // shouldSync checks ManagementState to determine if we can run this operator, probably set by a cluster administrator.
@@ -159,22 +159,30 @@ func (c *Controller) shouldSync(ctx context.Context, operatorSpec *operatorv1.Op
 }

 // updateOperatorStatus updates the status based on the actual workload and errors that might have occurred during synchronization.
-func (c *Controller) updateOperatorStatus(ctx context.Context, previousStatus *operatorv1.OperatorStatus, workload *appsv1.Deployment, operatorConfigAtHighestGeneration bool, preconditionsReady bool, errs []error) (err error) {
+func (c *Controller) updateOperatorStatus(ctx context.Context, previousStatus *operatorv1.OperatorStatus, workload *appsv1.Deployment, operatorConfigAtHighestGeneration, preconditionsReady, removeConditions bool, errs []error) (err error) {
 	if errs == nil {
 		errs = []error{}
 	}

-	deploymentAvailableCondition := applyoperatorv1.OperatorCondition().
-		WithType(fmt.Sprintf("%sDeployment%s", c.conditionsPrefix, operatorv1.OperatorStatusTypeAvailable))
-
-	workloadDegradedCondition := applyoperatorv1.OperatorCondition().
-		WithType(fmt.Sprintf("%sWorkloadDegraded", c.conditionsPrefix))
+	typeAvailable := fmt.Sprintf("%sDeployment%s", c.conditionsPrefix, operatorv1.OperatorStatusTypeAvailable)
+	typeDegraded := fmt.Sprintf("%sDeployment%s", c.conditionsPrefix, operatorv1.OperatorStatusTypeDegraded)
+	typeProgressing := fmt.Sprintf("%sDeployment%s", c.conditionsPrefix, operatorv1.OperatorStatusTypeProgressing)
+	typeWorkloadDegraded := fmt.Sprintf("%sWorkload%s", c.conditionsPrefix, operatorv1.OperatorStatusTypeDegraded)
+
+	deploymentAvailableCondition := applyoperatorv1.OperatorCondition().WithType(typeAvailable)
+	workloadDegradedCondition := applyoperatorv1.OperatorCondition().WithType(typeWorkloadDegraded)
+	deploymentDegradedCondition := applyoperatorv1.OperatorCondition().WithType(typeDegraded)
+	deploymentProgressingCondition := applyoperatorv1.OperatorCondition().WithType(typeProgressing)

Member: It might make more sense to still consider preconditions even when the workload will be deleted later. Thoughts?

Member Author: Given that the delete would happen during the delegate's sync, we shouldn't normally reach the point of removing the conditions if preconditions are failing. But you are right, we should safeguard against this -- I'll add a check before removing conditions.

-	deploymentDegradedCondition := applyoperatorv1.OperatorCondition().
-		WithType(fmt.Sprintf("%sDeploymentDegraded", c.conditionsPrefix))
+	if preconditionsReady && removeConditions && workload == nil {
+		jsonPatch := v1helpers.RemoveConditionsJSONPatch(previousStatus, []string{typeAvailable, typeDegraded, typeProgressing, typeWorkloadDegraded})
Member: It seems we cannot use SSA for removing conditions, but I am not sure whether a patch is better than v1helpers.UpdateStatus here. It would be good to get an additional opinion on this.

Member Author: The advantage of the patch, in my opinion, is that we add to the patch only the specific conditions we want to remove, so it's more concise -- we don't have to manage the whole status object to perform the update, which makes it less prone to mistakes.

What do you think, @bertinatto?

+		if jsonPatch.IsEmpty() {
+			return kerrors.NewAggregate(errs)
+		}
+
-	deploymentProgressingCondition := applyoperatorv1.OperatorCondition().
-		WithType(fmt.Sprintf("%sDeployment%s", c.conditionsPrefix, operatorv1.OperatorStatusTypeProgressing))
+		err = c.operatorClient.PatchOperatorStatus(ctx, jsonPatch)
+		return kerrors.NewAggregate(append(errs, err))
+	}

 	status := applyoperatorv1.OperatorStatus()
 	if workload != nil {
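To make the patch-versus-UpdateStatus discussion above concrete, here is a small self-contained sketch of the kind of patch the new branch sends. It uses only `jsonpatch` calls that already appear in this PR; the condition type is an arbitrary example, and the expected output shape matches the expectations in the new unit tests:

```go
package main

import (
	"fmt"

	"github.com/openshift/library-go/pkg/apiserver/jsonpatch"
)

func main() {
	// Each removal is guarded by a "test" op, so the patch fails (instead of
	// removing the wrong entry) if the conditions array changed concurrently.
	path := "/status/conditions/0"
	patch := jsonpatch.New().
		WithRemove(path, jsonpatch.NewTestCondition(path+"/type", "APIServerDeploymentAvailable"))

	raw, err := patch.Marshal()
	if err != nil {
		panic(err)
	}
	// Prints a test/remove pair:
	// [{"op":"test","path":"/status/conditions/0/type","value":"APIServerDeploymentAvailable"},{"op":"remove","path":"/status/conditions/0"}]
	fmt.Println(string(raw))
}
```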
83 changes: 76 additions & 7 deletions pkg/operator/apiserver/controller/workload/workload_test.go
@@ -3,16 +3,17 @@ package workload
 import (
 	"context"
 	"fmt"
-	clocktesting "k8s.io/utils/clock/testing"
 	"testing"
 	"time"

 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	corev1listers "k8s.io/client-go/listers/core/v1"
+	clocktesting "k8s.io/utils/clock/testing"
 	"k8s.io/utils/ptr"

 	operatorv1 "github.com/openshift/api/operator/v1"
+	"github.com/openshift/library-go/pkg/apiserver/jsonpatch"
 	"github.com/openshift/library-go/pkg/controller/factory"
 	"github.com/openshift/library-go/pkg/operator/events"
 	"github.com/openshift/library-go/pkg/operator/v1helpers"
@@ -36,15 +37,16 @@ type testDelegate struct {
 	// for Sync
 	syncWorkload            *appsv1.Deployment
 	syncIsAtHighestRevision bool
+	operandWorkloadDeleted  bool
 	syncErrrors             []error
 }

 func (d *testDelegate) PreconditionFulfilled(_ context.Context) (bool, error) {
 	return d.preconditionReady, d.preconditionErr
 }

-func (d *testDelegate) Sync(_ context.Context, _ factory.SyncContext) (*appsv1.Deployment, bool, []error) {
-	return d.syncWorkload, d.syncIsAtHighestRevision, d.syncErrrors
+func (d *testDelegate) Sync(_ context.Context, _ factory.SyncContext) (*appsv1.Deployment, bool, bool, []error) {
+	return d.syncWorkload, d.syncIsAtHighestRevision, d.operandWorkloadDeleted, d.syncErrrors
 }

 func TestUpdateOperatorStatus(t *testing.T) {
@@ -55,11 +57,13 @@ func TestUpdateOperatorStatus(t *testing.T) {
 		pods                            []*corev1.Pod
 		operatorConfigAtHighestRevision bool
 		operatorPreconditionsNotReady   bool
+		operatorOperandWorkloadDeleted  bool
 		preconditionError               error
 		errors                          []error
 		previousConditions              []operatorv1.OperatorCondition

-		validateOperatorStatus func(*operatorv1.OperatorStatus) error
+		validateOperatorStatus          func(*operatorv1.OperatorStatus) error
+		validateOperatorStatusJSONPatch func(*jsonpatch.PatchSet) error
 	}{
 		{
 			name: "scenario: no workload, no errors thus we are degraded and we are progressing",
@@ -672,6 +676,44 @@ func TestUpdateOperatorStatus(t *testing.T) {
 				return areCondidtionsEqual(expectedConditions, actualStatus.Conditions)
 			},
 		},
+		{
+			name:                           "the delegate controller deletes its operand workload and we remove the conditions",
+			workload:                       nil,
+			operatorOperandWorkloadDeleted: true,
+			previousConditions: []operatorv1.OperatorCondition{
+				{
+					Type:    fmt.Sprintf("%sDeployment%s", defaultControllerName, operatorv1.OperatorStatusTypeAvailable),
+					Status:  operatorv1.ConditionTrue,
+					Reason:  "AsExpected",
+					Message: "",
+				},
+				{
+					Type:   fmt.Sprintf("%sWorkloadDegraded", defaultControllerName),
+					Status: operatorv1.ConditionFalse,
+				},
+				{
+					Type:    fmt.Sprintf("%sDeploymentDegraded", defaultControllerName),
+					Status:  operatorv1.ConditionFalse,
+					Reason:  "AsExpected",
+					Message: "",
+				},
+				{
+					Type:    fmt.Sprintf("%sDeployment%s", defaultControllerName, operatorv1.OperatorStatusTypeProgressing),
+					Status:  operatorv1.ConditionFalse,
+					Reason:  "AsExpected",
+					Message: "",
+				},
+			},
+			validateOperatorStatusJSONPatch: func(actualJSONPatch *jsonpatch.PatchSet) error {
+				path := "/status/conditions/0"
+				expectedJSONPatch := jsonpatch.New().
+					WithRemove(path, jsonpatch.NewTestCondition(path+"/type", "DeploymentAvailable")).
+					WithRemove(path, jsonpatch.NewTestCondition(path+"/type", "WorkloadDegraded")).
+					WithRemove(path, jsonpatch.NewTestCondition(path+"/type", "DeploymentDegraded")).
+					WithRemove(path, jsonpatch.NewTestCondition(path+"/type", "DeploymentProgressing"))
+				return validateJSONPatch(expectedJSONPatch, actualJSONPatch)
+			},
+		},
 	}

 	for _, scenario := range scenarios {
@@ -697,6 +739,7 @@ func TestUpdateOperatorStatus(t *testing.T) {

 				syncWorkload:            scenario.workload,
 				syncIsAtHighestRevision: scenario.operatorConfigAtHighestRevision,
+				operandWorkloadDeleted:  scenario.operatorOperandWorkloadDeleted,
 				syncErrrors:             scenario.errors,
 			}

@@ -718,9 +761,17 @@ func TestUpdateOperatorStatus(t *testing.T) {
 			if err != nil {
 				t.Fatal(err)
 			}
-			err = scenario.validateOperatorStatus(actualOperatorStatus)
-			if err != nil {
-				t.Fatal(err)
+
+			if scenario.validateOperatorStatus != nil {
+				if err := scenario.validateOperatorStatus(actualOperatorStatus); err != nil {
+					t.Fatal(err)
+				}
 			}
+
+			if scenario.validateOperatorStatusJSONPatch != nil {
+				if err := scenario.validateOperatorStatusJSONPatch(fakeOperatorClient.GetPatchedOperatorStatus()); err != nil {
+					t.Fatal(err)
+				}
+			}
 		})
 	}
@@ -771,3 +822,21 @@ func areCondidtionsEqual(expectedConditions []operatorv1.OperatorCondition, actu
 	}
 	return nil
 }
+
+func validateJSONPatch(expected, actual *jsonpatch.PatchSet) error {
+	expectedSerializedPatch, err := expected.Marshal()
+	if err != nil {
+		return err
+	}
+	actualSerializedPatch := []byte("null")
+	if actual != nil {
+		actualSerializedPatch, err = actual.Marshal()
+		if err != nil {
+			return err
+		}
+	}
+	if string(expectedSerializedPatch) != string(actualSerializedPatch) {
+		return fmt.Errorf("incorrect JSONPatch, expected = %s, got = %s", string(expectedSerializedPatch), string(actualSerializedPatch))
+	}
+	return nil
+}
22 changes: 1 addition & 21 deletions pkg/operator/staleconditions/remove_stale_conditions.go
@@ -2,10 +2,8 @@ package staleconditions

 import (
 	"context"
-	"fmt"
 	"time"

-	"github.com/openshift/library-go/pkg/apiserver/jsonpatch"
 	"github.com/openshift/library-go/pkg/controller/factory"
 	"github.com/openshift/library-go/pkg/operator/events"
 	"github.com/openshift/library-go/pkg/operator/v1helpers"
@@ -45,25 +43,7 @@ func (c RemoveStaleConditionsController) sync(ctx context.Context, syncContext f
 		return err
 	}

-	var removedCount int
-	jsonPatch := jsonpatch.New()
-	for i, existingCondition := range operatorStatus.Conditions {
-		for _, conditionTypeToRemove := range c.conditionTypesToRemove {
-			if existingCondition.Type != conditionTypeToRemove {
-				continue
-			}
-			removeAtIndex := i
-			if !jsonPatch.IsEmpty() {
-				removeAtIndex = removeAtIndex - removedCount
-			}
-			jsonPatch.WithRemove(
-				fmt.Sprintf("/status/conditions/%d", removeAtIndex),
-				jsonpatch.NewTestCondition(fmt.Sprintf("/status/conditions/%d/type", removeAtIndex), conditionTypeToRemove),
-			)
-			removedCount++
-		}
-	}
-
+	jsonPatch := v1helpers.RemoveConditionsJSONPatch(operatorStatus, c.conditionTypesToRemove)
 	if jsonPatch.IsEmpty() {
 		return nil
 	}
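For reference, the extracted `v1helpers.RemoveConditionsJSONPatch` helper presumably behaves like the inline loop it replaces above. The following is a sketch reconstructed from that removed code and consistent with the expected patches in the new tests below; the actual helper in pkg/operator/v1helpers may differ in details:

```go
// Sketch of the new helper, reconstructed from the inline loop it replaces.
package v1helpers

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"

	"github.com/openshift/library-go/pkg/apiserver/jsonpatch"
)

func RemoveConditionsJSONPatch(status *operatorv1.OperatorStatus, conditionTypesToRemove []string) *jsonpatch.PatchSet {
	if status == nil {
		return nil // matches the "nil status" unit test below
	}
	removed := 0
	patch := jsonpatch.New()
	for i, condition := range status.Conditions {
		for _, conditionType := range conditionTypesToRemove {
			if condition.Type != conditionType {
				continue
			}
			// Every "remove" shifts the remaining entries left, so later
			// removals must subtract the number of removals done so far.
			index := i - removed
			patch.WithRemove(
				fmt.Sprintf("/status/conditions/%d", index),
				jsonpatch.NewTestCondition(fmt.Sprintf("/status/conditions/%d/type", index), conditionType),
			)
			removed++
		}
	}
	return patch
}
```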
80 changes: 80 additions & 0 deletions pkg/operator/v1helpers/helper_test.go
@@ -287,3 +287,83 @@ func TestRemoveCondition(t *testing.T) {
 		})
 	}
 }

+func TestRemoveConditionsJSONPatch(t *testing.T) {
+	operatorStatus := &operatorsv1.OperatorStatus{
+		Conditions: []operatorsv1.OperatorCondition{
+			newOperatorCondition("one", "True", "my-reason", "my-message", nil),
+			newOperatorCondition("two", "True", "my-reason", "my-message", nil),
+			newOperatorCondition("three", "True", "my-reason", "my-message", nil),
+		},
+	}
+
+	t.Run("nil status", func(tt *testing.T) {
+		jsonPatch := RemoveConditionsJSONPatch(nil, []string{"four", "five"})
+		if jsonPatch != nil {
+			raw, err := jsonPatch.Marshal()
+			if err != nil {
+				tt.Fatalf("could not marshal json patch: %v", err)
+			}
+			tt.Errorf("expected nil patch, got: %s", string(raw))
+		}
+	})
+
+	t.Run("no conditions", func(tt *testing.T) {
+		jsonPatch := RemoveConditionsJSONPatch(&operatorsv1.OperatorStatus{}, []string{"four", "five"})
+		if !jsonPatch.IsEmpty() {
+			raw, err := jsonPatch.Marshal()
+			if err != nil {
+				tt.Fatalf("could not marshal json patch: %v", err)
+			}
+			tt.Errorf("expected empty patch; got: %s", string(raw))
+		}
+	})
+
+	t.Run("nothing to remove", func(tt *testing.T) {
+		jsonPatch := RemoveConditionsJSONPatch(operatorStatus, []string{})
+		if !jsonPatch.IsEmpty() {
+			raw, err := jsonPatch.Marshal()
+			if err != nil {
+				tt.Fatalf("could not marshal json patch: %v", err)
+			}
+			tt.Errorf("expected empty patch; got: %s", string(raw))
+		}
+	})
+
+	t.Run("no conditions found to remove", func(tt *testing.T) {
+		jsonPatch := RemoveConditionsJSONPatch(operatorStatus, []string{"four", "five"})
+		if !jsonPatch.IsEmpty() {
+			raw, err := jsonPatch.Marshal()
+			if err != nil {
+				tt.Fatalf("could not marshal json patch: %v", err)
+			}
+			tt.Errorf("expected empty patch; got: %s", string(raw))
+		}
+	})
+
+	t.Run("remove one", func(tt *testing.T) {
+		jsonPatch := RemoveConditionsJSONPatch(operatorStatus, []string{"two", "four"})
+		raw, err := jsonPatch.Marshal()
+		if err != nil {
+			tt.Fatalf("could not marshal json patch: %v", err)
+		}
+
+		expectedJSONPatch := `[{"op":"test","path":"/status/conditions/1/type","value":"two"},{"op":"remove","path":"/status/conditions/1"}]`
+		if expectedJSONPatch != string(raw) {
+			tt.Errorf("unexpected json patch: %s", diff.ObjectDiff(expectedJSONPatch, string(raw)))
+		}
+	})
+
+	t.Run("remove all", func(tt *testing.T) {
+		jsonPatch := RemoveConditionsJSONPatch(operatorStatus, []string{"one", "two", "three", "four"})
+		raw, err := jsonPatch.Marshal()
+		if err != nil {
+			tt.Fatalf("could not marshal json patch: %v", err)
+		}
+
+		expectedJSONPatch := `[{"op":"test","path":"/status/conditions/0/type","value":"one"},{"op":"remove","path":"/status/conditions/0"},{"op":"test","path":"/status/conditions/0/type","value":"two"},{"op":"remove","path":"/status/conditions/0"},{"op":"test","path":"/status/conditions/0/type","value":"three"},{"op":"remove","path":"/status/conditions/0"}]`
+		if expectedJSONPatch != string(raw) {
+			tt.Errorf("unexpected json patch: %s", diff.ObjectDiff(expectedJSONPatch, string(raw)))
+		}
+	})
+}