Skip to content

Commit

Permalink
Revert "Restart services when their NodeSelector changes"
Browse files Browse the repository at this point in the history
This reverts commit df33f3e.

PR openstack-k8s-operators#353 was thought to be the solution to a problem observed while
testing updates to how we manage cinder's LVM backend. This
pertained to the use of a nodeSelector label that pins the c-vol
pod to the k8s node where the LVM backend data is stored.
Unfortunately, two subsequent issues were uncovered.

First, the nodeSelector already affects a service's statefulset,
which in turn causes pods to be restarted automatically. There's
no need to add the nodeSelector to the collection of hashes we use
to restart pods when their configuration changes.

Second, the actual problem scenario we hoped to solve was getting
a pod to be recreated when it got stuck in the Pending state due
to a bad nodeSelector value. Currently we use an "Ordered" pod
management policy, which means a Pending pod will need to be
manually deleted. Hashing the nodeSelector has no impact on pods
stuck in the Pending state.
  • Loading branch information
ASBishop committed Mar 8, 2024
1 parent 7d067f6 commit 1bc27a3
Show file tree
Hide file tree
Showing 5 changed files with 0 additions and 67 deletions.
14 changes: 0 additions & 14 deletions controllers/cinderapi_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -706,20 +706,6 @@ func (r *CinderAPIReconciler) reconcileNormal(ctx context.Context, instance *cin
// all cert input checks out so report InputReady
instance.Status.Conditions.MarkTrue(condition.TLSInputReadyCondition, condition.InputReadyMessage)

//
// Hash the nodeSelector so the pod is recreated when it changes
//
err = cinder.AddNodeSelectorHash(instance.Spec.NodeSelector, &configVars)
if err != nil {
instance.Status.Conditions.Set(condition.FalseCondition(
condition.ServiceConfigReadyCondition,
condition.ErrorReason,
condition.SeverityWarning,
condition.ServiceConfigReadyErrorMessage,
err.Error()))
return ctrl.Result{}, err
}

//
// Create secrets required as input for the Service and calculate an overall hash of hashes
//
Expand Down
14 changes: 0 additions & 14 deletions controllers/cinderbackup_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -410,20 +410,6 @@ func (r *CinderBackupReconciler) reconcileNormal(ctx context.Context, instance *
// all cert input checks out so report InputReady
instance.Status.Conditions.MarkTrue(condition.TLSInputReadyCondition, condition.InputReadyMessage)

//
// Hash the nodeSelector so the pod is recreated when it changes
//
err = cinder.AddNodeSelectorHash(instance.Spec.NodeSelector, &configVars)
if err != nil {
instance.Status.Conditions.Set(condition.FalseCondition(
condition.ServiceConfigReadyCondition,
condition.ErrorReason,
condition.SeverityWarning,
condition.ServiceConfigReadyErrorMessage,
err.Error()))
return ctrl.Result{}, err
}

//
// Create secrets required as input for the Service and calculate an overall hash of hashes
//
Expand Down
14 changes: 0 additions & 14 deletions controllers/cinderscheduler_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -409,20 +409,6 @@ func (r *CinderSchedulerReconciler) reconcileNormal(ctx context.Context, instanc
// all cert input checks out so report InputReady
instance.Status.Conditions.MarkTrue(condition.TLSInputReadyCondition, condition.InputReadyMessage)

//
// Hash the nodeSelector so the pod is recreated when it changes
//
err = cinder.AddNodeSelectorHash(instance.Spec.NodeSelector, &configVars)
if err != nil {
instance.Status.Conditions.Set(condition.FalseCondition(
condition.ServiceConfigReadyCondition,
condition.ErrorReason,
condition.SeverityWarning,
condition.ServiceConfigReadyErrorMessage,
err.Error()))
return ctrl.Result{}, err
}

//
// Create ConfigMaps required as input for the Service and calculate an overall hash of hashes
//
Expand Down
14 changes: 0 additions & 14 deletions controllers/cindervolume_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -434,20 +434,6 @@ func (r *CinderVolumeReconciler) reconcileNormal(ctx context.Context, instance *
return ctrl.Result{}, err
}

//
// Hash the nodeSelector so the pod is recreated when it changes
//
err = cinder.AddNodeSelectorHash(instance.Spec.NodeSelector, &configVars)
if err != nil {
instance.Status.Conditions.Set(condition.FalseCondition(
condition.ServiceConfigReadyCondition,
condition.ErrorReason,
condition.SeverityWarning,
condition.ServiceConfigReadyErrorMessage,
err.Error()))
return ctrl.Result{}, err
}

//
// create hash over all the different input resources to identify if any those changed
// and a restart/recreate is required.
Expand Down
11 changes: 0 additions & 11 deletions pkg/cinder/funcs.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,6 @@ package cinder
import (
common "github.com/openstack-k8s-operators/lib-common/modules/common"
"github.com/openstack-k8s-operators/lib-common/modules/common/affinity"
"github.com/openstack-k8s-operators/lib-common/modules/common/env"
"github.com/openstack-k8s-operators/lib-common/modules/common/util"

corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
Expand Down Expand Up @@ -49,12 +47,3 @@ func GetPodAffinity(componentName string) *corev1.Affinity {
corev1.LabelHostname,
)
}

// AddNodeSelectorHash - Adds a hash of a nodeSelector map to the envVars.
// The hash is stored under the "NodeSelectorHash" key so that a change to
// the nodeSelector alters the overall hash of hashes and triggers a pod
// restart. Returns a non-nil error if the nodeSelector cannot be hashed,
// in which case envVars is left unmodified.
func AddNodeSelectorHash(nodeSelector map[string]string, envVars *map[string]env.Setter) error {
	hash, err := util.ObjectHash(nodeSelector)
	// Only record the hash on success; the original inverted check
	// (err != nil) stored an empty hash exactly when hashing failed.
	if err == nil {
		(*envVars)["NodeSelectorHash"] = env.SetValue(hash)
	}
	return err
}

0 comments on commit 1bc27a3

Please sign in to comment.