From 63da8cad58cd8d807000878f17e72580b0ede3a0 Mon Sep 17 00:00:00 2001 From: Matthew Cary Date: Wed, 3 Mar 2021 12:55:48 -0800 Subject: [PATCH 1/5] Add StatefulSetAutoDeletePVC feature gate Signed-off-by: veophi --- pkg/features/kruise_features.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pkg/features/kruise_features.go b/pkg/features/kruise_features.go index 2c2437fde1..70c1695973 100644 --- a/pkg/features/kruise_features.go +++ b/pkg/features/kruise_features.go @@ -79,6 +79,10 @@ const ( // InPlaceUpdateEnvFromMetadata enables Kruise to in-place update a container in Pod // when its env from labels/annotations changed and pod is in-place updating. InPlaceUpdateEnvFromMetadata featuregate.Feature = "InPlaceUpdateEnvFromMetadata" + + // Enables policies controlling deletion of PVCs created by a StatefulSet. + StatefulSetAutoDeletePVC featuregate.Feature = "StatefulSetAutoDeletePVC" + ) var defaultFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{ @@ -96,6 +100,7 @@ var defaultFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{ PodUnavailableBudgetUpdateGate: {Default: false, PreRelease: featuregate.Alpha}, TemplateNoDefaults: {Default: false, PreRelease: featuregate.Alpha}, InPlaceUpdateEnvFromMetadata: {Default: false, PreRelease: featuregate.Alpha}, + StatefulSetAutoDeletePVC: {Default: false, PreRelease: featuregate.Alpha}, } func init() { From e57166c82fca8b17239233339e3a3b4bf44d68a5 Mon Sep 17 00:00:00 2001 From: veophi Date: Thu, 6 Jan 2022 10:55:59 +0800 Subject: [PATCH 2/5] statefulset PersistentVolumeClaimDeletePolicy api change Signed-off-by: veophi --- apis/apps/defaults/v1beta1.go | 14 +++++++ apis/apps/v1beta1/statefulset_types.go | 40 +++++++++++++++++++ apis/apps/v1beta1/zz_generated.deepcopy.go | 20 ++++++++++ .../bases/apps.kruise.io_statefulsets.yaml | 21 ++++++++++ pkg/features/kruise_features.go | 1 - .../validating/statefulset_validation.go | 28 ++++++++++++- 6 files changed, 122 insertions(+), 2 deletions(-) diff --git a/apis/apps/defaults/v1beta1.go b/apis/apps/defaults/v1beta1.go index 14f819f219..26794c4fb5 100644 --- a/apis/apps/defaults/v1beta1.go +++ b/apis/apps/defaults/v1beta1.go @@ -18,6 +18,8 @@ package defaults import ( "github.com/openkruise/kruise/apis/apps/v1beta1" + "github.com/openkruise/kruise/pkg/features" + utilfeature "github.com/openkruise/kruise/pkg/util/feature" appsv1 "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/util/intstr" v1 "k8s.io/kubernetes/pkg/apis/core/v1" @@ -56,6 +58,18 @@ func SetDefaultsStatefulSet(obj *v1beta1.StatefulSet, injectTemplateDefaults boo } } + if utilfeature.DefaultFeatureGate.Enabled(features.StatefulSetAutoDeletePVC) { + if obj.Spec.PersistentVolumeClaimRetentionPolicy == nil { + obj.Spec.PersistentVolumeClaimRetentionPolicy = &v1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{} + } + if len(obj.Spec.PersistentVolumeClaimRetentionPolicy.WhenDeleted) == 0 { + obj.Spec.PersistentVolumeClaimRetentionPolicy.WhenDeleted = v1beta1.RetainPersistentVolumeClaimRetentionPolicyType + } + if len(obj.Spec.PersistentVolumeClaimRetentionPolicy.WhenScaled) == 0 { + obj.Spec.PersistentVolumeClaimRetentionPolicy.WhenScaled = v1beta1.RetainPersistentVolumeClaimRetentionPolicyType + } + } + if obj.Spec.Replicas == nil { obj.Spec.Replicas = utilpointer.Int32Ptr(1) } diff --git a/apis/apps/v1beta1/statefulset_types.go b/apis/apps/v1beta1/statefulset_types.go index fda75b395c..b3aea90f10 100644 --- a/apis/apps/v1beta1/statefulset_types.go +++ b/apis/apps/v1beta1/statefulset_types.go @@ -109,6 
+109,40 @@ const ( InPlaceOnlyPodUpdateStrategyType PodUpdateStrategyType = "InPlaceOnly" ) +// PersistentVolumeClaimRetentionPolicyType is a string enumeration of the policies that will determine +// when volumes from the VolumeClaimTemplates will be deleted when the controlling StatefulSet is +// deleted or scaled down. +type PersistentVolumeClaimRetentionPolicyType string + +const ( + // RetainPersistentVolumeClaimRetentionPolicyType is the default + // PersistentVolumeClaimRetentionPolicy and specifies that + // PersistentVolumeClaims associated with StatefulSet VolumeClaimTemplates + // will not be deleted. + RetainPersistentVolumeClaimRetentionPolicyType PersistentVolumeClaimRetentionPolicyType = "Retain" + // DeletePersistentVolumeClaimRetentionPolicyType specifies that + // PersistentVolumeClaims associated with StatefulSet VolumeClaimTemplates + // will be deleted in the scenario specified in + // StatefulSetPersistentVolumeClaimRetentionPolicy. + DeletePersistentVolumeClaimRetentionPolicyType PersistentVolumeClaimRetentionPolicyType = "Delete" +) + +// StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs +// created from the StatefulSet VolumeClaimTemplates. +type StatefulSetPersistentVolumeClaimRetentionPolicy struct { + // WhenDeleted specifies what happens to PVCs created from StatefulSet + // VolumeClaimTemplates when the StatefulSet is deleted. The default policy + // of `Retain` causes PVCs to not be affected by StatefulSet deletion. The + // `Delete` policy causes those PVCs to be deleted. + WhenDeleted PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty"` + // WhenScaled specifies what happens to PVCs created from StatefulSet + // VolumeClaimTemplates when the StatefulSet is scaled down. The default + // policy of `Retain` causes PVCs to not be affected by a scaledown. The + // `Delete` policy causes the associated PVCs for any excess pods above + // the replica count to be deleted. + WhenScaled PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"` +} + // StatefulSetSpec defines the desired state of StatefulSet type StatefulSetSpec struct { // replicas is the desired number of replicas of the given Template. @@ -188,6 +222,12 @@ type StatefulSetSpec struct { // scaleStrategy indicates the StatefulSetScaleStrategy that will be // employed to scale Pods in the StatefulSet. ScaleStrategy *StatefulSetScaleStrategy `json:"scaleStrategy,omitempty"` + + // PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from + // the StatefulSet VolumeClaimTemplates. This requires the + // StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha. + // +optional + PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicy `json:"persistentVolumeClaimRetentionPolicy,omitempty"` } // StatefulSetScaleStrategy defines strategies for pods scale. diff --git a/apis/apps/v1beta1/zz_generated.deepcopy.go b/apis/apps/v1beta1/zz_generated.deepcopy.go index 3f7339bc91..58ca61bc70 100644 --- a/apis/apps/v1beta1/zz_generated.deepcopy.go +++ b/apis/apps/v1beta1/zz_generated.deepcopy.go @@ -129,6 +129,21 @@ func (in *StatefulSetList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatefulSetPersistentVolumeClaimRetentionPolicy) DeepCopyInto(out *StatefulSetPersistentVolumeClaimRetentionPolicy) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetPersistentVolumeClaimRetentionPolicy. +func (in *StatefulSetPersistentVolumeClaimRetentionPolicy) DeepCopy() *StatefulSetPersistentVolumeClaimRetentionPolicy { + if in == nil { + return nil + } + out := new(StatefulSetPersistentVolumeClaimRetentionPolicy) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StatefulSetScaleStrategy) DeepCopyInto(out *StatefulSetScaleStrategy) { *out = *in @@ -191,6 +206,11 @@ func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) { *out = new(StatefulSetScaleStrategy) (*in).DeepCopyInto(*out) } + if in.PersistentVolumeClaimRetentionPolicy != nil { + in, out := &in.PersistentVolumeClaimRetentionPolicy, &out.PersistentVolumeClaimRetentionPolicy + *out = new(StatefulSetPersistentVolumeClaimRetentionPolicy) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetSpec. diff --git a/config/crd/bases/apps.kruise.io_statefulsets.yaml b/config/crd/bases/apps.kruise.io_statefulsets.yaml index b6b80775bb..72efc0d4be 100644 --- a/config/crd/bases/apps.kruise.io_statefulsets.yaml +++ b/config/crd/bases/apps.kruise.io_statefulsets.yaml @@ -528,6 +528,27 @@ spec: type: object type: object type: object + persistentVolumeClaimRetentionPolicy: + description: PersistentVolumeClaimRetentionPolicy describes the policy + used for PVCs created from the StatefulSet VolumeClaimTemplates. + This requires the StatefulSetAutoDeletePVC feature gate to be enabled, + which is alpha. + properties: + whenDeleted: + description: WhenDeleted specifies what happens to PVCs created + from StatefulSet VolumeClaimTemplates when the StatefulSet is + deleted. The default policy of `Retain` causes PVCs to not be + affected by StatefulSet deletion. The `Delete` policy causes + those PVCs to be deleted. + type: string + whenScaled: + description: WhenScaled specifies what happens to PVCs created + from StatefulSet VolumeClaimTemplates when the StatefulSet is + scaled down. The default policy of `Retain` causes PVCs to not + be affected by a scaledown. The `Delete` policy causes the associated + PVCs for any excess pods above the replica count to be deleted. + type: string + type: object podManagementPolicy: description: podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling diff --git a/pkg/features/kruise_features.go b/pkg/features/kruise_features.go index 70c1695973..ddce52f2a8 100644 --- a/pkg/features/kruise_features.go +++ b/pkg/features/kruise_features.go @@ -82,7 +82,6 @@ const ( // Enables policies controlling deletion of PVCs created by a StatefulSet. 
StatefulSetAutoDeletePVC featuregate.Feature = "StatefulSetAutoDeletePVC" - ) var defaultFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{ diff --git a/pkg/webhook/statefulset/validating/statefulset_validation.go b/pkg/webhook/statefulset/validating/statefulset_validation.go index 1d1495ce89..80403d1920 100644 --- a/pkg/webhook/statefulset/validating/statefulset_validation.go +++ b/pkg/webhook/statefulset/validating/statefulset_validation.go @@ -67,6 +67,26 @@ func validateScaleStrategy(spec *appsv1beta1.StatefulSetSpec, fldPath *field.Pat return allErrs } +func ValidatePersistentVolumeClaimRetentionPolicyType(policy appsv1beta1.PersistentVolumeClaimRetentionPolicyType, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + switch policy { + case appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType: + case appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType: + default: + allErrs = append(allErrs, field.NotSupported(fldPath, policy, []string{string(appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType), string(appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType)})) + } + return allErrs +} + +func ValidatePersistentVolumeClaimRetentionPolicy(policy *appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + if policy != nil { + allErrs = append(allErrs, ValidatePersistentVolumeClaimRetentionPolicyType(policy.WhenDeleted, fldPath.Child("whenDeleted"))...) + allErrs = append(allErrs, ValidatePersistentVolumeClaimRetentionPolicyType(policy.WhenScaled, fldPath.Child("whenScaled"))...) + } + return allErrs +} + func validateOnDeleteStatefulSetStrategyType(spec *appsv1beta1.StatefulSetSpec, fldPath *field.Path) field.ErrorList { var allErrs field.ErrorList @@ -208,6 +228,7 @@ func validateStatefulSetSpec(spec *appsv1beta1.StatefulSetSpec, fldPath *field.P allErrs = append(allErrs, validateReserveOrdinals(spec, fldPath)...) allErrs = append(allErrs, validateScaleStrategy(spec, fldPath)...) allErrs = append(allErrs, validateUpdateStrategyType(spec, fldPath)...) + allErrs = append(allErrs, ValidatePersistentVolumeClaimRetentionPolicy(spec.PersistentVolumeClaimRetentionPolicy, fldPath.Child("persistentVolumeClaimRetentionPolicy"))...) allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.Replicas), fldPath.Child("replicas"))...) 
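Taken together, the defaulting in v1beta1.go and the webhook validation above pin each field to exactly `Retain` or `Delete`. A minimal sketch (not part of the diff) of how a client of the v1beta1 API might opt in once the alpha StatefulSetAutoDeletePVC gate is enabled; the import path is the real Kruise module, but the program around the snippet is assumed:

```go
package main

import (
	"fmt"

	appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1"
)

func main() {
	// Keep PVCs across scale-down (data survives if the set scales back up),
	// but let them be garbage-collected when the StatefulSet itself is deleted.
	set := &appsv1beta1.StatefulSet{}
	set.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{
		WhenScaled:  appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType, // "Retain"
		WhenDeleted: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType, // "Delete"
	}
	// Any other value is rejected by ValidatePersistentVolumeClaimRetentionPolicyType
	// above with a field.NotSupported error.
	fmt.Println(set.Spec.PersistentVolumeClaimRetentionPolicy.WhenScaled,
		set.Spec.PersistentVolumeClaimRetentionPolicy.WhenDeleted)
}
```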
@@ -291,6 +312,9 @@ func ValidateStatefulSetUpdate(statefulSet, oldStatefulSet *appsv1beta1.Stateful restoreStrategy := statefulSet.Spec.UpdateStrategy statefulSet.Spec.UpdateStrategy = oldStatefulSet.Spec.UpdateStrategy + restorePersistentVolumeClaimRetentionPolicy := statefulSet.Spec.PersistentVolumeClaimRetentionPolicy + statefulSet.Spec.PersistentVolumeClaimRetentionPolicy = oldStatefulSet.Spec.PersistentVolumeClaimRetentionPolicy + restoreScaleStrategy := statefulSet.Spec.ScaleStrategy statefulSet.Spec.ScaleStrategy = oldStatefulSet.Spec.ScaleStrategy @@ -300,15 +324,17 @@ func ValidateStatefulSetUpdate(statefulSet, oldStatefulSet *appsv1beta1.Stateful statefulSet.Spec.RevisionHistoryLimit = oldStatefulSet.Spec.RevisionHistoryLimit if !apiequality.Semantic.DeepEqual(statefulSet.Spec, oldStatefulSet.Spec) { - allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "updates to statefulset spec for fields other than 'replicas', 'template', 'reserveOrdinals', 'lifecycle', 'revisionHistoryLimit' and 'updateStrategy' are forbidden")) + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "updates to statefulset spec for fields other than 'replicas', 'template', 'reserveOrdinals', 'lifecycle', 'revisionHistoryLimit', 'persistentVolumeClaimRetentionPolicy' and 'updateStrategy' are forbidden")) } statefulSet.Spec.Replicas = restoreReplicas statefulSet.Spec.Template = restoreTemplate statefulSet.Spec.UpdateStrategy = restoreStrategy statefulSet.Spec.ScaleStrategy = restoreScaleStrategy statefulSet.Spec.ReserveOrdinals = restoreReserveOrdinals + statefulSet.Spec.PersistentVolumeClaimRetentionPolicy = restorePersistentVolumeClaimRetentionPolicy allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*statefulSet.Spec.Replicas), field.NewPath("spec", "replicas"))...) + allErrs = append(allErrs, ValidatePersistentVolumeClaimRetentionPolicy(statefulSet.Spec.PersistentVolumeClaimRetentionPolicy, field.NewPath("spec", "persistentVolumeClaimRetentionPolicy"))...) return allErrs } From 39b618436fa4a21f48a13220ec29d461830e3113 Mon Sep 17 00:00:00 2001 From: veophi Date: Fri, 7 Jan 2022 10:27:09 +0800 Subject: [PATCH 3/5] controller change for statefulset auto-delete (implementation) Signed-off-by: veophi --- .../statefulset/stateful_pod_control.go | 230 ++++++++++++++---- .../statefulset/stateful_set_control.go | 90 +++++-- .../statefulset/stateful_set_utils.go | 174 +++++++++++++ .../statefulset/statefulset_controller.go | 4 +- 4 files changed, 426 insertions(+), 72 deletions(-) diff --git a/pkg/controller/statefulset/stateful_pod_control.go b/pkg/controller/statefulset/stateful_pod_control.go index 47630c3914..fa5ca3e711 100644 --- a/pkg/controller/statefulset/stateful_pod_control.go +++ b/pkg/controller/statefulset/stateful_pod_control.go @@ -24,6 +24,8 @@ import ( appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1" kruiseappslisters "github.com/openkruise/kruise/pkg/client/listers/apps/v1beta1" + "github.com/openkruise/kruise/pkg/features" + utilfeature "github.com/openkruise/kruise/pkg/util/feature" v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -33,64 +35,109 @@ import ( corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/retry" + "k8s.io/klog/v2" ) -// StatefulPodControlInterface defines the interface that StatefulSetController uses to create, update, and delete Pods, -// and to update the Status of a StatefulSet. 
It follows the design paradigms used for PodControl, but its -// implementation provides for PVC creation, ordered Pod creation, ordered Pod termination, and Pod identity enforcement. -// Like controller.PodControlInterface, it is implemented as an interface to provide for testing fakes. -type StatefulPodControlInterface interface { - // CreateStatefulPod create a Pod in a StatefulSet. Any PVCs necessary for the Pod are created prior to creating - // the Pod. If the returned error is nil the Pod and its PVCs have been created. - CreateStatefulPod(set *appsv1beta1.StatefulSet, pod *v1.Pod) error - // UpdateStatefulPod Updates a Pod in a StatefulSet. If the Pod already has the correct identity and stable - // storage this method is a no-op. If the Pod must be mutated to conform to the Set, it is mutated and updated. - // pod is an in-out parameter, and any updates made to the pod are reflected as mutations to this parameter. If - // the create is successful, the returned error is nil. - UpdateStatefulPod(set *appsv1beta1.StatefulSet, pod *v1.Pod) error - // DeleteStatefulPod deletes a Pod in a StatefulSet. The pods PVCs are not deleted. If the delete is successful, - // the returned error is nil. - DeleteStatefulPod(set *appsv1beta1.StatefulSet, pod *v1.Pod) error -} - -// NewRealStatefulPodControl returns a new realStatefulPodControl -func NewRealStatefulPodControl( +// StatefulPodControlObjectManager abstracts the manipulation of Pods and PVCs. The real controller implements this +// with a clientset for writes and listers for reads; for tests we provide stubs. +type StatefulPodControlObjectManager interface { + CreatePod(pod *v1.Pod) error + GetPod(namespace, podName string) (*v1.Pod, error) + UpdatePod(pod *v1.Pod) error + DeletePod(pod *v1.Pod) error + CreateClaim(claim *v1.PersistentVolumeClaim) error + GetClaim(namespace, claimName string) (*v1.PersistentVolumeClaim, error) + UpdateClaim(claim *v1.PersistentVolumeClaim) error +} + +// StatefulPodControl defines the type that StatefulSetController uses to create, update, and delete Pods. +// Manipulation of objects is provided through objectMgr, which allows the k8s API to be mocked out for testing. +type StatefulPodControl struct { + objectMgr StatefulPodControlObjectManager + recorder record.EventRecorder +} + +// NewStatefulPodControl constructs a StatefulPodControl using a realStatefulPodControlObjectManager with the given +// clientset, listers and EventRecorder. +func NewStatefulPodControl( client clientset.Interface, setLister kruiseappslisters.StatefulSetLister, podLister corelisters.PodLister, - pvcLister corelisters.PersistentVolumeClaimLister, + claimLister corelisters.PersistentVolumeClaimLister, recorder record.EventRecorder, -) StatefulPodControlInterface { - return &realStatefulPodControl{client, setLister, podLister, pvcLister, recorder} +) *StatefulPodControl { + return &StatefulPodControl{&realStatefulPodControlObjectManager{client, setLister, podLister, claimLister}, recorder} } -// realStatefulPodControl implements StatefulPodControlInterface using a clientset.Interface to communicate with the -// API server. The struct is package private as the internal details are irrelevant to importing packages.
-type realStatefulPodControl struct { - client clientset.Interface - setLister kruiseappslisters.StatefulSetLister - podLister corelisters.PodLister - pvcLister corelisters.PersistentVolumeClaimLister - recorder record.EventRecorder +// NewStatefulPodControlFromManager creates a StatefulPodControl using the given StatefulPodControlObjectManager and recorder. +func NewStatefulPodControlFromManager(om StatefulPodControlObjectManager, recorder record.EventRecorder) *StatefulPodControl { + return &StatefulPodControl{om, recorder} +} + +// realStatefulPodControlObjectManager uses a clientset.Interface and listers. +type realStatefulPodControlObjectManager struct { + client clientset.Interface + setLister kruiseappslisters.StatefulSetLister + podLister corelisters.PodLister + claimLister corelisters.PersistentVolumeClaimLister +} + +func (om *realStatefulPodControlObjectManager) CreatePod(pod *v1.Pod) error { + _, err := om.client.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{}) + return err +} + +func (om *realStatefulPodControlObjectManager) GetPod(namespace, podName string) (*v1.Pod, error) { + return om.podLister.Pods(namespace).Get(podName) +} + +func (om *realStatefulPodControlObjectManager) UpdatePod(pod *v1.Pod) error { + _, err := om.client.CoreV1().Pods(pod.Namespace).Update(context.TODO(), pod, metav1.UpdateOptions{}) + return err } -func (spc *realStatefulPodControl) CreateStatefulPod(set *appsv1beta1.StatefulSet, pod *v1.Pod) error { +func (om *realStatefulPodControlObjectManager) DeletePod(pod *v1.Pod) error { + return om.client.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{}) +} + +func (om *realStatefulPodControlObjectManager) CreateClaim(claim *v1.PersistentVolumeClaim) error { + _, err := om.client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(context.TODO(), claim, metav1.CreateOptions{}) + return err +} + +func (om *realStatefulPodControlObjectManager) GetClaim(namespace, claimName string) (*v1.PersistentVolumeClaim, error) { + return om.claimLister.PersistentVolumeClaims(namespace).Get(claimName) +} + +func (om *realStatefulPodControlObjectManager) UpdateClaim(claim *v1.PersistentVolumeClaim) error { + _, err := om.client.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(context.TODO(), claim, metav1.UpdateOptions{}) + return err +} + +func (spc *StatefulPodControl) CreateStatefulPod(set *appsv1beta1.StatefulSet, pod *v1.Pod) error { // Create the Pod's PVCs prior to creating the Pod if err := spc.createPersistentVolumeClaims(set, pod); err != nil { spc.recordPodEvent("create", set, pod, err) return err } // If we created the PVCs attempt to create the Pod - _, err := spc.client.CoreV1().Pods(set.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{}) + err := spc.objectMgr.CreatePod(pod) // sink already exists errors if apierrors.IsAlreadyExists(err) { return err } + if utilfeature.DefaultFeatureGate.Enabled(features.StatefulSetAutoDeletePVC) { + // Set PVC policy as much as is possible at this point. 
+ if err := spc.UpdatePodClaimForRetentionPolicy(set, pod); err != nil { + spc.recordPodEvent("update", set, pod, err) + return err + } + } spc.recordPodEvent("create", set, pod, err) return err } -func (spc *realStatefulPodControl) UpdateStatefulPod(set *appsv1beta1.StatefulSet, pod *v1.Pod) error { +func (spc *StatefulPodControl) UpdateStatefulPod(set *appsv1beta1.StatefulSet, pod *v1.Pod) error { attemptedUpdate := false err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { // assume the Pod is consistent @@ -110,6 +157,21 @@ func (spc *realStatefulPodControl) UpdateStatefulPod(set *appsv1beta1.StatefulSe return err } } + if utilfeature.DefaultFeatureGate.Enabled(features.StatefulSetAutoDeletePVC) { + // if the Pod's PVCs are not consistent with the StatefulSet's PVC deletion policy, update the PVC + // and dirty the pod. + if match, err := spc.ClaimsMatchRetentionPolicy(set, pod); err != nil { + spc.recordPodEvent("update", set, pod, err) + return err + } else if !match { + if err := spc.UpdatePodClaimForRetentionPolicy(set, pod); err != nil { + spc.recordPodEvent("update", set, pod, err) + return err + } + consistent = false + } + } + // if the Pod is not dirty, do nothing if consistent { return nil @@ -117,16 +179,17 @@ func (spc *realStatefulPodControl) UpdateStatefulPod(set *appsv1beta1.StatefulSe attemptedUpdate = true // commit the update, retrying on conflicts - _, updateErr := spc.client.CoreV1().Pods(set.Namespace).Update(context.TODO(), pod, metav1.UpdateOptions{}) + + updateErr := spc.objectMgr.UpdatePod(pod) if updateErr == nil { return nil } - if updated, err := spc.podLister.Pods(set.Namespace).Get(pod.Name); err == nil { + if updated, err := spc.objectMgr.GetPod(set.Namespace, pod.Name); err == nil { // make a copy so we don't mutate the shared cache pod = updated.DeepCopy() } else { - utilruntime.HandleError(fmt.Errorf("error getting updated Pod %s/%s from lister: %v", set.Namespace, pod.Name, err)) + utilruntime.HandleError(fmt.Errorf("error getting updated Pod %s/%s: %w", set.Namespace, pod.Name, err)) } return updateErr @@ -137,15 +200,92 @@ func (spc *realStatefulPodControl) UpdateStatefulPod(set *appsv1beta1.StatefulSe return err } -func (spc *realStatefulPodControl) DeleteStatefulPod(set *appsv1beta1.StatefulSet, pod *v1.Pod) error { - err := spc.client.CoreV1().Pods(set.Namespace).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{}) +func (spc *StatefulPodControl) DeleteStatefulPod(set *appsv1beta1.StatefulSet, pod *v1.Pod) error { + err := spc.objectMgr.DeletePod(pod) spc.recordPodEvent("delete", set, pod, err) return err } +// ClaimsMatchRetentionPolicy returns false if the PVCs for pod are not consistent with set's PVC deletion policy. +// An error is returned if something is not consistent. This is expected if the pod is being otherwise updated, +// but a problem otherwise (see usage of this method in UpdateStatefulPod). 
+func (spc *StatefulPodControl) ClaimsMatchRetentionPolicy(set *appsv1beta1.StatefulSet, pod *v1.Pod) (bool, error) { + ordinal := getOrdinal(pod) + templates := set.Spec.VolumeClaimTemplates + for i := range templates { + claimName := getPersistentVolumeClaimName(set, &templates[i], ordinal) + claim, err := spc.objectMgr.GetClaim(set.Namespace, claimName) + switch { + case apierrors.IsNotFound(err): + klog.V(4).Infof("Expected claim %s missing, continuing to pick up in next iteration", claimName) + case err != nil: + return false, fmt.Errorf("could not retrieve claim %s for %s when checking PVC deletion policy: %w", claimName, pod.Name, err) + default: + if !claimOwnerMatchesSetAndPod(claim, set, pod) { + return false, nil + } + } + } + return true, nil +} + +// UpdatePodClaimForRetentionPolicy updates the PVCs used by pod to match the PVC deletion policy of set. +func (spc *StatefulPodControl) UpdatePodClaimForRetentionPolicy(set *appsv1beta1.StatefulSet, pod *v1.Pod) error { + ordinal := getOrdinal(pod) + templates := set.Spec.VolumeClaimTemplates + for i := range templates { + claimName := getPersistentVolumeClaimName(set, &templates[i], ordinal) + claim, err := spc.objectMgr.GetClaim(set.Namespace, claimName) + switch { + case apierrors.IsNotFound(err): + klog.V(4).Infof("Expected claim %s missing, continuing to pick up in next iteration", claimName) + case err != nil: + return fmt.Errorf("could not retrieve claim %s for %s when checking PVC deletion policy: %w", claimName, pod.Name, err) + default: + if !claimOwnerMatchesSetAndPod(claim, set, pod) { + needsUpdate := updateClaimOwnerRefForSetAndPod(claim, set, pod) + if needsUpdate { + err := spc.objectMgr.UpdateClaim(claim) + if err != nil { + return fmt.Errorf("could not update claim %s for delete policy ownerRefs: %w", claimName, err) + } + } + } + } + } + return nil +} + +// PodClaimIsStale returns true for a stale PVC that should block pod creation. If the scaling +// policy is deletion, and a PVC has an ownerRef that does not match the pod, the PVC is stale. This +// includes pods that have not yet been created and so have no UID. +func (spc *StatefulPodControl) PodClaimIsStale(set *appsv1beta1.StatefulSet, pod *v1.Pod) (bool, error) { + policy := getPersistentVolumeClaimRetentionPolicy(set) + if policy.WhenScaled == appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType { + // PVCs are meant to be reused and so can't be stale. + return false, nil + } + for _, claim := range getPersistentVolumeClaims(set, pod) { + pvc, err := spc.objectMgr.GetClaim(claim.Namespace, claim.Name) + switch { + case apierrors.IsNotFound(err): + // If the claim doesn't exist yet, it can't be stale. + continue + case err != nil: + return false, err + case err == nil: + // A claim is stale if it doesn't match the pod's UID, including if the pod has no UID. + if hasStaleOwnerRef(pvc, pod) { + return true, nil + } + } + } + return false, nil +} + // recordPodEvent records an event for verb applied to a Pod in a StatefulSet. If err is nil the generated event will // have a reason of v1.EventTypeNormal. If err is not nil the generated event will have a reason of v1.EventTypeWarning.
-func (spc *realStatefulPodControl) recordPodEvent(verb string, set *appsv1beta1.StatefulSet, pod *v1.Pod, err error) { +func (spc *StatefulPodControl) recordPodEvent(verb string, set *appsv1beta1.StatefulSet, pod *v1.Pod, err error) { if err == nil { reason := fmt.Sprintf("Successful%s", strings.Title(verb)) message := fmt.Sprintf("%s Pod %s in StatefulSet %s successful", @@ -162,7 +302,7 @@ func (spc *realStatefulPodControl) recordPodEvent(verb string, set *appsv1beta1. // recordClaimEvent records an event for verb applied to the PersistentVolumeClaim of a Pod in a StatefulSet. If err is // nil the generated event will have a reason of v1.EventTypeNormal. If err is not nil the generated event will have a // reason of v1.EventTypeWarning. -func (spc *realStatefulPodControl) recordClaimEvent(verb string, set *appsv1beta1.StatefulSet, pod *v1.Pod, claim *v1.PersistentVolumeClaim, err error) { +func (spc *StatefulPodControl) recordClaimEvent(verb string, set *appsv1beta1.StatefulSet, pod *v1.Pod, claim *v1.PersistentVolumeClaim, err error) { if err == nil { reason := fmt.Sprintf("Successful%s", strings.Title(verb)) message := fmt.Sprintf("%s Claim %s Pod %s in StatefulSet %s success", @@ -180,13 +320,13 @@ func (spc *realStatefulPodControl) recordClaimEvent(verb string, set *appsv1beta // set. If all of the claims for Pod are successfully created, the returned error is nil. If creation fails, this method // may be called again until no error is returned, indicating the PersistentVolumeClaims for pod are consistent with // set's Spec. -func (spc *realStatefulPodControl) createPersistentVolumeClaims(set *appsv1beta1.StatefulSet, pod *v1.Pod) error { +func (spc *StatefulPodControl) createPersistentVolumeClaims(set *appsv1beta1.StatefulSet, pod *v1.Pod) error { var errs []error for _, claim := range getPersistentVolumeClaims(set, pod) { - pvc, err := spc.pvcLister.PersistentVolumeClaims(claim.Namespace).Get(claim.Name) + pvc, err := spc.objectMgr.GetClaim(claim.Namespace, claim.Name) switch { case apierrors.IsNotFound(err): - _, err := spc.client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(context.TODO(), &claim, metav1.CreateOptions{}) + err := spc.objectMgr.CreateClaim(&claim) if err != nil { errs = append(errs, fmt.Errorf("failed to create PVC %s: %s", claim.Name, err)) } @@ -203,5 +343,3 @@ func (spc *realStatefulPodControl) createPersistentVolumeClaims(set *appsv1beta1 } return errorutils.NewAggregate(errs) } - -var _ StatefulPodControlInterface = &realStatefulPodControl{} diff --git a/pkg/controller/statefulset/stateful_set_control.go b/pkg/controller/statefulset/stateful_set_control.go index fc4614fbf2..aaeec852a4 100644 --- a/pkg/controller/statefulset/stateful_set_control.go +++ b/pkg/controller/statefulset/stateful_set_control.go @@ -19,6 +19,7 @@ package statefulset import ( "fmt" + utilerrors "k8s.io/apimachinery/pkg/util/errors" "math" "sort" "time" @@ -36,14 +37,16 @@ import ( appspub "github.com/openkruise/kruise/apis/apps/pub" appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1" appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1" + "github.com/openkruise/kruise/pkg/features" + utilfeature "github.com/openkruise/kruise/pkg/util/feature" imagejobutilfunc "github.com/openkruise/kruise/pkg/util/imagejob/utilfunction" "github.com/openkruise/kruise/pkg/util/inplaceupdate" "github.com/openkruise/kruise/pkg/util/lifecycle" ) -// ControlInterface implements the control logic for updating StatefulSets and their children Pods. 
It is implemented +// StatefulSetControlInterface implements the control logic for updating StatefulSets and their children Pods. It is implemented // as an interface to allow for extensions that provide different semantics. Currently, there is only one implementation. -type ControlInterface interface { +type StatefulSetControlInterface interface { // UpdateStatefulSet implements the control logic for Pod creation, update, and deletion, and // persistent volume creation, update, and deletion. // If an implementation returns a non-nil error, the invocation will be retried using a rate-limited strategy. @@ -64,12 +67,12 @@ type ControlInterface interface { // to update the status of StatefulSets. You should use an instance returned from NewRealStatefulPodControl() for any // scenario other than testing. func NewDefaultStatefulSetControl( - podControl StatefulPodControlInterface, + podControl *StatefulPodControl, inplaceControl inplaceupdate.Interface, lifecycleControl lifecycle.Interface, statusUpdater StatusUpdaterInterface, controllerHistory history.Interface, - recorder record.EventRecorder) ControlInterface { + recorder record.EventRecorder) StatefulSetControlInterface { return &defaultStatefulSetControl{ podControl, statusUpdater, @@ -81,10 +84,10 @@ func NewDefaultStatefulSetControl( } // defaultStatefulSetControl implements ControlInterface -var _ ControlInterface = &defaultStatefulSetControl{} +var _ StatefulSetControlInterface = &defaultStatefulSetControl{} type defaultStatefulSetControl struct { - podControl StatefulPodControlInterface + podControl *StatefulPodControl statusUpdater StatusUpdaterInterface controllerHistory history.Interface recorder record.EventRecorder @@ -99,7 +102,6 @@ type defaultStatefulSetControl struct { // in no particular order. Clients using the burst strategy should be careful to ensure they // understand the consistency implications of having unpredictable numbers of pods available. 
func (ssc *defaultStatefulSetControl) UpdateStatefulSet(set *appsv1beta1.StatefulSet, pods []*v1.Pod) error { - // list all revisions and sort them revisions, err := ssc.ListRevisions(set) if err != nil { @@ -107,10 +109,22 @@ func (ssc *defaultStatefulSetControl) UpdateStatefulSet(set *appsv1beta1.Statefu } history.SortControllerRevisions(revisions) + currentRevision, updateRevision, err := ssc.performUpdate(set, pods, revisions) + if err != nil { + return utilerrors.NewAggregate([]error{err, ssc.truncateHistory(set, pods, revisions, currentRevision, updateRevision)}) + } + + // maintain the set's revision history limit + return ssc.truncateHistory(set, pods, revisions, currentRevision, updateRevision) +} + +func (ssc *defaultStatefulSetControl) performUpdate( + set *appsv1beta1.StatefulSet, pods []*v1.Pod, revisions []*apps.ControllerRevision) (*apps.ControllerRevision, *apps.ControllerRevision, error) { + var currentStatus *appsv1beta1.StatefulSetStatus // get the current, and update revisions currentRevision, updateRevision, collisionCount, err := ssc.getStatefulSetRevisions(set, revisions) if err != nil { - return err + return currentRevision, updateRevision, err } // Refresh update expectations @@ -119,33 +133,31 @@ func (ssc *defaultStatefulSetControl) UpdateStatefulSet(set *appsv1beta1.Statefu } // perform the main update function and get the status - status, getStatusErr := ssc.updateStatefulSet(set, currentRevision, updateRevision, collisionCount, pods, revisions) - updateStatusErr := ssc.updateStatefulSetStatus(set, status) + currentStatus, getStatusErr := ssc.updateStatefulSet(set, currentRevision, updateRevision, collisionCount, pods, revisions) + updateStatusErr := ssc.updateStatefulSetStatus(set, currentStatus) if getStatusErr != nil { - return getStatusErr + return currentRevision, updateRevision, getStatusErr } if updateStatusErr != nil { - return updateStatusErr + return currentRevision, updateRevision, updateStatusErr } - klog.V(4).Infof("StatefulSet %s/%s pod status replicas=%d ready=%d available=%d current=%d updated=%d", + klog.V(4).Infof("StatefulSet %s/%s pod status replicas=%d ready=%d current=%d updated=%d", set.Namespace, set.Name, - status.Replicas, - status.ReadyReplicas, - status.AvailableReplicas, - status.CurrentReplicas, - status.UpdatedReplicas) + currentStatus.Replicas, + currentStatus.ReadyReplicas, + currentStatus.CurrentReplicas, + currentStatus.UpdatedReplicas) klog.V(4).Infof("StatefulSet %s/%s revisions current=%s update=%s", set.Namespace, set.Name, - status.CurrentRevision, - status.UpdateRevision) + currentStatus.CurrentRevision, + currentStatus.UpdateRevision) - // maintain the set's revision history limit - return ssc.truncateHistory(set, pods, revisions, currentRevision, updateRevision) + return currentRevision, updateRevision, nil } func (ssc *defaultStatefulSetControl) ListRevisions(set *appsv1beta1.StatefulSet) ([]*apps.ControllerRevision, error) { @@ -500,6 +512,15 @@ func (ssc *defaultStatefulSetControl) updateStatefulSet( } // If we find a Pod that has not been created we create the Pod if !isCreated(replicas[i]) { + if utilfeature.DefaultFeatureGate.Enabled(features.StatefulSetAutoDeletePVC) { + if isStale, err := ssc.podControl.PodClaimIsStale(set, replicas[i]); err != nil { + return &status, err + } else if isStale { + // If a pod has a stale PVC, no more work can be done this round. 
+ return &status, err + } + } + + lifecycle.SetPodLifecycle(appspub.LifecycleStateNormal)(replicas[i]) if err := ssc.podControl.CreateStatefulPod(set, replicas[i]); err != nil { msg := fmt.Sprintf("StatefulPodControl failed to create Pod error: %s", err) @@ -514,7 +535,6 @@ func (ssc *defaultStatefulSetControl) updateStatefulSet( if getPodRevision(replicas[i]) == updateRevision.Name { status.UpdatedReplicas++ } - // if the set does not allow bursting, return immediately if monotonic || decreaseAndCheckMaxUnavailable(scaleMaxUnavailable) { return &status, nil } @@ -568,7 +588,16 @@ func (ssc *defaultStatefulSetControl) updateStatefulSet( } } // Enforce the StatefulSet invariants - if identityMatches(set, replicas[i]) && storageMatches(set, replicas[i]) { + retentionMatch := true + if utilfeature.DefaultFeatureGate.Enabled(features.StatefulSetAutoDeletePVC) { + var err error + retentionMatch, err = ssc.podControl.ClaimsMatchRetentionPolicy(updateSet, replicas[i]) + // An error is expected if the pod is not yet fully updated; in that case treat the claims as matching. + if err != nil { + retentionMatch = true + } + } + if identityMatches(set, replicas[i]) && storageMatches(set, replicas[i]) && retentionMatch { continue } // Make a deep copy so we don't mutate the shared cache @@ -581,6 +610,19 @@ func (ssc *defaultStatefulSetControl) updateStatefulSet( } } + if utilfeature.DefaultFeatureGate.Enabled(features.StatefulSetAutoDeletePVC) { + // Ensure ownerRefs are set correctly for the condemned pods. + for i := range condemned { + if matchPolicy, err := ssc.podControl.ClaimsMatchRetentionPolicy(updateSet, condemned[i]); err != nil { + return &status, err + } else if !matchPolicy { + if err := ssc.podControl.UpdatePodClaimForRetentionPolicy(updateSet, condemned[i]); err != nil { + return &status, err + } + } + } + } + // At this point, all of the current Replicas are Running and Ready, we can consider termination. // We will wait for all predecessors to be Running and Ready prior to attempting a deletion. // We will terminate Pods in a monotonically decreasing order over [len(pods),set.Spec.Replicas). diff --git a/pkg/controller/statefulset/stateful_set_utils.go b/pkg/controller/statefulset/stateful_set_utils.go index dd7a6515c9..6d80217737 100644 --- a/pkg/controller/statefulset/stateful_set_utils.go +++ b/pkg/controller/statefulset/stateful_set_utils.go @@ -32,6 +32,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/client-go/kubernetes/scheme" + "k8s.io/klog/v2" podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/history" @@ -124,6 +125,179 @@ func storageMatches(set *appsv1beta1.StatefulSet, pod *v1.Pod) bool { return true }
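The helpers added below encode the retention policy entirely in PVC ownerRefs: a ref to the StatefulSet means "garbage-collect the claim when the set is deleted", and a ref to a condemned pod (ordinal >= spec.replicas) means "garbage-collect it when that pod is removed on scale-down". A standalone sketch of the resulting truth table; `wantRefs` is a hypothetical name, not code from the patch:

```go
package main

import "fmt"

// wantRefs mirrors the invariant checked by claimOwnerMatchesSetAndPod below:
// which ownerRefs a claim should carry for each policy combination.
// "condemned" means the pod's ordinal is >= spec.replicas.
func wantRefs(whenScaled, whenDeleted string, condemned bool) (setRef, podRef bool) {
	switch {
	case whenScaled == "Retain" && whenDeleted == "Retain":
		return false, false // claims outlive both scale-down and set deletion
	case whenScaled == "Retain" && whenDeleted == "Delete":
		return true, false // claims die with the set, survive scale-down
	case whenScaled == "Delete" && whenDeleted == "Retain":
		return false, condemned // only condemned pods' claims are collected
	default: // Delete / Delete
		return !condemned, condemned
	}
}

func main() {
	for _, ws := range []string{"Retain", "Delete"} {
		for _, wd := range []string{"Retain", "Delete"} {
			for _, condemned := range []bool{false, true} {
				setRef, podRef := wantRefs(ws, wd, condemned)
				fmt.Printf("whenScaled=%-6s whenDeleted=%-6s condemned=%-5v -> setRef=%-5v podRef=%v\n",
					ws, wd, condemned, setRef, podRef)
			}
		}
	}
}
```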
+// getPersistentVolumeClaimRetentionPolicy returns the PVC retention policy for a StatefulSet, returning a Retain policy if the set policy is nil. +func getPersistentVolumeClaimRetentionPolicy(set *appsv1beta1.StatefulSet) appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy { + policy := appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{ + WhenDeleted: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType, + WhenScaled: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType, + } + if set.Spec.PersistentVolumeClaimRetentionPolicy != nil { + policy = *set.Spec.PersistentVolumeClaimRetentionPolicy + } + return policy +} + +// claimOwnerMatchesSetAndPod returns false if the ownerRefs of the claim are not set consistently with the +// PVC deletion policy for the StatefulSet. +func claimOwnerMatchesSetAndPod(claim *v1.PersistentVolumeClaim, set *appsv1beta1.StatefulSet, pod *v1.Pod) bool { + policy := getPersistentVolumeClaimRetentionPolicy(set) + const retain = appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType + const delete = appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType + switch { + default: + klog.Errorf("Unknown policy %v; treating as Retain", set.Spec.PersistentVolumeClaimRetentionPolicy) + fallthrough + case policy.WhenScaled == retain && policy.WhenDeleted == retain: + if hasOwnerRef(claim, set) || + hasOwnerRef(claim, pod) { + return false + } + case policy.WhenScaled == retain && policy.WhenDeleted == delete: + if !hasOwnerRef(claim, set) || + hasOwnerRef(claim, pod) { + return false + } + case policy.WhenScaled == delete && policy.WhenDeleted == retain: + if hasOwnerRef(claim, set) { + return false + } + podScaledDown := getOrdinal(pod) >= int(*set.Spec.Replicas) + if podScaledDown != hasOwnerRef(claim, pod) { + return false + } + case policy.WhenScaled == delete && policy.WhenDeleted == delete: + podScaledDown := getOrdinal(pod) >= int(*set.Spec.Replicas) + // If a pod is scaled down, there should be no set ref and a pod ref; + // if the pod is not scaled down it's the other way around. + if podScaledDown == hasOwnerRef(claim, set) { + return false + } + if podScaledDown != hasOwnerRef(claim, pod) { + return false + } + } + return true +} + +// updateClaimOwnerRefForSetAndPod updates the ownerRefs for the claim according to the deletion policy of +// the StatefulSet. Returns true if the claim was changed and should be updated and false otherwise. +func updateClaimOwnerRefForSetAndPod(claim *v1.PersistentVolumeClaim, set *appsv1beta1.StatefulSet, pod *v1.Pod) bool { + needsUpdate := false + // Sometimes the version and kind are not set in {pod,set}.TypeMeta. These are necessary for the ownerRef. + // This is the case both in real clusters and the unittests. + // TODO: there must be a better way to do this other than hardcoding the pod version?
+ updateMeta := func(tm *metav1.TypeMeta, kind string) { + if tm.APIVersion == "" { + if kind == "StatefulSet" { + tm.APIVersion = "apps/v1" + } else { + tm.APIVersion = "v1" + } + } + if tm.Kind == "" { + tm.Kind = kind + } + } + podMeta := pod.TypeMeta + updateMeta(&podMeta, "Pod") + setMeta := set.TypeMeta + updateMeta(&setMeta, "StatefulSet") + policy := getPersistentVolumeClaimRetentionPolicy(set) + const retain = appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType + const delete = appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType + switch { + default: + klog.Errorf("Unknown policy %v, treating as Retain", set.Spec.PersistentVolumeClaimRetentionPolicy) + fallthrough + case policy.WhenScaled == retain && policy.WhenDeleted == retain: + needsUpdate = removeOwnerRef(claim, set) || needsUpdate + needsUpdate = removeOwnerRef(claim, pod) || needsUpdate + case policy.WhenScaled == retain && policy.WhenDeleted == delete: + needsUpdate = setOwnerRef(claim, set, &setMeta) || needsUpdate + needsUpdate = removeOwnerRef(claim, pod) || needsUpdate + case policy.WhenScaled == delete && policy.WhenDeleted == retain: + needsUpdate = removeOwnerRef(claim, set) || needsUpdate + podScaledDown := getOrdinal(pod) >= int(*set.Spec.Replicas) + if podScaledDown { + needsUpdate = setOwnerRef(claim, pod, &podMeta) || needsUpdate + } + if !podScaledDown { + needsUpdate = removeOwnerRef(claim, pod) || needsUpdate + } + case policy.WhenScaled == delete && policy.WhenDeleted == delete: + podScaledDown := getOrdinal(pod) >= int(*set.Spec.Replicas) + if podScaledDown { + needsUpdate = removeOwnerRef(claim, set) || needsUpdate + needsUpdate = setOwnerRef(claim, pod, &podMeta) || needsUpdate + } + if !podScaledDown { + needsUpdate = setOwnerRef(claim, set, &setMeta) || needsUpdate + needsUpdate = removeOwnerRef(claim, pod) || needsUpdate + } + } + return needsUpdate +} + +// hasOwnerRef returns true if target has an ownerRef to owner. +func hasOwnerRef(target, owner metav1.Object) bool { + ownerUID := owner.GetUID() + for _, ownerRef := range target.GetOwnerReferences() { + if ownerRef.UID == ownerUID { + return true + } + } + return false +} + +// hasStaleOwnerRef returns true if target has a ref to owner that appears to be stale. +func hasStaleOwnerRef(target, owner metav1.Object) bool { + for _, ownerRef := range target.GetOwnerReferences() { + if ownerRef.Name == owner.GetName() && ownerRef.UID != owner.GetUID() { + return true + } + } + return false +} + +// setOwnerRef adds owner to the ownerRefs of target, if necessary. Returns true if target needs to be +// updated and false otherwise. +func setOwnerRef(target, owner metav1.Object, ownerType *metav1.TypeMeta) bool { + if hasOwnerRef(target, owner) { + return false + } + ownerRefs := append( + target.GetOwnerReferences(), + metav1.OwnerReference{ + APIVersion: ownerType.APIVersion, + Kind: ownerType.Kind, + Name: owner.GetName(), + UID: owner.GetUID(), + }) + target.SetOwnerReferences(ownerRefs) + return true +}
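Because hasOwnerRef matches by UID while hasStaleOwnerRef matches name-with-different-UID, a claim left over from an earlier incarnation of a pod is detected rather than silently adopted. A hypothetical package-local test (not part of the patch) illustrating the distinction:

```go
package statefulset

import (
	"testing"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

// After a pod is deleted and recreated, a leftover ownerRef still carries the
// old UID: the name matches but the UID does not, which is exactly what
// PodClaimIsStale needs in order to block reuse of a claim marked for deletion.
func TestStaleOwnerRefSketch(t *testing.T) {
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "web-0", UID: types.UID("uid-after-recreate")}}
	claim := &v1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{
		Name: "datadir-web-0",
		OwnerReferences: []metav1.OwnerReference{
			{Kind: "Pod", Name: "web-0", UID: types.UID("uid-before-recreate")},
		},
	}}
	if hasOwnerRef(claim, pod) {
		t.Error("UIDs differ, so this must not count as a live ownerRef")
	}
	if !hasStaleOwnerRef(claim, pod) {
		t.Error("same name with a different UID should be reported as stale")
	}
}
```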
+// removeOwnerRef removes owner from the ownerRefs of target, if necessary. Returns true if target needs +// to be updated and false otherwise. +func removeOwnerRef(target, owner metav1.Object) bool { + if !hasOwnerRef(target, owner) { + return false + } + ownerUID := owner.GetUID() + oldRefs := target.GetOwnerReferences() + newRefs := make([]metav1.OwnerReference, len(oldRefs)-1) + skip := 0 + for i := range oldRefs { + if oldRefs[i].UID == ownerUID { + skip = -1 + } else { + newRefs[i+skip] = oldRefs[i] + } + } + target.SetOwnerReferences(newRefs) + return true +} + // getPersistentVolumeClaims gets a map of PersistentVolumeClaims to their template names, as defined in set. The // returned PersistentVolumeClaims are each constructed with the name specific to the Pod. This name is determined // by getPersistentVolumeClaimName. diff --git a/pkg/controller/statefulset/statefulset_controller.go b/pkg/controller/statefulset/statefulset_controller.go index 31d29077a6..a35ee806f1 100644 --- a/pkg/controller/statefulset/statefulset_controller.go +++ b/pkg/controller/statefulset/statefulset_controller.go @@ -141,7 +141,7 @@ func newReconciler(mgr manager.Manager) (reconcile.Reconciler, error) { return &ReconcileStatefulSet{ kruiseClient: genericClient.KruiseClient, control: NewDefaultStatefulSetControl( - NewRealStatefulPodControl( + NewStatefulPodControl( genericClient.KubeClient, statefulSetLister, podLister, @@ -167,7 +167,7 @@ type ReconcileStatefulSet struct { kruiseClient kruiseclientset.Interface // control returns an interface capable of syncing a stateful set. // Abstracted out for testing. - control ControlInterface + control StatefulSetControlInterface // podControl is used for patching pods. podControl kubecontroller.PodControlInterface // podLister is able to list/get pods from a shared informer's store From 075c52cd32048026b7a233628416ef67228864bc Mon Sep 17 00:00:00 2001 From: veophi Date: Fri, 7 Jan 2022 10:27:16 +0800 Subject: [PATCH 4/5] controller change for statefulset auto-delete (tests) Signed-off-by: veophi --- .../apps.kruise.io_uniteddeployments.yaml | 24 + go.sum | 2 - .../statefulset/stateful_pod_control.go | 9 +- .../statefulset/stateful_pod_control_test.go | 415 ++++++- .../statefulset/stateful_set_control.go | 2 +- .../statefulset/stateful_set_control_test.go | 1085 +++++++++++------ .../stateful_set_status_updater.go | 7 +- .../stateful_set_status_updater_test.go | 7 +- .../statefulset/stateful_set_utils.go | 2 +- .../statefulset/stateful_set_utils_test.go | 471 ++++++- .../statefulset/stateful_update_utils.go | 3 +- .../statefulset/stateful_update_utils_test.go | 5 +- .../statefulset/statefulset_controller.go | 31 +- .../statefulset_controller_suite_test.go | 3 +- .../statefulset_controller_test.go | 42 +- .../statefulset_predownload_image.go | 13 +- pkg/util/feature/testing.go | 28 + test/e2e/apps/statefulset.go | 464 +++++-- test/e2e/framework/pv_util.go | 58 +- test/e2e/framework/statefulset_utils.go | 127 +- test/e2e/manifest/manifest.go | 6 +- 21 files changed, 2203 insertions(+), 601 deletions(-) create mode 100644 pkg/util/feature/testing.go diff --git a/config/crd/bases/apps.kruise.io_uniteddeployments.yaml b/config/crd/bases/apps.kruise.io_uniteddeployments.yaml index 7fe4fbb7d1..9d394082c9 100644 --- a/config/crd/bases/apps.kruise.io_uniteddeployments.yaml +++ b/config/crd/bases/apps.kruise.io_uniteddeployments.yaml @@ -160,6 +160,30 @@ spec: type: object type: object type: object + persistentVolumeClaimRetentionPolicy: + description: PersistentVolumeClaimRetentionPolicy describes + the policy used for PVCs created from the StatefulSet + VolumeClaimTemplates.
This requires the StatefulSetAutoDeletePVC + feature gate to be enabled, which is alpha. + properties: + whenDeleted: + description: WhenDeleted specifies what happens to + PVCs created from StatefulSet VolumeClaimTemplates + when the StatefulSet is deleted. The default policy + of `Retain` causes PVCs to not be affected by StatefulSet + deletion. The `Delete` policy causes those PVCs + to be deleted. + type: string + whenScaled: + description: WhenScaled specifies what happens to + PVCs created from StatefulSet VolumeClaimTemplates + when the StatefulSet is scaled down. The default + policy of `Retain` causes PVCs to not be affected + by a scaledown. The `Delete` policy causes the associated + PVCs for any excess pods above the replica count + to be deleted. + type: string + type: object podManagementPolicy: description: podManagementPolicy controls how pods are created during initial scale up, when replacing pods diff --git a/go.sum b/go.sum index cf01d48136..62ce79a72c 100644 --- a/go.sum +++ b/go.sum @@ -938,7 +938,6 @@ golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= @@ -1022,7 +1021,6 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= diff --git a/pkg/controller/statefulset/stateful_pod_control.go b/pkg/controller/statefulset/stateful_pod_control.go index fa5ca3e711..1b3b5058ba 100644 --- a/pkg/controller/statefulset/stateful_pod_control.go +++ b/pkg/controller/statefulset/stateful_pod_control.go @@ -22,10 +22,6 @@ import ( "fmt" "strings" - appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1" - kruiseappslisters "github.com/openkruise/kruise/pkg/client/listers/apps/v1beta1" - "github.com/openkruise/kruise/pkg/features" - utilfeature "github.com/openkruise/kruise/pkg/util/feature" v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -36,6 +32,11 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/client-go/util/retry" "k8s.io/klog/v2" + + appsv1beta1 
"github.com/openkruise/kruise/apis/apps/v1beta1" + kruiseappslisters "github.com/openkruise/kruise/pkg/client/listers/apps/v1beta1" + "github.com/openkruise/kruise/pkg/features" + utilfeature "github.com/openkruise/kruise/pkg/util/feature" ) // StatefulPodControlObjectManager abstracts the manipulation of Pods and PVCs. The real controller implements this diff --git a/pkg/controller/statefulset/stateful_pod_control_test.go b/pkg/controller/statefulset/stateful_pod_control_test.go index 5b1d37b41c..0efd68cf6e 100644 --- a/pkg/controller/statefulset/stateful_pod_control_test.go +++ b/pkg/controller/statefulset/stateful_pod_control_test.go @@ -19,14 +19,17 @@ package statefulset import ( "errors" + "fmt" "strings" "testing" "time" + apps "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/fake" corelisters "k8s.io/client-go/listers/core/v1" core "k8s.io/client-go/testing" @@ -34,6 +37,10 @@ import ( "k8s.io/client-go/tools/record" _ "k8s.io/kubernetes/pkg/apis/apps/install" _ "k8s.io/kubernetes/pkg/apis/core/install" + + appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1" + "github.com/openkruise/kruise/pkg/features" + utilfeature "github.com/openkruise/kruise/pkg/util/feature" ) func TestStatefulPodControlCreatesPods(t *testing.T) { @@ -41,14 +48,15 @@ func TestStatefulPodControlCreatesPods(t *testing.T) { set := newStatefulSet(3) pod := newStatefulSetPod(set, 0) fakeClient := &fake.Clientset{} - pvcIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) - pvcLister := corelisters.NewPersistentVolumeClaimLister(pvcIndexer) - control := NewRealStatefulPodControl(fakeClient, nil, nil, pvcLister, recorder) + claimIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) + claimLister := corelisters.NewPersistentVolumeClaimLister(claimIndexer) + control := NewStatefulPodControl(fakeClient, nil, nil, claimLister, recorder) fakeClient.AddReactor("get", "persistentvolumeclaims", func(action core.Action) (bool, runtime.Object, error) { return true, nil, apierrors.NewNotFound(action.GetResource().GroupResource(), action.GetResource().Resource) }) fakeClient.AddReactor("create", "persistentvolumeclaims", func(action core.Action) (bool, runtime.Object, error) { create := action.(core.CreateAction) + claimIndexer.Add(create.GetObject()) return true, create.GetObject(), nil }) fakeClient.AddReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) { @@ -81,7 +89,7 @@ func TestStatefulPodControlCreatePodExists(t *testing.T) { pvcIndexer.Add(&pvc) } pvcLister := corelisters.NewPersistentVolumeClaimLister(pvcIndexer) - control := NewRealStatefulPodControl(fakeClient, nil, nil, pvcLister, recorder) + control := NewStatefulPodControl(fakeClient, nil, nil, pvcLister, recorder) fakeClient.AddReactor("create", "persistentvolumeclaims", func(action core.Action) (bool, runtime.Object, error) { create := action.(core.CreateAction) return true, create.GetObject(), nil @@ -108,7 +116,7 @@ func TestStatefulPodControlCreatePodPvcCreateFailure(t *testing.T) { fakeClient := &fake.Clientset{} pvcIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) pvcLister := 
corelisters.NewPersistentVolumeClaimLister(pvcIndexer) - control := NewRealStatefulPodControl(fakeClient, nil, nil, pvcLister, recorder) + control := NewStatefulPodControl(fakeClient, nil, nil, pvcLister, recorder) fakeClient.AddReactor("create", "persistentvolumeclaims", func(action core.Action) (bool, runtime.Object, error) { return true, nil, apierrors.NewInternalError(errors.New("API server down")) }) @@ -130,7 +138,7 @@ func TestStatefulPodControlCreatePodPvcCreateFailure(t *testing.T) { } } -func TestStatefulPodControlCreatePodPvcDeleting(t *testing.T) { +func TestStatefulPodControlCreatePodPVCDeleting(t *testing.T) { recorder := record.NewFakeRecorder(10) set := newStatefulSet(3) pod := newStatefulSetPod(set, 0) @@ -144,7 +152,7 @@ func TestStatefulPodControlCreatePodPvcDeleting(t *testing.T) { pvcIndexer.Add(&pvc) } pvcLister := corelisters.NewPersistentVolumeClaimLister(pvcIndexer) - control := NewRealStatefulPodControl(fakeClient, nil, nil, pvcLister, recorder) + control := NewStatefulPodControl(fakeClient, nil, nil, pvcLister, recorder) fakeClient.AddReactor("create", "persistentvolumeclaims", func(action core.Action) (bool, runtime.Object, error) { create := action.(core.CreateAction) return true, create.GetObject(), nil @@ -183,7 +191,7 @@ func TestStatefulPodControlCreatePodPvcGetFailure(t *testing.T) { fakeClient := &fake.Clientset{} pvcIndexer := &fakeIndexer{getError: errors.New("API server down")} pvcLister := corelisters.NewPersistentVolumeClaimLister(pvcIndexer) - control := NewRealStatefulPodControl(fakeClient, nil, nil, pvcLister, recorder) + control := NewStatefulPodControl(fakeClient, nil, nil, pvcLister, recorder) fakeClient.AddReactor("create", "persistentvolumeclaims", func(action core.Action) (bool, runtime.Object, error) { return true, nil, apierrors.NewInternalError(errors.New("API server down")) }) @@ -212,7 +220,7 @@ func TestStatefulPodControlCreatePodFailed(t *testing.T) { fakeClient := &fake.Clientset{} pvcIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) pvcLister := corelisters.NewPersistentVolumeClaimLister(pvcIndexer) - control := NewRealStatefulPodControl(fakeClient, nil, nil, pvcLister, recorder) + control := NewStatefulPodControl(fakeClient, nil, nil, pvcLister, recorder) fakeClient.AddReactor("create", "persistentvolumeclaims", func(action core.Action) (bool, runtime.Object, error) { create := action.(core.CreateAction) return true, create.GetObject(), nil @@ -231,7 +239,6 @@ func TestStatefulPodControlCreatePodFailed(t *testing.T) { } else if !strings.Contains(events[1], v1.EventTypeWarning) { t.Errorf("Found unexpected non-warning event %s", events[1]) - } } @@ -240,7 +247,14 @@ func TestStatefulPodControlNoOpUpdate(t *testing.T) { set := newStatefulSet(3) pod := newStatefulSetPod(set, 0) fakeClient := &fake.Clientset{} - control := NewRealStatefulPodControl(fakeClient, nil, nil, nil, recorder) + claims := getPersistentVolumeClaims(set, pod) + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) + for k := range claims { + claim := claims[k] + indexer.Add(&claim) + } + claimLister := corelisters.NewPersistentVolumeClaimLister(indexer) + control := NewStatefulPodControl(fakeClient, nil, nil, claimLister, recorder) fakeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) { t.Error("no-op update should not make any client invocation") return true, nil, 
apierrors.NewInternalError(errors.New("If we are here we have a problem")) @@ -259,7 +273,9 @@ func TestStatefulPodControlUpdatesIdentity(t *testing.T) { set := newStatefulSet(3) pod := newStatefulSetPod(set, 0) fakeClient := fake.NewSimpleClientset(pod) - control := NewRealStatefulPodControl(fakeClient, nil, nil, nil, recorder) + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) + claimLister := corelisters.NewPersistentVolumeClaimLister(indexer) + control := NewStatefulPodControl(fakeClient, nil, nil, claimLister, recorder) var updated *v1.Pod fakeClient.PrependReactor("update", "pods", func(action core.Action) (bool, runtime.Object, error) { update := action.(core.UpdateAction) @@ -286,12 +302,14 @@ func TestStatefulPodControlUpdateIdentityFailure(t *testing.T) { set := newStatefulSet(3) pod := newStatefulSetPod(set, 0) fakeClient := &fake.Clientset{} - indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) + podIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) gooPod := newStatefulSetPod(set, 0) gooPod.Name = "goo-0" - indexer.Add(gooPod) - podLister := corelisters.NewPodLister(indexer) - control := NewRealStatefulPodControl(fakeClient, nil, podLister, nil, recorder) + podIndexer.Add(gooPod) + podLister := corelisters.NewPodLister(podIndexer) + claimIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) + claimLister := corelisters.NewPersistentVolumeClaimLister(claimIndexer) + control := NewStatefulPodControl(fakeClient, nil, podLister, claimLister, recorder) fakeClient.AddReactor("update", "pods", func(action core.Action) (bool, runtime.Object, error) { pod.Name = "goo-0" return true, nil, apierrors.NewInternalError(errors.New("API server down")) @@ -318,7 +336,7 @@ func TestStatefulPodControlUpdatesPodStorage(t *testing.T) { fakeClient := &fake.Clientset{} pvcIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) pvcLister := corelisters.NewPersistentVolumeClaimLister(pvcIndexer) - control := NewRealStatefulPodControl(fakeClient, nil, nil, pvcLister, recorder) + control := NewStatefulPodControl(fakeClient, nil, nil, pvcLister, recorder) pvcs := getPersistentVolumeClaims(set, pod) volumes := make([]v1.Volume, 0, len(pod.Spec.Volumes)) for i := range pod.Spec.Volumes { @@ -365,7 +383,7 @@ func TestStatefulPodControlUpdatePodStorageFailure(t *testing.T) { fakeClient := &fake.Clientset{} pvcIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) pvcLister := corelisters.NewPersistentVolumeClaimLister(pvcIndexer) - control := NewRealStatefulPodControl(fakeClient, nil, nil, pvcLister, recorder) + control := NewStatefulPodControl(fakeClient, nil, nil, pvcLister, recorder) pvcs := getPersistentVolumeClaims(set, pod) volumes := make([]v1.Volume, 0, len(pod.Spec.Volumes)) for i := range pod.Spec.Volumes { @@ -400,12 +418,19 @@ func TestStatefulPodControlUpdatePodConflictSuccess(t *testing.T) { set := newStatefulSet(3) pod := newStatefulSetPod(set, 0) fakeClient := &fake.Clientset{} - indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) + podIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, 
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) + podLister := corelisters.NewPodLister(podIndexer) + claimIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) + claimLister := corelisters.NewPersistentVolumeClaimLister(claimIndexer) gooPod := newStatefulSetPod(set, 0) - gooPod.Name = "goo-0" - indexer.Add(gooPod) - podLister := corelisters.NewPodLister(indexer) - control := NewRealStatefulPodControl(fakeClient, nil, podLister, nil, recorder) + gooPod.Labels[apps.StatefulSetPodNameLabel] = "goo-starts" + podIndexer.Add(gooPod) + claims := getPersistentVolumeClaims(set, gooPod) + for k := range claims { + claim := claims[k] + claimIndexer.Add(&claim) + } + control := NewStatefulPodControl(fakeClient, nil, podLister, claimLister, recorder) conflict := false fakeClient.AddReactor("update", "pods", func(action core.Action) (bool, runtime.Object, error) { update := action.(core.UpdateAction) @@ -415,7 +440,7 @@ func TestStatefulPodControlUpdatePodConflictSuccess(t *testing.T) { } return true, update.GetObject(), nil }) - pod.Name = "goo-0" + pod.Labels[apps.StatefulSetPodNameLabel] = "goo-0" if err := control.UpdateStatefulPod(set, pod); err != nil { t.Errorf("Successful update returned an error: %s", err) } @@ -435,7 +460,7 @@ func TestStatefulPodControlDeletesStatefulPod(t *testing.T) { set := newStatefulSet(3) pod := newStatefulSetPod(set, 0) fakeClient := &fake.Clientset{} - control := NewRealStatefulPodControl(fakeClient, nil, nil, nil, recorder) + control := NewStatefulPodControl(fakeClient, nil, nil, nil, recorder) fakeClient.AddReactor("delete", "pods", func(action core.Action) (bool, runtime.Object, error) { return true, nil, nil }) @@ -455,7 +480,7 @@ func TestStatefulPodControlDeleteFailure(t *testing.T) { set := newStatefulSet(3) pod := newStatefulSetPod(set, 0) fakeClient := &fake.Clientset{} - control := NewRealStatefulPodControl(fakeClient, nil, nil, nil, recorder) + control := NewStatefulPodControl(fakeClient, nil, nil, nil, recorder) fakeClient.AddReactor("delete", "pods", func(action core.Action) (bool, runtime.Object, error) { return true, nil, apierrors.NewInternalError(errors.New("API server down")) }) @@ -470,6 +495,344 @@ func TestStatefulPodControlDeleteFailure(t *testing.T) { } } +func TestStatefulPodControlClaimsMatchDeletionPolicy(t *testing.T) { + // The claimOwnerMatchesSetAndPod is tested exhaustively in stateful_set_utils_test; this + // test is for the wiring to the method tested there.
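+ // As a rough sketch of that wiring (hedged; assuming the kruise port keeps the upstream shape, + // not a verbatim copy of the implementation), the control ANDs claimOwnerMatchesSetAndPod over + // every claim belonging to the pod, roughly: + // for _, claim := range claims { if !claimOwnerMatchesSetAndPod(&claim, set, pod) { return false, nil } } + // so Retain/Retain matches claims with no set/pod ownerRefs, while a Delete policy expects them.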
+ fakeClient := &fake.Clientset{} + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) + claimLister := corelisters.NewPersistentVolumeClaimLister(indexer) + set := newStatefulSet(3) + pod := newStatefulSetPod(set, 0) + claims := getPersistentVolumeClaims(set, pod) + for k := range claims { + claim := claims[k] + indexer.Add(&claim) + } + control := NewStatefulPodControl(fakeClient, nil, nil, claimLister, &noopRecorder{}) + set.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{ + WhenDeleted: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType, + WhenScaled: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType, + } + if matches, err := control.ClaimsMatchRetentionPolicy(set, pod); err != nil { + t.Errorf("Unexpected error for ClaimsMatchRetentionPolicy (retain): %v", err) + } else if !matches { + t.Error("Unexpected non-match for ClaimsMatchRetentionPolicy (retain)") + } + set.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{ + WhenDeleted: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType, + WhenScaled: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType, + } + if matches, err := control.ClaimsMatchRetentionPolicy(set, pod); err != nil { + t.Errorf("Unexpected error for ClaimsMatchRetentionPolicy (set deletion): %v", err) + } else if matches { + t.Error("Unexpected match for ClaimsMatchRetentionPolicy (set deletion)") + } +} + +func TestStatefulPodControlUpdatePodClaimForRetentionPolicy(t *testing.T) { + // All the update conditions are tested exhaustively in stateful_set_utils_test. This + // tests the wiring from the pod control to that method. 
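+ // For orientation, a hedged sketch of the expected effect (mirroring the upstream design, not + // necessarily kruise's exact code): with WhenDeleted=Delete the update should stamp each claim + // with a set ownerRef, roughly + // metav1.OwnerReference{APIVersion: "apps.kruise.io/v1beta1", Kind: "StatefulSet", Name: set.Name, UID: set.UID} + // so that garbage collection removes the claim together with the StatefulSet; with the gate off + // the claims must be left untouched, which is why expectRef below keys off the gate.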
+ testFn := func(t *testing.T) { + fakeClient := &fake.Clientset{} + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) + claimLister := corelisters.NewPersistentVolumeClaimLister(indexer) + set := newStatefulSet(3) + set.GetObjectMeta().SetUID("set-123") + pod := newStatefulSetPod(set, 0) + claims := getPersistentVolumeClaims(set, pod) + for k := range claims { + claim := claims[k] + indexer.Add(&claim) + } + control := NewStatefulPodControl(fakeClient, nil, nil, claimLister, &noopRecorder{}) + set.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{ + WhenDeleted: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType, + WhenScaled: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType, + } + if err := control.UpdatePodClaimForRetentionPolicy(set, pod); err != nil { + t.Errorf("Unexpected error for UpdatePodClaimForRetentionPolicy (set deletion): %v", err) + } + expectRef := utilfeature.DefaultFeatureGate.Enabled(features.StatefulSetAutoDeletePVC) + for k := range claims { + claim, err := claimLister.PersistentVolumeClaims(claims[k].Namespace).Get(claims[k].Name) + if err != nil { + t.Errorf("Unexpected error getting Claim %s/%s: %v", claims[k].Namespace, claims[k].Name, err) + continue + } + if hasOwnerRef(claim, set) != expectRef { + t.Errorf("Claim %s/%s bad set owner ref", claim.Namespace, claim.Name) + } + } + } + t.Run("StatefulSetAutoDeletePVCEnabled", func(t *testing.T) { + defer utilfeature.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.StatefulSetAutoDeletePVC, true)() + testFn(t) + }) + t.Run("StatefulSetAutoDeletePVCDisabled", func(t *testing.T) { + defer utilfeature.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.StatefulSetAutoDeletePVC, false)() + testFn(t) + }) +} + +func TestPodClaimIsStale(t *testing.T) { + const missing = "missing" + const exists = "exists" + const stale = "stale" + const withRef = "with-ref" + testCases := []struct { + name string + claimStates []string + expected bool + skipPodUID bool + }{ + { + name: "all missing", + claimStates: []string{missing, missing}, + expected: false, + }, + { + name: "no claims", + claimStates: []string{}, + expected: false, + }, + { + name: "exists", + claimStates: []string{missing, exists}, + expected: false, + }, + { + name: "all refs", + claimStates: []string{withRef, withRef}, + expected: false, + }, + { + name: "stale & exists", + claimStates: []string{stale, exists}, + expected: true, + }, + { + name: "stale & missing", + claimStates: []string{stale, missing}, + expected: true, + }, + { + name: "withRef & stale", + claimStates: []string{withRef, stale}, + expected: true, + }, + { + name: "withRef, no UID", + claimStates: []string{withRef}, + skipPodUID: true, + expected: true, + }, + } + for _, tc := range testCases { + set := appsv1beta1.StatefulSet{} + set.Name = "set" + set.Namespace = "default" + set.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{ + WhenDeleted: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType, + WhenScaled: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType, + } + set.Spec.Selector = &metav1.LabelSelector{MatchLabels: map[string]string{"key": "value"}} + claimIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex:
cache.MetaNamespaceIndexFunc}) + for i, claimState := range tc.claimStates { + claim := v1.PersistentVolumeClaim{} + claim.Name = fmt.Sprintf("claim-%d", i) + set.Spec.VolumeClaimTemplates = append(set.Spec.VolumeClaimTemplates, claim) + claim.Name = fmt.Sprintf("%s-set-3", claim.Name) + claim.Namespace = set.Namespace + switch claimState { + case missing: + // Do nothing, the claim shouldn't exist. + case exists: + claimIndexer.Add(&claim) + case stale: + claim.SetOwnerReferences([]metav1.OwnerReference{ + {Name: "set-3", UID: types.UID("stale")}, + }) + claimIndexer.Add(&claim) + case withRef: + claim.SetOwnerReferences([]metav1.OwnerReference{ + {Name: "set-3", UID: types.UID("123")}, + }) + claimIndexer.Add(&claim) + } + } + pod := v1.Pod{} + pod.Name = "set-3" + if !tc.skipPodUID { + pod.SetUID("123") + } + claimLister := corelisters.NewPersistentVolumeClaimLister(claimIndexer) + control := NewStatefulPodControl(&fake.Clientset{}, nil, nil, claimLister, &noopRecorder{}) + expected := tc.expected + // Note that the error isn't / can't be tested. + if stale, _ := control.PodClaimIsStale(&set, &pod); stale != expected { + t.Errorf("unexpected stale for %s", tc.name) + } + } +} + +func TestStatefulPodControlRetainDeletionPolicyUpdate(t *testing.T) { + testFn := func(t *testing.T) { + recorder := record.NewFakeRecorder(10) + set := newStatefulSet(1) + set.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{ + WhenDeleted: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType, + WhenScaled: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType, + } + pod := newStatefulSetPod(set, 0) + fakeClient := &fake.Clientset{} + podIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) + podLister := corelisters.NewPodLister(podIndexer) + claimIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) + claimLister := corelisters.NewPersistentVolumeClaimLister(claimIndexer) + podIndexer.Add(pod) + claims := getPersistentVolumeClaims(set, pod) + if len(claims) < 1 { + t.Errorf("Unexpected missing PVCs") + } + for k := range claims { + claim := claims[k] + setOwnerRef(&claim, set, &set.TypeMeta) // This ownerRef should be removed in the update. 
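+ // (With a Retain/Retain policy the controller is expected to strip this set ownerRef again; + // leaving it in place would let a later StatefulSet deletion cascade to the claim.)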
+ claimIndexer.Add(&claim) + } + control := NewStatefulPodControl(fakeClient, nil, podLister, claimLister, recorder) + if err := control.UpdateStatefulPod(set, pod); err != nil { + t.Errorf("Successful update returned an error: %s", err) + } + for k := range claims { + claim := claims[k] + if hasOwnerRef(&claim, set) { + t.Errorf("ownerRef not removed: %s/%s", claim.Namespace, claim.Name) + } + } + events := collectEvents(recorder.Events) + if utilfeature.DefaultFeatureGate.Enabled(features.StatefulSetAutoDeletePVC) { + if eventCount := len(events); eventCount != 1 { + t.Errorf("update failed: got %d events, but want 1", eventCount) + } + } else { + if len(events) != 0 { + t.Errorf("update failed: expected no events, but got %v", events) + } + } + } + t.Run("StatefulSetAutoDeletePVCEnabled", func(t *testing.T) { + defer utilfeature.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.StatefulSetAutoDeletePVC, true)() + testFn(t) + }) + t.Run("StatefulSetAutoDeletePVCDisabled", func(t *testing.T) { + defer utilfeature.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.StatefulSetAutoDeletePVC, false)() + testFn(t) + }) +} + +func TestStatefulPodControlRetentionPolicyUpdate(t *testing.T) { + // Only applicable when the feature gate is on; the off case is tested in TestStatefulPodControlRetainDeletionPolicyUpdate. + defer utilfeature.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.StatefulSetAutoDeletePVC, true)() + + recorder := record.NewFakeRecorder(10) + set := newStatefulSet(1) + set.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{ + WhenDeleted: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType, + WhenScaled: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType, + } + pod := newStatefulSetPod(set, 0) + fakeClient := &fake.Clientset{} + podIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) + claimIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) + podIndexer.Add(pod) + claims := getPersistentVolumeClaims(set, pod) + if len(claims) != 1 { + t.Errorf("Unexpected or missing PVCs") + } + var claim v1.PersistentVolumeClaim + for k := range claims { + claim = claims[k] + claimIndexer.Add(&claim) + } + fakeClient.AddReactor("update", "persistentvolumeclaims", func(action core.Action) (bool, runtime.Object, error) { + update := action.(core.UpdateAction) + claimIndexer.Update(update.GetObject()) + return true, update.GetObject(), nil + }) + podLister := corelisters.NewPodLister(podIndexer) + claimLister := corelisters.NewPersistentVolumeClaimLister(claimIndexer) + control := NewStatefulPodControl(fakeClient, nil, podLister, claimLister, recorder) + if err := control.UpdateStatefulPod(set, pod); err != nil { + t.Errorf("Successful update returned an error: %s", err) + } + updatedClaim, err := claimLister.PersistentVolumeClaims(claim.Namespace).Get(claim.Name) + if err != nil { + t.Errorf("Error retrieving claim %s/%s: %v", claim.Namespace, claim.Name, err) + } + if !hasOwnerRef(updatedClaim, set) { + t.Errorf("ownerRef not added: %s/%s", claim.Namespace, claim.Name) + } + events := collectEvents(recorder.Events) + if eventCount := len(events); eventCount != 1 { + t.Errorf("update failed: got %d events, but want 1", eventCount) + } +} + +func TestStatefulPodControlRetentionPolicyUpdateMissingClaims(t *testing.T) { + // Only
applicable when the feature gate is on. + defer utilfeature.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.StatefulSetAutoDeletePVC, true)() + + recorder := record.NewFakeRecorder(10) + set := newStatefulSet(1) + set.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{ + WhenDeleted: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType, + WhenScaled: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType, + } + pod := newStatefulSetPod(set, 0) + fakeClient := &fake.Clientset{} + podIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) + podLister := corelisters.NewPodLister(podIndexer) + claimIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) + claimLister := corelisters.NewPersistentVolumeClaimLister(claimIndexer) + podIndexer.Add(pod) + fakeClient.AddReactor("update", "persistentvolumeclaims", func(action core.Action) (bool, runtime.Object, error) { + update := action.(core.UpdateAction) + claimIndexer.Update(update.GetObject()) + return true, update.GetObject(), nil + }) + control := NewStatefulPodControl(fakeClient, nil, podLister, claimLister, recorder) + if err := control.UpdateStatefulPod(set, pod); err != nil { + t.Error("Unexpected error on pod update when PVCs are missing") + } + claims := getPersistentVolumeClaims(set, pod) + if len(claims) != 1 { + t.Errorf("Unexpected or missing PVCs") + } + var claim v1.PersistentVolumeClaim + for k := range claims { + claim = claims[k] + claimIndexer.Add(&claim) + } + + if err := control.UpdateStatefulPod(set, pod); err != nil { + t.Errorf("Expected update to succeed, saw error %v", err) + } + updatedClaim, err := claimLister.PersistentVolumeClaims(claim.Namespace).Get(claim.Name) + if err != nil { + t.Errorf("Error retrieving claim %s/%s: %v", claim.Namespace, claim.Name, err) + } + if !hasOwnerRef(updatedClaim, set) { + t.Errorf("ownerRef not added: %s/%s", claim.Namespace, claim.Name) + } + events := collectEvents(recorder.Events) + if eventCount := len(events); eventCount != 1 { + t.Errorf("update failed: got %d events, but want 1", eventCount) + } + if !strings.Contains(events[0], "SuccessfulUpdate") { + t.Errorf("expected first event to be a successful update: %s", events[0]) + } +} + func collectEvents(source <-chan string) []string { done := false events := make([]string, 0) diff --git a/pkg/controller/statefulset/stateful_set_control.go b/pkg/controller/statefulset/stateful_set_control.go index aaeec852a4..d8c578533d 100644 --- a/pkg/controller/statefulset/stateful_set_control.go +++ b/pkg/controller/statefulset/stateful_set_control.go @@ -19,7 +19,6 @@ package statefulset import ( "fmt" - utilerrors "k8s.io/apimachinery/pkg/util/errors" "math" "sort" "time" @@ -27,6 +26,7 @@ import ( apps "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilerrors "k8s.io/apimachinery/pkg/util/errors" intstrutil "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/tools/record" diff --git a/pkg/controller/statefulset/stateful_set_control_test.go b/pkg/controller/statefulset/stateful_set_control_test.go index cd3fe36e41..f0b869f825 100644 --- a/pkg/controller/statefulset/stateful_set_control_test.go +++ b/pkg/controller/statefulset/stateful_set_control_test.go @@ -34,9 +34,10 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/informers" - coreinformers "k8s.io/client-go/informers/core/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/fake" corelisters "k8s.io/client-go/listers/core/v1" @@ -54,19 +55,22 @@ import ( kruiseinformers "github.com/openkruise/kruise/pkg/client/informers/externalversions" kruiseappsinformers "github.com/openkruise/kruise/pkg/client/informers/externalversions/apps/v1beta1" kruiseappslisters "github.com/openkruise/kruise/pkg/client/listers/apps/v1beta1" + "github.com/openkruise/kruise/pkg/features" + utilfeature "github.com/openkruise/kruise/pkg/util/feature" "github.com/openkruise/kruise/pkg/util/inplaceupdate" "github.com/openkruise/kruise/pkg/util/lifecycle" "github.com/openkruise/kruise/pkg/util/revisionadapter" ) -type invariantFunc func(set *appsv1beta1.StatefulSet, spc *fakeStatefulPodControl) error +type invariantFunc func(set *appsv1beta1.StatefulSet, om *fakeObjectManager) error -func setupController(client clientset.Interface, kruiseClient kruiseclientset.Interface) (*fakeStatefulPodControl, *fakeStatefulSetStatusUpdater, ControlInterface, chan struct{}) { +func setupController(client clientset.Interface, kruiseClient kruiseclientset.Interface) (*fakeObjectManager, *fakeStatefulSetStatusUpdater, StatefulSetControlInterface, chan struct{}) { informerFactory := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc()) kruiseInformerFactory := kruiseinformers.NewSharedInformerFactory(kruiseClient, controller.NoResyncPeriodFunc()) - spc := newFakeStatefulPodControl(informerFactory.Core().V1().Pods(), kruiseInformerFactory.Apps().V1beta1().StatefulSets()) + om := newFakeObjectManager(informerFactory, kruiseInformerFactory) + spc := NewStatefulPodControlFromManager(om, &noopRecorder{}) ssu := newFakeStatefulSetStatusUpdater(kruiseInformerFactory.Apps().V1beta1().StatefulSets()) - recorder := record.NewFakeRecorder(10) + recorder := &noopRecorder{} inplaceControl := inplaceupdate.NewForInformer(informerFactory.Core().V1().Pods(), revisionadapter.NewDefaultImpl()) lifecycleControl := lifecycle.NewForInformer(informerFactory.Core().V1().Pods()) ssc := NewDefaultStatefulSetControl(spc, inplaceControl, lifecycleControl, ssu, history.NewFakeHistory(informerFactory.Apps().V1().ControllerRevisions()), recorder) @@ -81,7 +85,7 @@ func setupController(client clientset.Interface, kruiseClient kruiseclientset.In informerFactory.Core().V1().Pods().Informer().HasSynced, informerFactory.Apps().V1().ControllerRevisions().Informer().HasSynced, ) - return spc, ssu, ssc, stop + return om, ssu, ssc, stop } func burst(set *appsv1beta1.StatefulSet) *appsv1beta1.StatefulSet { @@ -89,6 +93,64 @@ func burst(set *appsv1beta1.StatefulSet) *appsv1beta1.StatefulSet { return set } +func runTestOverPVCRetentionPolicies(t *testing.T, testName string, testFn func(*testing.T, *appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy)) { + subtestName := "StatefulSetAutoDeletePVCDisabled" + if testName != "" { + subtestName = fmt.Sprintf("%s/%s", testName, subtestName) + } + t.Run(subtestName, func(t *testing.T) { + defer utilfeature.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.StatefulSetAutoDeletePVC, false)() + testFn(t, &appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{ + WhenScaled:
appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType, + WhenDeleted: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType, + }) + }) + + for _, policy := range []*appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{ + { + WhenScaled: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType, + WhenDeleted: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType, + }, + { + WhenScaled: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType, + WhenDeleted: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType, + }, + { + WhenScaled: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType, + WhenDeleted: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType, + }, + { + WhenScaled: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType, + WhenDeleted: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType, + }, + } { + subtestName := pvcDeletePolicyString(policy) + "/StatefulSetAutoDeletePVCEnabled" + if testName != "" { + subtestName = fmt.Sprintf("%s/%s", testName, subtestName) + } + t.Run(subtestName, func(t *testing.T) { + defer utilfeature.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.StatefulSetAutoDeletePVC, true)() + testFn(t, policy) + }) + } +} + +func pvcDeletePolicyString(policy *appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy) string { + const retain = appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType + const delete = appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType + switch { + case policy.WhenScaled == retain && policy.WhenDeleted == retain: + return "Retain" + case policy.WhenScaled == retain && policy.WhenDeleted == delete: + return "SetDeleteOnly" + case policy.WhenScaled == delete && policy.WhenDeleted == retain: + return "ScaleDownOnly" + case policy.WhenScaled == delete && policy.WhenDeleted == delete: + return "Delete" + } + return "invalid" +} + func TestStatefulSetControl(t *testing.T) { t.SkipNow() simpleSetFn := func() *appsv1beta1.StatefulSet { return newStatefulSet(3) } @@ -114,17 +176,24 @@ func TestStatefulSetControl(t *testing.T) { if i := strings.LastIndex(fnName, "."); i != -1 { fnName = fnName[i+1:] } - t.Run( + testObj := testCase.obj + testFn := testCase.fn + runTestOverPVCRetentionPolicies( + t, fmt.Sprintf("%s/Monotonic", fnName), - func(t *testing.T) { - testCase.fn(t, testCase.obj(), assertMonotonicInvariants) + func(t *testing.T, policy *appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy) { + set := testObj() + set.Spec.PersistentVolumeClaimRetentionPolicy = policy + testFn(t, set, assertMonotonicInvariants) }, ) - t.Run( + runTestOverPVCRetentionPolicies( + t, fmt.Sprintf("%s/Burst", fnName), - func(t *testing.T) { - set := burst(testCase.obj()) - testCase.fn(t, set, assertBurstInvariants) + func(t *testing.T, policy *appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy) { + set := burst(testObj()) + set.Spec.PersistentVolumeClaimRetentionPolicy = policy + testFn(t, set, assertBurstInvariants) }, ) } @@ -133,14 +202,14 @@ func TestStatefulSetControl(t *testing.T) { func CreatesPods(t *testing.T, set *appsv1beta1.StatefulSet, invariants invariantFunc) { client := fake.NewSimpleClientset() kruiseClient := kruisefake.NewSimpleClientset(set) - spc, _, ssc, stop := setupController(client, kruiseClient) + om, _, ssc, stop := setupController(client, kruiseClient) defer close(stop) - if err := scaleUpStatefulSetControl(set, ssc, spc, invariants); err != nil { + if err := scaleUpStatefulSetControl(set, ssc, om, invariants); err != nil { 
t.Errorf("Failed to turn up StatefulSet : %s", err) } var err error - set, err = spc.setsLister.StatefulSets(set.Namespace).Get(set.Name) + set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name) if err != nil { t.Fatalf("Error getting updated StatefulSet: %v", err) } @@ -161,18 +230,18 @@ func CreatesPods(t *testing.T, set *appsv1beta1.StatefulSet, invariants invarian func ScalesUp(t *testing.T, set *appsv1beta1.StatefulSet, invariants invariantFunc) { client := fake.NewSimpleClientset() kruiseClient := kruisefake.NewSimpleClientset(set) - spc, _, ssc, stop := setupController(client, kruiseClient) + om, _, ssc, stop := setupController(client, kruiseClient) defer close(stop) - if err := scaleUpStatefulSetControl(set, ssc, spc, invariants); err != nil { + if err := scaleUpStatefulSetControl(set, ssc, om, invariants); err != nil { t.Errorf("Failed to turn up StatefulSet : %s", err) } *set.Spec.Replicas = 4 - if err := scaleUpStatefulSetControl(set, ssc, spc, invariants); err != nil { + if err := scaleUpStatefulSetControl(set, ssc, om, invariants); err != nil { t.Errorf("Failed to scale StatefulSet : %s", err) } var err error - set, err = spc.setsLister.StatefulSets(set.Namespace).Get(set.Name) + set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name) if err != nil { t.Fatalf("Error getting updated StatefulSet: %v", err) } @@ -193,16 +262,27 @@ func ScalesUp(t *testing.T, set *appsv1beta1.StatefulSet, invariants invariantFu func ScalesDown(t *testing.T, set *appsv1beta1.StatefulSet, invariants invariantFunc) { client := fake.NewSimpleClientset() kruiseClient := kruisefake.NewSimpleClientset(set) - spc, _, ssc, stop := setupController(client, kruiseClient) + om, _, ssc, stop := setupController(client, kruiseClient) defer close(stop) - if err := scaleUpStatefulSetControl(set, ssc, spc, invariants); err != nil { + if err := scaleUpStatefulSetControl(set, ssc, om, invariants); err != nil { t.Errorf("Failed to turn up StatefulSet : %s", err) } + var err error + set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name) + if err != nil { + t.Fatalf("Error getting updated StatefulSet: %v", err) + } *set.Spec.Replicas = 0 - if err := scaleDownStatefulSetControl(set, ssc, spc, invariants); err != nil { + if err := scaleDownStatefulSetControl(set, ssc, om, invariants); err != nil { t.Errorf("Failed to scale StatefulSet : %s", err) } + + // Check updated set. 
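+ // (The local copy is stale after the scale operations, so re-read from the lister before + // asserting on Status.)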
+ set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name) + if err != nil { + t.Fatalf("Error getting updated StatefulSet: %v", err) + } if set.Status.Replicas != 0 { t.Error("Failed to scale statefulset to 0 replicas") } @@ -220,14 +300,14 @@ func ScalesDown(t *testing.T, set *appsv1beta1.StatefulSet, invariants invariant func ReplacesPods(t *testing.T, set *appsv1beta1.StatefulSet, invariants invariantFunc) { client := fake.NewSimpleClientset() kruiseClient := kruisefake.NewSimpleClientset(set) - spc, _, ssc, stop := setupController(client, kruiseClient) + om, _, ssc, stop := setupController(client, kruiseClient) defer close(stop) - if err := scaleUpStatefulSetControl(set, ssc, spc, invariants); err != nil { + if err := scaleUpStatefulSetControl(set, ssc, om, invariants); err != nil { t.Errorf("Failed to turn up StatefulSet : %s", err) } var err error - set, err = spc.setsLister.StatefulSets(set.Namespace).Get(set.Name) + set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name) if err != nil { t.Fatalf("Error getting updated StatefulSet: %v", err) } @@ -238,49 +318,64 @@ func ReplacesPods(t *testing.T, set *appsv1beta1.StatefulSet, invariants invaria if err != nil { t.Error(err) } - pods, err := spc.podsLister.Pods(set.Namespace).List(selector) + claims, err := om.claimsLister.PersistentVolumeClaims(set.Namespace).List(selector) + if err != nil { + t.Error(err) + } + + pods, err := om.podsLister.Pods(set.Namespace).List(selector) if err != nil { t.Error(err) } + for _, pod := range pods { + podClaims := getPersistentVolumeClaims(set, pod) + for _, claim := range claims { + if _, found := podClaims[claim.Name]; found { + if hasOwnerRef(claim, pod) { + t.Errorf("Unexpected ownerRef on %s", claim.Name) + } + } + } + } sort.Sort(ascendingOrdinal(pods)) - spc.podsIndexer.Delete(pods[0]) - spc.podsIndexer.Delete(pods[2]) - spc.podsIndexer.Delete(pods[4]) + om.podsIndexer.Delete(pods[0]) + om.podsIndexer.Delete(pods[2]) + om.podsIndexer.Delete(pods[4]) for i := 0; i < 5; i += 2 { - pods, err := spc.podsLister.Pods(set.Namespace).List(selector) + pods, err := om.podsLister.Pods(set.Namespace).List(selector) if err != nil { t.Error(err) } if err = ssc.UpdateStatefulSet(set, pods); err != nil { t.Errorf("Failed to update StatefulSet : %s", err) } - set, err = spc.setsLister.StatefulSets(set.Namespace).Get(set.Name) + set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name) if err != nil { t.Fatalf("Error getting updated StatefulSet: %v", err) } - if pods, err = spc.setPodRunning(set, i); err != nil { + if pods, err = om.setPodRunning(set, i); err != nil { t.Error(err) } if err = ssc.UpdateStatefulSet(set, pods); err != nil { t.Errorf("Failed to update StatefulSet : %s", err) } - set, err = spc.setsLister.StatefulSets(set.Namespace).Get(set.Name) + set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name) if err != nil { t.Fatalf("Error getting updated StatefulSet: %v", err) } - if _, err = spc.setPodReady(set, i); err != nil { + if _, err = om.setPodReady(set, i); err != nil { t.Error(err) } } - pods, err = spc.podsLister.Pods(set.Namespace).List(selector) + pods, err = om.podsLister.Pods(set.Namespace).List(selector) if err != nil { t.Error(err) } if err := ssc.UpdateStatefulSet(set, pods); err != nil { t.Errorf("Failed to update StatefulSet : %s", err) } - set, err = spc.setsLister.StatefulSets(set.Namespace).Get(set.Name) + set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name) if err != nil { t.Fatalf("Error getting updated StatefulSet: %v", 
err) } @@ -292,35 +387,35 @@ func ReplacesPods(t *testing.T, set *appsv1beta1.StatefulSet, invariants invaria func RecreatesFailedPod(t *testing.T, set *appsv1beta1.StatefulSet, invariants invariantFunc) { client := fake.NewSimpleClientset() kruiseClient := kruisefake.NewSimpleClientset() - spc, _, ssc, stop := setupController(client, kruiseClient) + om, _, ssc, stop := setupController(client, kruiseClient) defer close(stop) selector, err := metav1.LabelSelectorAsSelector(set.Spec.Selector) if err != nil { t.Error(err) } - pods, err := spc.podsLister.Pods(set.Namespace).List(selector) + pods, err := om.podsLister.Pods(set.Namespace).List(selector) if err != nil { t.Error(err) } if err := ssc.UpdateStatefulSet(set, pods); err != nil { t.Errorf("Error updating StatefulSet %s", err) } - if err := invariants(set, spc); err != nil { + if err := invariants(set, om); err != nil { t.Error(err) } - pods, err = spc.podsLister.Pods(set.Namespace).List(selector) + pods, err = om.podsLister.Pods(set.Namespace).List(selector) if err != nil { t.Error(err) } pods[0].Status.Phase = v1.PodFailed - spc.podsIndexer.Update(pods[0]) + om.podsIndexer.Update(pods[0]) if err := ssc.UpdateStatefulSet(set, pods); err != nil { t.Errorf("Error updating StatefulSet %s", err) } - if err := invariants(set, spc); err != nil { + if err := invariants(set, om); err != nil { t.Error(err) } - pods, err = spc.podsLister.Pods(set.Namespace).List(selector) + pods, err = om.podsLister.Pods(set.Namespace).List(selector) if err != nil { t.Error(err) } @@ -332,18 +427,23 @@ func RecreatesFailedPod(t *testing.T, set *appsv1beta1.StatefulSet, invariants i func CreatePodFailure(t *testing.T, set *appsv1beta1.StatefulSet, invariants invariantFunc) { client := fake.NewSimpleClientset() kruiseClient := kruisefake.NewSimpleClientset(set) - spc, _, ssc, stop := setupController(client, kruiseClient) + om, _, ssc, stop := setupController(client, kruiseClient) defer close(stop) - spc.SetCreateStatefulPodError(apierrors.NewInternalError(errors.New("API server failed")), 2) + om.SetCreateStatefulPodError(apierrors.NewInternalError(errors.New("API server failed")), 2) - if err := scaleUpStatefulSetControl(set, ssc, spc, invariants); !apierrors.IsInternalError(err) { + if err := scaleUpStatefulSetControl(set, ssc, om, invariants); !isOrHasInternalError(err) { t.Errorf("StatefulSetControl did not return InternalError found %s", err) } - if err := scaleUpStatefulSetControl(set, ssc, spc, invariants); err != nil { + // Update so set.Status is set for the next scaleUpStatefulSetControl call.
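+ // (The first attempt aborted on the injected InternalError, so the in-hand copy's Status is + // stale; without this re-read the retry would start from outdated observed state.)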
+ var err error + set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name) + if err != nil { + t.Fatalf("Error getting updated StatefulSet: %v", err) + } + if err := scaleUpStatefulSetControl(set, ssc, om, invariants); err != nil { t.Errorf("Failed to turn up StatefulSet : %s", err) } - var err error - set, err = spc.setsLister.StatefulSets(set.Namespace).Get(set.Name) + set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name) if err != nil { t.Fatalf("Error getting updated StatefulSet: %v", err) } @@ -364,16 +464,16 @@ func CreatePodFailure(t *testing.T, set *appsv1beta1.StatefulSet, invariants inv func UpdatePodFailure(t *testing.T, set *appsv1beta1.StatefulSet, invariants invariantFunc) { client := fake.NewSimpleClientset() kruiseClient := kruisefake.NewSimpleClientset(set) - spc, _, ssc, stop := setupController(client, kruiseClient) + om, _, ssc, stop := setupController(client, kruiseClient) defer close(stop) - spc.SetUpdateStatefulPodError(apierrors.NewInternalError(errors.New("API server failed")), 0) + om.SetUpdateStatefulPodError(apierrors.NewInternalError(errors.New("API server failed")), 0) // have to have 1 successful loop first - if err := scaleUpStatefulSetControl(set, ssc, spc, invariants); err != nil { + if err := scaleUpStatefulSetControl(set, ssc, om, invariants); err != nil { t.Fatalf("Unexpected error: %v", err) } var err error - set, err = spc.setsLister.StatefulSets(set.Namespace).Get(set.Name) + set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name) if err != nil { t.Fatalf("Error getting updated StatefulSet: %v", err) } @@ -391,7 +491,7 @@ func UpdatePodFailure(t *testing.T, set *appsv1beta1.StatefulSet, invariants inv } // now mutate a pod's identity - pods, err := spc.podsLister.List(labels.Everything()) + pods, err := om.podsLister.List(labels.Everything()) if err != nil { t.Fatalf("Error listing pods: %v", err) } @@ -400,7 +500,7 @@ func UpdatePodFailure(t *testing.T, set *appsv1beta1.StatefulSet, invariants inv } sort.Sort(ascendingOrdinal(pods)) pods[0].Name = "goo-0" - spc.podsIndexer.Update(pods[0]) + om.podsIndexer.Update(pods[0]) // now it should fail if err := ssc.UpdateStatefulSet(set, pods); !apierrors.IsInternalError(err) { @@ -411,18 +511,23 @@ func UpdatePodFailure(t *testing.T, set *appsv1beta1.StatefulSet, invariants inv func UpdateSetStatusFailure(t *testing.T, set *appsv1beta1.StatefulSet, invariants invariantFunc) { client := fake.NewSimpleClientset() kruiseClient := kruisefake.NewSimpleClientset(set) - spc, ssu, ssc, stop := setupController(client, kruiseClient) + om, ssu, ssc, stop := setupController(client, kruiseClient) defer close(stop) ssu.SetUpdateStatefulSetStatusError(apierrors.NewInternalError(errors.New("API server failed")), 2) - if err := scaleUpStatefulSetControl(set, ssc, spc, invariants); !apierrors.IsInternalError(err) { + if err := scaleUpStatefulSetControl(set, ssc, om, invariants); !apierrors.IsInternalError(err) { t.Errorf("StatefulSetControl did not return InternalError found %s", err) } - if err := scaleUpStatefulSetControl(set, ssc, spc, invariants); err != nil { + // Update so set.Status is set for the next scaleUpStatefulSetControl call. 
+ var err error + set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name) + if err != nil { + t.Fatalf("Error getting updated StatefulSet: %v", err) + } + if err := scaleUpStatefulSetControl(set, ssc, om, invariants); err != nil { t.Errorf("Failed to turn up StatefulSet : %s", err) } - var err error - set, err = spc.setsLister.StatefulSets(set.Namespace).Get(set.Name) + set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name) if err != nil { t.Fatalf("Error getting updated StatefulSet: %v", err) } @@ -443,43 +548,43 @@ func UpdateSetStatusFailure(t *testing.T, set *appsv1beta1.StatefulSet, invarian func PodRecreateDeleteFailure(t *testing.T, set *appsv1beta1.StatefulSet, invariants invariantFunc) { client := fake.NewSimpleClientset() kruiseClient := kruisefake.NewSimpleClientset(set) - spc, _, ssc, stop := setupController(client, kruiseClient) + om, _, ssc, stop := setupController(client, kruiseClient) defer close(stop) selector, err := metav1.LabelSelectorAsSelector(set.Spec.Selector) if err != nil { t.Error(err) } - pods, err := spc.podsLister.Pods(set.Namespace).List(selector) + pods, err := om.podsLister.Pods(set.Namespace).List(selector) if err != nil { t.Error(err) } if err := ssc.UpdateStatefulSet(set, pods); err != nil { t.Errorf("Error updating StatefulSet %s", err) } - if err := invariants(set, spc); err != nil { + if err := invariants(set, om); err != nil { t.Error(err) } - pods, err = spc.podsLister.Pods(set.Namespace).List(selector) + pods, err = om.podsLister.Pods(set.Namespace).List(selector) if err != nil { t.Error(err) } pods[0].Status.Phase = v1.PodFailed - spc.podsIndexer.Update(pods[0]) - spc.SetDeleteStatefulPodError(apierrors.NewInternalError(errors.New("API server failed")), 0) - if err := ssc.UpdateStatefulSet(set, pods); !apierrors.IsInternalError(err) { + om.podsIndexer.Update(pods[0]) + om.SetDeleteStatefulPodError(apierrors.NewInternalError(errors.New("API server failed")), 0) + if err := ssc.UpdateStatefulSet(set, pods); !isOrHasInternalError(err) { t.Errorf("StatefulSet failed to %s", err) } - if err := invariants(set, spc); err != nil { + if err := invariants(set, om); err != nil { t.Error(err) } if err := ssc.UpdateStatefulSet(set, pods); err != nil { t.Errorf("Error updating StatefulSet %s", err) } - if err := invariants(set, spc); err != nil { + if err := invariants(set, om); err != nil { t.Error(err) } - pods, err = spc.podsLister.Pods(set.Namespace).List(selector) + pods, err = om.podsLister.Pods(set.Namespace).List(selector) if err != nil { t.Error(err) } @@ -489,45 +594,50 @@ func PodRecreateDeleteFailure(t *testing.T, set *appsv1beta1.StatefulSet, invari } func TestStatefulSetControlScaleDownDeleteError(t *testing.T) { - invariants := assertMonotonicInvariants - set := newStatefulSet(3) - client := fake.NewSimpleClientset() - kruiseClient := kruisefake.NewSimpleClientset(set) - spc, _, ssc, stop := setupController(client, kruiseClient) - defer close(stop) - - if err := scaleUpStatefulSetControl(set, ssc, spc, invariants); err != nil { - t.Errorf("Failed to turn up StatefulSet : %s", err) - } - var err error - set, err = spc.setsLister.StatefulSets(set.Namespace).Get(set.Name) - if err != nil { - t.Fatalf("Error getting updated StatefulSet: %v", err) - } - *set.Spec.Replicas = 0 - spc.SetDeleteStatefulPodError(apierrors.NewInternalError(errors.New("API server failed")), 2) - if err := scaleDownStatefulSetControl(set, ssc, spc, invariants); !apierrors.IsInternalError(err) { - t.Errorf("StatefulSetControl failed to throw
error on delete %s", err) - } - if err := scaleDownStatefulSetControl(set, ssc, spc, invariants); err != nil { - t.Errorf("Failed to turn down StatefulSet %s", err) - } - set, err = spc.setsLister.StatefulSets(set.Namespace).Get(set.Name) - if err != nil { - t.Fatalf("Error getting updated StatefulSet: %v", err) - } - if set.Status.Replicas != 0 { - t.Error("Failed to scale statefulset to 0 replicas") - } - if set.Status.ReadyReplicas != 0 { - t.Error("Failed to set readyReplicas to 0") - } - if set.Status.AvailableReplicas != 0 { - t.Error("Failed to set readyReplicas to 0") - } - if set.Status.UpdatedReplicas != 0 { - t.Error("Failed to set updatedReplicas to 0") - } + runTestOverPVCRetentionPolicies( + t, "", func(t *testing.T, policy *appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy) { + set := newStatefulSet(3) + set.Spec.PersistentVolumeClaimRetentionPolicy = policy + invariants := assertMonotonicInvariants + client := fake.NewSimpleClientset() + kruiseClient := kruisefake.NewSimpleClientset(set) + om, _, ssc, stop := setupController(client, kruiseClient) + defer close(stop) + + if err := scaleUpStatefulSetControl(set, ssc, om, invariants); err != nil { + t.Errorf("Failed to turn up StatefulSet : %s", err) + } + var err error + set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name) + if err != nil { + t.Fatalf("Error getting updated StatefulSet: %v", err) + } + *set.Spec.Replicas = 0 + om.SetDeleteStatefulPodError(apierrors.NewInternalError(errors.New("API server failed")), 2) + if err := scaleDownStatefulSetControl(set, ssc, om, invariants); !isOrHasInternalError(err) { + t.Errorf("StatefulSetControl failed to throw error on delete %s", err) + } + set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name) + if err != nil { + t.Fatalf("Error getting updated StatefulSet: %v", err) + } + if err := scaleDownStatefulSetControl(set, ssc, om, invariants); err != nil { + t.Errorf("Failed to turn down StatefulSet %s", err) + } + set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name) + if err != nil { + t.Fatalf("Error getting updated StatefulSet: %v", err) + } + if set.Status.Replicas != 0 { + t.Error("Failed to scale statefulset to 0 replicas") + } + if set.Status.ReadyReplicas != 0 { + t.Error("Failed to set readyReplicas to 0") + } + if set.Status.UpdatedReplicas != 0 { + t.Error("Failed to set updatedReplicas to 0") + } + }) } func TestStatefulSetControl_getSetRevisions(t *testing.T) { @@ -546,7 +656,8 @@ func TestStatefulSetControl_getSetRevisions(t *testing.T) { kruiseClient := kruisefake.NewSimpleClientset() informerFactory := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc()) kruiseInformerFactory := kruiseinformers.NewSharedInformerFactory(kruiseClient, controller.NoResyncPeriodFunc()) - spc := newFakeStatefulPodControl(informerFactory.Core().V1().Pods(), kruiseInformerFactory.Apps().V1beta1().StatefulSets()) + om := newFakeObjectManager(informerFactory, kruiseInformerFactory) + spc := NewStatefulPodControlFromManager(om, &noopRecorder{}) ssu := newFakeStatefulSetStatusUpdater(kruiseInformerFactory.Apps().V1beta1().StatefulSets()) recorder := record.NewFakeRecorder(10) inplaceControl := inplaceupdate.NewForInformer(informerFactory.Core().V1().Pods(), revisionadapter.NewDefaultImpl()) @@ -606,66 +717,70 @@ func TestStatefulSetControl_getSetRevisions(t *testing.T) { return clone } - set := newStatefulSet(3) - set.Status.CollisionCount = new(int32) - rev0 := newRevisionOrDie(set, 1) - set1 :=
set.DeepCopy() - set1.Spec.Template.Spec.Containers[0].Image = "foo" - set1.Status.CurrentRevision = rev0.Name - set1.Status.CollisionCount = new(int32) - rev1 := newRevisionOrDie(set1, 2) - set2 := set1.DeepCopy() - set2.Spec.Template.Labels["new"] = "label" - set2.Status.CurrentRevision = rev0.Name - set2.Status.CollisionCount = new(int32) - rev2 := newRevisionOrDie(set2, 3) - tests := []testcase{ - { - name: "creates initial revision", - existing: nil, - set: set, - expectedCount: 1, - expectedCurrent: rev0, - expectedUpdate: rev0, - err: false, - }, - { - name: "creates revision on update", - existing: []*apps.ControllerRevision{rev0}, - set: set1, - expectedCount: 2, - expectedCurrent: rev0, - expectedUpdate: rev1, - err: false, - }, - { - name: "must not recreate a new revision of same set", - existing: []*apps.ControllerRevision{rev0, rev1}, - set: set1, - expectedCount: 2, - expectedCurrent: rev0, - expectedUpdate: rev1, - err: false, - }, - { - name: "must rollback to a previous revision", - existing: []*apps.ControllerRevision{rev0, rev1, rev2}, - set: set1, - expectedCount: 3, - expectedCurrent: rev0, - expectedUpdate: updateRevision(rev1, 4), - err: false, - }, - } - for i := range tests { - testFn(&tests[i], t) - } + runTestOverPVCRetentionPolicies( + t, "", func(t *testing.T, policy *appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy) { + set := newStatefulSet(3) + set.Spec.PersistentVolumeClaimRetentionPolicy = policy + set.Status.CollisionCount = new(int32) + rev0 := newRevisionOrDie(set, 1) + set1 := set.DeepCopy() + set1.Spec.Template.Spec.Containers[0].Image = "foo" + set1.Status.CurrentRevision = rev0.Name + set1.Status.CollisionCount = new(int32) + rev1 := newRevisionOrDie(set1, 2) + set2 := set1.DeepCopy() + set2.Spec.Template.Labels["new"] = "label" + set2.Status.CurrentRevision = rev0.Name + set2.Status.CollisionCount = new(int32) + rev2 := newRevisionOrDie(set2, 3) + tests := []testcase{ + { + name: "creates initial revision", + existing: nil, + set: set, + expectedCount: 1, + expectedCurrent: rev0, + expectedUpdate: rev0, + err: false, + }, + { + name: "creates revision on update", + existing: []*apps.ControllerRevision{rev0}, + set: set1, + expectedCount: 2, + expectedCurrent: rev0, + expectedUpdate: rev1, + err: false, + }, + { + name: "must not recreate a new revision of same set", + existing: []*apps.ControllerRevision{rev0, rev1}, + set: set1, + expectedCount: 2, + expectedCurrent: rev0, + expectedUpdate: rev1, + err: false, + }, + { + name: "must rollback to a previous revision", + existing: []*apps.ControllerRevision{rev0, rev1, rev2}, + set: set1, + expectedCount: 3, + expectedCurrent: rev0, + expectedUpdate: updateRevision(rev1, 4), + err: false, + }, + } + for i := range tests { + testFn(&tests[i], t) + } + }) } func TestStatefulSetControlRollingUpdate(t *testing.T) { type testcase struct { name string - invariants func(set *appsv1beta1.StatefulSet, spc *fakeStatefulPodControl) error + invariants func(set *appsv1beta1.StatefulSet, om *fakeObjectManager) error initial func() *appsv1beta1.StatefulSet update func(set *appsv1beta1.StatefulSet) *appsv1beta1.StatefulSet validate func(set *appsv1beta1.StatefulSet, pods []*v1.Pod) error @@ -675,28 +790,28 @@ func TestStatefulSetControlRollingUpdate(t *testing.T) { set := test.initial() client := fake.NewSimpleClientset() kruiseClient := kruisefake.NewSimpleClientset(set) - spc, _, ssc, stop := setupController(client, kruiseClient) + om, _, ssc, stop := setupController(client, kruiseClient) defer 
close(stop) - if err := scaleUpStatefulSetControl(set, ssc, spc, test.invariants); err != nil { + if err := scaleUpStatefulSetControl(set, ssc, om, test.invariants); err != nil { t.Fatalf("%s: %s", test.name, err) } - set, err := spc.setsLister.StatefulSets(set.Namespace).Get(set.Name) + set, err := om.setsLister.StatefulSets(set.Namespace).Get(set.Name) if err != nil { t.Fatalf("%s: %s", test.name, err) } set = test.update(set) - if err := updateStatefulSetControl(set, ssc, spc, assertUpdateInvariants); err != nil { + if err := updateStatefulSetControl(set, ssc, om, assertUpdateInvariants); err != nil { t.Fatalf("%s: %s", test.name, err) } selector, err := metav1.LabelSelectorAsSelector(set.Spec.Selector) if err != nil { t.Fatalf("%s: %s", test.name, err) } - pods, err := spc.podsLister.Pods(set.Namespace).List(selector) + pods, err := om.podsLister.Pods(set.Namespace).List(selector) if err != nil { t.Fatalf("%s: %s", test.name, err) } - set, err = spc.setsLister.StatefulSets(set.Namespace).Get(set.Name) + set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name) if err != nil { t.Fatalf("%s: %s", test.name, err) } @@ -839,7 +954,7 @@ func TestStatefulSetControlRollingUpdate(t *testing.T) { func TestStatefulSetControlOnDeleteUpdate(t *testing.T) { type testcase struct { name string - invariants func(set *appsv1beta1.StatefulSet, spc *fakeStatefulPodControl) error + invariants func(set *appsv1beta1.StatefulSet, om *fakeObjectManager) error initial func() *appsv1beta1.StatefulSet update func(set *appsv1beta1.StatefulSet) *appsv1beta1.StatefulSet validateUpdate func(set *appsv1beta1.StatefulSet, pods []*v1.Pod) error @@ -848,34 +963,48 @@ func TestStatefulSetControlOnDeleteUpdate(t *testing.T) { originalImage := newStatefulSet(3).Spec.Template.Spec.Containers[0].Image - testFn := func(test *testcase, t *testing.T) { + testFn := func(t *testing.T, test *testcase, policy *appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy) { set := test.initial() set.Spec.UpdateStrategy = appsv1beta1.StatefulSetUpdateStrategy{Type: apps.OnDeleteStatefulSetStrategyType} client := fake.NewSimpleClientset() kruiseClient := kruisefake.NewSimpleClientset(set) - spc, _, ssc, stop := setupController(client, kruiseClient) + om, _, ssc, stop := setupController(client, kruiseClient) defer close(stop) - if err := scaleUpStatefulSetControl(set, ssc, spc, test.invariants); err != nil { + if err := scaleUpStatefulSetControl(set, ssc, om, test.invariants); err != nil { t.Fatalf("%s: %s", test.name, err) } - set, err := spc.setsLister.StatefulSets(set.Namespace).Get(set.Name) + set, err := om.setsLister.StatefulSets(set.Namespace).Get(set.Name) if err != nil { t.Fatalf("%s: %s", test.name, err) } set = test.update(set) - if err := updateStatefulSetControl(set, ssc, spc, assertUpdateInvariants); err != nil { + if err := updateStatefulSetControl(set, ssc, om, assertUpdateInvariants); err != nil { t.Fatalf("%s: %s", test.name, err) } + // Pods may have been deleted in the update. Delete any claims with a pod ownerRef. 
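+ // (There is no garbage collector behind the fake object manager, so claims owned by deleted + // pods would linger in the indexer and trip the ownerRef assertions below; this manual prune + // stands in for what kube GC does in a real cluster.)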
selector, err := metav1.LabelSelectorAsSelector(set.Spec.Selector) if err != nil { t.Fatalf("%s: %s", test.name, err) } - pods, err := spc.podsLister.Pods(set.Namespace).List(selector) + claims, err := om.claimsLister.PersistentVolumeClaims(set.Namespace).List(selector) if err != nil { t.Fatalf("%s: %s", test.name, err) } - set, err = spc.setsLister.StatefulSets(set.Namespace).Get(set.Name) + for _, claim := range claims { + for _, ref := range claim.GetOwnerReferences() { + if strings.HasPrefix(ref.Name, "foo-") { + om.claimsIndexer.Delete(claim) + break + } + } + } + + set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name) + if err != nil { + t.Fatalf("%s: %s", test.name, err) + } + pods, err := om.podsLister.Pods(set.Namespace).List(selector) if err != nil { t.Fatalf("%s: %s", test.name, err) } @@ -886,24 +1015,49 @@ func TestStatefulSetControlOnDeleteUpdate(t *testing.T) { t.Fatalf("%s: %s", test.name, err) } + claims, err = om.claimsLister.PersistentVolumeClaims(set.Namespace).List(selector) + if err != nil { + t.Fatalf("%s: %s", test.name, err) + } + for _, claim := range claims { + for _, ref := range claim.GetOwnerReferences() { + if strings.HasPrefix(ref.Name, "foo-") { + t.Fatalf("Unexpected pod reference on %s: %v", claim.Name, claim.GetOwnerReferences()) + } + } + } + replicas := *set.Spec.Replicas *set.Spec.Replicas = 0 - if err := scaleDownStatefulSetControl(set, ssc, spc, test.invariants); err != nil { + if err := scaleDownStatefulSetControl(set, ssc, om, test.invariants); err != nil { t.Fatalf("%s: %s", test.name, err) } - set, err = spc.setsLister.StatefulSets(set.Namespace).Get(set.Name) + set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name) if err != nil { t.Fatalf("%s: %s", test.name, err) } *set.Spec.Replicas = replicas - if err := scaleUpStatefulSetControl(set, ssc, spc, test.invariants); err != nil { + + claims, err = om.claimsLister.PersistentVolumeClaims(set.Namespace).List(selector) + if err != nil { t.Fatalf("%s: %s", test.name, err) } - set, err = spc.setsLister.StatefulSets(set.Namespace).Get(set.Name) + for _, claim := range claims { + for _, ref := range claim.GetOwnerReferences() { + if strings.HasPrefix(ref.Name, "foo-") { + t.Fatalf("Unexpected pod reference on %s: %v", claim.Name, claim.GetOwnerReferences()) + } + } + } + + if err := scaleUpStatefulSetControl(set, ssc, om, test.invariants); err != nil { + t.Fatalf("%s: %s", test.name, err) + } + set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name) if err != nil { t.Fatalf("%s: %s", test.name, err) } - pods, err = spc.podsLister.Pods(set.Namespace).List(selector) + pods, err = om.podsLister.Pods(set.Namespace).List(selector) if err != nil { t.Fatalf("%s: %s", test.name, err) } @@ -1098,24 +1252,27 @@ func TestStatefulSetControlOnDeleteUpdate(t *testing.T) { }, }, } - for i := range tests { - testFn(&tests[i], t) - } + runTestOverPVCRetentionPolicies(t, "", func(t *testing.T, policy *appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy) { + for i := range tests { + testFn(t, &tests[i], policy) + } + }) } func TestStatefulSetControlRollingUpdateWithPaused(t *testing.T) { type testcase struct { name string paused bool - invariants func(set *appsv1beta1.StatefulSet, spc *fakeStatefulPodControl) error + invariants func(set *appsv1beta1.StatefulSet, om *fakeObjectManager) error initial func() *appsv1beta1.StatefulSet update func(set *appsv1beta1.StatefulSet) *appsv1beta1.StatefulSet validate func(set *appsv1beta1.StatefulSet, pods []*v1.Pod) error } - testFn := 
func(test *testcase, t *testing.T) { + testFn := func(t *testing.T, test *testcase, policy *appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy) { set := test.initial() var partition int32 + set.Spec.PersistentVolumeClaimRetentionPolicy = policy set.Spec.UpdateStrategy = appsv1beta1.StatefulSetUpdateStrategy{ Type: apps.RollingUpdateStatefulSetStrategyType, RollingUpdate: func() *appsv1beta1.RollingUpdateStatefulSetStrategy { @@ -1127,28 +1284,28 @@ func TestStatefulSetControlRollingUpdateWithPaused(t *testing.T) { } client := fake.NewSimpleClientset() kruiseClient := kruisefake.NewSimpleClientset(set) - spc, _, ssc, stop := setupController(client, kruiseClient) + om, _, ssc, stop := setupController(client, kruiseClient) defer close(stop) - if err := scaleUpStatefulSetControl(set, ssc, spc, test.invariants); err != nil { + if err := scaleUpStatefulSetControl(set, ssc, om, test.invariants); err != nil { t.Fatalf("%s: %s", test.name, err) } - set, err := spc.setsLister.StatefulSets(set.Namespace).Get(set.Name) + set, err := om.setsLister.StatefulSets(set.Namespace).Get(set.Name) if err != nil { t.Fatalf("%s: %s", test.name, err) } set = test.update(set) - if err := updateStatefulSetControl(set, ssc, spc, assertUpdateInvariants); err != nil { + if err := updateStatefulSetControl(set, ssc, om, assertUpdateInvariants); err != nil { t.Fatalf("%s: %s", test.name, err) } selector, err := metav1.LabelSelectorAsSelector(set.Spec.Selector) if err != nil { t.Fatalf("%s: %s", test.name, err) } - pods, err := spc.podsLister.Pods(set.Namespace).List(selector) + pods, err := om.podsLister.Pods(set.Namespace).List(selector) if err != nil { t.Fatalf("%s: %s", test.name, err) } - set, err = spc.setsLister.StatefulSets(set.Namespace).Get(set.Name) + set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name) if err != nil { t.Fatalf("%s: %s", test.name, err) } @@ -1295,24 +1452,26 @@ func TestStatefulSetControlRollingUpdateWithPaused(t *testing.T) { }, }, } - for i := range tests { - testFn(&tests[i], t) - } + runTestOverPVCRetentionPolicies(t, "", func(t *testing.T, policy *appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy) { + for i := range tests { + testFn(t, &tests[i], policy) + } + }) } func TestScaleUpStatefulSetWithMinReadySeconds(t *testing.T) { type testcase struct { name string minReadySeconds int32 - invariants func(set *appsv1beta1.StatefulSet, spc *fakeStatefulPodControl) error + invariants func(set *appsv1beta1.StatefulSet, om *fakeObjectManager) error initial func() *appsv1beta1.StatefulSet - updatePod func(spc *fakeStatefulPodControl, set *appsv1beta1.StatefulSet, pods []*v1.Pod) error + updatePod func(om *fakeObjectManager, set *appsv1beta1.StatefulSet, pods []*v1.Pod) error validate func(set *appsv1beta1.StatefulSet, pods []*v1.Pod) error } - readyPods := func(partition, pauseSecond int) func(spc *fakeStatefulPodControl, set *appsv1beta1.StatefulSet, + readyPods := func(partition, pauseSecond int) func(om *fakeObjectManager, set *appsv1beta1.StatefulSet, pods []*v1.Pod) error { - return func(spc *fakeStatefulPodControl, set *appsv1beta1.StatefulSet, pods []*v1.Pod) error { + return func(om *fakeObjectManager, set *appsv1beta1.StatefulSet, pods []*v1.Pod) error { sort.Sort(ascendingOrdinal(pods)) for i := 0; i < partition; i++ { pod := pods[i].DeepCopy() @@ -1320,7 +1479,7 @@ func TestScaleUpStatefulSetWithMinReadySeconds(t *testing.T) { condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue} podutil.UpdatePodCondition(&pod.Status, &condition) 
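+ // Bump the fake resourceVersion before writing the pod back, so the re-listed copy registers as a newer object than the cached one.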
fakeResourceVersion(pod) - if err := spc.podsIndexer.Update(pod); err != nil { + if err := om.podsIndexer.Update(pod); err != nil { return err } } @@ -1495,16 +1654,16 @@ func TestUpdateStatefulSetWithMinReadySeconds(t *testing.T) { minReadySeconds int32 maxUnavailable intstr.IntOrString partition int - updatePod func(spc *fakeStatefulPodControl, set *appsv1beta1.StatefulSet, pods []*v1.Pod) error + updatePod func(om *fakeObjectManager, set *appsv1beta1.StatefulSet, pods []*v1.Pod) error validate func(set *appsv1beta1.StatefulSet, pods []*v1.Pod) error } const setSize = 5 //originalImage := newStatefulSet(1).Spec.Template.Spec.Containers[0].Image newImage := "foo" - readyPods := func(partition, pauseSecond int) func(spc *fakeStatefulPodControl, set *appsv1beta1.StatefulSet, + readyPods := func(partition, pauseSecond int) func(om *fakeObjectManager, set *appsv1beta1.StatefulSet, pods []*v1.Pod) error { - return func(spc *fakeStatefulPodControl, set *appsv1beta1.StatefulSet, pods []*v1.Pod) error { + return func(om *fakeObjectManager, set *appsv1beta1.StatefulSet, pods []*v1.Pod) error { sort.Sort(ascendingOrdinal(pods)) for i := setSize - 1; i >= partition; i-- { pod := pods[i].DeepCopy() @@ -1512,7 +1671,7 @@ func TestUpdateStatefulSetWithMinReadySeconds(t *testing.T) { condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue} podutil.UpdatePodCondition(&pod.Status, &condition) fakeResourceVersion(pod) - if err := spc.podsIndexer.Update(pod); err != nil { + if err := om.podsIndexer.Update(pod); err != nil { return err } } @@ -1678,7 +1837,7 @@ func TestStatefulSetControlRollingUpdateWithPartition(t *testing.T) { type testcase struct { name string partition int32 - invariants func(set *appsv1beta1.StatefulSet, spc *fakeStatefulPodControl) error + invariants func(set *appsv1beta1.StatefulSet, om *fakeObjectManager) error initial func() *appsv1beta1.StatefulSet update func(set *appsv1beta1.StatefulSet) *appsv1beta1.StatefulSet validate func(set *appsv1beta1.StatefulSet, pods []*v1.Pod) error @@ -2245,40 +2404,80 @@ func TestStatefulSetControlLifecycleHook(t *testing.T) { } } +func TestStatefulSetHonorRevisionHistoryLimit(t *testing.T) { + runTestOverPVCRetentionPolicies(t, "", func(t *testing.T, policy *appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy) { + invariants := assertMonotonicInvariants + set := newStatefulSet(3) + set.Spec.PersistentVolumeClaimRetentionPolicy = policy + client := fake.NewSimpleClientset() + kruiseClient := kruisefake.NewSimpleClientset(set) + om, ssu, ssc, stop := setupController(client, kruiseClient) + defer close(stop) + + if err := scaleUpStatefulSetControl(set, ssc, om, invariants); err != nil { + t.Errorf("Failed to turn up StatefulSet : %s", err) + } + var err error + set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name) + if err != nil { + t.Fatalf("Error getting updated StatefulSet: %v", err) + } + + for i := 0; i < int(*set.Spec.RevisionHistoryLimit)+5; i++ { + set.Spec.Template.Spec.Containers[0].Image = fmt.Sprintf("foo-%d", i) + ssu.SetUpdateStatefulSetStatusError(apierrors.NewInternalError(errors.New("API server failed")), 2) + updateStatefulSetControl(set, ssc, om, assertUpdateInvariants) + set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name) + if err != nil { + t.Fatalf("Error getting updated StatefulSet: %v", err) + } + revisions, err := ssc.ListRevisions(set) + if err != nil { + t.Fatalf("Error listing revisions: %v", err) + } + // the extra 2 revisions are `currentRevision` and `updateRevision` 
+ // They are considered `live`, and truncateHistory only cleans up non-live revisions + if len(revisions) > int(*set.Spec.RevisionHistoryLimit)+2 { + t.Fatalf("%s: %d greater than limit %d", "", len(revisions), *set.Spec.RevisionHistoryLimit) + } + } + }) +} + func TestStatefulSetControlLimitsHistory(t *testing.T) { type testcase struct { name string - invariants func(set *appsv1beta1.StatefulSet, spc *fakeStatefulPodControl) error + invariants func(set *appsv1beta1.StatefulSet, om *fakeObjectManager) error initial func() *appsv1beta1.StatefulSet } - testFn := func(test *testcase, t *testing.T) { + testFn := func(t *testing.T, test *testcase) { set := test.initial() client := fake.NewSimpleClientset() kruiseClient := kruisefake.NewSimpleClientset(set) - spc, _, ssc, stop := setupController(client, kruiseClient) + om, _, ssc, stop := setupController(client, kruiseClient) defer close(stop) - if err := scaleUpStatefulSetControl(set, ssc, spc, test.invariants); err != nil { + if err := scaleUpStatefulSetControl(set, ssc, om, test.invariants); err != nil { t.Fatalf("%s: %s", test.name, err) } - set, err := spc.setsLister.StatefulSets(set.Namespace).Get(set.Name) + set, err := om.setsLister.StatefulSets(set.Namespace).Get(set.Name) if err != nil { t.Fatalf("%s: %s", test.name, err) } for i := 0; i < 10; i++ { set.Spec.Template.Spec.Containers[0].Image = fmt.Sprintf("foo-%d", i) - if err := updateStatefulSetControl(set, ssc, spc, assertUpdateInvariants); err != nil { + if err := updateStatefulSetControl(set, ssc, om, assertUpdateInvariants); err != nil { t.Fatalf("%s: %s", test.name, err) } selector, err := metav1.LabelSelectorAsSelector(set.Spec.Selector) if err != nil { t.Fatalf("%s: %s", test.name, err) } - pods, err := spc.podsLister.Pods(set.Namespace).List(selector) + pods, err := om.podsLister.Pods(set.Namespace).List(selector) if err != nil { t.Fatalf("%s: %s", test.name, err) } - set, err = spc.setsLister.StatefulSets(set.Namespace).Get(set.Name) + set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name) if err != nil { t.Fatalf("%s: %s", test.name, err) } @@ -2313,14 +2512,14 @@ func TestStatefulSetControlLimitsHistory(t *testing.T) { }, } for i := range tests { - testFn(&tests[i], t) + testFn(t, &tests[i]) } } func TestStatefulSetControlRollback(t *testing.T) { type testcase struct { name string - invariants func(set *appsv1beta1.StatefulSet, spc *fakeStatefulPodControl) error + invariants func(set *appsv1beta1.StatefulSet, om *fakeObjectManager) error initial func() *appsv1beta1.StatefulSet update func(set *appsv1beta1.StatefulSet) *appsv1beta1.StatefulSet validateUpdate func(set *appsv1beta1.StatefulSet, pods []*v1.Pod) error @@ -2333,28 +2532,28 @@ func TestStatefulSetControlRollback(t *testing.T) { set := test.initial() client := fake.NewSimpleClientset() kruiseClient := kruisefake.NewSimpleClientset(set) - spc, _, ssc, stop := setupController(client, kruiseClient) + om, _, ssc, stop := setupController(client, kruiseClient) defer close(stop) - if err := scaleUpStatefulSetControl(set, ssc, spc, test.invariants); err != nil { + if err := scaleUpStatefulSetControl(set, ssc, om, test.invariants); err != nil { t.Fatalf("%s: %s", test.name, err) } - set, err := spc.setsLister.StatefulSets(set.Namespace).Get(set.Name) + set, err := om.setsLister.StatefulSets(set.Namespace).Get(set.Name) if err != nil { t.Fatalf("%s: %s", test.name, err) } set = test.update(set) - if err := updateStatefulSetControl(set, ssc, spc, assertUpdateInvariants); err != nil { + if err := 
updateStatefulSetControl(set, ssc, om, assertUpdateInvariants); err != nil { t.Fatalf("%s: %s", test.name, err) } selector, err := metav1.LabelSelectorAsSelector(set.Spec.Selector) if err != nil { t.Fatalf("%s: %s", test.name, err) } - pods, err := spc.podsLister.Pods(set.Namespace).List(selector) + pods, err := om.podsLister.Pods(set.Namespace).List(selector) if err != nil { t.Fatalf("%s: %s", test.name, err) } - set, err = spc.setsLister.StatefulSets(set.Namespace).Get(set.Name) + set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name) if err != nil { t.Fatalf("%s: %s", test.name, err) } @@ -2370,17 +2569,17 @@ func TestStatefulSetControlRollback(t *testing.T) { if err != nil { t.Fatalf("%s: %s", test.name, err) } - if err := updateStatefulSetControl(set, ssc, spc, assertUpdateInvariants); err != nil { + if err := updateStatefulSetControl(set, ssc, om, assertUpdateInvariants); err != nil { t.Fatalf("%s: %s", test.name, err) } if err != nil { t.Fatalf("%s: %s", test.name, err) } - pods, err = spc.podsLister.Pods(set.Namespace).List(selector) + pods, err = om.podsLister.Pods(set.Namespace).List(selector) if err != nil { t.Fatalf("%s: %s", test.name, err) } - set, err = spc.setsLister.StatefulSets(set.Namespace).Get(set.Name) + set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name) if err != nil { t.Fatalf("%s: %s", test.name, err) } @@ -2593,55 +2792,114 @@ func (rt *requestTracker) reset() { rt.after = 0 } -type fakeStatefulPodControl struct { - podsLister corelisters.PodLister - claimsLister corelisters.PersistentVolumeClaimLister - setsLister kruiseappslisters.StatefulSetLister - podsIndexer cache.Indexer - claimsIndexer cache.Indexer - setsIndexer cache.Indexer - createPodTracker requestTracker - updatePodTracker requestTracker - inPlaceUpdatePodTracker requestTracker - deletePodTracker requestTracker +type fakeObjectManager struct { + podsLister corelisters.PodLister + claimsLister corelisters.PersistentVolumeClaimLister + setsLister kruiseappslisters.StatefulSetLister + podsIndexer cache.Indexer + claimsIndexer cache.Indexer + setsIndexer cache.Indexer + revisionsIndexer cache.Indexer + createPodTracker requestTracker + updatePodTracker requestTracker + deletePodTracker requestTracker } -func newFakeStatefulPodControl(podInformer coreinformers.PodInformer, setInformer kruiseappsinformers.StatefulSetInformer) *fakeStatefulPodControl { - claimsIndexer := cache.NewIndexer(controller.KeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) - return &fakeStatefulPodControl{ +func newFakeObjectManager(informerFactory informers.SharedInformerFactory, kruiseInformerFactory kruiseinformers.SharedInformerFactory) *fakeObjectManager { + podInformer := informerFactory.Core().V1().Pods() + claimInformer := informerFactory.Core().V1().PersistentVolumeClaims() + revisionInformer := informerFactory.Apps().V1().ControllerRevisions() + setInformer := kruiseInformerFactory.Apps().V1beta1().StatefulSets() + + return &fakeObjectManager{ podInformer.Lister(), - corelisters.NewPersistentVolumeClaimLister(claimsIndexer), + claimInformer.Lister(), setInformer.Lister(), podInformer.Informer().GetIndexer(), - claimsIndexer, + claimInformer.Informer().GetIndexer(), setInformer.Informer().GetIndexer(), - requestTracker{0, nil, 0}, + revisionInformer.Informer().GetIndexer(), requestTracker{0, nil, 0}, requestTracker{0, nil, 0}, requestTracker{0, nil, 0}} } -func (spc *fakeStatefulPodControl) SetCreateStatefulPodError(err error, after int) { - spc.createPodTracker.err = 
err - spc.createPodTracker.after = after +func (om *fakeObjectManager) CreatePod(pod *v1.Pod) error { + defer om.createPodTracker.inc() + if om.createPodTracker.errorReady() { + defer om.createPodTracker.reset() + return om.createPodTracker.err + } + pod.SetUID(types.UID(pod.Name + "-uid")) + return om.podsIndexer.Update(pod) } -func (spc *fakeStatefulPodControl) SetUpdateStatefulPodError(err error, after int) { - spc.updatePodTracker.err = err - spc.updatePodTracker.after = after +func (om *fakeObjectManager) GetPod(namespace, podName string) (*v1.Pod, error) { + return om.podsLister.Pods(namespace).Get(podName) } -func (spc *fakeStatefulPodControl) SetDeleteStatefulPodError(err error, after int) { - spc.deletePodTracker.err = err - spc.deletePodTracker.after = after +func (om *fakeObjectManager) UpdatePod(pod *v1.Pod) error { + return om.podsIndexer.Update(pod) } -func (spc *fakeStatefulPodControl) setPodPending(set *appsv1beta1.StatefulSet, ordinal int) ([]*v1.Pod, error) { +func (om *fakeObjectManager) DeletePod(pod *v1.Pod) error { + defer om.deletePodTracker.inc() + if om.deletePodTracker.errorReady() { + defer om.deletePodTracker.reset() + return om.deletePodTracker.err + } + if key, err := controller.KeyFunc(pod); err != nil { + return err + } else if obj, found, err := om.podsIndexer.GetByKey(key); err != nil { + return err + } else if found { + return om.podsIndexer.Delete(obj) + } + return nil // Not found, no error in deleting. +} + +func (om *fakeObjectManager) CreateClaim(claim *v1.PersistentVolumeClaim) error { + om.claimsIndexer.Update(claim) + return nil +} + +func (om *fakeObjectManager) GetClaim(namespace, claimName string) (*v1.PersistentVolumeClaim, error) { + return om.claimsLister.PersistentVolumeClaims(namespace).Get(claimName) +} + +func (om *fakeObjectManager) UpdateClaim(claim *v1.PersistentVolumeClaim) error { + // Validate ownerRefs. 
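+ // A real API server rejects ownerReferences that are missing apiVersion, kind or name, so the fake fails fast here to surface refs built from an empty TypeMeta.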
+ refs := claim.GetOwnerReferences() + for _, ref := range refs { + if ref.APIVersion == "" || ref.Kind == "" || ref.Name == "" { + return fmt.Errorf("invalid ownerRefs: %s %v", claim.Name, refs) + } + } + om.claimsIndexer.Update(claim) + return nil +} + +func (om *fakeObjectManager) SetCreateStatefulPodError(err error, after int) { + om.createPodTracker.err = err + om.createPodTracker.after = after +} + +func (om *fakeObjectManager) SetUpdateStatefulPodError(err error, after int) { + om.updatePodTracker.err = err + om.updatePodTracker.after = after +} + +func (om *fakeObjectManager) SetDeleteStatefulPodError(err error, after int) { + om.deletePodTracker.err = err + om.deletePodTracker.after = after +} + +func (om *fakeObjectManager) setPodPending(set *appsv1beta1.StatefulSet, ordinal int) ([]*v1.Pod, error) { selector, err := metav1.LabelSelectorAsSelector(set.Spec.Selector) if err != nil { return nil, err } - pods, err := spc.podsLister.Pods(set.Namespace).List(selector) + pods, err := om.podsLister.Pods(set.Namespace).List(selector) if err != nil { return nil, err } @@ -2652,16 +2910,16 @@ func (spc *fakeStatefulPodControl) setPodPending(set *appsv1beta1.StatefulSet, o pod := pods[ordinal].DeepCopy() pod.Status.Phase = v1.PodPending fakeResourceVersion(pod) - spc.podsIndexer.Update(pod) - return spc.podsLister.Pods(set.Namespace).List(selector) + om.podsIndexer.Update(pod) + return om.podsLister.Pods(set.Namespace).List(selector) } -func (spc *fakeStatefulPodControl) setPodRunning(set *appsv1beta1.StatefulSet, ordinal int) ([]*v1.Pod, error) { +func (om *fakeObjectManager) setPodRunning(set *appsv1beta1.StatefulSet, ordinal int) ([]*v1.Pod, error) { selector, err := metav1.LabelSelectorAsSelector(set.Spec.Selector) if err != nil { return nil, err } - pods, err := spc.podsLister.Pods(set.Namespace).List(selector) + pods, err := om.podsLister.Pods(set.Namespace).List(selector) if err != nil { return nil, err } @@ -2672,16 +2930,16 @@ func (spc *fakeStatefulPodControl) setPodRunning(set *appsv1beta1.StatefulSet, o pod := pods[ordinal].DeepCopy() pod.Status.Phase = v1.PodRunning fakeResourceVersion(pod) - spc.podsIndexer.Update(pod) - return spc.podsLister.Pods(set.Namespace).List(selector) + om.podsIndexer.Update(pod) + return om.podsLister.Pods(set.Namespace).List(selector) } -func (spc *fakeStatefulPodControl) setPodReady(set *appsv1beta1.StatefulSet, ordinal int) ([]*v1.Pod, error) { +func (om *fakeObjectManager) setPodReady(set *appsv1beta1.StatefulSet, ordinal int) ([]*v1.Pod, error) { selector, err := metav1.LabelSelectorAsSelector(set.Spec.Selector) if err != nil { return nil, err } - pods, err := spc.podsLister.Pods(set.Namespace).List(selector) + pods, err := om.podsLister.Pods(set.Namespace).List(selector) if err != nil { return nil, err } @@ -2693,90 +2951,74 @@ func (spc *fakeStatefulPodControl) setPodReady(set *appsv1beta1.StatefulSet, ord condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue} podutil.UpdatePodCondition(&pod.Status, &condition) fakeResourceVersion(pod) - spc.podsIndexer.Update(pod) - return spc.podsLister.Pods(set.Namespace).List(selector) + om.podsIndexer.Update(pod) + return om.podsLister.Pods(set.Namespace).List(selector) +} + +func (om *fakeObjectManager) setPodAvailable(set *appsv1beta1.StatefulSet, ordinal int, lastTransitionTime time.Time) ([]*v1.Pod, error) { + selector, err := metav1.LabelSelectorAsSelector(set.Spec.Selector) + if err != nil { + return nil, err + } + pods, err := om.podsLister.Pods(set.Namespace).List(selector) + if 
err != nil { + return nil, err + } + if 0 > ordinal || ordinal >= len(pods) { + return nil, fmt.Errorf("ordinal %d out of range [0,%d)", ordinal, len(pods)) + } + sort.Sort(ascendingOrdinal(pods)) + pod := pods[ordinal].DeepCopy() + condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue, LastTransitionTime: metav1.Time{Time: lastTransitionTime}} + _, existingCondition := podutil.GetPodCondition(&pod.Status, condition.Type) + if existingCondition != nil { + existingCondition.Status = v1.ConditionTrue + existingCondition.LastTransitionTime = metav1.Time{Time: lastTransitionTime} + } else { + existingCondition = &v1.PodCondition{ + Type: v1.PodReady, + Status: v1.ConditionTrue, + LastTransitionTime: metav1.Time{Time: lastTransitionTime}, + } + pod.Status.Conditions = append(pod.Status.Conditions, *existingCondition) + } + podutil.UpdatePodCondition(&pod.Status, &condition) + fakeResourceVersion(pod) + om.podsIndexer.Update(pod) + return om.podsLister.Pods(set.Namespace).List(selector) } -func (spc *fakeStatefulPodControl) addTerminatingPod(set *appsv1beta1.StatefulSet, ordinal int) ([]*v1.Pod, error) { +func (om *fakeObjectManager) addTerminatingPod(set *appsv1beta1.StatefulSet, ordinal int) ([]*v1.Pod, error) { pod := newStatefulSetPod(set, ordinal) + pod.SetUID(types.UID(pod.Name + "-uid")) // To match fakeObjectManager.CreatePod pod.Status.Phase = v1.PodRunning deleted := metav1.NewTime(time.Now()) pod.DeletionTimestamp = &deleted condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue} fakeResourceVersion(pod) podutil.UpdatePodCondition(&pod.Status, &condition) - spc.podsIndexer.Update(pod) + om.podsIndexer.Update(pod) selector, err := metav1.LabelSelectorAsSelector(set.Spec.Selector) if err != nil { return nil, err } - return spc.podsLister.Pods(set.Namespace).List(selector) + return om.podsLister.Pods(set.Namespace).List(selector) } -func (spc *fakeStatefulPodControl) setPodTerminated(set *appsv1beta1.StatefulSet, ordinal int) ([]*v1.Pod, error) { +func (om *fakeObjectManager) setPodTerminated(set *appsv1beta1.StatefulSet, ordinal int) ([]*v1.Pod, error) { pod := newStatefulSetPod(set, ordinal) deleted := metav1.NewTime(time.Now()) pod.DeletionTimestamp = &deleted fakeResourceVersion(pod) - spc.podsIndexer.Update(pod) + om.podsIndexer.Update(pod) selector, err := metav1.LabelSelectorAsSelector(set.Spec.Selector) if err != nil { return nil, err } - return spc.podsLister.Pods(set.Namespace).List(selector) + return om.podsLister.Pods(set.Namespace).List(selector) } -func (spc *fakeStatefulPodControl) CreateStatefulPod(set *appsv1beta1.StatefulSet, pod *v1.Pod) error { - defer spc.createPodTracker.inc() - if spc.createPodTracker.errorReady() { - defer spc.createPodTracker.reset() - return spc.createPodTracker.err - } - - for _, claim := range getPersistentVolumeClaims(set, pod) { - spc.claimsIndexer.Update(&claim) - } - spc.podsIndexer.Update(pod) - return nil -} - -func (spc *fakeStatefulPodControl) UpdateStatefulPod(set *appsv1beta1.StatefulSet, pod *v1.Pod) error { - defer spc.updatePodTracker.inc() - if spc.updatePodTracker.errorReady() { - defer spc.updatePodTracker.reset() - return spc.updatePodTracker.err - } - if !identityMatches(set, pod) { - updateIdentity(set, pod) - } - if !storageMatches(set, pod) { - updateStorage(set, pod) - for _, claim := range getPersistentVolumeClaims(set, pod) { - spc.claimsIndexer.Update(&claim) - } - } - spc.podsIndexer.Update(pod) - return nil -} - -func (spc *fakeStatefulPodControl) DeleteStatefulPod(set 
*appsv1beta1.StatefulSet, pod *v1.Pod) error { - defer spc.deletePodTracker.inc() - if spc.deletePodTracker.errorReady() { - defer spc.deletePodTracker.reset() - return spc.deletePodTracker.err - } - if key, err := controller.KeyFunc(pod); err != nil { - return err - } else if obj, found, err := spc.podsIndexer.GetByKey(key); err != nil { - return err - } else if found { - spc.podsIndexer.Delete(obj) - } - - return nil -} - -var _ StatefulPodControlInterface = &fakeStatefulPodControl{} +var _ StatefulPodControlObjectManager = &fakeObjectManager{} type fakeStatefulSetStatusUpdater struct { setsLister kruiseappslisters.StatefulSetLister @@ -2810,12 +3052,12 @@ func (ssu *fakeStatefulSetStatusUpdater) SetUpdateStatefulSetStatusError(err err var _ StatusUpdaterInterface = &fakeStatefulSetStatusUpdater{} -func assertMonotonicInvariants(set *appsv1beta1.StatefulSet, spc *fakeStatefulPodControl) error { +func assertMonotonicInvariants(set *appsv1beta1.StatefulSet, om *fakeObjectManager) error { selector, err := metav1.LabelSelectorAsSelector(set.Spec.Selector) if err != nil { return err } - pods, err := spc.podsLister.Pods(set.Namespace).List(selector) + pods, err := om.podsLister.Pods(set.Namespace).List(selector) if err != nil { return err } @@ -2834,13 +3076,10 @@ func assertMonotonicInvariants(set *appsv1beta1.StatefulSet, spc *fakeStatefulPo } for _, claim := range getPersistentVolumeClaims(set, pods[ord]) { - claim, err := spc.claimsLister.PersistentVolumeClaims(set.Namespace).Get(claim.Name) - if err != nil { + claim, _ := om.claimsLister.PersistentVolumeClaims(set.Namespace).Get(claim.Name) + if err := checkClaimInvarients(set, pods[ord], claim, ord); err != nil { return err } - if claim == nil { - return fmt.Errorf("claim %s for Pod %s was not created", claim.Name, pods[ord].Name) - } } if !identityMatches(set, pods[ord]) { @@ -2850,12 +3089,12 @@ func assertMonotonicInvariants(set *appsv1beta1.StatefulSet, spc *fakeStatefulPo return nil } -func assertBurstInvariants(set *appsv1beta1.StatefulSet, spc *fakeStatefulPodControl) error { +func assertBurstInvariants(set *appsv1beta1.StatefulSet, om *fakeObjectManager) error { selector, err := metav1.LabelSelectorAsSelector(set.Spec.Selector) if err != nil { return err } - pods, err := spc.podsLister.Pods(set.Namespace).List(selector) + pods, err := om.podsLister.Pods(set.Namespace).List(selector) if err != nil { return err } @@ -2866,12 +3105,12 @@ func assertBurstInvariants(set *appsv1beta1.StatefulSet, spc *fakeStatefulPodCon } for _, claim := range getPersistentVolumeClaims(set, pods[ord]) { - claim, err := spc.claimsLister.PersistentVolumeClaims(set.Namespace).Get(claim.Name) + claim, err := om.claimsLister.PersistentVolumeClaims(set.Namespace).Get(claim.Name) if err != nil { return err } - if claim == nil { - return fmt.Errorf("claim %s for Pod %s was not created", claim.Name, pods[ord].Name) + if err := checkClaimInvarients(set, pods[ord], claim, ord); err != nil { + return err } } @@ -2884,12 +3123,12 @@ func assertBurstInvariants(set *appsv1beta1.StatefulSet, spc *fakeStatefulPodCon return nil } -func assertUpdateInvariants(set *appsv1beta1.StatefulSet, spc *fakeStatefulPodControl) error { +func assertUpdateInvariants(set *appsv1beta1.StatefulSet, om *fakeObjectManager) error { selector, err := metav1.LabelSelectorAsSelector(set.Spec.Selector) if err != nil { return err } - pods, err := spc.podsLister.Pods(set.Namespace).List(selector) + pods, err := om.podsLister.Pods(set.Namespace).List(selector) if err != nil { return err } @@ 
-2901,12 +3140,12 @@ func assertUpdateInvariants(set *appsv1beta1.StatefulSet, spc *fakeStatefulPodCo } for _, claim := range getPersistentVolumeClaims(set, pods[ord]) { - claim, err := spc.claimsLister.PersistentVolumeClaims(set.Namespace).Get(claim.Name) + claim, err := om.claimsLister.PersistentVolumeClaims(set.Namespace).Get(claim.Name) if err != nil { return err } - if claim == nil { - return fmt.Errorf("claim %s for Pod %s was not created", claim.Name, pods[ord].Name) + if err := checkClaimInvarients(set, pods[ord], claim, ord); err != nil { + return err } } @@ -2932,6 +3171,68 @@ func assertUpdateInvariants(set *appsv1beta1.StatefulSet, spc *fakeStatefulPodCo return nil } +func checkClaimInvarients(set *appsv1beta1.StatefulSet, pod *v1.Pod, claim *v1.PersistentVolumeClaim, ordinal int) error { + policy := appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{ + WhenScaled: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType, + WhenDeleted: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType, + } + if set.Spec.PersistentVolumeClaimRetentionPolicy != nil && utilfeature.DefaultFeatureGate.Enabled(features.StatefulSetAutoDeletePVC) { + policy = *set.Spec.PersistentVolumeClaimRetentionPolicy + } + claimShouldBeRetained := policy.WhenScaled == appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType + if claim == nil { + if claimShouldBeRetained { + return fmt.Errorf("claim for Pod %s was not created", pod.Name) + } + return nil // A non-retained claim has no invariants to satisfy. + } + + if pod.Status.Phase != v1.PodRunning || !podutil.IsPodReady(pod) { + // The pod has not spun up yet, so we do not expect the owner refs on the claim to have been set. + return nil + } + + const retain = appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType + const delete = appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType + switch { + case policy.WhenScaled == retain && policy.WhenDeleted == retain: + if hasOwnerRef(claim, set) { + return fmt.Errorf("claim %s has unexpected owner ref on %s for StatefulSet retain", claim.Name, set.Name) + } + if hasOwnerRef(claim, pod) { + return fmt.Errorf("claim %s has unexpected owner ref on pod %s for StatefulSet retain", claim.Name, pod.Name) + } + case policy.WhenScaled == retain && policy.WhenDeleted == delete: + if !hasOwnerRef(claim, set) { + return fmt.Errorf("claim %s does not have owner ref on %s for StatefulSet deletion", claim.Name, set.Name) + } + if hasOwnerRef(claim, pod) { + return fmt.Errorf("claim %s has unexpected owner ref on pod %s for StatefulSet deletion", claim.Name, pod.Name) + } + case policy.WhenScaled == delete && policy.WhenDeleted == retain: + if hasOwnerRef(claim, set) { + return fmt.Errorf("claim %s has unexpected owner ref on %s for scaledown only", claim.Name, set.Name) + } + if ordinal >= int(*set.Spec.Replicas) && !hasOwnerRef(claim, pod) { + return fmt.Errorf("claim %s does not have owner ref on condemned pod %s for scaledown delete", claim.Name, pod.Name) + } + if ordinal < int(*set.Spec.Replicas) && hasOwnerRef(claim, pod) { + return fmt.Errorf("claim %s has unexpected owner ref on retained pod %s for scaledown delete", claim.Name, pod.Name) + } + case policy.WhenScaled == delete && policy.WhenDeleted == delete: + if ordinal >= int(*set.Spec.Replicas) { + if !hasOwnerRef(claim, pod) || hasOwnerRef(claim, set) { + return fmt.Errorf("condemned claim %s has bad owner refs: %v", claim.Name, claim.GetOwnerReferences()) + } + } else { + if hasOwnerRef(claim, pod) || !hasOwnerRef(claim, set) { + return 
fmt.Errorf("live claim %s has bad owner refs: %v", claim.Name, claim.GetOwnerReferences()) + } + } + } + return nil +} + func fakeResourceVersion(object interface{}) { obj, isObj := object.(metav1.Object) if !isObj { @@ -2945,33 +3246,29 @@ func fakeResourceVersion(object interface{}) { } func scaleUpStatefulSetControl(set *appsv1beta1.StatefulSet, - ssc ControlInterface, - spc *fakeStatefulPodControl, + ssc StatefulSetControlInterface, + om *fakeObjectManager, invariants invariantFunc) error { selector, err := metav1.LabelSelectorAsSelector(set.Spec.Selector) if err != nil { return err } for set.Status.ReadyReplicas < *set.Spec.Replicas { - pods, err := spc.podsLister.Pods(set.Namespace).List(selector) + pods, err := om.podsLister.Pods(set.Namespace).List(selector) if err != nil { return err } sort.Sort(ascendingOrdinal(pods)) // ensure all pods are valid (have a phase) - initialized := false for ord, pod := range pods { if pod.Status.Phase == "" { - if pods, err = spc.setPodPending(set, ord); err != nil { + if pods, err = om.setPodPending(set, ord); err != nil { return err } break } } - if initialized { - continue - } // select one of the pods and move it forward in status if len(pods) > 0 { @@ -2979,11 +3276,11 @@ func scaleUpStatefulSetControl(set *appsv1beta1.StatefulSet, pod := pods[ord] switch pod.Status.Phase { case v1.PodPending: - if pods, err = spc.setPodRunning(set, ord); err != nil { + if pods, err = om.setPodRunning(set, ord); err != nil { return err } case v1.PodRunning: - if pods, err = spc.setPodReady(set, ord); err != nil { + if pods, err = om.setPodReady(set, ord); err != nil { return err } default: @@ -2995,24 +3292,25 @@ func scaleUpStatefulSetControl(set *appsv1beta1.StatefulSet, if err = ssc.UpdateStatefulSet(set, pods); err != nil { return err } - set, err = spc.setsLister.StatefulSets(set.Namespace).Get(set.Name) + set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name) if err != nil { return err } - if err := invariants(set, spc); err != nil { + if err := invariants(set, om); err != nil { return err } } - return invariants(set, spc) + return invariants(set, om) } -func scaleDownStatefulSetControl(set *appsv1beta1.StatefulSet, ssc ControlInterface, spc *fakeStatefulPodControl, invariants invariantFunc) error { +func scaleDownStatefulSetControl(set *appsv1beta1.StatefulSet, ssc StatefulSetControlInterface, om *fakeObjectManager, invariants invariantFunc) error { selector, err := metav1.LabelSelectorAsSelector(set.Spec.Selector) if err != nil { return err } + for set.Status.Replicas > *set.Spec.Replicas { - pods, err := spc.podsLister.Pods(set.Namespace).List(selector) + pods, err := om.podsLister.Pods(set.Namespace).List(selector) if err != nil { return err } @@ -3021,42 +3319,72 @@ func scaleDownStatefulSetControl(set *appsv1beta1.StatefulSet, ssc ControlInterf if err := ssc.UpdateStatefulSet(set, pods); err != nil { return err } - set, err = spc.setsLister.StatefulSets(set.Namespace).Get(set.Name) + set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name) if err != nil { return err } - if pods, err = spc.addTerminatingPod(set, ordinal); err != nil { + if pods, err = om.addTerminatingPod(set, ordinal); err != nil { return err } if err = ssc.UpdateStatefulSet(set, pods); err != nil { return err } - set, err = spc.setsLister.StatefulSets(set.Namespace).Get(set.Name) + set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name) if err != nil { return err } - pods, err = spc.podsLister.Pods(set.Namespace).List(selector) + pods, err = 
om.podsLister.Pods(set.Namespace).List(selector) if err != nil { return err } sort.Sort(ascendingOrdinal(pods)) if len(pods) > 0 { - spc.podsIndexer.Delete(pods[len(pods)-1]) + om.podsIndexer.Delete(pods[len(pods)-1]) } } if err := ssc.UpdateStatefulSet(set, pods); err != nil { return err } - set, err = spc.setsLister.StatefulSets(set.Namespace).Get(set.Name) + set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name) if err != nil { return err } - if err := invariants(set, spc); err != nil { + + if err := invariants(set, om); err != nil { return err } } - return invariants(set, spc) + // If there are claims with ownerRefs on pods that have been deleted, delete them. + pods, err := om.podsLister.Pods(set.Namespace).List(selector) + if err != nil { + return err + } + currentPods := map[string]bool{} + for _, pod := range pods { + currentPods[pod.Name] = true + } + claims, err := om.claimsLister.PersistentVolumeClaims(set.Namespace).List(selector) + if err != nil { + return err + } + for _, claim := range claims { + claimPodName := getClaimPodName(set, claim) + if claimPodName == "" { + continue // Skip claims not related to a stateful set pod. + } + if _, found := currentPods[claimPodName]; found { + continue // Skip claims which still have a current pod. + } + for _, refs := range claim.GetOwnerReferences() { + if refs.Name == claimPodName { + om.claimsIndexer.Delete(claim) + break + } + } + } + + return invariants(set, om) } func updateComplete(set *appsv1beta1.StatefulSet, pods []*v1.Pod) bool { @@ -3100,14 +3428,14 @@ func updateComplete(set *appsv1beta1.StatefulSet, pods []*v1.Pod) bool { } func updateStatefulSetControl(set *appsv1beta1.StatefulSet, - ssc ControlInterface, - spc *fakeStatefulPodControl, + ssc StatefulSetControlInterface, + om *fakeObjectManager, invariants invariantFunc) error { selector, err := metav1.LabelSelectorAsSelector(set.Spec.Selector) if err != nil { return err } - pods, err := spc.podsLister.Pods(set.Namespace).List(selector) + pods, err := om.podsLister.Pods(set.Namespace).List(selector) if err != nil { return err } @@ -3115,16 +3443,16 @@ func updateStatefulSetControl(set *appsv1beta1.StatefulSet, return err } - set, err = spc.setsLister.StatefulSets(set.Namespace).Get(set.Name) + set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name) if err != nil { return err } - pods, err = spc.podsLister.Pods(set.Namespace).List(selector) + pods, err = om.podsLister.Pods(set.Namespace).List(selector) if err != nil { return err } for !updateComplete(set, pods) { - pods, err = spc.podsLister.Pods(set.Namespace).List(selector) + pods, err = om.podsLister.Pods(set.Namespace).List(selector) if err != nil { return err } @@ -3132,7 +3460,7 @@ func updateStatefulSetControl(set *appsv1beta1.StatefulSet, initialized := false for ord, pod := range pods { if pod.Status.Phase == "" { - if pods, err = spc.setPodPending(set, ord); err != nil { + if pods, err = om.setPodPending(set, ord); err != nil { return err } break @@ -3147,11 +3475,11 @@ func updateStatefulSetControl(set *appsv1beta1.StatefulSet, pod := pods[ord] switch pod.Status.Phase { case v1.PodPending: - if pods, err = spc.setPodRunning(set, ord); err != nil { + if pods, err = om.setPodRunning(set, ord); err != nil { return err } case v1.PodRunning: - if pods, err = spc.setPodReady(set, ord); err != nil { + if pods, err = om.setPodReady(set, ord); err != nil { return err } default: @@ -3162,20 +3490,20 @@ func updateStatefulSetControl(set *appsv1beta1.StatefulSet, if err = ssc.UpdateStatefulSet(set, 
pods); err != nil { return err } - set, err = spc.setsLister.StatefulSets(set.Namespace).Get(set.Name) + set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name) if err != nil { return err } - if err := invariants(set, spc); err != nil { + if err := invariants(set, om); err != nil { return err } - pods, err = spc.podsLister.Pods(set.Namespace).List(selector) + pods, err = om.podsLister.Pods(set.Namespace).List(selector) if err != nil { return err } } - return invariants(set, spc) + return invariants(set, om) } func newRevisionOrDie(set *appsv1beta1.StatefulSet, revision int64) *apps.ControllerRevision { @@ -3249,3 +3577,8 @@ func TestScaleUpWithMaxUnavailable(t *testing.T) { t.Fatalf("Expect status replicas=3, got %v", set.Status.Replicas) } } + +func isOrHasInternalError(err error) bool { + agg, ok := err.(utilerrors.Aggregate) + return !ok && !apierrors.IsInternalError(err) || ok && len(agg.Errors()) > 0 && !apierrors.IsInternalError(agg.Errors()[0]) +} diff --git a/pkg/controller/statefulset/stateful_set_status_updater.go b/pkg/controller/statefulset/stateful_set_status_updater.go index 89d7741087..e2b86a25ff 100644 --- a/pkg/controller/statefulset/stateful_set_status_updater.go +++ b/pkg/controller/statefulset/stateful_set_status_updater.go @@ -21,12 +21,13 @@ import ( "context" "fmt" - appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1" - clientset "github.com/openkruise/kruise/pkg/client/clientset/versioned" - appslisters "github.com/openkruise/kruise/pkg/client/listers/apps/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/util/retry" + + appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1" + clientset "github.com/openkruise/kruise/pkg/client/clientset/versioned" + appslisters "github.com/openkruise/kruise/pkg/client/listers/apps/v1beta1" ) // StatusUpdaterInterface is an interface used to update the StatefulSetStatus associated with a StatefulSet. 
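Note on the isOrHasInternalError helper added above: despite its name, it returns true exactly when the error (or the first error in an aggregate) is not an apierrors.InternalError, so call sites that inject an internal error can assert the injected error actually surfaced. A minimal hypothetical sketch of such a call site, assuming the fakes shown earlier in this file (the real call sites are not part of this hunk):

    // Inject an internal error into the status updater, drive one reconcile,
    // and fail if the surfaced error is anything other than the injected one.
    ssu.SetUpdateStatefulSetStatusError(apierrors.NewInternalError(errors.New("API server failed")), 0)
    if err := ssc.UpdateStatefulSet(set, pods); err != nil && isOrHasInternalError(err) {
        t.Errorf("expected InternalError, got %v", err)
    }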
diff --git a/pkg/controller/statefulset/stateful_set_status_updater_test.go b/pkg/controller/statefulset/stateful_set_status_updater_test.go index a0fbe28fd2..ae5b445e0e 100644 --- a/pkg/controller/statefulset/stateful_set_status_updater_test.go +++ b/pkg/controller/statefulset/stateful_set_status_updater_test.go @@ -21,13 +21,14 @@ import ( "errors" "testing" - appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1" - "github.com/openkruise/kruise/pkg/client/clientset/versioned/fake" - kruiseappslisters "github.com/openkruise/kruise/pkg/client/listers/apps/v1beta1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" core "k8s.io/client-go/testing" "k8s.io/client-go/tools/cache" + + appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1" + "github.com/openkruise/kruise/pkg/client/clientset/versioned/fake" + kruiseappslisters "github.com/openkruise/kruise/pkg/client/listers/apps/v1beta1" ) func TestStatefulSetUpdaterUpdatesSetStatus(t *testing.T) { diff --git a/pkg/controller/statefulset/stateful_set_utils.go b/pkg/controller/statefulset/stateful_set_utils.go index 6d80217737..9b7c5ec0e5 100644 --- a/pkg/controller/statefulset/stateful_set_utils.go +++ b/pkg/controller/statefulset/stateful_set_utils.go @@ -189,7 +189,7 @@ func updateClaimOwnerRefForSetAndPod(claim *v1.PersistentVolumeClaim, set *appsv updateMeta := func(tm *metav1.TypeMeta, kind string) { if tm.APIVersion == "" { if kind == "StatefulSet" { - tm.APIVersion = "apps/v1" + tm.APIVersion = "apps.kruise.io/v1beta1" } else { tm.APIVersion = "v1" } diff --git a/pkg/controller/statefulset/stateful_set_utils_test.go b/pkg/controller/statefulset/stateful_set_utils_test.go index 75aac76648..8f9725ddfb 100644 --- a/pkg/controller/statefulset/stateful_set_utils_test.go +++ b/pkg/controller/statefulset/stateful_set_utils_test.go @@ -21,6 +21,7 @@ import ( "fmt" "math/rand" "reflect" + "regexp" "sort" "strconv" "testing" @@ -31,6 +32,7 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/controller/history" @@ -39,6 +41,28 @@ import ( appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1" ) +// noopRecorder is an EventRecorder that does nothing. record.FakeRecorder has a fixed +// buffer size, which causes tests to hang if that buffer is exceeded. +type noopRecorder struct{} + +func (r *noopRecorder) Event(object runtime.Object, eventtype, reason, message string) {} +func (r *noopRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { +} +func (r *noopRecorder) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) { +} + +// getClaimPodName gets the name of the Pod associated with the Claim, or an empty string if the claim name does not match the set's claim-naming pattern. +func getClaimPodName(set *appsv1beta1.StatefulSet, claim *v1.PersistentVolumeClaim) string { + podName := "" + + statefulClaimRegex := regexp.MustCompile(fmt.Sprintf(".*-(%s-[0-9]+)$", set.Name)) + matches := statefulClaimRegex.FindStringSubmatch(claim.Name) + if len(matches) != 2 { + return podName + } + return matches[1] +} + // overlappingStatefulSets sorts a list of StatefulSets by creation timestamp, using their names as a tie breaker. // Generally used to tie break between StatefulSets that have overlapping selectors. 
type overlappingStatefulSets []*appsv1beta1.StatefulSet @@ -70,6 +94,28 @@ func TestGetParentNameAndOrdinal(t *testing.T) { } } +func TestGetClaimPodName(t *testing.T) { + set := appsv1beta1.StatefulSet{} + set.Name = "my-set" + claim := v1.PersistentVolumeClaim{} + claim.Name = "volume-my-set-2" + if pod := getClaimPodName(&set, &claim); pod != "my-set-2" { + t.Errorf("Expected my-set-2 found %s", pod) + } + claim.Name = "long-volume-my-set-20" + if pod := getClaimPodName(&set, &claim); pod != "my-set-20" { + t.Errorf("Expected my-set-20 found %s", pod) + } + claim.Name = "volume-2-my-set" + if pod := getClaimPodName(&set, &claim); pod != "" { + t.Errorf("Expected empty string found %s", pod) + } + claim.Name = "volume-pod-2" + if pod := getClaimPodName(&set, &claim); pod != "" { + t.Errorf("Expected empty string found %s", pod) + } +} + func TestIsMemberOf(t *testing.T) { set := newStatefulSet(3) set2 := newStatefulSet(3) @@ -199,6 +245,350 @@ func TestUpdateStorage(t *testing.T) { } } +func TestGetPersistentVolumeClaimRetentionPolicy(t *testing.T) { + retainPolicy := appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{ + WhenScaled: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType, + WhenDeleted: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType, + } + scaledownPolicy := appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{ + WhenScaled: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType, + WhenDeleted: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType, + } + + set := appsv1beta1.StatefulSet{} + set.Spec.PersistentVolumeClaimRetentionPolicy = &retainPolicy + got := getPersistentVolumeClaimRetentionPolicy(&set) + if got.WhenScaled != appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType || got.WhenDeleted != appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType { + t.Errorf("Expected retain policy") + } + set.Spec.PersistentVolumeClaimRetentionPolicy = &scaledownPolicy + got = getPersistentVolumeClaimRetentionPolicy(&set) + if got.WhenScaled != appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType || got.WhenDeleted != appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType { + t.Errorf("Expected scaledown policy") + } +} + +func TestClaimOwnerMatchesSetAndPod(t *testing.T) { + testCases := []struct { + name string + scaleDownPolicy appsv1beta1.PersistentVolumeClaimRetentionPolicyType + setDeletePolicy appsv1beta1.PersistentVolumeClaimRetentionPolicyType + needsPodRef bool + needsSetRef bool + replicas int32 + ordinal int + }{ + { + name: "retain", + scaleDownPolicy: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType, + setDeletePolicy: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType, + needsPodRef: false, + needsSetRef: false, + }, + { + name: "on SS delete", + scaleDownPolicy: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType, + setDeletePolicy: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType, + needsPodRef: false, + needsSetRef: true, + }, + { + name: "on scaledown only, condemned", + scaleDownPolicy: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType, + setDeletePolicy: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType, + needsPodRef: true, + needsSetRef: false, + replicas: 2, + ordinal: 2, + }, + { + name: "on scaledown only, remains", + scaleDownPolicy: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType, + setDeletePolicy: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType, + needsPodRef: false, + needsSetRef: false, + replicas: 2, + ordinal: 
1, + }, + { + name: "on both, condemned", + scaleDownPolicy: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType, + setDeletePolicy: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType, + needsPodRef: true, + needsSetRef: false, + replicas: 2, + ordinal: 2, + }, + { + name: "on both, remains", + scaleDownPolicy: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType, + setDeletePolicy: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType, + needsPodRef: false, + needsSetRef: true, + replicas: 2, + ordinal: 1, + }, + } + + for _, tc := range testCases { + for _, useOtherRefs := range []bool{false, true} { + for _, setPodRef := range []bool{false, true} { + for _, setSetRef := range []bool{false, true} { + claim := v1.PersistentVolumeClaim{} + claim.Name = "target-claim" + pod := v1.Pod{} + pod.Name = fmt.Sprintf("pod-%d", tc.ordinal) + pod.GetObjectMeta().SetUID("pod-123") + set := appsv1beta1.StatefulSet{} + set.Name = "stateful-set" + set.GetObjectMeta().SetUID("ss-456") + set.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{ + WhenScaled: tc.scaleDownPolicy, + WhenDeleted: tc.setDeletePolicy, + } + set.Spec.Replicas = &tc.replicas + if setPodRef { + setOwnerRef(&claim, &pod, &pod.TypeMeta) + } + if setSetRef { + setOwnerRef(&claim, &set, &set.TypeMeta) + } + if useOtherRefs { + randomObject1 := v1.Pod{} + randomObject1.Name = "rand1" + randomObject1.GetObjectMeta().SetUID("rand1-abc") + randomObject2 := v1.Pod{} + randomObject2.Name = "rand2" + randomObject2.GetObjectMeta().SetUID("rand2-def") + setOwnerRef(&claim, &randomObject1, &randomObject1.TypeMeta) + setOwnerRef(&claim, &randomObject2, &randomObject2.TypeMeta) + } + shouldMatch := setPodRef == tc.needsPodRef && setSetRef == tc.needsSetRef + if claimOwnerMatchesSetAndPod(&claim, &set, &pod) != shouldMatch { + t.Errorf("Bad match for %s with pod=%v,set=%v,others=%v", tc.name, setPodRef, setSetRef, useOtherRefs) + } + } + } + } + } +} + +func TestUpdateClaimOwnerRefForSetAndPod(t *testing.T) { + testCases := []struct { + name string + scaleDownPolicy appsv1beta1.PersistentVolumeClaimRetentionPolicyType + setDeletePolicy appsv1beta1.PersistentVolumeClaimRetentionPolicyType + condemned bool + needsPodRef bool + needsSetRef bool + }{ + { + name: "retain", + scaleDownPolicy: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType, + setDeletePolicy: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType, + condemned: false, + needsPodRef: false, + needsSetRef: false, + }, + { + name: "delete with set", + scaleDownPolicy: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType, + setDeletePolicy: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType, + condemned: false, + needsPodRef: false, + needsSetRef: true, + }, + { + name: "delete with scaledown, not condemned", + scaleDownPolicy: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType, + setDeletePolicy: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType, + condemned: false, + needsPodRef: false, + needsSetRef: false, + }, + { + name: "delete on scaledown, condemned", + scaleDownPolicy: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType, + setDeletePolicy: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType, + condemned: true, + needsPodRef: true, + needsSetRef: false, + }, + { + name: "delete on both, not condemned", + scaleDownPolicy: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType, + setDeletePolicy: 
appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType, + condemned: false, + needsPodRef: false, + needsSetRef: true, + }, + { + name: "delete on both, condemned", + scaleDownPolicy: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType, + setDeletePolicy: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType, + condemned: true, + needsPodRef: true, + needsSetRef: false, + }, + } + for _, tc := range testCases { + for _, hasPodRef := range []bool{true, false} { + for _, hasSetRef := range []bool{true, false} { + set := appsv1beta1.StatefulSet{} + set.Name = "ss" + numReplicas := int32(5) + set.Spec.Replicas = &numReplicas + set.SetUID("ss-123") + set.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{ + WhenScaled: tc.scaleDownPolicy, + WhenDeleted: tc.setDeletePolicy, + } + pod := v1.Pod{} + if tc.condemned { + pod.Name = "pod-8" + } else { + pod.Name = "pod-1" + } + pod.SetUID("pod-456") + claim := v1.PersistentVolumeClaim{} + if hasPodRef { + setOwnerRef(&claim, &pod, &pod.TypeMeta) + } + if hasSetRef { + setOwnerRef(&claim, &set, &set.TypeMeta) + } + needsUpdate := hasPodRef != tc.needsPodRef || hasSetRef != tc.needsSetRef + shouldUpdate := updateClaimOwnerRefForSetAndPod(&claim, &set, &pod) + if shouldUpdate != needsUpdate { + t.Errorf("Bad update for %s hasPodRef=%v hasSetRef=%v", tc.name, hasPodRef, hasSetRef) + } + if hasOwnerRef(&claim, &pod) != tc.needsPodRef { + t.Errorf("Bad pod ref for %s hasPodRef=%v hasSetRef=%v", tc.name, hasPodRef, hasSetRef) + } + if hasOwnerRef(&claim, &set) != tc.needsSetRef { + t.Errorf("Bad set ref for %s hasPodRef=%v hasSetRef=%v", tc.name, hasPodRef, hasSetRef) + } + } + } + } +} + +func TestHasOwnerRef(t *testing.T) { + target := v1.Pod{} + target.SetOwnerReferences([]metav1.OwnerReference{ + {UID: "123"}, {UID: "456"}}) + ownerA := v1.Pod{} + ownerA.GetObjectMeta().SetUID("123") + ownerB := v1.Pod{} + ownerB.GetObjectMeta().SetUID("789") + if !hasOwnerRef(&target, &ownerA) { + t.Error("Missing owner") + } + if hasOwnerRef(&target, &ownerB) { + t.Error("Unexpected owner") + } +} + +func TestHasStaleOwnerRef(t *testing.T) { + target := v1.Pod{} + target.SetOwnerReferences([]metav1.OwnerReference{ + {Name: "bob", UID: "123"}, {Name: "shirley", UID: "456"}}) + ownerA := v1.Pod{} + ownerA.SetUID("123") + ownerA.Name = "bob" + ownerB := v1.Pod{} + ownerB.Name = "shirley" + ownerB.SetUID("789") + ownerC := v1.Pod{} + ownerC.Name = "yvonne" + ownerC.SetUID("345") + if hasStaleOwnerRef(&target, &ownerA) { + t.Error("ownerA should not be stale") + } + if !hasStaleOwnerRef(&target, &ownerB) { + t.Error("ownerB should be stale") + } + if hasStaleOwnerRef(&target, &ownerC) { + t.Error("ownerC should not be stale") + } +} + +func TestSetOwnerRef(t *testing.T) { + target := v1.Pod{} + ownerA := v1.Pod{} + ownerA.Name = "A" + ownerA.GetObjectMeta().SetUID("ABC") + if setOwnerRef(&target, &ownerA, &ownerA.TypeMeta) != true { + t.Errorf("Unexpected lack of update") + } + ownerRefs := target.GetObjectMeta().GetOwnerReferences() + if len(ownerRefs) != 1 { + t.Errorf("Unexpected owner ref count: %d", len(ownerRefs)) + } + if ownerRefs[0].UID != "ABC" { + t.Errorf("Unexpected owner UID %v", ownerRefs[0].UID) + } + if setOwnerRef(&target, &ownerA, &ownerA.TypeMeta) != false { + t.Errorf("Unexpected update") + } + if len(target.GetObjectMeta().GetOwnerReferences()) != 1 { + t.Error("Unexpected duplicate reference") + } + ownerB := v1.Pod{} + ownerB.Name = "B" + ownerB.GetObjectMeta().SetUID("BCD") + if 
setOwnerRef(&target, &ownerB, &ownerB.TypeMeta) != true { + t.Error("Unexpected lack of second update") + } + ownerRefs = target.GetObjectMeta().GetOwnerReferences() + if len(ownerRefs) != 2 { + t.Errorf("Unexpected owner ref count: %d", len(ownerRefs)) + } + if ownerRefs[0].UID != "ABC" || ownerRefs[1].UID != "BCD" { + t.Errorf("Bad second ownerRefs: %v", ownerRefs) + } +} + +func TestRemoveOwnerRef(t *testing.T) { + target := v1.Pod{} + ownerA := v1.Pod{} + ownerA.Name = "A" + ownerA.GetObjectMeta().SetUID("ABC") + if removeOwnerRef(&target, &ownerA) != false { + t.Error("Unexpected update on empty remove") + } + setOwnerRef(&target, &ownerA, &ownerA.TypeMeta) + if removeOwnerRef(&target, &ownerA) != true { + t.Error("Unexpected lack of update") + } + if len(target.GetObjectMeta().GetOwnerReferences()) != 0 { + t.Error("Unexpected owner reference remains") + } + + ownerB := v1.Pod{} + ownerB.Name = "B" + ownerB.GetObjectMeta().SetUID("BCD") + + setOwnerRef(&target, &ownerA, &ownerA.TypeMeta) + if removeOwnerRef(&target, &ownerB) != false { + t.Error("Unexpected update for mismatched owner") + } + if len(target.GetObjectMeta().GetOwnerReferences()) != 1 { + t.Error("Missing ref after no-op remove") + } + setOwnerRef(&target, &ownerB, &ownerB.TypeMeta) + if removeOwnerRef(&target, &ownerA) != true { + t.Error("Missing update for second remove") + } + ownerRefs := target.GetObjectMeta().GetOwnerReferences() + if len(ownerRefs) != 1 { + t.Error("Extra ref after second remove") + } + if ownerRefs[0].UID != "BCD" { + t.Error("Bad UID after second remove") + } +} + func TestIsRunningAndReady(t *testing.T) { set := newStatefulSet(3) pod := newStatefulSetPod(set, 1) @@ -400,7 +790,8 @@ func TestRollingUpdateApplyRevision(t *testing.T) { func newPVC(name string) v1.PersistentVolumeClaim { return v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ - Name: name, + Namespace: "default", + Name: name, }, Spec: v1.PersistentVolumeClaimSpec{ Resources: v1.ResourceRequirements{ @@ -465,6 +856,10 @@ func newStatefulSetWithVolumes(replicas int, name string, petMounts []v1.VolumeM VolumeClaimTemplates: claims, ServiceName: "governingsvc", UpdateStrategy: appsv1beta1.StatefulSetUpdateStrategy{Type: apps.RollingUpdateStatefulSetStrategyType}, + PersistentVolumeClaimRetentionPolicy: &appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{ + WhenScaled: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType, + WhenDeleted: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType, + }, RevisionHistoryLimit: func() *int32 { limit := int32(2) return &limit @@ -482,3 +877,77 @@ func newStatefulSet(replicas int) *appsv1beta1.StatefulSet { } return newStatefulSetWithVolumes(replicas, "foo", petMounts, podMounts) } + +//func newStatefulSetWithLabels(replicas int, name string, uid types.UID, labels map[string]string) *appsv1beta1.StatefulSet { +// // Converting all the map-only selectors to set-based selectors. 
+// var testMatchExpressions []metav1.LabelSelectorRequirement +// for key, value := range labels { +// sel := metav1.LabelSelectorRequirement{ +// Key: key, +// Operator: metav1.LabelSelectorOpIn, +// Values: []string{value}, +// } +// testMatchExpressions = append(testMatchExpressions, sel) +// } +// return &appsv1beta1.StatefulSet{ +// TypeMeta: metav1.TypeMeta{ +// Kind: "StatefulSet", +// APIVersion: "apps/v1", +// }, +// ObjectMeta: metav1.ObjectMeta{ +// Name: name, +// Namespace: v1.NamespaceDefault, +// UID: uid, +// }, +// Spec: appsv1beta1.StatefulSetSpec{ +// Selector: &metav1.LabelSelector{ +// // Purposely leaving MatchLabels nil, so to ensure it will break if any link +// // in the chain ignores the set-based MatchExpressions. +// MatchLabels: nil, +// MatchExpressions: testMatchExpressions, +// }, +// Replicas: func() *int32 { i := int32(replicas); return &i }(), +// PersistentVolumeClaimRetentionPolicy: &appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{ +// WhenScaled: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType, +// WhenDeleted: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType, +// }, +// Template: v1.PodTemplateSpec{ +// ObjectMeta: metav1.ObjectMeta{ +// Labels: labels, +// }, +// Spec: v1.PodSpec{ +// Containers: []v1.Container{ +// { +// Name: "nginx", +// Image: "nginx", +// VolumeMounts: []v1.VolumeMount{ +// {Name: "datadir", MountPath: "/tmp/"}, +// {Name: "home", MountPath: "/home"}, +// }, +// }, +// }, +// Volumes: []v1.Volume{{ +// Name: "home", +// VolumeSource: v1.VolumeSource{ +// HostPath: &v1.HostPathVolumeSource{ +// Path: fmt.Sprintf("/tmp/%v", "home"), +// }, +// }}}, +// }, +// }, +// VolumeClaimTemplates: []v1.PersistentVolumeClaim{ +// { +// ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "datadir"}, +// Spec: v1.PersistentVolumeClaimSpec{ +// Resources: v1.ResourceRequirements{ +// Requests: v1.ResourceList{ +// v1.ResourceStorage: *resource.NewQuantity(1, resource.BinarySI), +// }, +// }, +// }, +// }, +// }, +// ServiceName: "governingsvc", +// }, +// } +//} diff --git a/pkg/controller/statefulset/stateful_update_utils.go b/pkg/controller/statefulset/stateful_update_utils.go index 080374cf93..ffa4d22d28 100644 --- a/pkg/controller/statefulset/stateful_update_utils.go +++ b/pkg/controller/statefulset/stateful_update_utils.go @@ -17,9 +17,10 @@ limitations under the License. 
package statefulset import ( + v1 "k8s.io/api/core/v1" + appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1" "github.com/openkruise/kruise/pkg/util/updatesort" - v1 "k8s.io/api/core/v1" ) func sortPodsToUpdate(rollingUpdateStrategy *appsv1beta1.RollingUpdateStatefulSetStrategy, updateRevision string, totalReplicas int32, replicas []*v1.Pod) []int { diff --git a/pkg/controller/statefulset/stateful_update_utils_test.go b/pkg/controller/statefulset/stateful_update_utils_test.go index 9fc483e5aa..4594b67730 100644 --- a/pkg/controller/statefulset/stateful_update_utils_test.go +++ b/pkg/controller/statefulset/stateful_update_utils_test.go @@ -20,11 +20,12 @@ import ( "reflect" "testing" - appspub "github.com/openkruise/kruise/apis/apps/pub" - appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1" apps "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + appspub "github.com/openkruise/kruise/apis/apps/pub" + appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1" ) func TestSortPodsToUpdate(t *testing.T) { diff --git a/pkg/controller/statefulset/statefulset_controller.go b/pkg/controller/statefulset/statefulset_controller.go index a35ee806f1..d31feb279f 100644 --- a/pkg/controller/statefulset/statefulset_controller.go +++ b/pkg/controller/statefulset/statefulset_controller.go @@ -23,21 +23,6 @@ import ( "fmt" "time" - appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1" - appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1" - "github.com/openkruise/kruise/pkg/client" - kruiseclientset "github.com/openkruise/kruise/pkg/client/clientset/versioned" - kruiseappslisters "github.com/openkruise/kruise/pkg/client/listers/apps/v1beta1" - "github.com/openkruise/kruise/pkg/features" - "github.com/openkruise/kruise/pkg/util" - utildiscovery "github.com/openkruise/kruise/pkg/util/discovery" - "github.com/openkruise/kruise/pkg/util/expectations" - utilfeature "github.com/openkruise/kruise/pkg/util/feature" - "github.com/openkruise/kruise/pkg/util/inplaceupdate" - "github.com/openkruise/kruise/pkg/util/lifecycle" - "github.com/openkruise/kruise/pkg/util/ratelimiter" - "github.com/openkruise/kruise/pkg/util/requeueduration" - "github.com/openkruise/kruise/pkg/util/revisionadapter" appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -61,6 +46,22 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" + + appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1" + appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1" + "github.com/openkruise/kruise/pkg/client" + kruiseclientset "github.com/openkruise/kruise/pkg/client/clientset/versioned" + kruiseappslisters "github.com/openkruise/kruise/pkg/client/listers/apps/v1beta1" + "github.com/openkruise/kruise/pkg/features" + "github.com/openkruise/kruise/pkg/util" + utildiscovery "github.com/openkruise/kruise/pkg/util/discovery" + "github.com/openkruise/kruise/pkg/util/expectations" + utilfeature "github.com/openkruise/kruise/pkg/util/feature" + "github.com/openkruise/kruise/pkg/util/inplaceupdate" + "github.com/openkruise/kruise/pkg/util/lifecycle" + "github.com/openkruise/kruise/pkg/util/ratelimiter" + "github.com/openkruise/kruise/pkg/util/requeueduration" + "github.com/openkruise/kruise/pkg/util/revisionadapter" ) func init() { diff --git a/pkg/controller/statefulset/statefulset_controller_suite_test.go 
b/pkg/controller/statefulset/statefulset_controller_suite_test.go index d57813bde5..9abd5f8a00 100644 --- a/pkg/controller/statefulset/statefulset_controller_suite_test.go +++ b/pkg/controller/statefulset/statefulset_controller_suite_test.go @@ -22,10 +22,11 @@ import ( "path/filepath" "testing" - "github.com/openkruise/kruise/apis" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/envtest" + + "github.com/openkruise/kruise/apis" ) var cfg *rest.Config diff --git a/pkg/controller/statefulset/statefulset_controller_test.go b/pkg/controller/statefulset/statefulset_controller_test.go index 22a9d20a71..b6a9363de5 100644 --- a/pkg/controller/statefulset/statefulset_controller_test.go +++ b/pkg/controller/statefulset/statefulset_controller_test.go @@ -25,15 +25,6 @@ import ( "testing" "time" - appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1" - kruiseclientset "github.com/openkruise/kruise/pkg/client/clientset/versioned" - kruisefake "github.com/openkruise/kruise/pkg/client/clientset/versioned/fake" - kruiseinformers "github.com/openkruise/kruise/pkg/client/informers/externalversions" - kruiseappsinformers "github.com/openkruise/kruise/pkg/client/informers/externalversions/apps/v1beta1" - kruiseappslisters "github.com/openkruise/kruise/pkg/client/listers/apps/v1beta1" - "github.com/openkruise/kruise/pkg/util/inplaceupdate" - "github.com/openkruise/kruise/pkg/util/lifecycle" - "github.com/openkruise/kruise/pkg/util/revisionadapter" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -56,6 +47,16 @@ import ( "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/history" "sigs.k8s.io/controller-runtime/pkg/reconcile" + + appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1" + kruiseclientset "github.com/openkruise/kruise/pkg/client/clientset/versioned" + kruisefake "github.com/openkruise/kruise/pkg/client/clientset/versioned/fake" + kruiseinformers "github.com/openkruise/kruise/pkg/client/informers/externalversions" + kruiseappsinformers "github.com/openkruise/kruise/pkg/client/informers/externalversions/apps/v1beta1" + kruiseappslisters "github.com/openkruise/kruise/pkg/client/listers/apps/v1beta1" + "github.com/openkruise/kruise/pkg/util/inplaceupdate" + "github.com/openkruise/kruise/pkg/util/lifecycle" + "github.com/openkruise/kruise/pkg/util/revisionadapter" ) const statefulSetResyncPeriod = 30 * time.Second @@ -142,8 +143,8 @@ func TestStatefulSetControllerRespectsTermination(t *testing.T) { t.Error("StatefulSet does not respect termination") } sort.Sort(ascendingOrdinal(pods)) - spc.DeleteStatefulPod(set, pods[3]) - spc.DeleteStatefulPod(set, pods[4]) + spc.DeletePod(pods[3]) + spc.DeletePod(pods[4]) *set.Spec.Replicas = 0 if err := scaleDownStatefulSetController(set, ssc, spc); err != nil { t.Errorf("Failed to turn down StatefulSet : %s", err) @@ -193,7 +194,7 @@ func TestStatefulSetControllerBlocksScaling(t *testing.T) { t.Error("StatefulSet does not block scaling") } sort.Sort(ascendingOrdinal(pods)) - spc.DeleteStatefulPod(set, pods[0]) + spc.DeletePod(pods[0]) ssc.enqueueStatefulSet(set) fakeWorker(ssc) pods, err = spc.podsLister.Pods(set.Namespace).List(selector) @@ -613,13 +614,14 @@ func splitObjects(initialObjects []runtime.Object) ([]runtime.Object, []runtime. 
return kubeObjects, kruiseObjects } -func newFakeStatefulSetController(initialObjects ...runtime.Object) (*StatefulSetController, *fakeStatefulPodControl) { +func newFakeStatefulSetController(initialObjects ...runtime.Object) (*StatefulSetController, *fakeObjectManager) { kubeObjects, kruiseObjects := splitObjects(initialObjects) client := fake.NewSimpleClientset(kubeObjects...) kruiseClient := kruisefake.NewSimpleClientset(kruiseObjects...) informerFactory := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc()) kruiseInformerFactory := kruiseinformers.NewSharedInformerFactory(kruiseClient, controller.NoResyncPeriodFunc()) - fpc := newFakeStatefulPodControl(informerFactory.Core().V1().Pods(), kruiseInformerFactory.Apps().V1beta1().StatefulSets()) + om := newFakeObjectManager(informerFactory, kruiseInformerFactory) + fpc := NewStatefulPodControlFromManager(om, &noopRecorder{}) ssu := newFakeStatefulSetStatusUpdater(kruiseInformerFactory.Apps().V1beta1().StatefulSets()) ssc := NewStatefulSetController( informerFactory.Core().V1().Pods(), @@ -637,7 +639,7 @@ func newFakeStatefulSetController(initialObjects ...runtime.Object) (*StatefulSe lifecycleControl := lifecycle.NewForInformer(informerFactory.Core().V1().Pods()) ssc.control = NewDefaultStatefulSetControl(fpc, inplaceControl, lifecycleControl, ssu, ssh, recorder) - return ssc, fpc + return ssc, om } func fakeWorker(ssc *StatefulSetController) { @@ -655,7 +657,7 @@ func getPodAtOrdinal(pods []*v1.Pod, ordinal int) *v1.Pod { return pods[ordinal] } -func scaleUpStatefulSetController(set *appsv1beta1.StatefulSet, ssc *StatefulSetController, spc *fakeStatefulPodControl) error { +func scaleUpStatefulSetController(set *appsv1beta1.StatefulSet, ssc *StatefulSetController, spc *fakeObjectManager) error { spc.setsIndexer.Add(set) ssc.enqueueStatefulSet(set) fakeWorker(ssc) @@ -703,7 +705,7 @@ func scaleUpStatefulSetController(set *appsv1beta1.StatefulSet, ssc *StatefulSet return assertMonotonicInvariants(set, spc) } -func scaleDownStatefulSetController(set *appsv1beta1.StatefulSet, ssc *StatefulSetController, spc *fakeStatefulPodControl) error { +func scaleDownStatefulSetController(set *appsv1beta1.StatefulSet, ssc *StatefulSetController, spc *fakeObjectManager) error { selector, err := metav1.LabelSelectorAsSelector(set.Spec.Selector) if err != nil { return err @@ -726,7 +728,7 @@ func scaleDownStatefulSetController(set *appsv1beta1.StatefulSet, ssc *StatefulS pod = getPodAtOrdinal(pods, ord) ssc.updatePod(&prev, pod) fakeWorker(ssc) - spc.DeleteStatefulPod(set, pod) + spc.DeletePod(pod) ssc.deletePod(pod) fakeWorker(ssc) for set.Status.Replicas > *set.Spec.Replicas { @@ -743,7 +745,7 @@ func scaleDownStatefulSetController(set *appsv1beta1.StatefulSet, ssc *StatefulS pod = getPodAtOrdinal(pods, ord) ssc.updatePod(&prev, pod) fakeWorker(ssc) - spc.DeleteStatefulPod(set, pod) + spc.DeletePod(pod) ssc.deletePod(pod) fakeWorker(ssc) obj, _, err := spc.setsIndexer.Get(set) @@ -787,7 +789,7 @@ func NewStatefulSetController( ReconcileStatefulSet: ReconcileStatefulSet{ kruiseClient: kruiseClient, control: NewDefaultStatefulSetControl( - NewRealStatefulPodControl( + NewStatefulPodControl( kubeClient, setInformer.Lister(), podInformer.Lister(), diff --git a/pkg/controller/statefulset/statefulset_predownload_image.go b/pkg/controller/statefulset/statefulset_predownload_image.go index 747bb538a9..ea94b052e9 100644 --- a/pkg/controller/statefulset/statefulset_predownload_image.go +++ 
b/pkg/controller/statefulset/statefulset_predownload_image.go @@ -20,12 +20,6 @@ import ( "context" "fmt" - appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1" - appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1" - "github.com/openkruise/kruise/pkg/util/expectations" - imagejobutilfunc "github.com/openkruise/kruise/pkg/util/imagejob/utilfunction" - "github.com/openkruise/kruise/pkg/util/inplaceupdate" - "github.com/openkruise/kruise/pkg/util/revisionadapter" apps "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -35,6 +29,13 @@ import ( "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/controller/history" "sigs.k8s.io/controller-runtime/pkg/client" + + appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1" + appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1" + "github.com/openkruise/kruise/pkg/util/expectations" + imagejobutilfunc "github.com/openkruise/kruise/pkg/util/imagejob/utilfunction" + "github.com/openkruise/kruise/pkg/util/inplaceupdate" + "github.com/openkruise/kruise/pkg/util/revisionadapter" ) func (dss *defaultStatefulSetControl) createImagePullJobsForInPlaceUpdate(sts *appsv1beta1.StatefulSet, currentRevision, updateRevision *apps.ControllerRevision) error { diff --git a/pkg/util/feature/testing.go b/pkg/util/feature/testing.go new file mode 100644 index 0000000000..003768b0b0 --- /dev/null +++ b/pkg/util/feature/testing.go @@ -0,0 +1,28 @@ +package feature + +import ( + "fmt" + "testing" + + "k8s.io/component-base/featuregate" +) + +// SetFeatureGateDuringTest sets the specified gate to the specified value, and returns a function that restores the original value. +// Failures to set or restore cause the test to fail. +// +// Example use: +// +// defer utilfeature.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.<FeatureName>, true)() +func SetFeatureGateDuringTest(tb testing.TB, gate featuregate.FeatureGate, f featuregate.Feature, value bool) func() { + originalValue := gate.Enabled(f) + + if err := gate.(featuregate.MutableFeatureGate).Set(fmt.Sprintf("%s=%v", f, value)); err != nil { + tb.Errorf("error setting %s=%v: %v", f, value, err) + } + + return func() { + if err := gate.(featuregate.MutableFeatureGate).Set(fmt.Sprintf("%s=%v", f, originalValue)); err != nil { + tb.Errorf("error restoring %s=%v: %v", f, originalValue, err) + } + } +} diff --git a/test/e2e/apps/statefulset.go b/test/e2e/apps/statefulset.go index 72b11d5aab..ac3bf44fec 100644 --- a/test/e2e/apps/statefulset.go +++ b/test/e2e/apps/statefulset.go @@ -19,17 +19,13 @@ package apps import ( "context" + "encoding/json" "fmt" + "regexp" "strconv" "strings" "time" - "github.com/onsi/ginkgo" - "github.com/onsi/gomega" - appspub "github.com/openkruise/kruise/apis/apps/pub" - appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1" - kruiseclientset "github.com/openkruise/kruise/pkg/client/clientset/versioned" - "github.com/openkruise/kruise/test/e2e/framework" apps "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -40,6 +36,13 @@ import ( clientset "k8s.io/client-go/kubernetes" watchtools "k8s.io/client-go/tools/watch" imageutils "k8s.io/kubernetes/test/utils/image" + + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" + appspub "github.com/openkruise/kruise/apis/apps/pub" + appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1" + kruiseclientset "github.com/openkruise/kruise/pkg/client/clientset/versioned" + "github.com/openkruise/kruise/test/e2e/framework" ) const ( @@ -84,7 
+87,7 @@ var _ = SIGDescribe("StatefulSet", func() { } headlessSvcName := "test" var statefulPodMounts, podMounts []v1.VolumeMount - var ss *appsv1alpha1.StatefulSet + var ss *appsv1beta1.StatefulSet ginkgo.BeforeEach(func() { statefulPodMounts = []v1.VolumeMount{{Name: "datadir", MountPath: "/data/"}} @@ -113,7 +116,7 @@ var _ = SIGDescribe("StatefulSet", func() { sst := framework.NewStatefulSetTester(c, kc) sst.PauseNewPods(ss) - _, err := kc.AppsV1alpha1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) + _, err := kc.AppsV1beta1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Saturating stateful set " + ss.Name) @@ -155,7 +158,7 @@ var _ = SIGDescribe("StatefulSet", func() { // Replace ss with the one returned from Create() so it has the UID. // Save Kind since it won't be populated in the returned ss. kind := ss.Kind - ss, err := kc.AppsV1alpha1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) + ss, err := kc.AppsV1beta1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ss.Kind = kind @@ -237,7 +240,7 @@ var _ = SIGDescribe("StatefulSet", func() { sst := framework.NewStatefulSetTester(c, kc) sst.PauseNewPods(ss) - _, err := kc.AppsV1alpha1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) + _, err := kc.AppsV1beta1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) time.Sleep(time.Minute) @@ -295,17 +298,17 @@ var _ = SIGDescribe("StatefulSet", func() { ss := framework.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels) sst := framework.NewStatefulSetTester(c, kc) sst.SetHTTPProbe(ss) - ss.Spec.UpdateStrategy = appsv1alpha1.StatefulSetUpdateStrategy{ + ss.Spec.UpdateStrategy = appsv1beta1.StatefulSetUpdateStrategy{ Type: apps.RollingUpdateStatefulSetStrategyType, - RollingUpdate: func() *appsv1alpha1.RollingUpdateStatefulSetStrategy { - return &appsv1alpha1.RollingUpdateStatefulSetStrategy{ + RollingUpdate: func() *appsv1beta1.RollingUpdateStatefulSetStrategy { + return &appsv1beta1.RollingUpdateStatefulSetStrategy{ Partition: func() *int32 { i := int32(3) return &i }()} }(), } - ss, err := kc.AppsV1alpha1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) + ss, err := kc.AppsV1beta1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss) ss = sst.WaitForStatus(ss) @@ -327,7 +330,7 @@ var _ = SIGDescribe("StatefulSet", func() { ginkgo.By(fmt.Sprintf("Updating stateful set template: update image from %s to %s", oldImage, newImage)) gomega.Expect(oldImage).NotTo(gomega.Equal(newImage), "Incorrect test setup: should update to a different image") - ss, err = framework.UpdateStatefulSetWithRetries(kc, ns, ss.Name, func(update *appsv1alpha1.StatefulSet) { + ss, err = framework.UpdateStatefulSetWithRetries(kc, ns, ss.Name, func(update *appsv1beta1.StatefulSet) { update.Spec.Template.Spec.Containers[0].Image = newImage }) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -355,21 +358,21 @@ var _ = SIGDescribe("StatefulSet", func() { } ginkgo.By("Performing a canary update") - ss.Spec.UpdateStrategy = appsv1alpha1.StatefulSetUpdateStrategy{ + ss.Spec.UpdateStrategy = appsv1beta1.StatefulSetUpdateStrategy{ Type: apps.RollingUpdateStatefulSetStrategyType, - RollingUpdate: func() 
*appsv1alpha1.RollingUpdateStatefulSetStrategy { - return &appsv1alpha1.RollingUpdateStatefulSetStrategy{ + RollingUpdate: func() *appsv1beta1.RollingUpdateStatefulSetStrategy { + return &appsv1beta1.RollingUpdateStatefulSetStrategy{ Partition: func() *int32 { i := int32(2) return &i }()} }(), } - ss, err = framework.UpdateStatefulSetWithRetries(kc, ns, ss.Name, func(update *appsv1alpha1.StatefulSet) { - update.Spec.UpdateStrategy = appsv1alpha1.StatefulSetUpdateStrategy{ + ss, err = framework.UpdateStatefulSetWithRetries(kc, ns, ss.Name, func(update *appsv1beta1.StatefulSet) { + update.Spec.UpdateStrategy = appsv1beta1.StatefulSetUpdateStrategy{ Type: apps.RollingUpdateStatefulSetStrategyType, - RollingUpdate: func() *appsv1alpha1.RollingUpdateStatefulSetStrategy { - return &appsv1alpha1.RollingUpdateStatefulSetStrategy{ + RollingUpdate: func() *appsv1beta1.RollingUpdateStatefulSetStrategy { + return &appsv1beta1.RollingUpdateStatefulSetStrategy{ Partition: func() *int32 { i := int32(2) return &i @@ -447,12 +450,12 @@ var _ = SIGDescribe("StatefulSet", func() { ginkgo.By("Performing a phased rolling update") for i := int(*ss.Spec.UpdateStrategy.RollingUpdate.Partition) - 1; i >= 0; i-- { - ss, err = framework.UpdateStatefulSetWithRetries(kc, ns, ss.Name, func(update *appsv1alpha1.StatefulSet) { - update.Spec.UpdateStrategy = appsv1alpha1.StatefulSetUpdateStrategy{ + ss, err = framework.UpdateStatefulSetWithRetries(kc, ns, ss.Name, func(update *appsv1beta1.StatefulSet) { + update.Spec.UpdateStrategy = appsv1beta1.StatefulSetUpdateStrategy{ Type: apps.RollingUpdateStatefulSetStrategyType, - RollingUpdate: func() *appsv1alpha1.RollingUpdateStatefulSetStrategy { + RollingUpdate: func() *appsv1beta1.RollingUpdateStatefulSetStrategy { j := int32(i) - return &appsv1alpha1.RollingUpdateStatefulSetStrategy{ + return &appsv1beta1.RollingUpdateStatefulSetStrategy{ Partition: &j, } }(), @@ -506,10 +509,10 @@ var _ = SIGDescribe("StatefulSet", func() { ss := framework.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels) sst := framework.NewStatefulSetTester(c, kc) sst.SetHTTPProbe(ss) - ss.Spec.UpdateStrategy = appsv1alpha1.StatefulSetUpdateStrategy{ + ss.Spec.UpdateStrategy = appsv1beta1.StatefulSetUpdateStrategy{ Type: apps.OnDeleteStatefulSetStrategyType, } - ss, err := kc.AppsV1alpha1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) + ss, err := kc.AppsV1beta1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss) ss = sst.WaitForStatus(ss) @@ -547,7 +550,7 @@ var _ = SIGDescribe("StatefulSet", func() { ginkgo.By(fmt.Sprintf("Updating stateful set template: update image from %s to %s", oldImage, newImage)) gomega.Expect(oldImage).NotTo(gomega.Equal(newImage), "Incorrect test setup: should update to a different image") - ss, err = framework.UpdateStatefulSetWithRetries(kc, ns, ss.Name, func(update *appsv1alpha1.StatefulSet) { + ss, err = framework.UpdateStatefulSetWithRetries(kc, ns, ss.Name, func(update *appsv1beta1.StatefulSet) { update.Spec.Template.Spec.Containers[0].Image = newImage }) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -590,15 +593,15 @@ var _ = SIGDescribe("StatefulSet", func() { ss := framework.NewStatefulSet(ssName, ns, headlessSvcName, 3, nil, nil, labels) sst := framework.NewStatefulSetTester(c, kc) sst.SetHTTPProbe(ss) - ss.Spec.UpdateStrategy = appsv1alpha1.StatefulSetUpdateStrategy{ + ss.Spec.UpdateStrategy = 
appsv1beta1.StatefulSetUpdateStrategy{ Type: apps.RollingUpdateStatefulSetStrategyType, - RollingUpdate: &appsv1alpha1.RollingUpdateStatefulSetStrategy{ - PodUpdatePolicy: appsv1alpha1.InPlaceIfPossiblePodUpdateStrategyType, + RollingUpdate: &appsv1beta1.RollingUpdateStatefulSetStrategy{ + PodUpdatePolicy: appsv1beta1.InPlaceIfPossiblePodUpdateStrategyType, InPlaceUpdateStrategy: &appspub.InPlaceUpdateStrategy{GracePeriodSeconds: 10}, }, } ss.Spec.Template.Spec.ReadinessGates = append(ss.Spec.Template.Spec.ReadinessGates, v1.PodReadinessGate{ConditionType: appspub.InPlaceUpdateReady}) - ss, err := kc.AppsV1alpha1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) + ss, err := kc.AppsV1beta1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss) ss = sst.WaitForStatus(ss) @@ -637,10 +640,10 @@ var _ = SIGDescribe("StatefulSet", func() { ginkgo.By(fmt.Sprintf("Updating stateful set template: update image from %s to %s", oldImage, newImage)) gomega.Expect(oldImage).NotTo(gomega.Equal(newImage), "Incorrect test setup: should update to a different image") var partition int32 = 3 - ss, err = framework.UpdateStatefulSetWithRetries(kc, ns, ss.Name, func(update *appsv1alpha1.StatefulSet) { + ss, err = framework.UpdateStatefulSetWithRetries(kc, ns, ss.Name, func(update *appsv1beta1.StatefulSet) { update.Spec.Template.Spec.Containers[0].Image = newImage if update.Spec.UpdateStrategy.RollingUpdate == nil { - update.Spec.UpdateStrategy.RollingUpdate = &appsv1alpha1.RollingUpdateStatefulSetStrategy{} + update.Spec.UpdateStrategy.RollingUpdate = &appsv1beta1.RollingUpdateStatefulSetStrategy{} } update.Spec.UpdateStrategy.RollingUpdate.Partition = &partition }) @@ -651,7 +654,7 @@ var _ = SIGDescribe("StatefulSet", func() { currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision gomega.Expect(currentRevision).NotTo(gomega.Equal(updateRevision), "Current revision should not equal update revision during rolling update") - ss, err = framework.UpdateStatefulSetWithRetries(kc, ns, ss.Name, func(update *appsv1alpha1.StatefulSet) { + ss, err = framework.UpdateStatefulSetWithRetries(kc, ns, ss.Name, func(update *appsv1beta1.StatefulSet) { partition = 0 update.Spec.UpdateStrategy.RollingUpdate.Partition = &partition }) @@ -683,10 +686,10 @@ var _ = SIGDescribe("StatefulSet", func() { ss := framework.NewStatefulSet(ssName, ns, headlessSvcName, 3, nil, nil, labels) sst := framework.NewStatefulSetTester(c, kc) sst.SetHTTPProbe(ss) - ss.Spec.UpdateStrategy = appsv1alpha1.StatefulSetUpdateStrategy{ + ss.Spec.UpdateStrategy = appsv1beta1.StatefulSetUpdateStrategy{ Type: apps.RollingUpdateStatefulSetStrategyType, - RollingUpdate: &appsv1alpha1.RollingUpdateStatefulSetStrategy{ - PodUpdatePolicy: appsv1alpha1.InPlaceIfPossiblePodUpdateStrategyType, + RollingUpdate: &appsv1beta1.RollingUpdateStatefulSetStrategy{ + PodUpdatePolicy: appsv1beta1.InPlaceIfPossiblePodUpdateStrategyType, InPlaceUpdateStrategy: &appspub.InPlaceUpdateStrategy{GracePeriodSeconds: 10}, }, } @@ -699,7 +702,7 @@ var _ = SIGDescribe("StatefulSet", func() { ValueFrom: &v1.EnvVarSource{FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.labels['test-env']"}}, }) ss.Spec.Template.Spec.ReadinessGates = append(ss.Spec.Template.Spec.ReadinessGates, v1.PodReadinessGate{ConditionType: appspub.InPlaceUpdateReady}) - ss, err := kc.AppsV1alpha1().StatefulSets(ns).Create(context.TODO(), ss, 
metav1.CreateOptions{}) + ss, err := kc.AppsV1beta1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss) ss = sst.WaitForStatus(ss) @@ -735,10 +738,10 @@ var _ = SIGDescribe("StatefulSet", func() { ginkgo.By("Updating stateful set template: update label for env") var partition int32 = 3 - ss, err = framework.UpdateStatefulSetWithRetries(kc, ns, ss.Name, func(update *appsv1alpha1.StatefulSet) { + ss, err = framework.UpdateStatefulSetWithRetries(kc, ns, ss.Name, func(update *appsv1beta1.StatefulSet) { update.Spec.Template.ObjectMeta.Labels["test-env"] = "bar" if update.Spec.UpdateStrategy.RollingUpdate == nil { - update.Spec.UpdateStrategy.RollingUpdate = &appsv1alpha1.RollingUpdateStatefulSetStrategy{} + update.Spec.UpdateStrategy.RollingUpdate = &appsv1beta1.RollingUpdateStatefulSetStrategy{} } update.Spec.UpdateStrategy.RollingUpdate.Partition = &partition }) @@ -749,7 +752,7 @@ var _ = SIGDescribe("StatefulSet", func() { currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision gomega.Expect(currentRevision).NotTo(gomega.Equal(updateRevision), "Current revision should not equal update revision during rolling update") - ss, err = framework.UpdateStatefulSetWithRetries(kc, ns, ss.Name, func(update *appsv1alpha1.StatefulSet) { + ss, err = framework.UpdateStatefulSetWithRetries(kc, ns, ss.Name, func(update *appsv1beta1.StatefulSet) { partition = 0 update.Spec.UpdateStrategy.RollingUpdate.Partition = &partition }) @@ -783,7 +786,7 @@ var _ = SIGDescribe("StatefulSet", func() { ss := framework.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, psLabels) sst := framework.NewStatefulSetTester(c, kc) sst.SetHTTPProbe(ss) - ss, err = kc.AppsV1alpha1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) + ss, err = kc.AppsV1beta1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns) @@ -864,7 +867,7 @@ var _ = SIGDescribe("StatefulSet", func() { ss.Spec.PodManagementPolicy = apps.ParallelPodManagement sst := framework.NewStatefulSetTester(c, kc) sst.SetHTTPProbe(ss) - ss, err := kc.AppsV1alpha1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) + ss, err := kc.AppsV1beta1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns) @@ -931,7 +934,7 @@ var _ = SIGDescribe("StatefulSet", func() { statefulPodContainer := &ss.Spec.Template.Spec.Containers[0] statefulPodContainer.Ports = append(statefulPodContainer.Ports, conflictingPort) ss.Spec.Template.Spec.NodeName = node.Name - _, err = kc.AppsV1alpha1().StatefulSets(f.Namespace.Name).Create(context.TODO(), ss, metav1.CreateOptions{}) + _, err = kc.AppsV1beta1().StatefulSets(f.Namespace.Name).Create(context.TODO(), ss, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Waiting until pod " + podName + " will start running in namespace " + f.Namespace.Name) @@ -997,13 +1000,13 @@ var _ = SIGDescribe("StatefulSet", func() { ss := framework.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, labels) sst := framework.NewStatefulSetTester(c, kc) sst.SetHTTPProbe(ss) - ss, err := kc.AppsV1alpha1().StatefulSets(ns).Create(context.TODO(), 
ss, metav1.CreateOptions{}) + ss, err := kc.AppsV1beta1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) framework.ExpectNoError(err) sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss) ss = sst.WaitForStatus(ss) ginkgo.By("getting scale subresource") - scale, err := kc.AppsV1alpha1().StatefulSets(ns).GetScale(context.TODO(), ssName, metav1.GetOptions{}) + scale, err := kc.AppsV1beta1().StatefulSets(ns).GetScale(context.TODO(), ssName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed to get scale subresource: %v", err) } @@ -1015,14 +1018,14 @@ var _ = SIGDescribe("StatefulSet", func() { scale.ResourceVersion = "" // indicate the scale update should be unconditional } scale.Spec.Replicas = 2 - scaleResult, err := kc.AppsV1alpha1().StatefulSets(ns).UpdateScale(context.TODO(), ssName, scale, metav1.UpdateOptions{}) + scaleResult, err := kc.AppsV1beta1().StatefulSets(ns).UpdateScale(context.TODO(), ssName, scale, metav1.UpdateOptions{}) if err != nil { framework.Failf("Failed to put scale subresource: %v", err) } framework.ExpectEqual(scaleResult.Spec.Replicas, int32(2)) ginkgo.By("verifying the statefulset Spec.Replicas was modified") - ss, err = kc.AppsV1alpha1().StatefulSets(ns).Get(context.TODO(), ssName, metav1.GetOptions{}) + ss, err = kc.AppsV1beta1().StatefulSets(ns).Get(context.TODO(), ssName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed to get statefulset resource: %v", err) } @@ -1030,13 +1033,11 @@ var _ = SIGDescribe("StatefulSet", func() { }) }) - //framework.KruiseDescribe("Deploy clustered applications [Feature:StatefulSet] [Slow]", func() { - // var sst *framework.StatefulSetTester + //ginkgo.Describe("Deploy clustered applications [Feature:StatefulSet] [Slow]", func() { // var appTester *clusterAppTester // // ginkgo.BeforeEach(func() { - // sst = framework.NewStatefulSetTester(c, kc) - // appTester = &clusterAppTester{tester: sst, ns: ns} + // appTester = &clusterAppTester{client: c, ns: ns} // }) // // ginkgo.AfterEach(func() { @@ -1044,37 +1045,248 @@ var _ = SIGDescribe("StatefulSet", func() { // framework.DumpDebugInfo(c, ns) // } // framework.Logf("Deleting all statefulset in ns %v", ns) - // framework.DeleteAllStatefulSets(c, kc, ns) + // e2estatefulset.DeleteAllStatefulSets(c, ns) // }) // // // Do not mark this as Conformance. // // StatefulSet Conformance should not be dependent on specific applications. // ginkgo.It("should creating a working zookeeper cluster", func() { - // appTester.statefulPod = &zookeeperTester{tester: sst} + // e2epv.SkipIfNoDefaultStorageClass(c) + // appTester.statefulPod = &zookeeperTester{client: c} // appTester.run() // }) // // // Do not mark this as Conformance. // // StatefulSet Conformance should not be dependent on specific applications. // ginkgo.It("should creating a working redis cluster", func() { - // appTester.statefulPod = &redisTester{tester: sst} + // e2epv.SkipIfNoDefaultStorageClass(c) + // appTester.statefulPod = &redisTester{client: c} // appTester.run() // }) // // // Do not mark this as Conformance. // // StatefulSet Conformance should not be dependent on specific applications. // ginkgo.It("should creating a working mysql cluster", func() { - // appTester.statefulPod = &mysqlGaleraTester{tester: sst} + // e2epv.SkipIfNoDefaultStorageClass(c) + // appTester.statefulPod = &mysqlGaleraTester{client: c} // appTester.run() // }) // // // Do not mark this as Conformance. // // StatefulSet Conformance should not be dependent on specific applications. 
// ginkgo.It("should creating a working CockroachDB cluster", func() { - // appTester.statefulPod = &cockroachDBTester{tester: sst} + // e2epv.SkipIfNoDefaultStorageClass(c) + // appTester.statefulPod = &cockroachDBTester{client: c} // appTester.run() // }) //}) + // + //// Make sure minReadySeconds is honored + //// Don't mark it as conformance yet + //ginkgo.It("MinReadySeconds should be honored when enabled", func() { + // ssName := "test-ss" + // headlessSvcName := "test" + // // Define StatefulSet Labels + // ssPodLabels := map[string]string{ + // "name": "sample-pod", + // "pod": WebserverImageName, + // } + // ss := e2estatefulset.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, ssPodLabels) + // setHTTPProbe(ss) + // ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) + // framework.ExpectNoError(err) + // e2estatefulset.WaitForStatusAvailableReplicas(c, ss, 1) + //}) + // + //ginkgo.It("AvailableReplicas should get updated accordingly when MinReadySeconds is enabled", func() { + // ssName := "test-ss" + // headlessSvcName := "test" + // // Define StatefulSet Labels + // ssPodLabels := map[string]string{ + // "name": "sample-pod", + // "pod": WebserverImageName, + // } + // ss := e2estatefulset.NewStatefulSet(ssName, ns, headlessSvcName, 2, nil, nil, ssPodLabels) + // ss.Spec.MinReadySeconds = 30 + // setHTTPProbe(ss) + // ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) + // framework.ExpectNoError(err) + // e2estatefulset.WaitForStatusAvailableReplicas(c, ss, 0) + // // let's check that the availableReplicas have still not updated + // time.Sleep(5 * time.Second) + // ss, err = c.AppsV1().StatefulSets(ns).Get(context.TODO(), ss.Name, metav1.GetOptions{}) + // framework.ExpectNoError(err) + // if ss.Status.AvailableReplicas != 0 { + // framework.Failf("invalid number of availableReplicas: expected=%v received=%v", 0, ss.Status.AvailableReplicas) + // } + // e2estatefulset.WaitForStatusAvailableReplicas(c, ss, 2) + // + // ss, err = updateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) { + // update.Spec.MinReadySeconds = 3600 + // }) + // framework.ExpectNoError(err) + // // We don't expect replicas to be updated till 1 hour, so the availableReplicas should be 0 + // e2estatefulset.WaitForStatusAvailableReplicas(c, ss, 0) + // + // ss, err = updateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) { + // update.Spec.MinReadySeconds = 0 + // }) + // framework.ExpectNoError(err) + // e2estatefulset.WaitForStatusAvailableReplicas(c, ss, 2) + // + // ginkgo.By("check availableReplicas are shown in status") + // out, err := framework.RunKubectl(ns, "get", "statefulset", ss.Name, "-o=yaml") + // framework.ExpectNoError(err) + // if !strings.Contains(out, "availableReplicas: 2") { + // framework.Failf("invalid number of availableReplicas: expected=%v received=%v", 2, out) + // } + //}) + + ginkgo.Describe("Non-retain StatefulSetPersistentVolumeClaimPolicy [Feature:StatefulSetAutoDeletePVC]", func() { + ssName := "ss" + labels := map[string]string{ + "foo": "bar", + "baz": "blah", + } + headlessSvcName := "test" + var statefulPodMounts, podMounts []v1.VolumeMount + var ss *appsv1beta1.StatefulSet + + ginkgo.BeforeEach(func() { + statefulPodMounts = []v1.VolumeMount{{Name: "datadir", MountPath: "/data/"}} + podMounts = []v1.VolumeMount{{Name: "home", MountPath: "/home"}} + ss = framework.NewStatefulSet(ssName, ns, headlessSvcName, 2, statefulPodMounts, 
podMounts, labels) + + ginkgo.By("Creating service " + headlessSvcName + " in namespace " + ns) + headlessService := framework.CreateServiceSpec(headlessSvcName, "", true, labels) + _, err := c.CoreV1().Services(ns).Create(context.TODO(), headlessService, metav1.CreateOptions{}) + framework.ExpectNoError(err) + }) + + ginkgo.AfterEach(func() { + if ginkgo.CurrentGinkgoTestDescription().Failed { + framework.DumpDebugInfo(c, ns) + } + framework.Logf("Deleting all statefulsets in ns %v", ns) + framework.DeleteAllStatefulSets(c, kc, ns) + }) + + ginkgo.It("should delete PVCs with a WhenDeleted policy", func() { + if framework.SkipIfNoDefaultStorageClass(c) { + return + } + ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns) + *(ss.Spec.Replicas) = 3 + ss.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{ + WhenDeleted: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType, + } + _, err := kc.AppsV1beta1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) + framework.ExpectNoError(err) + + ginkgo.By("Confirming all 3 PVCs exist with their owner refs") + err = verifyStatefulSetPVCsExistWithOwnerRefs(c, kc, ss, []int{0, 1, 2}, true, false) + framework.ExpectNoError(err) + + ginkgo.By("Deleting stateful set " + ss.Name) + err = kc.AppsV1beta1().StatefulSets(ns).Delete(context.TODO(), ss.Name, metav1.DeleteOptions{}) + framework.ExpectNoError(err) + + ginkgo.By("Verifying PVCs deleted") + err = verifyStatefulSetPVCsExist(c, ss, []int{}) + framework.ExpectNoError(err) + }) + + ginkgo.It("should delete PVCs with an OnScaledown policy", func() { + if framework.SkipIfNoDefaultStorageClass(c) { + return + } + ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns) + *(ss.Spec.Replicas) = 3 + ss.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{ + WhenScaled: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType, + } + _, err := kc.AppsV1beta1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) + framework.ExpectNoError(err) + + ginkgo.By("Confirming all 3 PVCs exist") + err = verifyStatefulSetPVCsExist(c, ss, []int{0, 1, 2}) + framework.ExpectNoError(err) + + ginkgo.By("Scaling stateful set " + ss.Name + " to one replica") + ss, err = framework.NewStatefulSetTester(c, kc).Scale(ss, 1) + framework.ExpectNoError(err) + + ginkgo.By("Verifying all but one PVC deleted") + err = verifyStatefulSetPVCsExist(c, ss, []int{0}) + framework.ExpectNoError(err) + }) + + ginkgo.It("should delete PVCs after adopting pod (WhenDeleted)", func() { + if framework.SkipIfNoDefaultStorageClass(c) { + return + } + ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns) + *(ss.Spec.Replicas) = 3 + ss.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{ + WhenDeleted: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType, + } + _, err := kc.AppsV1beta1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) + framework.ExpectNoError(err) + + ginkgo.By("Confirming all 3 PVCs exist with their owner refs") + err = verifyStatefulSetPVCsExistWithOwnerRefs(c, kc, ss, []int{0, 1, 2}, true, false) + framework.ExpectNoError(err) + + ginkgo.By("Orphaning the 3rd pod") + patch, err := json.Marshal(metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{}, + }) + framework.ExpectNoError(err, "Could not Marshal JSON for patch payload") + _, err = 
c.CoreV1().Pods(ns).Patch(context.TODO(), fmt.Sprintf("%s-2", ss.Name), types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}, "") + framework.ExpectNoError(err, "Could not patch payload") + + ginkgo.By("Deleting stateful set " + ss.Name) + err = kc.AppsV1beta1().StatefulSets(ns).Delete(context.TODO(), ss.Name, metav1.DeleteOptions{}) + framework.ExpectNoError(err) + + ginkgo.By("Verifying PVCs deleted") + err = verifyStatefulSetPVCsExist(c, ss, []int{}) + framework.ExpectNoError(err) + }) + + ginkgo.It("should delete PVCs after adopting pod (WhenScaled) [Feature:StatefulSetAutoDeletePVC]", func() { + if framework.SkipIfNoDefaultStorageClass(c) { + return + } + ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns) + *(ss.Spec.Replicas) = 3 + ss.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{ + WhenScaled: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType, + } + _, err := kc.AppsV1beta1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) + framework.ExpectNoError(err) + + ginkgo.By("Confirming all 3 PVCs exist") + err = verifyStatefulSetPVCsExist(c, ss, []int{0, 1, 2}) + framework.ExpectNoError(err) + + ginkgo.By("Orphaning the 3rd pod") + patch, err := json.Marshal(metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{}, + }) + framework.ExpectNoError(err, "Could not Marshal JSON for patch payload") + _, err = c.CoreV1().Pods(ns).Patch(context.TODO(), fmt.Sprintf("%s-2", ss.Name), types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}, "") + framework.ExpectNoError(err, "Could not patch payload") + + ginkgo.By("Scaling stateful set " + ss.Name + " to one replica") + ss, err = framework.NewStatefulSetTester(c, kc).Scale(ss, 1) + framework.ExpectNoError(err) + + ginkgo.By("Verifying all but one PVC deleted") + err = verifyStatefulSetPVCsExist(c, ss, []int{0}) + framework.ExpectNoError(err) + }) + }) }) func kubectlExecWithRetries(args ...string) (out string) { @@ -1090,7 +1302,7 @@ func kubectlExecWithRetries(args ...string) (out string) { } type statefulPodTester interface { - deploy(ns string) *appsv1alpha1.StatefulSet + deploy(ns string) *appsv1beta1.StatefulSet write(statefulPodIndex int, kv map[string]string) read(statefulPodIndex int, key string) string name() string @@ -1127,7 +1339,7 @@ type statefulPodTester interface { //} // //type zookeeperTester struct { -// ss *appsv1alpha1.StatefulSet +// ss *appsv1beta1.StatefulSet // tester *framework.StatefulSetTester //} // @@ -1135,7 +1347,7 @@ type statefulPodTester interface { // return "zookeeper" //} // -//func (z *zookeeperTester) deploy(ns string) *appsv1alpha1.StatefulSet { +//func (z *zookeeperTester) deploy(ns string) *appsv1beta1.StatefulSet { // z.ss = z.tester.CreateStatefulSet(zookeeperManifestPath, ns) // return z.ss //} @@ -1157,7 +1369,7 @@ type statefulPodTester interface { //} // //type mysqlGaleraTester struct { -// ss *appsv1alpha1.StatefulSet +// ss *appsv1beta1.StatefulSet // tester *framework.StatefulSetTester //} // @@ -1173,7 +1385,7 @@ type statefulPodTester interface { // return kubectlExecWithRetries(fmt.Sprintf("--namespace=%v", ns), "exec", podName, "--", "/bin/sh", "-c", cmd) //} // -//func (m *mysqlGaleraTester) deploy(ns string) *appsv1alpha1.StatefulSet { +//func (m *mysqlGaleraTester) deploy(ns string) *appsv1beta1.StatefulSet { // m.ss = m.tester.CreateStatefulSet(mysqlGaleraManifestPath, ns) // // framework.Logf("Deployed statefulset %v, initializing database", 
m.ss.Name) @@ -1200,7 +1412,7 @@ type statefulPodTester interface { //} // //type redisTester struct { -// ss *appsv1alpha1.StatefulSet +// ss *appsv1beta1.StatefulSet // tester *framework.StatefulSetTester //} // @@ -1213,7 +1425,7 @@ type statefulPodTester interface { // return framework.RunKubectlOrDie(fmt.Sprintf("--namespace=%v", ns), "exec", podName, "--", "/bin/sh", "-c", cmd) //} // -//func (m *redisTester) deploy(ns string) *appsv1alpha1.StatefulSet { +//func (m *redisTester) deploy(ns string) *appsv1beta1.StatefulSet { // m.ss = m.tester.CreateStatefulSet(redisManifestPath, ns) // return m.ss //} @@ -1231,7 +1443,7 @@ type statefulPodTester interface { //} // //type cockroachDBTester struct { -// ss *appsv1alpha1.StatefulSet +// ss *appsv1beta1.StatefulSet // tester *framework.StatefulSetTester //} // @@ -1244,7 +1456,7 @@ type statefulPodTester interface { // return framework.RunKubectlOrDie(fmt.Sprintf("--namespace=%v", ns), "exec", podName, "--", "/bin/sh", "-c", cmd) //} // -//func (c *cockroachDBTester) deploy(ns string) *appsv1alpha1.StatefulSet { +//func (c *cockroachDBTester) deploy(ns string) *appsv1beta1.StatefulSet { // c.ss = c.tester.CreateStatefulSet(cockroachDBManifestPath, ns) // framework.Logf("Deployed statefulset %v, initializing database", c.ss.Name) // for _, cmd := range []string{ @@ -1292,10 +1504,10 @@ func pollReadWithTimeout(statefulPod statefulPodTester, statefulPodNumber int, k // This function is used by two tests to test StatefulSet rollbacks: one using // PVCs and one using no storage. -func rollbackTest(c clientset.Interface, kc kruiseclientset.Interface, ns string, ss *appsv1alpha1.StatefulSet) { +func rollbackTest(c clientset.Interface, kc kruiseclientset.Interface, ns string, ss *appsv1beta1.StatefulSet) { sst := framework.NewStatefulSetTester(c, kc) sst.SetHTTPProbe(ss) - ss, err := kc.AppsV1alpha1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) + ss, err := kc.AppsV1beta1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss) ss = sst.WaitForStatus(ss) @@ -1321,7 +1533,7 @@ func rollbackTest(c clientset.Interface, kc kruiseclientset.Interface, ns string ginkgo.By(fmt.Sprintf("Updating StatefulSet template: update image from %s to %s", oldImage, newImage)) gomega.Expect(oldImage).NotTo(gomega.Equal(newImage), "Incorrect test setup: should update to a different image") - ss, err = framework.UpdateStatefulSetWithRetries(kc, ns, ss.Name, func(update *appsv1alpha1.StatefulSet) { + ss, err = framework.UpdateStatefulSetWithRetries(kc, ns, ss.Name, func(update *appsv1beta1.StatefulSet) { update.Spec.Template.Spec.Containers[0].Image = newImage }) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1365,7 +1577,7 @@ func rollbackTest(c clientset.Interface, kc kruiseclientset.Interface, ns string gomega.Expect(err).NotTo(gomega.HaveOccurred()) ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name) priorRevision := currentRevision - ss, err = framework.UpdateStatefulSetWithRetries(kc, ns, ss.Name, func(update *appsv1alpha1.StatefulSet) { + ss, err = framework.UpdateStatefulSetWithRetries(kc, ns, ss.Name, func(update *appsv1beta1.StatefulSet) { update.Spec.Template.Spec.Containers[0].Image = oldImage }) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1404,3 +1616,111 @@ func rollbackTest(c clientset.Interface, kc kruiseclientset.Interface, ns string priorRevision)) } } + +// verifyStatefulSetPVCsExist confirms that 
exactly the PVCs for ss with the specified ids exist. This polls until the situation occurs, an error happens, or until timeout (in the latter case an error is also returned). Beware that this cannot tell if a PVC will be deleted at some point in the future, so if used to confirm that no PVCs are deleted, the caller should wait for some event giving the PVCs a reasonable chance to be deleted, before calling this function. +func verifyStatefulSetPVCsExist(c clientset.Interface, ss *appsv1beta1.StatefulSet, claimIds []int) error { + idSet := map[int]struct{}{} + for _, id := range claimIds { + idSet[id] = struct{}{} + } + return wait.PollImmediate(framework.StatefulSetPoll, framework.StatefulSetTimeout, func() (bool, error) { + pvcList, err := c.CoreV1().PersistentVolumeClaims(ss.Namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: klabels.Everything().String()}) + if err != nil { + framework.Logf("WARNING: Failed to list pvcs for verification, retrying: %v", err) + return false, nil + } + for _, claim := range ss.Spec.VolumeClaimTemplates { + pvcNameRE := regexp.MustCompile(fmt.Sprintf("^%s-%s-([0-9]+)$", claim.Name, ss.Name)) + seenPVCs := map[int]struct{}{} + for _, pvc := range pvcList.Items { + matches := pvcNameRE.FindStringSubmatch(pvc.Name) + if len(matches) != 2 { + continue + } + ordinal, err := strconv.ParseInt(matches[1], 10, 32) + if err != nil { + framework.Logf("ERROR: bad pvc name %s (%v)", pvc.Name, err) + return false, err + } + if _, found := idSet[int(ordinal)]; !found { + return false, nil // Retry until the PVCs are consistent. + } else { + seenPVCs[int(ordinal)] = struct{}{} + } + } + if len(seenPVCs) != len(idSet) { + framework.Logf("Found %d of %d PVCs", len(seenPVCs), len(idSet)) + return false, nil // Retry until the PVCs are consistent. + } + } + return true, nil + }) +} + +// verifyStatefulSetPVCsExistWithOwnerRefs works as verifyStatefulSetPVCsExist, but also waits for the ownerRefs to match. +func verifyStatefulSetPVCsExistWithOwnerRefs(c clientset.Interface, kc kruiseclientset.Interface, ss *appsv1beta1.StatefulSet, claimIndices []int, wantSetRef, wantPodRef bool) error { + indexSet := map[int]struct{}{} + for _, id := range claimIndices { + indexSet[id] = struct{}{} + } + set, err := kc.AppsV1beta1().StatefulSets(ss.Namespace).Get(context.TODO(), ss.Name, metav1.GetOptions{}) + framework.ExpectNoError(err) + setUID := set.GetUID() + if setUID == "" { + framework.Failf("StatefulSet %s missing UID", ss.Name) + } + return wait.PollImmediate(framework.StatefulSetPoll, framework.StatefulSetTimeout, func() (bool, error) { + pvcList, err := c.CoreV1().PersistentVolumeClaims(ss.Namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: klabels.Everything().String()}) + if err != nil { + framework.Logf("WARNING: Failed to list pvcs for verification, retrying: %v", err) + return false, nil + } + for _, claim := range ss.Spec.VolumeClaimTemplates { + pvcNameRE := regexp.MustCompile(fmt.Sprintf("^%s-%s-([0-9]+)$", claim.Name, ss.Name)) + seenPVCs := map[int]struct{}{} + for _, pvc := range pvcList.Items { + matches := pvcNameRE.FindStringSubmatch(pvc.Name) + if len(matches) != 2 { + continue + } + ordinal, err := strconv.ParseInt(matches[1], 10, 32) + if err != nil { + framework.Logf("ERROR: bad pvc name %s (%v)", pvc.Name, err) + return false, err + } + if _, found := indexSet[int(ordinal)]; !found { + framework.Logf("Unexpected, retrying") + return false, nil // Retry until the PVCs are consistent. 
+ } + var foundSetRef, foundPodRef bool + for _, ref := range pvc.GetOwnerReferences() { + if ref.Kind == "StatefulSet" && ref.UID == setUID { + foundSetRef = true + } + if ref.Kind == "Pod" { + podName := fmt.Sprintf("%s-%d", ss.Name, ordinal) + pod, err := c.CoreV1().Pods(ss.Namespace).Get(context.TODO(), podName, metav1.GetOptions{}) + if err != nil { + framework.Logf("Pod %s not found, retrying (%v)", podName, err) + return false, nil + } + podUID := pod.GetUID() + if podUID == "" { + framework.Failf("Pod %s is missing UID", pod.Name) + } + if ref.UID == podUID { + foundPodRef = true + } + } + } + if foundSetRef == wantSetRef && foundPodRef == wantPodRef { + seenPVCs[int(ordinal)] = struct{}{} + } + } + if len(seenPVCs) != len(indexSet) { + framework.Logf("Only %d PVCs, retrying", len(seenPVCs)) + return false, nil // Retry until the PVCs are consistent. + } + } + return true, nil + }) +} diff --git a/test/e2e/framework/pv_util.go b/test/e2e/framework/pv_util.go index 1aa8dfc762..7c75c1d431 100644 --- a/test/e2e/framework/pv_util.go +++ b/test/e2e/framework/pv_util.go @@ -20,12 +20,22 @@ package framework import ( "context" "fmt" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" ) +const ( + // isDefaultStorageClassAnnotation represents a StorageClass annotation that + // marks a class as the default StorageClass + isDefaultStorageClassAnnotation = "storageclass.kubernetes.io/is-default-class" + + // betaIsDefaultStorageClassAnnotation is the beta version of IsDefaultStorageClassAnnotation. + // TODO: remove Beta when no longer used + betaIsDefaultStorageClassAnnotation = "storageclass.beta.kubernetes.io/is-default-class" +) + // create the PV resource. Fails test on error. func createPV(c clientset.Interface, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) { pv, err := c.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{}) @@ -39,3 +49,49 @@ func createPV(c clientset.Interface, pv *v1.PersistentVolume) (*v1.PersistentVol func CreatePV(c clientset.Interface, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) { return createPV(c, pv) } + +// SkipIfNoDefaultStorageClass skips tests if no default SC can be found. 
+func SkipIfNoDefaultStorageClass(c clientset.Interface) bool { + _, err := GetDefaultStorageClassName(c) + if err != nil { + Logf("error finding default storageClass : %v", err) + return true + } + return false +} + +// GetDefaultStorageClassName returns default storageClass or return error +func GetDefaultStorageClassName(c clientset.Interface) (string, error) { + list, err := c.StorageV1().StorageClasses().List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return "", fmt.Errorf("Error listing storage classes: %v", err) + } + var scName string + for _, sc := range list.Items { + if isDefaultAnnotation(sc.ObjectMeta) { + if len(scName) != 0 { + return "", fmt.Errorf("Multiple default storage classes found: %q and %q", scName, sc.Name) + } + scName = sc.Name + } + } + if len(scName) == 0 { + return "", fmt.Errorf("No default storage class found") + } + Logf("Default storage class: %q", scName) + return scName, nil +} + +// isDefaultAnnotation returns a boolean if the default storage class +// annotation is set +// TODO: remove Beta when no longer needed +func isDefaultAnnotation(obj metav1.ObjectMeta) bool { + if obj.Annotations[isDefaultStorageClassAnnotation] == "true" { + return true + } + if obj.Annotations[betaIsDefaultStorageClassAnnotation] == "true" { + return true + } + + return false +} diff --git a/test/e2e/framework/statefulset_utils.go b/test/e2e/framework/statefulset_utils.go index 4d33858fec..62020cb974 100644 --- a/test/e2e/framework/statefulset_utils.go +++ b/test/e2e/framework/statefulset_utils.go @@ -30,7 +30,7 @@ import ( "github.com/onsi/gomega" - appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1" + appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1" kruiseclientset "github.com/openkruise/kruise/pkg/client/clientset/versioned" "github.com/openkruise/kruise/test/e2e/manifest" apps "k8s.io/api/apps/v1" @@ -87,8 +87,8 @@ func NewStatefulSetTester(c clientset.Interface, kc kruiseclientset.Interface) * } // GetStatefulSet gets the StatefulSet named name in namespace. -func (s *StatefulSetTester) GetStatefulSet(namespace, name string) *appsv1alpha1.StatefulSet { - ss, err := s.kc.AppsV1alpha1().StatefulSets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) +func (s *StatefulSetTester) GetStatefulSet(namespace, name string) *appsv1beta1.StatefulSet { + ss, err := s.kc.AppsV1beta1().StatefulSets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { Failf("Failed to get StatefulSet %s/%s: %v", namespace, name, err) } @@ -96,7 +96,7 @@ func (s *StatefulSetTester) GetStatefulSet(namespace, name string) *appsv1alpha1 } // CreateStatefulSet creates a StatefulSet from the manifest at manifestPath in the Namespace ns using kubectl create. 
-func (s *StatefulSetTester) CreateStatefulSet(manifestPath, ns string) *appsv1alpha1.StatefulSet { +func (s *StatefulSetTester) CreateStatefulSet(manifestPath, ns string) *appsv1beta1.StatefulSet { mkpath := func(file string) string { return filepath.Join(manifestPath, file) } @@ -113,14 +113,14 @@ func (s *StatefulSetTester) CreateStatefulSet(manifestPath, ns string) *appsv1al gomega.Expect(err).NotTo(gomega.HaveOccurred()) Logf(fmt.Sprintf("creating statefulset %v/%v with %d replicas and selector %+v", ss.Namespace, ss.Name, *(ss.Spec.Replicas), ss.Spec.Selector)) - _, err = s.kc.AppsV1alpha1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) + _, err = s.kc.AppsV1beta1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) s.WaitForRunningAndReady(*ss.Spec.Replicas, ss) return ss } // CheckMount checks that the mount at mountPath is valid for all Pods in ss. -func (s *StatefulSetTester) CheckMount(ss *appsv1alpha1.StatefulSet, mountPath string) error { +func (s *StatefulSetTester) CheckMount(ss *appsv1beta1.StatefulSet, mountPath string) error { for _, cmd := range []string{ // Print inode, size etc fmt.Sprintf("ls -idlh %v", mountPath), @@ -137,7 +137,7 @@ func (s *StatefulSetTester) CheckMount(ss *appsv1alpha1.StatefulSet, mountPath s } // ExecInStatefulPods executes cmd in all Pods in ss. If a error occurs it is returned and cmd is not execute in any subsequent Pods. -func (s *StatefulSetTester) ExecInStatefulPods(ss *appsv1alpha1.StatefulSet, cmd string) error { +func (s *StatefulSetTester) ExecInStatefulPods(ss *appsv1beta1.StatefulSet, cmd string) error { podList := s.GetPodList(ss) for _, statefulPod := range podList.Items { stdout, err := RunHostCmdWithRetries(statefulPod.Namespace, statefulPod.Name, cmd, StatefulSetPoll, StatefulPodTimeout) @@ -150,7 +150,7 @@ func (s *StatefulSetTester) ExecInStatefulPods(ss *appsv1alpha1.StatefulSet, cmd } // CheckHostname verifies that all Pods in ss have the correct Hostname. If the returned error is not nil than verification failed. -func (s *StatefulSetTester) CheckHostname(ss *appsv1alpha1.StatefulSet) error { +func (s *StatefulSetTester) CheckHostname(ss *appsv1beta1.StatefulSet) error { cmd := "printf $(hostname)" podList := s.GetPodList(ss) for _, statefulPod := range podList.Items { @@ -166,7 +166,7 @@ func (s *StatefulSetTester) CheckHostname(ss *appsv1alpha1.StatefulSet) error { } // Saturate waits for all Pods in ss to become Running and Ready. -func (s *StatefulSetTester) Saturate(ss *appsv1alpha1.StatefulSet) { +func (s *StatefulSetTester) Saturate(ss *appsv1beta1.StatefulSet) { var i int32 for i = 0; i < *(ss.Spec.Replicas); i++ { Logf("Waiting for stateful pod at index %v to enter Running", i) @@ -177,7 +177,7 @@ func (s *StatefulSetTester) Saturate(ss *appsv1alpha1.StatefulSet) { } // DeleteStatefulPodAtIndex deletes the Pod with ordinal index in ss. 
-func (s *StatefulSetTester) DeleteStatefulPodAtIndex(index int, ss *appsv1alpha1.StatefulSet) {
+func (s *StatefulSetTester) DeleteStatefulPodAtIndex(index int, ss *appsv1beta1.StatefulSet) {
 	name := getStatefulSetPodNameAtIndex(index, ss)
 	noGrace := int64(0)
 	if err := s.c.CoreV1().Pods(ss.Namespace).Delete(context.TODO(), name, metav1.DeleteOptions{GracePeriodSeconds: &noGrace}); err != nil {
@@ -189,26 +189,26 @@ func (s *StatefulSetTester) DeleteStatefulPodAtIndex(index int, ss *appsv1alpha1
 type VerifyStatefulPodFunc func(*v1.Pod)
 
 // VerifyPodAtIndex applies a visitor pattern to the Pod at index in ss. verify is applied to the Pod to "visit" it.
-func (s *StatefulSetTester) VerifyPodAtIndex(index int, ss *appsv1alpha1.StatefulSet, verify VerifyStatefulPodFunc) {
+func (s *StatefulSetTester) VerifyPodAtIndex(index int, ss *appsv1beta1.StatefulSet, verify VerifyStatefulPodFunc) {
 	name := getStatefulSetPodNameAtIndex(index, ss)
 	pod, err := s.c.CoreV1().Pods(ss.Namespace).Get(context.TODO(), name, metav1.GetOptions{})
 	gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Failed to get stateful pod %s for StatefulSet %s/%s", name, ss.Namespace, ss.Name))
 	verify(pod)
 }
 
-func getStatefulSetPodNameAtIndex(index int, ss *appsv1alpha1.StatefulSet) string {
+func getStatefulSetPodNameAtIndex(index int, ss *appsv1beta1.StatefulSet) string {
 	// TODO: we won't use "-index" as the name strategy forever,
 	// pull the name out from an identity mapper.
 	return fmt.Sprintf("%v-%v", ss.Name, index)
}
 
 // Scale scales ss to count replicas.
-func (s *StatefulSetTester) Scale(ss *appsv1alpha1.StatefulSet, count int32) (*appsv1alpha1.StatefulSet, error) {
+func (s *StatefulSetTester) Scale(ss *appsv1beta1.StatefulSet, count int32) (*appsv1beta1.StatefulSet, error) {
 	name := ss.Name
 	ns := ss.Namespace
 
 	Logf("Scaling statefulset %s to %d", name, count)
-	ss = s.update(ns, name, func(ss *appsv1alpha1.StatefulSet) { *(ss.Spec.Replicas) = count })
+	ss = s.update(ns, name, func(ss *appsv1beta1.StatefulSet) { *(ss.Spec.Replicas) = count })
 
 	var statefulPodList *v1.PodList
 	pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) {
@@ -232,12 +232,12 @@ func (s *StatefulSetTester) Scale(ss *appsv1alpha1.StatefulSet, count int32) (*a
 }
 
 // UpdateReplicas updates the replicas of ss to count.
-func (s *StatefulSetTester) UpdateReplicas(ss *appsv1alpha1.StatefulSet, count int32) {
-	s.update(ss.Namespace, ss.Name, func(ss *appsv1alpha1.StatefulSet) { *(ss.Spec.Replicas) = count })
+func (s *StatefulSetTester) UpdateReplicas(ss *appsv1beta1.StatefulSet, count int32) {
+	s.update(ss.Namespace, ss.Name, func(ss *appsv1beta1.StatefulSet) { *(ss.Spec.Replicas) = count })
 }
 
 // Restart scales ss to 0 and then back to its previous number of replicas.
-func (s *StatefulSetTester) Restart(ss *appsv1alpha1.StatefulSet) {
+func (s *StatefulSetTester) Restart(ss *appsv1beta1.StatefulSet) {
 	oldReplicas := *(ss.Spec.Replicas)
 	ss, err := s.Scale(ss, 0)
 	ExpectNoError(err)
@@ -245,17 +245,17 @@ func (s *StatefulSetTester) Restart(ss *appsv1alpha1.StatefulSet) {
 	// This way we know the controller has observed all Pod deletions
 	// before we scale it back up.
 	s.WaitForStatusReplicas(ss, 0)
-	s.update(ss.Namespace, ss.Name, func(ss *appsv1alpha1.StatefulSet) { *(ss.Spec.Replicas) = oldReplicas })
+	s.update(ss.Namespace, ss.Name, func(ss *appsv1beta1.StatefulSet) { *(ss.Spec.Replicas) = oldReplicas })
 }
 
-func (s *StatefulSetTester) update(ns, name string, update func(ss *appsv1alpha1.StatefulSet)) *appsv1alpha1.StatefulSet {
+func (s *StatefulSetTester) update(ns, name string, update func(ss *appsv1beta1.StatefulSet)) *appsv1beta1.StatefulSet {
 	for i := 0; i < 3; i++ {
-		ss, err := s.kc.AppsV1alpha1().StatefulSets(ns).Get(context.TODO(), name, metav1.GetOptions{})
+		ss, err := s.kc.AppsV1beta1().StatefulSets(ns).Get(context.TODO(), name, metav1.GetOptions{})
 		if err != nil {
 			Failf("failed to get statefulset %q: %v", name, err)
 		}
 		update(ss)
-		ss, err = s.kc.AppsV1alpha1().StatefulSets(ns).Update(context.TODO(), ss, metav1.UpdateOptions{})
+		ss, err = s.kc.AppsV1beta1().StatefulSets(ns).Update(context.TODO(), ss, metav1.UpdateOptions{})
 		if err == nil {
 			return ss
 		}
@@ -268,7 +268,7 @@ func (s *StatefulSetTester) update(ns, name string, update func(ss *appsv1alpha1
 }
 
 // GetPodList gets the current Pods in ss.
-func (s *StatefulSetTester) GetPodList(ss *appsv1alpha1.StatefulSet) *v1.PodList {
+func (s *StatefulSetTester) GetPodList(ss *appsv1beta1.StatefulSet) *v1.PodList {
 	selector, err := metav1.LabelSelectorAsSelector(ss.Spec.Selector)
 	ExpectNoError(err)
 	podList, err := s.c.CoreV1().Pods(ss.Namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()})
@@ -278,7 +278,7 @@ func (s *StatefulSetTester) GetPodList(ss *appsv1alpha1.StatefulSet) *v1.PodList
 
 // ConfirmStatefulPodCount asserts that the current number of Pods in ss is count, waiting up to timeout for ss
 // to scale to count.
-func (s *StatefulSetTester) ConfirmStatefulPodCount(count int, ss *appsv1alpha1.StatefulSet, timeout time.Duration, hard bool) {
+func (s *StatefulSetTester) ConfirmStatefulPodCount(count int, ss *appsv1beta1.StatefulSet, timeout time.Duration, hard bool) {
 	start := time.Now()
 	deadline := start.Add(timeout)
 	for t := time.Now(); t.Before(deadline); t = time.Now() {
@@ -301,7 +301,7 @@ func (s *StatefulSetTester) ConfirmStatefulPodCount(count int, ss *appsv1alpha1.
 
 // WaitForRunning waits for numPodsRunning in ss to be Running and for the first
 // numPodsReady ordinals to be Ready.
-func (s *StatefulSetTester) WaitForRunning(numPodsRunning, numPodsReady int32, ss *appsv1alpha1.StatefulSet) {
+func (s *StatefulSetTester) WaitForRunning(numPodsRunning, numPodsReady int32, ss *appsv1beta1.StatefulSet) {
 	pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
 		func() (bool, error) {
 			podList := s.GetPodList(ss)
@@ -330,10 +330,10 @@ func (s *StatefulSetTester) WaitForRunning(numPodsRunning, numPodsReady int32, s
 }
 
 // WaitForState periodically polls for the ss and its pods until the until function returns either true or an error
-func (s *StatefulSetTester) WaitForState(ss *appsv1alpha1.StatefulSet, until func(*appsv1alpha1.StatefulSet, *v1.PodList) (bool, error)) {
+func (s *StatefulSetTester) WaitForState(ss *appsv1beta1.StatefulSet, until func(*appsv1beta1.StatefulSet, *v1.PodList) (bool, error)) {
 	pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
 		func() (bool, error) {
-			ssGet, err := s.kc.AppsV1alpha1().StatefulSets(ss.Namespace).Get(context.TODO(), ss.Name, metav1.GetOptions{})
+			ssGet, err := s.kc.AppsV1beta1().StatefulSets(ss.Namespace).Get(context.TODO(), ss.Name, metav1.GetOptions{})
 			if err != nil {
 				return false, err
 			}
@@ -347,8 +347,8 @@ func (s *StatefulSetTester) WaitForState(ss *appsv1alpha1.StatefulSet, until fun
 
 // WaitForStatus waits for the StatefulSetStatus's ObservedGeneration to be greater than or equal to set's Generation.
 // The returned StatefulSet contains such a StatefulSetStatus
-func (s *StatefulSetTester) WaitForStatus(set *appsv1alpha1.StatefulSet) *appsv1alpha1.StatefulSet {
-	s.WaitForState(set, func(set2 *appsv1alpha1.StatefulSet, pods *v1.PodList) (bool, error) {
+func (s *StatefulSetTester) WaitForStatus(set *appsv1beta1.StatefulSet) *appsv1beta1.StatefulSet {
+	s.WaitForState(set, func(set2 *appsv1beta1.StatefulSet, pods *v1.PodList) (bool, error) {
 		if set2.Status.ObservedGeneration >= set.Generation {
 			set = set2
 			return true, nil
@@ -359,14 +359,14 @@ func (s *StatefulSetTester) WaitForStatus(set *appsv1alpha1.StatefulSet) *appsv1
 }
 
 // WaitForRunningAndReady waits for numStatefulPods in ss to be Running and Ready.
-func (s *StatefulSetTester) WaitForRunningAndReady(numStatefulPods int32, ss *appsv1alpha1.StatefulSet) {
+func (s *StatefulSetTester) WaitForRunningAndReady(numStatefulPods int32, ss *appsv1beta1.StatefulSet) {
 	s.WaitForRunning(numStatefulPods, numStatefulPods, ss)
 }
 
 // WaitForPodReady waits for the Pod named podName in set to exist and have a Ready condition.
-func (s *StatefulSetTester) WaitForPodReady(set *appsv1alpha1.StatefulSet, podName string) (*appsv1alpha1.StatefulSet, *v1.PodList) {
+func (s *StatefulSetTester) WaitForPodReady(set *appsv1beta1.StatefulSet, podName string) (*appsv1beta1.StatefulSet, *v1.PodList) {
 	var pods *v1.PodList
-	s.WaitForState(set, func(set2 *appsv1alpha1.StatefulSet, pods2 *v1.PodList) (bool, error) {
+	s.WaitForState(set, func(set2 *appsv1beta1.StatefulSet, pods2 *v1.PodList) (bool, error) {
 		set = set2
 		pods = pods2
 		for i := range pods.Items {
@@ -381,9 +381,9 @@ func (s *StatefulSetTester) WaitForPodReady(set *appsv1alpha1.StatefulSet, podNa
 }
 
 // WaitForPodNotReady waits for the Pod named podName in set to exist and to not have a Ready condition.
-func (s *StatefulSetTester) WaitForPodNotReady(set *appsv1alpha1.StatefulSet, podName string) (*appsv1alpha1.StatefulSet, *v1.PodList) {
+func (s *StatefulSetTester) WaitForPodNotReady(set *appsv1beta1.StatefulSet, podName string) (*appsv1beta1.StatefulSet, *v1.PodList) {
 	var pods *v1.PodList
-	s.WaitForState(set, func(set2 *appsv1alpha1.StatefulSet, pods2 *v1.PodList) (bool, error) {
+	s.WaitForState(set, func(set2 *appsv1beta1.StatefulSet, pods2 *v1.PodList) (bool, error) {
 		set = set2
 		pods = pods2
 		for i := range pods.Items {
@@ -399,7 +399,7 @@ func (s *StatefulSetTester) WaitForPodNotReady(set *appsv1alpha1.StatefulSet, po
 
 // WaitForRollingUpdate waits for all Pods in set to exist and have the correct revision and for the RollingUpdate to
 // complete. set must have a RollingUpdateStatefulSetStrategyType.
-func (s *StatefulSetTester) WaitForRollingUpdate(set *appsv1alpha1.StatefulSet) (*appsv1alpha1.StatefulSet, *v1.PodList) {
+func (s *StatefulSetTester) WaitForRollingUpdate(set *appsv1beta1.StatefulSet) (*appsv1beta1.StatefulSet, *v1.PodList) {
 	var pods *v1.PodList
 	if set.Spec.UpdateStrategy.Type != apps.RollingUpdateStatefulSetStrategyType {
 		Failf("StatefulSet %s/%s attempt to wait for rolling update with updateStrategy %s",
@@ -407,7 +407,7 @@ func (s *StatefulSetTester) WaitForRollingUpdate(set *appsv1alpha1.StatefulSet)
 			set.Name,
 			set.Spec.UpdateStrategy.Type)
 	}
-	s.WaitForState(set, func(set2 *appsv1alpha1.StatefulSet, pods2 *v1.PodList) (bool, error) {
+	s.WaitForState(set, func(set2 *appsv1beta1.StatefulSet, pods2 *v1.PodList) (bool, error) {
 		set = set2
 		pods = pods2
 		if len(pods.Items) < int(*set.Spec.Replicas) {
@@ -439,7 +439,7 @@ func (s *StatefulSetTester) WaitForRollingUpdate(set *appsv1alpha1.StatefulSet)
 // a RollingUpdateStatefulSetStrategyType with a non-nil RollingUpdate and Partition. All Pods with ordinals less
 // than or equal to the Partition are expected to be at set's current revision. All other Pods are expected to be
 // at its update revision.
-func (s *StatefulSetTester) WaitForPartitionedRollingUpdate(set *appsv1alpha1.StatefulSet) (*appsv1alpha1.StatefulSet, *v1.PodList) {
+func (s *StatefulSetTester) WaitForPartitionedRollingUpdate(set *appsv1beta1.StatefulSet) (*appsv1beta1.StatefulSet, *v1.PodList) {
 	var pods *v1.PodList
 	if set.Spec.UpdateStrategy.Type != apps.RollingUpdateStatefulSetStrategyType {
 		Failf("StatefulSet %s/%s attempt to wait for partitioned update with updateStrategy %s",
@@ -452,7 +452,7 @@ func (s *StatefulSetTester) WaitForPartitionedRollingUpdate(set *appsv1alpha1.St
 			set.Namespace,
 			set.Name)
 	}
-	s.WaitForState(set, func(set2 *appsv1alpha1.StatefulSet, pods2 *v1.PodList) (bool, error) {
+	s.WaitForState(set, func(set2 *appsv1beta1.StatefulSet, pods2 *v1.PodList) (bool, error) {
 		set = set2
 		pods = pods2
 		partition := int(*set.Spec.UpdateStrategy.RollingUpdate.Partition)
@@ -492,7 +492,7 @@ func (s *StatefulSetTester) WaitForPartitionedRollingUpdate(set *appsv1alpha1.St
 }
 
 // WaitForRunningAndNotReady waits for numStatefulPods in ss to be Running and not Ready.
-func (s *StatefulSetTester) WaitForRunningAndNotReady(numStatefulPods int32, ss *appsv1alpha1.StatefulSet) {
+func (s *StatefulSetTester) WaitForRunningAndNotReady(numStatefulPods int32, ss *appsv1beta1.StatefulSet) {
 	s.WaitForRunning(numStatefulPods, 0, ss)
 }
 
@@ -511,12 +511,12 @@ var httpProbe = &v1.Probe{
 
 // SetHTTPProbe sets the pod template's ReadinessProbe for Nginx StatefulSet containers.
 // This probe can then be controlled with BreakHTTPProbe() and RestoreHTTPProbe().
 // Note that this cannot be used together with PauseNewPods().
-func (s *StatefulSetTester) SetHTTPProbe(ss *appsv1alpha1.StatefulSet) {
+func (s *StatefulSetTester) SetHTTPProbe(ss *appsv1beta1.StatefulSet) {
 	ss.Spec.Template.Spec.Containers[0].ReadinessProbe = httpProbe
 }
 
 // BreakHTTPProbe breaks the readiness probe for Nginx StatefulSet containers in ss.
-func (s *StatefulSetTester) BreakHTTPProbe(ss *appsv1alpha1.StatefulSet) error {
+func (s *StatefulSetTester) BreakHTTPProbe(ss *appsv1beta1.StatefulSet) error {
 	path := httpProbe.HTTPGet.Path
 	if path == "" {
 		return fmt.Errorf("Path expected to be not empty: %v", path)
@@ -527,7 +527,7 @@ func (s *StatefulSetTester) BreakHTTPProbe(ss *appsv1alpha1.StatefulSet) error {
 }
 
 // BreakPodHTTPProbe breaks the readiness probe for Nginx StatefulSet containers in one pod.
-func (s *StatefulSetTester) BreakPodHTTPProbe(ss *appsv1alpha1.StatefulSet, pod *v1.Pod) error {
+func (s *StatefulSetTester) BreakPodHTTPProbe(ss *appsv1beta1.StatefulSet, pod *v1.Pod) error {
 	path := httpProbe.HTTPGet.Path
 	if path == "" {
 		return fmt.Errorf("Path expected to be not empty: %v", path)
@@ -540,7 +540,7 @@ func (s *StatefulSetTester) BreakPodHTTPProbe(ss *appsv1alpha1.StatefulSet, pod
 }
 
 // RestoreHTTPProbe restores the readiness probe for Nginx StatefulSet containers in ss.
-func (s *StatefulSetTester) RestoreHTTPProbe(ss *appsv1alpha1.StatefulSet) error {
+func (s *StatefulSetTester) RestoreHTTPProbe(ss *appsv1beta1.StatefulSet) error {
 	path := httpProbe.HTTPGet.Path
 	if path == "" {
 		return fmt.Errorf("Path expected to be not empty: %v", path)
@@ -551,7 +551,7 @@ func (s *StatefulSetTester) RestoreHTTPProbe(ss *appsv1alpha1.StatefulSet) error
 }
 
 // RestorePodHTTPProbe restores the readiness probe for Nginx StatefulSet containers in pod.
-func (s *StatefulSetTester) RestorePodHTTPProbe(ss *appsv1alpha1.StatefulSet, pod *v1.Pod) error {
+func (s *StatefulSetTester) RestorePodHTTPProbe(ss *appsv1beta1.StatefulSet, pod *v1.Pod) error {
 	path := httpProbe.HTTPGet.Path
 	if path == "" {
 		return fmt.Errorf("Path expected to be not empty: %v", path)
@@ -581,7 +581,7 @@ func hasPauseProbe(pod *v1.Pod) bool {
 // This causes all newly-created Pods to stay Unready until they are manually resumed
 // with ResumeNextPod().
 // Note that this cannot be used together with SetHTTPProbe().
-func (s *StatefulSetTester) PauseNewPods(ss *appsv1alpha1.StatefulSet) {
+func (s *StatefulSetTester) PauseNewPods(ss *appsv1beta1.StatefulSet) {
 	ss.Spec.Template.Spec.Containers[0].ReadinessProbe = pauseProbe
 }
 
@@ -590,7 +590,7 @@ func (s *StatefulSetTester) PauseNewPods(ss *appsv1alpha1.StatefulSet) {
 // It fails the test if it finds any pods that are not in phase Running,
 // or if it finds more than one paused Pod existing at the same time.
 // This is a no-op if there are no paused pods.
-func (s *StatefulSetTester) ResumeNextPod(ss *appsv1alpha1.StatefulSet) {
+func (s *StatefulSetTester) ResumeNextPod(ss *appsv1beta1.StatefulSet) {
 	podList := s.GetPodList(ss)
 	resumedPod := ""
 	for _, pod := range podList.Items {
@@ -611,13 +611,13 @@ func (s *StatefulSetTester) ResumeNextPod(ss *appsv1alpha1.StatefulSet) {
 }
 
 // WaitForStatusReadyReplicas waits for the ss.Status.ReadyReplicas to be equal to expectedReplicas
-func (s *StatefulSetTester) WaitForStatusReadyReplicas(ss *appsv1alpha1.StatefulSet, expectedReplicas int32) {
+func (s *StatefulSetTester) WaitForStatusReadyReplicas(ss *appsv1beta1.StatefulSet, expectedReplicas int32) {
 	Logf("Waiting for statefulset status.replicas updated to %d", expectedReplicas)
 
 	ns, name := ss.Namespace, ss.Name
 	pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
 		func() (bool, error) {
-			ssGet, err := s.kc.AppsV1alpha1().StatefulSets(ns).Get(context.TODO(), name, metav1.GetOptions{})
+			ssGet, err := s.kc.AppsV1beta1().StatefulSets(ns).Get(context.TODO(), name, metav1.GetOptions{})
 			if err != nil {
 				return false, err
 			}
@@ -636,13 +636,13 @@ func (s *StatefulSetTester) WaitForStatusReadyReplicas(ss *appsv1alpha1.Stateful
 }
 
 // WaitForStatusReplicas waits for the ss.Status.Replicas to be equal to expectedReplicas
-func (s *StatefulSetTester) WaitForStatusReplicas(ss *appsv1alpha1.StatefulSet, expectedReplicas int32) {
+func (s *StatefulSetTester) WaitForStatusReplicas(ss *appsv1beta1.StatefulSet, expectedReplicas int32) {
 	Logf("Waiting for statefulset status.replicas updated to %d", expectedReplicas)
 
 	ns, name := ss.Namespace, ss.Name
 	pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
 		func() (bool, error) {
-			ssGet, err := s.kc.AppsV1alpha1().StatefulSets(ns).Get(context.TODO(), name, metav1.GetOptions{})
+			ssGet, err := s.kc.AppsV1beta1().StatefulSets(ns).Get(context.TODO(), name, metav1.GetOptions{})
 			if err != nil {
 				return false, err
 			}
@@ -661,7 +661,7 @@ func (s *StatefulSetTester) WaitForStatusReplicas(ss *appsv1alpha1.StatefulSet,
 }
 
 // CheckServiceName asserts that the ServiceName for ss is equivalent to expectedServiceName.
-func (s *StatefulSetTester) CheckServiceName(ss *appsv1alpha1.StatefulSet, expectedServiceName string) error {
+func (s *StatefulSetTester) CheckServiceName(ss *appsv1beta1.StatefulSet, expectedServiceName string) error {
 	Logf("Checking if statefulset spec.serviceName is %s", expectedServiceName)
 
 	if expectedServiceName != ss.Spec.ServiceName {
@@ -680,7 +680,7 @@ func (s *StatefulSetTester) SortStatefulPods(pods *v1.PodList) {
 // DeleteAllStatefulSets deletes all StatefulSet API Objects in Namespace ns.
 func DeleteAllStatefulSets(c clientset.Interface, kc kruiseclientset.Interface, ns string) {
 	sst := &StatefulSetTester{c: c, kc: kc}
-	ssList, err := kc.AppsV1alpha1().StatefulSets(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()})
+	ssList, err := kc.AppsV1beta1().StatefulSets(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()})
 	ExpectNoError(err)
 
 	// Scale down each statefulset, then delete it completely.
@@ -696,7 +696,7 @@ func DeleteAllStatefulSets(c clientset.Interface, kc kruiseclientset.Interface,
 		Logf("Deleting statefulset %v", ss.Name)
 		// Use OrphanDependents=false so it's deleted synchronously.
 		// We already made sure the Pods are gone inside Scale().
-		if err := kc.AppsV1alpha1().StatefulSets(ss.Namespace).Delete(context.TODO(), ss.Name, metav1.DeleteOptions{OrphanDependents: new(bool)}); err != nil {
+		if err := kc.AppsV1beta1().StatefulSets(ss.Namespace).Delete(context.TODO(), ss.Name, metav1.DeleteOptions{OrphanDependents: new(bool)}); err != nil {
 			errList = append(errList, fmt.Sprintf("%v", err))
 		}
 	}
@@ -773,7 +773,7 @@ func NewStatefulSetPVC(name string) v1.PersistentVolumeClaim {
 
 // NewStatefulSet creates a new NGINX StatefulSet for testing. The StatefulSet is named name, is in namespace ns,
 // statefulPodMounts are the mounts that will be backed by PVs. podMounts are the mounts that are mounted directly
 // to the Pod. labels are the labels that will be used for the StatefulSet selector.
-func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulPodMounts []v1.VolumeMount, podMounts []v1.VolumeMount, labels map[string]string) *appsv1alpha1.StatefulSet {
+func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulPodMounts []v1.VolumeMount, podMounts []v1.VolumeMount, labels map[string]string) *appsv1beta1.StatefulSet {
 	mounts := append(statefulPodMounts, podMounts...)
 	claims := []v1.PersistentVolumeClaim{}
 	for _, m := range statefulPodMounts {
@@ -792,7 +792,7 @@ func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulP
 		})
 	}
 
-	return &appsv1alpha1.StatefulSet{
+	return &appsv1beta1.StatefulSet{
 		TypeMeta: metav1.TypeMeta{
 			Kind:       "StatefulSet",
-			APIVersion: "apps.kruise.io/v1alpha1",
+			APIVersion: "apps.kruise.io/v1beta1",
@@ -801,7 +801,7 @@ func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulP
 			Name:      name,
 			Namespace: ns,
 		},
-		Spec: appsv1alpha1.StatefulSetSpec{
+		Spec: appsv1beta1.StatefulSetSpec{
 			Selector: &metav1.LabelSelector{
 				MatchLabels: labels,
 			},
@@ -814,15 +814,16 @@ func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulP
 				Spec: v1.PodSpec{
 					Containers: []v1.Container{
 						{
-							Name:         "nginx",
-							Image:        imageutils.GetE2EImage(imageutils.Nginx),
-							VolumeMounts: mounts,
+							Name:            "nginx",
+							Image:           imageutils.GetE2EImage(imageutils.Nginx),
+							VolumeMounts:    mounts,
+							ImagePullPolicy: v1.PullIfNotPresent,
 						},
 					},
 					Volumes: vols,
 				},
 			},
-			UpdateStrategy:       appsv1alpha1.StatefulSetUpdateStrategy{Type: apps.RollingUpdateStatefulSetStrategyType},
+			UpdateStrategy:       appsv1beta1.StatefulSetUpdateStrategy{Type: apps.RollingUpdateStatefulSetStrategyType},
 			VolumeClaimTemplates: claims,
 			ServiceName:          governingSvcName,
 		},
@@ -830,7 +831,7 @@ func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulP
 }
 
 // NewStatefulSetScale creates a new StatefulSet scale subresource and returns it
-func NewStatefulSetScale(ss *appsv1alpha1.StatefulSet) *appsV1beta2.Scale {
+func NewStatefulSetScale(ss *appsv1beta1.StatefulSet) *appsV1beta2.Scale {
 	return &appsV1beta2.Scale{
 		// TODO: Create a variant of ObjectMeta type that only contains the fields below.
 		ObjectMeta: metav1.ObjectMeta{
@@ -874,11 +875,11 @@ func (sp statefulPodsByOrdinal) Less(i, j int) bool {
 	return getStatefulPodOrdinal(&sp[i]) < getStatefulPodOrdinal(&sp[j])
 }
 
-type updateStatefulSetFunc func(*appsv1alpha1.StatefulSet)
+type updateStatefulSetFunc func(*appsv1beta1.StatefulSet)
 
 // UpdateStatefulSetWithRetries updates a StatefulSet with retries
-func UpdateStatefulSetWithRetries(kc kruiseclientset.Interface, namespace, name string, applyUpdate updateStatefulSetFunc) (statefulSet *appsv1alpha1.StatefulSet, err error) {
-	statefulSets := kc.AppsV1alpha1().StatefulSets(namespace)
+func UpdateStatefulSetWithRetries(kc kruiseclientset.Interface, namespace, name string, applyUpdate updateStatefulSetFunc) (statefulSet *appsv1beta1.StatefulSet, err error) {
+	statefulSets := kc.AppsV1beta1().StatefulSets(namespace)
 	var updateErr error
 	pollErr := wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
 		if statefulSet, err = statefulSets.Get(context.TODO(), name, metav1.GetOptions{}); err != nil {
diff --git a/test/e2e/manifest/manifest.go b/test/e2e/manifest/manifest.go
index 4a9fdfe22b..77ada96698 100644
--- a/test/e2e/manifest/manifest.go
+++ b/test/e2e/manifest/manifest.go
@@ -18,7 +18,7 @@ limitations under the License.
 package manifest
 
 import (
-	appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1"
+	appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1"
 	"github.com/openkruise/kruise/test/e2e/framework/testfiles"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -28,8 +28,8 @@ import (
 )
 
 // StatefulSetFromManifest returns a StatefulSet from a manifest stored in fileName in the Namespace indicated by ns.
-func StatefulSetFromManifest(fileName, ns string) (*appsv1alpha1.StatefulSet, error) {
-	var ss appsv1alpha1.StatefulSet
+func StatefulSetFromManifest(fileName, ns string) (*appsv1beta1.StatefulSet, error) {
+	var ss appsv1beta1.StatefulSet
 	data, err := testfiles.Read(fileName)
 	if err != nil {
 		return nil, err

From 9476d70e63566fe75e5b9eb3666e42c119aec316 Mon Sep 17 00:00:00 2001
From: veophi
Date: Mon, 21 Mar 2022 17:58:04 +0800
Subject: [PATCH 5/5] fix informer cache pollution in the statefulset
 controller

Signed-off-by: veophi

---
 pkg/controller/statefulset/stateful_pod_control.go      | 6 ++++--
 pkg/controller/statefulset/stateful_pod_control_test.go | 7 +++++--
 2 files changed, 9 insertions(+), 4 deletions(-)

diff --git a/pkg/controller/statefulset/stateful_pod_control.go b/pkg/controller/statefulset/stateful_pod_control.go
index 1b3b5058ba..e3d167674f 100644
--- a/pkg/controller/statefulset/stateful_pod_control.go
+++ b/pkg/controller/statefulset/stateful_pod_control.go
@@ -244,9 +244,10 @@ func (spc *StatefulPodControl) UpdatePodClaimForRetentionPolicy(set *appsv1beta1
 		return fmt.Errorf("Could not retrieve claim %s not found for %s when checking PVC deletion policy: %w", claimName, pod.Name, err)
 	default:
 		if !claimOwnerMatchesSetAndPod(claim, set, pod) {
-			needsUpdate := updateClaimOwnerRefForSetAndPod(claim, set, pod)
+			claimClone := claim.DeepCopy()
+			needsUpdate := updateClaimOwnerRefForSetAndPod(claimClone, set, pod)
 			if needsUpdate {
-				err := spc.objectMgr.UpdateClaim(claim)
+				err := spc.objectMgr.UpdateClaim(claimClone)
 				if err != nil {
 					return fmt.Errorf("Could not update claim %s for delete policy ownerRefs: %w", claimName, err)
 				}
@@ -341,6 +342,7 @@ func (spc *StatefulPodControl) createPersistentVolumeClaims(set *appsv1beta1.Sta
 			errs = append(errs, fmt.Errorf("pvc %s is to be deleted", claim.Name))
 		}
 		// TODO: Check resource requirements and accessmodes, update if necessary
+		// Don't forget to deep copy the PVC if you need to update it
 	}
 	return errorutils.NewAggregate(errs)
 }
diff --git a/pkg/controller/statefulset/stateful_pod_control_test.go b/pkg/controller/statefulset/stateful_pod_control_test.go
index 0efd68cf6e..1f8fab300d 100644
--- a/pkg/controller/statefulset/stateful_pod_control_test.go
+++ b/pkg/controller/statefulset/stateful_pod_control_test.go
@@ -18,6 +18,7 @@ limitations under the License.
 package statefulset
 
 import (
+	"context"
 	"errors"
 	"fmt"
 	"strings"
@@ -534,17 +535,19 @@ func TestStatefulPodControlUpdatePodClaimForRetentionPolicy(t *testing.T) {
 	// tests the wiring from the pod control to that method.
 	testFn := func(t *testing.T) {
 		defer utilfeature.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.StatefulSetAutoDeletePVC, true)()
-		fakeClient := &fake.Clientset{}
 		indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
 		claimLister := corelisters.NewPersistentVolumeClaimLister(indexer)
 		set := newStatefulSet(3)
 		set.GetObjectMeta().SetUID("set-123")
 		pod := newStatefulSetPod(set, 0)
 		claims := getPersistentVolumeClaims(set, pod)
+		claimObjects := make([]runtime.Object, 0)
 		for k := range claims {
 			claim := claims[k]
 			indexer.Add(&claim)
+			claimObjects = append(claimObjects, &claim)
 		}
+		fakeClient := fake.NewSimpleClientset(claimObjects...)
 		control := NewStatefulPodControl(fakeClient, nil, nil, claimLister, &noopRecorder{})
 		set.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{
 			WhenDeleted: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType,
@@ -555,7 +558,7 @@ func TestStatefulPodControlUpdatePodClaimForRetentionPolicy(t *testing.T) {
 		}
 		expectRef := utilfeature.DefaultFeatureGate.Enabled(features.StatefulSetAutoDeletePVC)
 		for k := range claims {
-			claim, err := claimLister.PersistentVolumeClaims(claims[k].Namespace).Get(claims[k].Name)
+			claim, err := fakeClient.CoreV1().PersistentVolumeClaims(claims[k].Namespace).Get(context.TODO(), claims[k].Name, metav1.GetOptions{})
 			if err != nil {
 				t.Errorf("Unexpected error getting Claim %s/%s: %v", claim.Namespace, claim.Name, err)
 			}
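
Usage sketch (illustrative, not part of the series): with the alpha StatefulSetAutoDeletePVC feature gate enabled in kruise-manager, an Advanced StatefulSet opts in to PVC cleanup through the new spec field. The helper name enableAutoDeletePVC below is hypothetical; the clientset calls mirror the AppsV1beta1().StatefulSets(ns).Get/Update usage in the e2e utilities above.

package example

import (
	"context"

	appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1"
	kruiseclientset "github.com/openkruise/kruise/pkg/client/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// enableAutoDeletePVC updates an existing Advanced StatefulSet so that its
// PVCs are deleted when the set is deleted but retained on scale-down.
// Hypothetical helper for illustration only; requires the StatefulSetAutoDeletePVC
// feature gate (alpha, off by default) to be enabled.
func enableAutoDeletePVC(kc kruiseclientset.Interface, ns, name string) error {
	ss, err := kc.AppsV1beta1().StatefulSets(ns).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	ss.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{
		WhenDeleted: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType,
		WhenScaled:  appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType,
	}
	_, err = kc.AppsV1beta1().StatefulSets(ns).Update(context.TODO(), ss, metav1.UpdateOptions{})
	return err
}

On such an update, the controller's UpdatePodClaimForRetentionPolicy path (PATCH 5/5 above) deep-copies each claim read from the lister before rewriting ownerRefs, so informer-cached objects are never mutated in place.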