-
Notifications
You must be signed in to change notification settings - Fork 133
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Prevent calling controller-expand if volume in-use #86
Changes from 1 commit
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -23,10 +23,13 @@ import ( | |
|
||
"github.com/kubernetes-csi/external-resizer/pkg/resizer" | ||
"github.com/kubernetes-csi/external-resizer/pkg/util" | ||
"google.golang.org/grpc/codes" | ||
"google.golang.org/grpc/status" | ||
v1 "k8s.io/api/core/v1" | ||
k8serrors "k8s.io/apimachinery/pkg/api/errors" | ||
"k8s.io/apimachinery/pkg/api/resource" | ||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||
utilruntime "k8s.io/apimachinery/pkg/util/runtime" | ||
"k8s.io/apimachinery/pkg/util/wait" | ||
"k8s.io/client-go/informers" | ||
"k8s.io/client-go/kubernetes" | ||
|
@@ -56,6 +59,11 @@ type resizeController struct { | |
pvSynced cache.InformerSynced | ||
pvcLister corelisters.PersistentVolumeClaimLister | ||
pvcSynced cache.InformerSynced | ||
|
||
usedPVCs *inUsePVCStore | ||
|
||
podLister corelisters.PodLister | ||
podListerSynced cache.InformerSynced | ||
} | ||
|
||
// NewResizeController returns a ResizeController. | ||
|
@@ -69,6 +77,9 @@ func NewResizeController( | |
pvInformer := informerFactory.Core().V1().PersistentVolumes() | ||
pvcInformer := informerFactory.Core().V1().PersistentVolumeClaims() | ||
|
||
// list pods so that we can identify PVCs that are in-use | ||
podInformer := informerFactory.Core().V1().Pods() | ||
|
||
eventBroadcaster := record.NewBroadcaster() | ||
eventBroadcaster.StartLogging(klog.Infof) | ||
eventBroadcaster.StartRecordingToSink(&corev1.EventSinkImpl{Interface: kubeClient.CoreV1().Events(v1.NamespaceAll)}) | ||
|
@@ -79,15 +90,18 @@ func NewResizeController( | |
pvcRateLimiter, fmt.Sprintf("%s-pvc", name)) | ||
|
||
ctrl := &resizeController{ | ||
name: name, | ||
resizer: resizer, | ||
kubeClient: kubeClient, | ||
pvLister: pvInformer.Lister(), | ||
pvSynced: pvInformer.Informer().HasSynced, | ||
pvcLister: pvcInformer.Lister(), | ||
pvcSynced: pvcInformer.Informer().HasSynced, | ||
claimQueue: claimQueue, | ||
eventRecorder: eventRecorder, | ||
name: name, | ||
resizer: resizer, | ||
kubeClient: kubeClient, | ||
pvLister: pvInformer.Lister(), | ||
pvSynced: pvInformer.Informer().HasSynced, | ||
pvcLister: pvcInformer.Lister(), | ||
pvcSynced: pvcInformer.Informer().HasSynced, | ||
podLister: podInformer.Lister(), | ||
podListerSynced: podInformer.Informer().HasSynced, | ||
claimQueue: claimQueue, | ||
eventRecorder: eventRecorder, | ||
usedPVCs: newUsedPVCStore(), | ||
} | ||
|
||
// Add a resync period as the PVC's request size can be resized again when we handling | ||
|
@@ -98,17 +112,38 @@ func NewResizeController( | |
DeleteFunc: ctrl.deletePVC, | ||
}, resyncPeriod) | ||
|
||
podInformer.Informer().AddEventHandlerWithResyncPeriod(cache.ResourceEventHandlerFuncs{ | ||
AddFunc: ctrl.addPod, | ||
DeleteFunc: ctrl.deletePod, | ||
}, resyncPeriod) | ||
|
||
return ctrl | ||
} | ||
|
||
func (ctrl *resizeController) addPVC(obj interface{}) { | ||
objKey, err := getPVCKey(obj) | ||
objKey, err := getObjectKey(obj) | ||
if err != nil { | ||
return | ||
} | ||
ctrl.claimQueue.Add(objKey) | ||
} | ||
|
||
func (ctrl *resizeController) addPod(obj interface{}) { | ||
pod := parsePod(obj) | ||
if pod != nil { | ||
gnufied marked this conversation as resolved.
Show resolved
Hide resolved
|
||
return | ||
} | ||
ctrl.usedPVCs.addPod(pod) | ||
} | ||
|
||
func (ctrl *resizeController) deletePod(obj interface{}) { | ||
pod := parsePod(obj) | ||
if pod != nil { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Same comment here. |
||
return | ||
} | ||
ctrl.usedPVCs.removePod(pod) | ||
} | ||
|
||
func (ctrl *resizeController) updatePVC(oldObj, newObj interface{}) { | ||
oldPVC, ok := oldObj.(*v1.PersistentVolumeClaim) | ||
if !ok || oldPVC == nil { | ||
|
@@ -162,14 +197,14 @@ func (ctrl *resizeController) updatePVC(oldObj, newObj interface{}) { | |
} | ||
|
||
func (ctrl *resizeController) deletePVC(obj interface{}) { | ||
objKey, err := getPVCKey(obj) | ||
objKey, err := getObjectKey(obj) | ||
if err != nil { | ||
return | ||
} | ||
ctrl.claimQueue.Forget(objKey) | ||
} | ||
|
||
func getPVCKey(obj interface{}) (string, error) { | ||
func getObjectKey(obj interface{}) (string, error) { | ||
if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil { | ||
obj = unknown.Obj | ||
} | ||
|
@@ -191,8 +226,8 @@ func (ctrl *resizeController) Run( | |
|
||
stopCh := ctx.Done() | ||
|
||
if !cache.WaitForCacheSync(stopCh, ctrl.pvSynced, ctrl.pvcSynced) { | ||
klog.Errorf("Cannot sync pv/pvc caches") | ||
if !cache.WaitForCacheSync(stopCh, ctrl.pvSynced, ctrl.pvcSynced, ctrl.podListerSynced) { | ||
klog.Errorf("Cannot sync pod, pv or pvc caches") | ||
return | ||
} | ||
|
||
|
@@ -322,6 +357,15 @@ func (ctrl *resizeController) resizePVC(pvc *v1.PersistentVolumeClaim, pv *v1.Pe | |
pvc = updatedPVC | ||
} | ||
|
||
// if pvc previously failed to expand because it can't be expanded when in-use | ||
// we must not try expansion here | ||
if ctrl.usedPVCs.hasInUseErrors(pvc) && ctrl.usedPVCs.checkForUse(pvc) { | ||
// Record an event to indicate that resizer is not expanding the pvc | ||
ctrl.eventRecorder.Event(pvc, v1.EventTypeWarning, util.VolumeResizeFailed, | ||
fmt.Sprintf("CSI resizer is not expanding %s because it is in-use", pv.Name)) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Can you add a message to indicate that the CSI driver only supports offline volume expansion. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. done. |
||
return fmt.Errorf("csi resizer is not expanding %s because it is in-use", pv.Name) | ||
} | ||
|
||
// Record an event to indicate that external resizer is resizing this volume. | ||
ctrl.eventRecorder.Event(pvc, v1.EventTypeNormal, util.VolumeResizing, | ||
fmt.Sprintf("External resizer is resizing volume %s", pv.Name)) | ||
|
@@ -352,12 +396,24 @@ func (ctrl *resizeController) resizePVC(pvc *v1.PersistentVolumeClaim, pv *v1.Pe | |
func (ctrl *resizeController) resizeVolume( | ||
pvc *v1.PersistentVolumeClaim, | ||
pv *v1.PersistentVolume) (resource.Quantity, bool, error) { | ||
|
||
// before trying expansion we will remove the PVC from the map | ||
// that tracks PVCs which can't be expanded when in-use. If | ||
// pvc indeed cannot be expanded when in-use then it will be added | ||
// back when expansion fails with in-use error. | ||
ctrl.usedPVCs.removePVCWithInUseError(pvc) | ||
|
||
requestSize := pvc.Spec.Resources.Requests[v1.ResourceStorage] | ||
|
||
newSize, fsResizeRequired, err := ctrl.resizer.Resize(pv, requestSize) | ||
|
||
if err != nil { | ||
klog.Errorf("Resize volume %q by resizer %q failed: %v", pv.Name, ctrl.name, err) | ||
// if this error was an in-use error then it must be tracked so that we don't retry without | ||
// first verifying if volume is in-use | ||
if inUseError(err) { | ||
ctrl.usedPVCs.addPVCWithInUseError(pvc) | ||
} | ||
return newSize, fsResizeRequired, fmt.Errorf("resize volume %s failed: %v", pv.Name, err) | ||
} | ||
klog.V(4).Infof("Resize volume succeeded for volume %q, start to update PV's capacity", pv.Name) | ||
|
@@ -422,3 +478,37 @@ func (ctrl *resizeController) markPVCAsFSResizeRequired(pvc *v1.PersistentVolume | |
|
||
return nil | ||
} | ||
|
||
func parsePod(obj interface{}) *v1.Pod { | ||
if obj == nil { | ||
return nil | ||
} | ||
pod, ok := obj.(*v1.Pod) | ||
if !ok { | ||
tombstone, ok := obj.(cache.DeletedFinalStateUnknown) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Can you pick a different name than "tombstone"? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. renamed to |
||
if !ok { | ||
utilruntime.HandleError(fmt.Errorf("couldn't get object from tombstone %#v", obj)) | ||
gnufied marked this conversation as resolved.
Show resolved
Hide resolved
|
||
return nil | ||
} | ||
pod, ok = tombstone.Obj.(*v1.Pod) | ||
if !ok { | ||
utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a Pod %#v", obj)) | ||
gnufied marked this conversation as resolved.
Show resolved
Hide resolved
|
||
return nil | ||
} | ||
} | ||
return pod | ||
} | ||
|
||
func inUseError(err error) bool { | ||
st, ok := status.FromError(err) | ||
if !ok { | ||
// not a grpc error | ||
return false | ||
} | ||
// if this is a failed precondition error then that means the driver does not support expansion | ||
// of in-use volumes | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Can you add a comment linking to the CSI spec description about this? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. done |
||
if st.Code() == codes.FailedPrecondition { | ||
return true | ||
} | ||
return false | ||
} |
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -3,10 +3,11 @@ package controller | |
import ( | ||
"context" | ||
"fmt" | ||
"k8s.io/client-go/util/workqueue" | ||
"testing" | ||
"time" | ||
|
||
"k8s.io/client-go/util/workqueue" | ||
|
||
"github.com/kubernetes-csi/csi-lib-utils/metrics" | ||
"github.com/kubernetes-csi/external-resizer/pkg/csi" | ||
"github.com/kubernetes-csi/external-resizer/pkg/resizer" | ||
|
@@ -33,6 +34,12 @@ func TestController(t *testing.T) { | |
NodeResize bool | ||
CallCSIExpand bool | ||
expectBlockVolume bool | ||
expectError bool | ||
|
||
// is PVC being expanded in-use | ||
pvcInUse bool | ||
// does PVC being expanded has Failed Precondition errors | ||
pvcHasInUseErrors bool | ||
}{ | ||
{ | ||
Name: "Invalid key", | ||
|
@@ -59,7 +66,7 @@ func TestController(t *testing.T) { | |
{ | ||
Name: "pv claimref does not have pvc UID", | ||
PVC: createPVC(2, 1), | ||
PV: createPV(1, "testPVC" /*pvcName*/, "test" /*pvcNamespace*/, "foobaz" /*pvcUID*/, &fsVolumeMode), | ||
PV: createPV(1, "testPVC" /*pvcName*/, defaultNS, "foobaz" /*pvcUID*/, &fsVolumeMode), | ||
CallCSIExpand: false, | ||
}, | ||
{ | ||
|
@@ -77,27 +84,55 @@ func TestController(t *testing.T) { | |
{ | ||
Name: "Resize PVC, no FS resize", | ||
PVC: createPVC(2, 1), | ||
PV: createPV(1, "testPVC", "test", "foobar", &fsVolumeMode), | ||
PV: createPV(1, "testPVC", defaultNS, "foobar", &fsVolumeMode), | ||
CreateObjects: true, | ||
CallCSIExpand: true, | ||
}, | ||
{ | ||
Name: "Resize PVC with FS resize", | ||
PVC: createPVC(2, 1), | ||
PV: createPV(1, "testPVC", "test", "foobar", &fsVolumeMode), | ||
PV: createPV(1, "testPVC", defaultNS, "foobar", &fsVolumeMode), | ||
CreateObjects: true, | ||
NodeResize: true, | ||
CallCSIExpand: true, | ||
}, | ||
{ | ||
Name: "Block Resize PVC with FS resize", | ||
PVC: createPVC(2, 1), | ||
PV: createPV(1, "testPVC", "test", "foobar", &blockVolumeMode), | ||
PV: createPV(1, "testPVC", defaultNS, "foobar", &blockVolumeMode), | ||
CreateObjects: true, | ||
NodeResize: true, | ||
CallCSIExpand: true, | ||
expectBlockVolume: true, | ||
}, | ||
{ | ||
Name: "Resize PVC, no FS resize, pvc-inuse with failedprecondition", | ||
PVC: createPVC(2, 1), | ||
PV: createPV(1, "testPVC", defaultNS, "foobar", &fsVolumeMode), | ||
CreateObjects: true, | ||
CallCSIExpand: false, | ||
pvcHasInUseErrors: true, | ||
pvcInUse: true, | ||
expectError: true, | ||
}, | ||
{ | ||
Name: "Resize PVC, no FS resize, pvc-inuse but no failedprecondition error", | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. This one should be "with FS resize" as there is no failedprecondition? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. it should not matter. The "FS resize" only decides whether node expansion is pending or not. But since in this case, all we are trying to test is - if a pvc was in-use but has no failedprecondition from before then expansion should proceed as usual. |
||
PVC: createPVC(2, 1), | ||
PV: createPV(1, "testPVC", defaultNS, "foobar", &fsVolumeMode), | ||
CreateObjects: true, | ||
CallCSIExpand: true, | ||
pvcHasInUseErrors: false, | ||
pvcInUse: true, | ||
}, | ||
{ | ||
Name: "Resize PVC, no FS resize, pvc not in-use but has failedprecondition error", | ||
PVC: createPVC(2, 1), | ||
PV: createPV(1, "testPVC", defaultNS, "foobar", &fsVolumeMode), | ||
CreateObjects: true, | ||
CallCSIExpand: true, | ||
pvcHasInUseErrors: true, | ||
pvcInUse: false, | ||
}, | ||
} { | ||
client := csi.NewMockClient("mock", test.NodeResize, true, true) | ||
driverName, _ := client.GetDriverName(context.TODO()) | ||
|
@@ -137,11 +172,33 @@ func TestController(t *testing.T) { | |
} | ||
|
||
controller := NewResizeController(driverName, csiResizer, kubeClient, time.Second, informerFactory, workqueue.DefaultControllerRateLimiter()) | ||
err = controller.(*resizeController).syncPVC(fmt.Sprintf("%s/%s", test.PVC.Namespace, test.PVC.Name)) | ||
if err != nil { | ||
|
||
ctrlInstance, _ := controller.(*resizeController) | ||
|
||
if test.pvcHasInUseErrors { | ||
ctrlInstance.usedPVCs.addPVCWithInUseError(test.PVC) | ||
if !ctrlInstance.usedPVCs.hasInUseErrors(test.PVC) { | ||
t.Fatalf("pvc %s does not have in-use errors", test.PVC.Name) | ||
} | ||
} | ||
|
||
if test.pvcInUse { | ||
pod := withPVC(test.PVC.Name, pod()) | ||
ctrlInstance.usedPVCs.addPod(pod) | ||
if !ctrlInstance.usedPVCs.checkForUse(test.PVC) { | ||
t.Fatalf("pvc %s is not in use", test.PVC.Name) | ||
} | ||
} | ||
|
||
err = ctrlInstance.syncPVC(fmt.Sprintf("%s/%s", test.PVC.Namespace, test.PVC.Name)) | ||
if err != nil && !test.expectError { | ||
t.Fatalf("Test %s: Unexpected error: %v", test.Name, err) | ||
} | ||
|
||
if test.expectError && err == nil { | ||
t.Fatalf("Test %s: expected error, got none", test.Name) | ||
} | ||
|
||
expandCallCount := client.GetExpandCount() | ||
if test.CallCSIExpand && expandCallCount == 0 { | ||
t.Fatalf("for %s: expected csi expand call, no csi expand call was made", test.Name) | ||
|
@@ -183,7 +240,7 @@ func createPVC(requestGB, capacityGB int) *v1.PersistentVolumeClaim { | |
return &v1.PersistentVolumeClaim{ | ||
ObjectMeta: metav1.ObjectMeta{ | ||
Name: "testPVC", | ||
Namespace: "test", | ||
Namespace: defaultNS, | ||
UID: "foobar", | ||
}, | ||
Spec: v1.PersistentVolumeClaimSpec{ | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I see that the in-tree PVC protection controller also checks the pod. What is the advantage of checking thru the pod vs checking the VolumeAttachment object?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Because not all volume types support attach/detach and hence there may not be an VolumeAttachment object.