diff --git a/buildscripts/check-diff.sh b/buildscripts/check-diff.sh
index 9662b35..bf54916 100755
--- a/buildscripts/check-diff.sh
+++ b/buildscripts/check-diff.sh
@@ -22,5 +22,5 @@ TEST_NAME=$1
 if [[ `git diff --shortstat | wc -l` != 0 ]]; then
-	echo "Some files got changed after $1";printf "\n";git diff --stat;printf "\n"; exit 1;
+	echo "Some files got changed after $1";printf "\n";git --no-pager diff;printf "\n"; exit 1;
 fi
diff --git a/go.mod b/go.mod
index 8fd317f..678d967 100644
--- a/go.mod
+++ b/go.mod
@@ -38,6 +38,7 @@ require (
 	github.com/prometheus/client_golang v1.0.0
 	github.com/spf13/cobra v1.1.1
 	github.com/spf13/pflag v1.0.5
+	github.com/stretchr/testify v1.4.0
 	k8s.io/api v0.17.3
 	k8s.io/apimachinery v0.17.3
 	k8s.io/client-go v11.0.0+incompatible
diff --git a/provisioner/garbage_collector.go b/provisioner/garbage_collector.go
new file mode 100644
index 0000000..bd1d5ea
--- /dev/null
+++ b/provisioner/garbage_collector.go
@@ -0,0 +1,170 @@
+/*
+Copyright 2021 The OpenEBS Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package provisioner
+
+import (
+	"fmt"
+	"time"
+
+	mayav1alpha1 "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
+	errors "github.com/pkg/errors"
+	corev1 "k8s.io/api/core/v1"
+	k8serrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/klog"
+)
+
+var (
+	// GarbageCollectorInterval defines the periodic interval at which the
+	// garbage collector runs
+	GarbageCollectorInterval = 5 * time.Minute
+)
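A note on the scheduling choice in RunGarbageCollector below: time.NewTicker delivers its first tick only after one full interval has elapsed, so the collector is invoked once before entering the loop rather than waiting GarbageCollectorInterval for the first pass. A minimal standalone sketch of that run-once-then-tick pattern (illustrative only, not part of this patch):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	doWork := func() { fmt.Println("cleanup pass at", time.Now().Format(time.StampMilli)) }

	// Run immediately; the ticker alone would stay silent for the first interval.
	doWork()

	ticker := time.NewTicker(200 * time.Millisecond)
	defer ticker.Stop()
	for i := 0; i < 3; i++ {
		<-ticker.C
		doWork() // subsequent passes arrive on each tick
	}
}
```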
+ klog.V(4).Infof("Running garbage collector for stale NFS resources") + err := cleanUpStalePvc(client, pvTracker, ns) + klog.V(4).Infof("Garbage collection completed for stale NFS resources with error=%v", err) + + ticker := time.NewTicker(GarbageCollectorInterval) + + for { + select { + case <-stopCh: + ticker.Stop() + return + case <-ticker.C: + klog.V(4).Infof("Running garbage collector for stale NFS resources") + err = cleanUpStalePvc(client, pvTracker, ns) + klog.V(4).Infof("Garbage collection completed for stale NFS resources with error=%v", err) + } + } +} + +func cleanUpStalePvc(client kubernetes.Interface, pvTracker ProvisioningTracker, ns string) error { + backendPvcLabel := fmt.Sprintf("%s=%s", mayav1alpha1.CASTypeKey, "nfs-kernel") + pvcList, err := client.CoreV1().PersistentVolumeClaims(ns).List(metav1.ListOptions{LabelSelector: backendPvcLabel}) + if err != nil { + klog.Errorf("Failed to list PVC, err=%s", err) + return err + } + + for _, pvc := range pvcList.Items { + pvcExists, err := nfsPvcExists(client, pvc) + if err != nil { + // failed to check NFS PVC existence, + // will check in next retry + klog.Errorf("Failed to check NFS PVC for backendPVC=%s/%s, err=%v", ns, pvc.Name, err) + continue + } + + if pvcExists { + // NFS PVC exists for backend PVC + continue + } + + // check if NFS PV exists for this PVC or not + nfsPvName := "" + fmt.Sscanf(pvc.Name, "nfs-%s", &nfsPvName) + if nfsPvName == "" { + continue + } + + if pvTracker.Inprogress(nfsPvName) { + // provisioner is processing request for this PV + continue + } + + pvExists, err := pvExists(client, nfsPvName) + if err != nil { + // failed to check pv existence, will check in next retry + klog.Errorf("Failed to check NFS PV for backendPVC=%s/%s, err=%v", ns, pvc.Name, err) + continue + } + + if pvExists { + // Relevant NFS PV exists for backend PVC + continue + } + + // perform cleanup for stale NFS resource for this backend PVC + err = deleteBackendStaleResources(client, pvc.Namespace, nfsPvName) + if err != nil { + klog.Errorf("Failed to delete NFS resources for backendPVC=%s/%s, err=%v", ns, pvc.Name, err) + } + } + + return nil +} + +func deleteBackendStaleResources(client kubernetes.Interface, nfsServerNs, nfsPvName string) error { + klog.Infof("Deleting stale resources for PV=%s", nfsPvName) + + p := &Provisioner{ + kubeClient: client, + serverNamespace: nfsServerNs, + } + + nfsServerOpts := &KernelNFSServerOptions{ + pvName: nfsPvName, + } + + return p.deleteNFSServer(nfsServerOpts) +} + +func nfsPvcExists(client kubernetes.Interface, backendPvcObj corev1.PersistentVolumeClaim) (bool, error) { + nfsPvcName, nameExists := backendPvcObj.Labels[nfsPvcNameLabelKey] + nfsPvcNs, nsExists := backendPvcObj.Labels[nfsPvcNsLabelKey] + nfsPvcUID, uidExists := backendPvcObj.Labels[nfsPvcUIDLabelKey] + + if !nameExists || !nsExists || !uidExists { + return false, errors.New("backend PVC doesn't have sufficient information of nfs pvc") + } + + pvcObj, err := client.CoreV1().PersistentVolumeClaims(nfsPvcNs).Get(nfsPvcName, metav1.GetOptions{}) + if err != nil { + if !k8serrors.IsNotFound(err) { + // couldn't get the nfs pvc information due to network error or + // we don't have permission to fetch pvc from user namespace + return false, err + } + return false, nil + } + + if nfsPvcUID != string(pvcObj.UID) { + klog.Infof("different UID=%s actual=%s", nfsPvcUID, string(pvcObj.UID)) + // pvc is having different UID than nfs PVC, so + // original nfs pvc is deleted + return false, nil + } + + return true, nil +} + +func 
+
+// pvExists checks whether the given PV exists or not
+func pvExists(client kubernetes.Interface, pvName string) (bool, error) {
+	_, err := client.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
+	if err == nil {
+		return true, nil
+	}
+
+	if k8serrors.IsNotFound(err) {
+		return false, nil
+	}
+	return false, err
+}
diff --git a/provisioner/garbage_collector_test.go b/provisioner/garbage_collector_test.go
new file mode 100644
index 0000000..ef077fa
--- /dev/null
+++ b/provisioner/garbage_collector_test.go
@@ -0,0 +1,355 @@
+/*
+Copyright 2021 The OpenEBS Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package provisioner
+
+import (
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	k8serrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/client-go/kubernetes/fake"
+)
+
+func generateFakePvcObj(ns, name, uid string, phase corev1.PersistentVolumeClaimPhase, labels map[string]string) *corev1.PersistentVolumeClaim {
+	return &corev1.PersistentVolumeClaim{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: ns,
+			UID:       types.UID(uid),
+			Labels:    labels,
+		},
+		Spec: corev1.PersistentVolumeClaimSpec{},
+		Status: corev1.PersistentVolumeClaimStatus{
+			Phase: phase,
+		},
+	}
+}
+
+func generateFakePvObj(name string) *corev1.PersistentVolume {
+	return &corev1.PersistentVolume{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: name,
+		},
+		Spec:   corev1.PersistentVolumeSpec{},
+		Status: corev1.PersistentVolumeStatus{},
+	}
+}
+
+func generateBackendPvcLabel(nfsPvcNs, nfsPvcName, nfsPvcUID, nfsPvName string) map[string]string {
+	return map[string]string{
+		nfsPvcNameLabelKey:    nfsPvcName,
+		nfsPvcUIDLabelKey:     nfsPvcUID,
+		nfsPvcNsLabelKey:      nfsPvcNs,
+		"persistent-volume":   nfsPvName,
+		"openebs.io/cas-type": "nfs-kernel",
+	}
+}
+
+func getProvisioningTracker(pvName ...string) ProvisioningTracker {
+	tracker := NewProvisioningTracker()
+
+	for _, v := range pvName {
+		tracker.Add(v)
+	}
+
+	return tracker
+}
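The collector narrows its listing to backend PVCs with the openebs.io/cas-type=nfs-kernel label selector, and the tests below rely on the fake clientset applying that selector the way a real API server would. A brief sketch of that behavior (object names invented; the context-free List signature assumes the client-go version this repo vendors):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	backend := &corev1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{
		Name:      "nfs-pv1",
		Namespace: "nfs-ns",
		Labels:    map[string]string{"openebs.io/cas-type": "nfs-kernel"},
	}}
	unrelated := &corev1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{
		Name:      "app-data",
		Namespace: "nfs-ns",
	}}

	// The fake clientset serves both objects from an in-memory tracker.
	client := fake.NewSimpleClientset(backend, unrelated)

	list, err := client.CoreV1().PersistentVolumeClaims("nfs-ns").
		List(metav1.ListOptions{LabelSelector: "openebs.io/cas-type=nfs-kernel"})
	if err != nil {
		panic(err)
	}
	for _, item := range list.Items {
		fmt.Println(item.Name) // prints only "nfs-pv1"
	}
}
```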
+
+func TestRunGarbageCollector(t *testing.T) {
+	GarbageCollectorInterval = 10 * time.Second
+
+	nfsServerNs := "nfs-ns"
+
+	clientset := fake.NewSimpleClientset()
+	pvTracker := getProvisioningTracker()
+
+	backendPvc := generateFakePvcObj(nfsServerNs, "nfs-pv5", "backend-pvc5-uid", corev1.ClaimBound,
+		generateBackendPvcLabel("ns5", "pvc5", "uid5", "pv5"))
+	nfsDeployment := getFakeDeploymentObject(nfsServerNs, "nfs-pv5")
+	nfsService := getFakeServiceObject(nfsServerNs, "nfs-pv5")
+
+	assert.NoError(t, createPvc(clientset, backendPvc), "on creating backend PVC resource")
+	assert.NoError(t, createDeployment(clientset, nfsDeployment), "on creating nfs-server deployment resource")
+	assert.NoError(t, createService(clientset, nfsService), "on creating nfs-server service resource")
+
+	stopCh := make(chan struct{})
+	go RunGarbageCollector(clientset, pvTracker, nfsServerNs, stopCh)
+
+	time.Sleep(GarbageCollectorInterval + 10*time.Second /* to ensure cleanUpStalePvc runs */)
+	close(stopCh)
+
+	exists, err := pvcExists(clientset, backendPvc.Namespace, backendPvc.Name)
+	assert.NoError(t, err, "checking backend PVC existence")
+	assert.Equal(t, false, exists, "backend PVC should be removed")
+
+	exists, err = deploymentExists(clientset, nfsDeployment.Namespace, nfsDeployment.Name)
+	assert.NoError(t, err, "checking nfs-server deployment existence")
+	assert.Equal(t, false, exists, "nfs-server deployment should be removed")
+
+	exists, err = serviceExists(clientset, nfsService.Namespace, nfsService.Name)
+	assert.NoError(t, err, "checking nfs-server service existence")
+	assert.Equal(t, false, exists, "nfs-server service should be removed")
+}
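TestRunGarbageCollector above stops the collector by closing stopCh. Closing the channel only requests that the goroutine exit; a test that needed to assert on state strictly after shutdown would also have to wait for the exit, for example with a done channel. A minimal sketch of that close-and-wait variant (illustrative only, not taken from this patch):

```go
package main

import (
	"fmt"
	"time"
)

func runLoop(interval time.Duration, work func(), stopCh <-chan struct{}) {
	work() // immediate first pass, mirroring RunGarbageCollector
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-stopCh:
			return
		case <-ticker.C:
			work()
		}
	}
}

func main() {
	stopCh := make(chan struct{})
	done := make(chan struct{})

	go func() {
		runLoop(10*time.Millisecond, func() { fmt.Println("pass") }, stopCh)
		close(done)
	}()

	time.Sleep(35 * time.Millisecond)
	close(stopCh) // request shutdown ...
	<-done        // ... and wait until the loop has actually returned
	fmt.Println("stopped")
}
```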
+
+func TestCleanUpStalePvc(t *testing.T) {
+	nfsServerNs := "nfs-ns"
+
+	tests := []struct {
+		// name describes the test
+		name string
+
+		clientset *fake.Clientset
+		pvTracker ProvisioningTracker
+
+		nfsPvc        *corev1.PersistentVolumeClaim
+		nfsPv         *corev1.PersistentVolume
+		backendPvc    *corev1.PersistentVolumeClaim
+		nfsDeployment *appsv1.Deployment
+		nfsService    *corev1.Service
+
+		shouldCleanup bool
+	}{
+		{
+			name: "when NFS PVC is in bound state, NFS resources should not be destroyed",
+
+			clientset:     fake.NewSimpleClientset(),
+			pvTracker:     getProvisioningTracker(),
+			shouldCleanup: false,
+
+			nfsPvc: generateFakePvcObj("ns1", "pvc1", "uid1", corev1.ClaimBound, nil),
+			nfsPv:  generateFakePvObj("pv1"),
+			backendPvc: generateFakePvcObj(nfsServerNs, "nfs-pv1", "backend-pvc1-uid", corev1.ClaimBound,
+				generateBackendPvcLabel("ns1", "pvc1", "uid1", "pv1")),
+			nfsDeployment: getFakeDeploymentObject(nfsServerNs, "nfs-pv1"),
+			nfsService:    getFakeServiceObject(nfsServerNs, "nfs-pv1"),
+		},
+		{
+			name: "when NFS PVC is in pending state, NFS resources should not be destroyed",
+
+			clientset:     fake.NewSimpleClientset(),
+			pvTracker:     getProvisioningTracker(),
+			shouldCleanup: false,
+
+			nfsPvc: generateFakePvcObj("ns2", "pvc2", "uid2", corev1.ClaimPending, nil),
+			backendPvc: generateFakePvcObj(nfsServerNs, "nfs-pv2", "backend-pvc2-uid", corev1.ClaimBound,
+				generateBackendPvcLabel("ns2", "pvc2", "uid2", "pv2")),
+			nfsDeployment: getFakeDeploymentObject(nfsServerNs, "nfs-pv2"),
+			nfsService:    getFakeServiceObject(nfsServerNs, "nfs-pv2"),
+		},
+		{
+			name: "when NFS PVC doesn't exist but provisioner is re-attempting provisioning, NFS resources should not be destroyed",
+
+			clientset:     fake.NewSimpleClientset(),
+			pvTracker:     getProvisioningTracker("pv3"),
+			shouldCleanup: false,
+
+			backendPvc: generateFakePvcObj(nfsServerNs, "nfs-pv3", "backend-pvc3-uid", corev1.ClaimBound,
+				generateBackendPvcLabel("ns3", "pvc3", "uid3", "pv3")),
+			nfsDeployment: getFakeDeploymentObject(nfsServerNs, "nfs-pv3"),
+		},
+		{
+			name: "when NFS PVC doesn't exist and NFS PV exists, NFS resources should not be destroyed",
+
+			clientset:     fake.NewSimpleClientset(),
+			pvTracker:     getProvisioningTracker(),
+			shouldCleanup: false,
+
+			nfsPv: generateFakePvObj("pv4"),
+			backendPvc: generateFakePvcObj(nfsServerNs, "nfs-pv4", "backend-pvc4-uid", corev1.ClaimBound,
+				generateBackendPvcLabel("ns4", "pvc4", "uid1", "pv4")),
+			nfsDeployment: getFakeDeploymentObject(nfsServerNs, "nfs-pv4"),
+			nfsService:    getFakeServiceObject(nfsServerNs, "nfs-pv4"),
+		},
+		{
+			name: "when NFS PVC and NFS PV don't exist, NFS resources should be destroyed",
+
+			clientset:     fake.NewSimpleClientset(),
+			pvTracker:     getProvisioningTracker(),
+			shouldCleanup: true,
+
+			backendPvc: generateFakePvcObj(nfsServerNs, "nfs-pv5", "backend-pvc5-uid", corev1.ClaimBound,
+				generateBackendPvcLabel("ns5", "pvc5", "uid5", "pv5")),
+			nfsDeployment: getFakeDeploymentObject(nfsServerNs, "nfs-pv5"),
+			nfsService:    getFakeServiceObject(nfsServerNs, "nfs-pv5"),
+		},
+		{
+			name: "when NFS PVC has a different UID and NFS PV exists, NFS resources should not be destroyed",
+
+			clientset:     fake.NewSimpleClientset(),
+			pvTracker:     getProvisioningTracker(),
+			shouldCleanup: false,
+
+			nfsPvc: generateFakePvcObj("ns6", "pvc6", "different-uid", corev1.ClaimBound, nil),
+			nfsPv:  generateFakePvObj("pv6"),
+			backendPvc: generateFakePvcObj(nfsServerNs, "nfs-pv6", "backend-pvc6-uid", corev1.ClaimBound,
+				generateBackendPvcLabel("ns6", "pvc6", "uid6", "pv6")),
+			nfsDeployment: getFakeDeploymentObject(nfsServerNs, "nfs-pv6"),
+			nfsService:    getFakeServiceObject(nfsServerNs, "nfs-pv6"),
+		},
+		{
+			name: "when NFS PVC has a different UID and NFS PV doesn't exist, NFS resources should be destroyed",
+
+			clientset:     fake.NewSimpleClientset(),
+			pvTracker:     getProvisioningTracker(),
+			shouldCleanup: true,
+
+			nfsPvc: generateFakePvcObj("ns7", "pvc7", "different-uid", corev1.ClaimBound, nil),
+			backendPvc: generateFakePvcObj(nfsServerNs, "nfs-pv7", "backend-pvc7-uid", corev1.ClaimBound,
+				generateBackendPvcLabel("ns7", "pvc7", "uid7", "pv7")),
+			nfsDeployment: getFakeDeploymentObject(nfsServerNs, "nfs-pv7"),
+			nfsService:    getFakeServiceObject(nfsServerNs, "nfs-pv7"),
+		},
+		{
+			name: "when backend PVC doesn't have the nfs-pvc labels, backend PVC should not be removed",
+
+			clientset:     fake.NewSimpleClientset(),
+			pvTracker:     getProvisioningTracker(),
+			shouldCleanup: false,
+
+			backendPvc: generateFakePvcObj(nfsServerNs, "not-nfs-pvc", "backend-pvc8-uid", corev1.ClaimPending,
+				map[string]string{"openebs.io/cas-type": "nfs-kernel"}),
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			assert.NoError(t, createPvc(test.clientset, test.nfsPvc), "on creating nfs PVC resource")
+			assert.NoError(t, createPv(test.clientset, test.nfsPv), "on creating nfs PV resource")
+			assert.NoError(t, createPvc(test.clientset, test.backendPvc), "on creating backend PVC resource")
+			assert.NoError(t, createDeployment(test.clientset, test.nfsDeployment), "on creating nfs-server deployment resource")
+			assert.NoError(t, createService(test.clientset, test.nfsService), "on creating nfs-server service resource")
+
+			assert.NoError(t, cleanUpStalePvc(test.clientset, test.pvTracker, nfsServerNs))
+
+			if test.backendPvc != nil {
+				exists, err := pvcExists(test.clientset, test.backendPvc.Namespace, test.backendPvc.Name)
+				assert.NoError(t, err, "checking backend PVC existence")
+				assert.NotEqual(t, test.shouldCleanup, exists, "backend PVC %s", ternary(test.shouldCleanup, "should be removed", "shouldn't be removed"))
+			}
+
+			if test.nfsDeployment != nil {
+				exists, err := deploymentExists(test.clientset, test.nfsDeployment.Namespace, test.nfsDeployment.Name)
+				assert.NoError(t, err, "checking nfs-server deployment existence")
+				assert.NotEqual(t, test.shouldCleanup, exists, "nfs-server deployment %s", ternary(test.shouldCleanup, "should be removed", "shouldn't be removed"))
+			}
+
+			if test.nfsService != nil {
+				exists, err := serviceExists(test.clientset, test.nfsService.Namespace, test.nfsService.Name)
+				assert.NoError(t, err, "checking nfs-server service existence")
+				assert.NotEqual(t, test.shouldCleanup, exists, "nfs-server service %s", ternary(test.shouldCleanup, "should be removed", "shouldn't be removed"))
+			}
+		})
+	}
+}
+
+// ternary returns varA if cond is true, else varB
+func ternary(cond bool, varA, varB interface{}) interface{} {
+	if cond {
+		return varA
+	}
+	return varB
+}
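The pvcExists, deploymentExists and serviceExists helpers that follow all share one Get-then-IsNotFound shape: a nil error means the object exists, a NotFound error means it cleanly does not, and any other error is a real failure to surface. A compact standalone illustration of the pattern (names invented; the Get signature assumes the vendored client-go version):

```go
package main

import (
	"fmt"

	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	client := fake.NewSimpleClientset() // empty in-memory "cluster"

	_, err := client.CoreV1().Services("openebs").Get("nfs-missing", metav1.GetOptions{})

	// NotFound is an expected outcome, not a failure.
	fmt.Println("exists:", err == nil)                    // exists: false
	fmt.Println("not found:", k8serrors.IsNotFound(err)) // not found: true
}
```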
+
+// pvcExists checks whether the given PVC exists or not
+func pvcExists(client *fake.Clientset, pvcNamespace, pvcName string) (bool, error) {
+	_, err := client.CoreV1().PersistentVolumeClaims(pvcNamespace).Get(pvcName, metav1.GetOptions{})
+	if err == nil {
+		return true, nil
+	}
+
+	if k8serrors.IsNotFound(err) {
+		return false, nil
+	}
+	return false, err
+}
+
+// deploymentExists checks whether the given Deployment exists or not
+func deploymentExists(client *fake.Clientset, deploymentNs, deploymentName string) (bool, error) {
+	_, err := client.AppsV1().Deployments(deploymentNs).Get(deploymentName, metav1.GetOptions{})
+	if err == nil {
+		return true, nil
+	}
+
+	if k8serrors.IsNotFound(err) {
+		return false, nil
+	}
+	return false, err
+}
+
+// serviceExists checks whether the given Service exists or not
+func serviceExists(client *fake.Clientset, serviceNs, serviceName string) (bool, error) {
+	_, err := client.CoreV1().Services(serviceNs).Get(serviceName, metav1.GetOptions{})
+	if err == nil {
+		return true, nil
+	}
+
+	if k8serrors.IsNotFound(err) {
+		return false, nil
+	}
+	return false, err
+}
+
+// createPvc creates a PVC resource for the given PVC object.
+// It returns nil on successful creation or if the object is nil;
+// otherwise it returns the error from creating the k8s resource
+func createPvc(client *fake.Clientset, pvcObj *corev1.PersistentVolumeClaim) error {
+	if pvcObj == nil {
+		return nil
+	}
+
+	_, err := client.CoreV1().PersistentVolumeClaims(pvcObj.Namespace).Create(pvcObj)
+	return err
+}
+
+// createDeployment creates a Deployment resource for the given object.
+// It returns nil on successful creation or if the object is nil;
+// otherwise it returns the error from creating the k8s resource
+func createDeployment(client *fake.Clientset, deployObj *appsv1.Deployment) error {
+	if deployObj == nil {
+		return nil
+	}
+
+	_, err := client.AppsV1().Deployments(deployObj.Namespace).Create(deployObj)
+	return err
+}
+
+// createService creates a Service resource for the given object.
+// It returns nil on successful creation or if the object is nil;
+// otherwise it returns the error from creating the k8s resource
+func createService(client *fake.Clientset, serviceObj *corev1.Service) error {
+	if serviceObj == nil {
+		return nil
+	}
+
+	_, err := client.CoreV1().Services(serviceObj.Namespace).Create(serviceObj)
+	return err
+}
+
+// createPv creates a PV resource for the given PV object.
+// It returns nil on successful creation or if the object is nil;
+// otherwise it returns the error from creating the k8s resource
+func createPv(client *fake.Clientset, pvObj *corev1.PersistentVolume) error {
+	if pvObj == nil {
+		return nil
+	}
+
+	_, err := client.CoreV1().PersistentVolumes().Create(pvObj)
+	return err
+}
diff --git a/provisioner/helper_kernel_nfs_server.go b/provisioner/helper_kernel_nfs_server.go
index aeb0560..0fcf06a 100644
--- a/provisioner/helper_kernel_nfs_server.go
+++ b/provisioner/helper_kernel_nfs_server.go
@@ -18,6 +18,7 @@ package provisioner
 
 import (
 	"strconv"
+	"time"
 
 	errors "github.com/pkg/errors"
 	"k8s.io/klog"
@@ -31,12 +32,18 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	k8serrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
 )
 
 const (
 	pvcStorageClassAnnotation = "nfs.openebs.io/persistentvolumeclaim"
 	pvStorageClassAnnotation  = "nfs.openebs.io/persistentvolume"
 
+	// PVC label keys to store information about the NFS PVC
+	nfsPvcNameLabelKey = "nfs.openebs.io/nfs-pvc-name"
+	nfsPvcUIDLabelKey  = "nfs.openebs.io/nfs-pvc-uid"
+	nfsPvcNsLabelKey   = "nfs.openebs.io/nfs-pvc-namespace"
+
 	// NFSPVFinalizer represents finalizer string used by NFSPV
 	NFSPVFinalizer = "nfs.openebs.io/finalizer"
 
@@ -45,6 +52,10 @@ const (
 
 	//RPCBindPort set the RPC Bind Port
 	RPCBindPort = 111
+
+	// BackendPvcBoundTimeout defines the timeout for the PVC Bound check;
+	// set to 60 seconds
+	BackendPvcBoundTimeout = 60 * time.Second
 )
 
 var (
@@ -61,8 +72,11 @@ type KernelNFSServerOptions struct {
 	provisionerNS         string
 	pvName                string
 	capacity              string
-	backendStorageClass   string
 	pvcName               string
+	pvcUID                string
+	pvcNamespace          string
+	backendStorageClass   string
+	backendPvcName        string
 	serviceName           string
 	deploymentName        string
 	nfsServerCustomConfig string
@@ -92,28 +106,33 @@ func (p *Provisioner) createBackendPVC(nfsServerOpts *KernelNFSServerOptions) er
 		return err
 	}
 
-	pvcName := "nfs-" + nfsServerOpts.pvName
-	klog.V(4).Infof("Verifying if PVC(%v) for NFS storage was already created.", pvcName)
+	backendPvcName := "nfs-" + nfsServerOpts.pvName
+	klog.V(4).Infof("Verifying if PVC(%v) for NFS storage was already created.", backendPvcName)
 
 	//Check if the PVC is already created. This can happen
 	//if the previous reconciliation of PVC-PV, resulted in
 	//creating a PVC, but was not yet available for 60+ seconds
 	_, err := p.kubeClient.CoreV1().
 		PersistentVolumeClaims(p.serverNamespace).
-		Get(pvcName, metav1.GetOptions{})
+		Get(backendPvcName, metav1.GetOptions{})
 	if err != nil && !k8serrors.IsNotFound(err) {
-		return errors.Wrapf(err, "failed to check existence of backend PVC {%s/%s}", p.serverNamespace, pvcName)
+		return errors.Wrapf(err, "failed to check existence of backend PVC {%s/%s}", p.serverNamespace, backendPvcName)
 	} else if err == nil {
-		nfsServerOpts.pvcName = pvcName
-		klog.Infof("Volume %v has been initialized with PVC {%s/%s}", nfsServerOpts.pvName, p.serverNamespace, pvcName)
+		nfsServerOpts.backendPvcName = backendPvcName
+		klog.Infof("Volume %v has been initialized with PVC {%s/%s}", nfsServerOpts.pvName, p.serverNamespace, backendPvcName)
 		return nil
 	}
 
+	pvcLabel := nfsServerOpts.getLabels()
+	pvcLabel[nfsPvcNameLabelKey] = nfsServerOpts.pvcName
+	pvcLabel[nfsPvcUIDLabelKey] = nfsServerOpts.pvcUID
+	pvcLabel[nfsPvcNsLabelKey] = nfsServerOpts.pvcNamespace
+
 	// Create PVC using the provided capacity and SC details
 	pvcObjBuilder := persistentvolumeclaim.NewBuilder().
 		WithNamespace(p.serverNamespace).
-		WithName(pvcName).
- WithLabels(nfsServerOpts.getLabels()). + WithName(backendPvcName). + WithLabels(pvcLabel). WithCapacity(nfsServerOpts.capacity). WithAccessModeRWO(). WithStorageClass(nfsServerOpts.backendStorageClass) @@ -129,10 +148,10 @@ func (p *Provisioner) createBackendPVC(nfsServerOpts *KernelNFSServerOptions) er PersistentVolumeClaims(p.serverNamespace). Create(pvcObj) if err != nil { - return errors.Wrapf(err, "failed to create PVC {%s/%s}", p.serverNamespace, pvcName) + return errors.Wrapf(err, "failed to create PVC {%s/%s}", p.serverNamespace, backendPvcName) } - nfsServerOpts.pvcName = pvcName + nfsServerOpts.backendPvcName = backendPvcName return nil } @@ -143,17 +162,17 @@ func (p *Provisioner) deleteBackendPVC(nfsServerOpts *KernelNFSServerOptions) er return err } - pvcName := "nfs-" + nfsServerOpts.pvName - klog.V(4).Infof("Verifying if PVC {%s/%s} for NFS storage exists.", p.serverNamespace, pvcName) + backendPvcName := "nfs-" + nfsServerOpts.pvName + klog.V(4).Infof("Verifying if PVC {%s/%s} for NFS storage exists.", p.serverNamespace, backendPvcName) //Check if the PVC still exists. It could have been removed // or never created due to a provisioning create failure. _, err := p.kubeClient.CoreV1(). PersistentVolumeClaims(p.serverNamespace). - Get(pvcName, metav1.GetOptions{}) + Get(backendPvcName, metav1.GetOptions{}) if err == nil { - nfsServerOpts.pvcName = pvcName - klog.Infof("Volume %v has been initialized with PVC {%s/%s} Initiating delete...", nfsServerOpts.pvName, p.serverNamespace, pvcName) + nfsServerOpts.backendPvcName = backendPvcName + klog.Infof("Volume %v has been initialized with PVC {%s/%s} Initiating delete...", nfsServerOpts.pvName, p.serverNamespace, backendPvcName) } else if err != nil && k8serrors.IsNotFound(err) { return nil } @@ -164,9 +183,9 @@ func (p *Provisioner) deleteBackendPVC(nfsServerOpts *KernelNFSServerOptions) er // Delete PVC err = p.kubeClient.CoreV1(). PersistentVolumeClaims(p.serverNamespace). - Delete(pvcName, &metav1.DeleteOptions{}) + Delete(backendPvcName, &metav1.DeleteOptions{}) if err != nil && !k8serrors.IsNotFound(err) { - return errors.Wrapf(err, "failed to delete backend PVC {%s/%s} associated with PV %v", p.serverNamespace, pvcName, nfsServerOpts.pvName) + return errors.Wrapf(err, "failed to delete backend PVC {%s/%s} associated with PV %v", p.serverNamespace, backendPvcName, nfsServerOpts.pvName) } return nil } @@ -221,6 +240,7 @@ func (p *Provisioner) createDeployment(nfsServerOpts *KernelNFSServerOptions) er container.NewBuilder(). WithName("nfs-server"). WithImage(getNFSServerImage()). + WithImagePullPolicy(corev1.PullIfNotPresent). WithEnvsNew( []corev1.EnvVar{ { @@ -266,7 +286,7 @@ func (p *Provisioner) createDeployment(nfsServerOpts *KernelNFSServerOptions) er WithVolumeBuilders( volume.NewBuilder(). WithName("exports-dir"). - WithPVCSource(nfsServerOpts.pvcName), + WithPVCSource(nfsServerOpts.backendPvcName), ), ) @@ -438,7 +458,7 @@ func (p *Provisioner) getNFSServerAddress(nfsServerOpts *KernelNFSServerOptions) // If not create NFS Service err := p.createNFSServer(nfsServerOpts) if err != nil { - return "", errors.Wrapf(err, "failed to create NFS Server for PVC{%v}", nfsServerOpts.pvName) + return "", errors.Wrapf(err, "failed to deploy NFS Server") } //Get the NFS Service to extract Cluster IP @@ -448,7 +468,7 @@ func (p *Provisioner) getNFSServerAddress(nfsServerOpts *KernelNFSServerOptions) Services(p.serverNamespace). 
		Get(nfsServerOpts.serviceName, metav1.GetOptions{})
 	if err != nil || nfsService == nil {
-		return "", errors.Wrapf(err, "failed to get NFS Service for PVC{%v}", nfsServerOpts.pvcName)
+		return "", errors.Wrapf(err, "failed to get NFS Service for PVC{%v}", nfsServerOpts.backendPvcName)
 	}
 	return nfsService.Spec.ClusterIP, nil
 }
@@ -473,6 +493,11 @@ func (p *Provisioner) createNFSServer(nfsServerOpts *KernelNFSServerOptions) err
 		return errors.Wrapf(err, "failed to initialize NFS Storage Deployment for RWX PVC{%v}", nfsServerOpts.pvName)
 	}
 
+	err = waitForPvcBound(p.kubeClient, p.serverNamespace, "nfs-"+nfsServerOpts.pvName, BackendPvcBoundTimeout)
+	if err != nil {
+		return err
+	}
+
 	err = p.createService(nfsServerOpts)
 	if err != nil {
 		return errors.Wrapf(err, "failed to initialize NFS Storage Service for RWX PVC{%v}", nfsServerOpts.pvName)
@@ -513,3 +538,33 @@ func (nfsServerOpts *KernelNFSServerOptions) getLabels() map[string]string {
 		"openebs.io/cas-type": "nfs-kernel",
 	}
 }
+
+// waitForPvcBound waits, up to the given timeout, for the PVC to reach
+// the Bound phase
+func waitForPvcBound(client kubernetes.Interface, namespace, name string, timeout time.Duration) error {
+	timer := time.NewTimer(timeout)
+	defer timer.Stop()
+
+	timeoutCh := timer.C
+
+	tick := time.NewTicker(time.Second)
+	defer tick.Stop()
+
+	for {
+		select {
+		case <-timeoutCh:
+			return errors.Errorf("timed out waiting for PVC{%s/%s} to be bound", namespace, name)
+
+		case <-tick.C:
+			obj, err := client.CoreV1().
+				PersistentVolumeClaims(namespace).
+				Get(name, metav1.GetOptions{})
+			if err != nil {
+				return errors.Wrapf(err, "failed to get pvc{%s/%s}", namespace, name)
+			}
+
+			if obj.Status.Phase == corev1.ClaimBound {
+				return nil
+			}
+		}
+	}
+}
diff --git a/provisioner/helper_kernel_nfs_server_test.go b/provisioner/helper_kernel_nfs_server_test.go
index 4f4ae9b..4b317b8 100644
--- a/provisioner/helper_kernel_nfs_server_test.go
+++ b/provisioner/helper_kernel_nfs_server_test.go
@@ -17,27 +17,34 @@ limitations under the License.
package provisioner import ( + "fmt" "os" "testing" errors "github.com/pkg/errors" + "github.com/stretchr/testify/assert" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/tools/cache" ) func getInt64Ptr(val int64) *int64 { return &val } -func getFakePVCObject(pvcNamespace, pvcName, scName string) *corev1.PersistentVolumeClaim { +func getFakePVCObject(pvcNamespace, pvcName, scName, uid string) *corev1.PersistentVolumeClaim { return &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: pvcName, Namespace: pvcNamespace, + UID: types.UID(uid), }, Spec: corev1.PersistentVolumeClaimSpec{ StorageClassName: &scName, @@ -158,7 +165,7 @@ func TestCreateBackendPVC(t *testing.T) { serverNamespace: "nfs-server-ns2", }, expectedPVCName: "nfs-test2-pv", - preProvisionedPVC: getFakePVCObject("nfs-server-ns2", "nfs-test2-pv", "test2-sc"), + preProvisionedPVC: getFakePVCObject("nfs-server-ns2", "nfs-test2-pv", "test2-sc", "uid"), }, "when PVC is pre-provisioned with same name in provisioner namespace": { options: &KernelNFSServerOptions{ @@ -172,7 +179,7 @@ func TestCreateBackendPVC(t *testing.T) { serverNamespace: "nfs-server-ns3", }, expectedPVCName: "nfs-test3-pv", - preProvisionedPVC: getFakePVCObject("openebs", "nfs-test3-pv", "test3-sc"), + preProvisionedPVC: getFakePVCObject("openebs", "nfs-test3-pv", "test3-sc", "uid"), }, } @@ -230,7 +237,7 @@ func TestDeleteBackendPVC(t *testing.T) { kubeClient: fake.NewSimpleClientset(), serverNamespace: "nfs-server-ns1", }, - existingPVC: getFakePVCObject("nfs-server-ns1", "nfs-test1-pv", "test1-sc"), + existingPVC: getFakePVCObject("nfs-server-ns1", "nfs-test1-pv", "test1-sc", "uid"), }, "when PVC is already deleted": { options: &KernelNFSServerOptions{ @@ -291,9 +298,9 @@ func TestCreateDeployment(t *testing.T) { "when there are no errors deployment should get created": { // NOTE: Populated only fields required for test options: &KernelNFSServerOptions{ - provisionerNS: "openebs", - pvName: "test1-pv", - pvcName: "nfs-test1-pv", + provisionerNS: "openebs", + pvName: "test1-pv", + backendPvcName: "nfs-test1-pv", }, provisioner: &Provisioner{ kubeClient: fake.NewSimpleClientset(), @@ -309,9 +316,9 @@ func TestCreateDeployment(t *testing.T) { }, "when deployment is pre-provisioned": { options: &KernelNFSServerOptions{ - provisionerNS: "openebs", - pvName: "test2-pv", - pvcName: "nfs-test2-pv", + provisionerNS: "openebs", + pvName: "test2-pv", + backendPvcName: "nfs-test2-pv", }, provisioner: &Provisioner{ kubeClient: fake.NewSimpleClientset(), @@ -324,10 +331,10 @@ func TestCreateDeployment(t *testing.T) { }, "when deployment exist with same name in provisioner namespace": { options: &KernelNFSServerOptions{ - provisionerNS: "openebs", - pvName: "test3-pv", - pvcName: "nfs-test3-pv", - fsGroup: getInt64Ptr(123), + provisionerNS: "openebs", + pvName: "test3-pv", + backendPvcName: "nfs-test3-pv", + fsGroup: getInt64Ptr(123), }, provisioner: &Provisioner{ kubeClient: fake.NewSimpleClientset(), @@ -346,7 +353,7 @@ func TestCreateDeployment(t *testing.T) { options: &KernelNFSServerOptions{ provisionerNS: "openebs", pvName: "test4-pv", - pvcName: "nfs-test4-pv", + backendPvcName: "nfs-test4-pv", fsGroup: getInt64Ptr(123), leaseTime: 100, graceTime: 100, @@ -421,9 +428,9 
@@ func TestDeleteDeployment(t *testing.T) {
 		"when there are no errors deployment should get deleted": {
 			// NOTE: Populated only fields required for test
 			options: &KernelNFSServerOptions{
-				provisionerNS: "openebs",
-				pvName:        "test1-pv",
-				pvcName:       "nfs-test1-pv",
+				provisionerNS:  "openebs",
+				pvName:         "test1-pv",
+				backendPvcName: "nfs-test1-pv",
 			},
 			provisioner: &Provisioner{
 				kubeClient: fake.NewSimpleClientset(),
@@ -433,9 +440,9 @@ func TestDeleteDeployment(t *testing.T) {
 		},
 		"when deployment is already deleted": {
 			options: &KernelNFSServerOptions{
-				provisionerNS: "openebs",
-				pvName:        "test2-pv",
-				pvcName:       "nfs-test2-pv",
+				provisionerNS:  "openebs",
+				pvName:         "test2-pv",
+				backendPvcName: "nfs-test2-pv",
 			},
 			provisioner: &Provisioner{
 				kubeClient: fake.NewSimpleClientset(),
@@ -573,9 +580,9 @@ func TestDeleteService(t *testing.T) {
 		"when there are no errors service should get deleted": {
 			// NOTE: Populated only fields required for test
 			options: &KernelNFSServerOptions{
-				provisionerNS: "openebs",
-				pvName:        "test1-pv",
-				pvcName:       "nfs-test1-pv",
+				provisionerNS:  "openebs",
+				pvName:         "test1-pv",
+				backendPvcName: "nfs-test1-pv",
 			},
 			provisioner: &Provisioner{
 				kubeClient: fake.NewSimpleClientset(),
@@ -585,9 +592,9 @@ func TestDeleteService(t *testing.T) {
 		},
 		"when service is already deleted": {
 			options: &KernelNFSServerOptions{
-				provisionerNS: "openebs",
-				pvName:        "test2-pv",
-				pvcName:       "nfs-test2-pv",
+				provisionerNS:  "openebs",
+				pvName:         "test2-pv",
+				backendPvcName: "nfs-test2-pv",
 			},
 			provisioner: &Provisioner{
 				kubeClient: fake.NewSimpleClientset(),
@@ -633,10 +640,11 @@ func TestDeleteService(t *testing.T) {
 
 func TestGetNFSServerAddress(t *testing.T) {
 	tests := map[string]struct {
-		options           *KernelNFSServerOptions
-		provisioner       *Provisioner
-		isErrExpected     bool
-		expectedServiceIP string
+		options               *KernelNFSServerOptions
+		provisioner           *Provisioner
+		isErrExpected         bool
+		expectedServiceIP     string
+		shouldBoundBackendPvc bool
 	}{
 		"when there are no errors service address should be returned": {
 			// NOTE: Populated only fields required for test
@@ -650,7 +658,8 @@ func TestGetNFSServerAddress(t *testing.T) {
 				kubeClient:      fake.NewSimpleClientset(),
 				serverNamespace: "nfs-server-ns1",
 			},
-			expectedServiceIP: "nfs-test1-pv.nfs-server-ns1.svc.cluster.local",
+			expectedServiceIP:     "nfs-test1-pv.nfs-server-ns1.svc.cluster.local",
+			shouldBoundBackendPvc: true,
 		},
 		"when opted for clusterIP it should service address": {
 			// NOTE: Populated only fields required for test
@@ -667,13 +676,48 @@ func TestGetNFSServerAddress(t *testing.T) {
 			},
 			// Since we are using fake clients there won't be ClusterIP on service
 			// so expecting for empty value
-			expectedServiceIP: "",
+			expectedServiceIP:     "",
+			shouldBoundBackendPvc: true,
+		},
+		"when backend PVC fails to get bound": {
+			// NOTE: Populated only fields required for test
+			options: &KernelNFSServerOptions{
+				provisionerNS:       "openebs",
+				pvName:              "test3-pv",
+				capacity:            "5G",
+				backendStorageClass: "test3-sc",
+			},
+			provisioner: &Provisioner{
+				kubeClient:      fake.NewSimpleClientset(),
+				serverNamespace: "nfs-server-ns3",
+				useClusterIP:    false,
+			},
+			// Since we are using fake clients there won't be a ClusterIP on
+			// the service, so we expect an empty value
+			expectedServiceIP:     "",
+			isErrExpected:         true,
+			shouldBoundBackendPvc: false,
+		},
 	}
 	os.Setenv(string(NFSServerImageKey), "openebs/nfs-server:ci")
 	for name, test := range tests {
 		name := name
 		test := test
+		informer := informers.NewSharedInformerFactory(test.provisioner.kubeClient, 0)
+		pvcInformer := informer.Core().V1().PersistentVolumeClaims().Informer()
+		pvcInformer.AddEventHandler(
+			cache.ResourceEventHandlerFuncs{
+				AddFunc: func(obj interface{}) {
+					if test.shouldBoundBackendPvc {
+						boundPvc(test.provisioner.kubeClient, obj)
+					}
+				},
+			},
+		)
+		stopCh := make(chan struct{})
+		informer.Start(stopCh)
+		assert.True(t, cache.WaitForCacheSync(stopCh, pvcInformer.HasSynced))
+
 		t.Run(name, func(t *testing.T) {
 			serviceIP, err := test.provisioner.getNFSServerAddress(test.options)
 			if test.isErrExpected && err == nil {
@@ -689,6 +733,24 @@ func TestGetNFSServerAddress(t *testing.T) {
 				}
 			}
 		})
+
+		// stop the pvc informer
+		close(stopCh)
 	}
 	os.Unsetenv(string(NFSServerImageKey))
 }
+
+// boundPvc marks the given PVC object as Bound through the fake clientset
+func boundPvc(client kubernetes.Interface, obj interface{}) {
+	pvc, ok := obj.(*corev1.PersistentVolumeClaim)
+	if !ok {
+		return
+	}
+
+	pvc.Status.Phase = corev1.ClaimBound
+
+	_, err := client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Update(pvc)
+	if err != nil {
+		fmt.Printf("failed to update PVC object err=%+v\n", err)
+	}
+}
diff --git a/provisioner/provisioner.go b/provisioner/provisioner.go
index b1cab70..24f6fdf 100644
--- a/provisioner/provisioner.go
+++ b/provisioner/provisioner.go
@@ -71,6 +71,9 @@ func NewProvisioner(stopCh chan struct{}, kubeClient *clientset.Clientset) (*Pro
 	k8sNodeInformer := kubeInformerFactory.Core().V1().Nodes().Informer()
 
 	nfsServerNs := getNfsServerNamespace()
+
+	pvTracker := NewProvisioningTracker()
+
 	p := &Provisioner{
 		stopCh: stopCh,
@@ -86,6 +89,7 @@ func NewProvisioner(stopCh chan struct{}, kubeClient *clientset.Clientset) (*Pro
 		useClusterIP:    menv.Truthy(ProvisionerNFSServerUseClusterIP),
 		k8sNodeLister:   listersv1.NewNodeLister(k8sNodeInformer.GetIndexer()),
 		nodeAffinity:    getNodeAffinityRules(),
+		pvTracker:       pvTracker,
 	}
 	p.getVolumeConfig = p.GetVolumeConfig
@@ -93,6 +97,9 @@
 	// and maintain it in cache
 	go k8sNodeInformer.Run(stopCh)
 
+	// Run the garbage collector to clean up stale NFS resources
+	go RunGarbageCollector(kubeClient, pvTracker, nfsServerNs, stopCh)
+
 	return p, nil
 }
@@ -107,6 +114,9 @@ func (p *Provisioner) SupportsBlock() bool {
 func (p *Provisioner) Provision(opts pvController.ProvisionOptions) (*v1.PersistentVolume, error) {
 	pvc := opts.PVC
 
+	p.pvTracker.Add(opts.PVName)
+	defer p.pvTracker.Delete(opts.PVName)
+
 	for _, accessMode := range pvc.Spec.AccessModes {
 		if accessMode != v1.ReadWriteMany {
 			klog.Infof("Received PVC provision request for non-rwx mode %v", accessMode)
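The hunks on either side of this point form the race-avoidance scheme: Provision above (and Delete below) keep the PV name registered with the tracker for the duration of the call, and the garbage collector's Inprogress check skips those PVs. A self-contained sketch of the add/defer-delete lifecycle, using a minimal stand-in for this PR's ProvisioningTracker:

```go
package main

import (
	"fmt"
	"sync"
)

// inFlight is a minimal stand-in for the PR's ProvisioningTracker.
type inFlight struct {
	mu  sync.RWMutex
	set map[string]struct{}
}

func (f *inFlight) Add(pv string)    { f.mu.Lock(); f.set[pv] = struct{}{}; f.mu.Unlock() }
func (f *inFlight) Delete(pv string) { f.mu.Lock(); delete(f.set, pv); f.mu.Unlock() }
func (f *inFlight) Inprogress(pv string) bool {
	f.mu.RLock()
	defer f.mu.RUnlock()
	_, ok := f.set[pv]
	return ok
}

func main() {
	tracker := &inFlight{set: map[string]struct{}{}}

	provision := func(pvName string) {
		tracker.Add(pvName)
		defer tracker.Delete(pvName)
		// ... create backend PVC, deployment and service here ...
		fmt.Println("in-flight during provisioning:", tracker.Inprogress(pvName))
	}

	provision("pvc-123")
	// Once the call returns, the garbage collector is free to inspect it.
	fmt.Println("in-flight afterwards:", tracker.Inprogress("pvc-123"))
}
```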
@@ -166,6 +176,9 @@
 // set to not-retain, then this function will create a helper pod
 // to delete the host path from the node.
 func (p *Provisioner) Delete(pv *v1.PersistentVolume) (err error) {
+	p.pvTracker.Add(pv.Name)
+	defer p.pvTracker.Delete(pv.Name)
+
 	defer func() {
 		err = errors.Wrapf(err, "failed to delete volume %v", pv.Name)
 	}()
diff --git a/provisioner/provisioner_kernel_nfs_server.go b/provisioner/provisioner_kernel_nfs_server.go
index 2337826..2e081f7 100644
--- a/provisioner/provisioner_kernel_nfs_server.go
+++ b/provisioner/provisioner_kernel_nfs_server.go
@@ -65,10 +65,12 @@ func (p *Provisioner) ProvisionKernalNFSServer(opts pvController.ProvisionOption
 		leaseTime:             leaseTime,
 		graceTime:             graceTime,
 		fsGroup:               fsGID,
+		pvcName:               pvc.Name,
+		pvcNamespace:          pvc.Namespace,
+		pvcUID:                string(pvc.UID),
 	}
 
 	nfsService, err := p.getNFSServerAddress(nfsServerOpts)
-
 	if err != nil {
 		klog.Infof("Initialize volume %v failed: %v", name, err)
 		alertlog.Logger.Errorw("",
diff --git a/provisioner/pvc_tracker.go b/provisioner/pvc_tracker.go
new file mode 100644
index 0000000..2f8ab01
--- /dev/null
+++ b/provisioner/pvc_tracker.go
@@ -0,0 +1,68 @@
+/*
+Copyright 2021 The OpenEBS Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package provisioner
+
+import (
+	"sync"
+
+	"k8s.io/apimachinery/pkg/util/sets"
+)
+
+// ProvisioningTracker tracks in-progress provisioning requests
+type ProvisioningTracker interface {
+	// Add registers a PV for which provisioning is in progress
+	Add(pvName string)
+
+	// Delete removes a PV for which provisioning is completed
+	Delete(pvName string)
+
+	// Inprogress checks if provisioning for the given PV is in progress
+	Inprogress(pvName string) bool
+}
+
+type provisioningTracker struct {
+	// request contains the set of in-progress provisioning requests
+	request sets.String
+	lock    sync.RWMutex
+}
+
+// NewProvisioningTracker returns a new, empty ProvisioningTracker
+func NewProvisioningTracker() ProvisioningTracker {
+	return &provisioningTracker{
+		request: sets.NewString(),
+	}
+}
+
+func (t *provisioningTracker) Add(pvName string) {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	t.request.Insert(pvName)
+}
+
+func (t *provisioningTracker) Delete(pvName string) {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	t.request.Delete(pvName)
+}
+
+func (t *provisioningTracker) Inprogress(pvName string) bool {
+	t.lock.RLock()
+	defer t.lock.RUnlock()
+
+	return t.request.Has(pvName)
+}
diff --git a/provisioner/pvc_tracker_test.go b/provisioner/pvc_tracker_test.go
new file mode 100644
index 0000000..18d8919
--- /dev/null
+++ b/provisioner/pvc_tracker_test.go
@@ -0,0 +1,42 @@
+/*
+Copyright 2021 The OpenEBS Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package provisioner + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestProvisioningTracker(t *testing.T) { + tracker := NewProvisioningTracker() + + assert.False(t, tracker.Inprogress("pv1")) + + tracker.Add("pv1") + tracker.Add("pv2") + + assert.True(t, tracker.Inprogress("pv1")) + assert.True(t, tracker.Inprogress("pv2")) + + tracker.Delete("pv1") + assert.False(t, tracker.Inprogress("pv1")) + assert.True(t, tracker.Inprogress("pv2")) + + tracker.Delete("pv2") + assert.False(t, tracker.Inprogress("pv2")) +} diff --git a/provisioner/types.go b/provisioner/types.go index 0c38f30..64a873a 100644 --- a/provisioner/types.go +++ b/provisioner/types.go @@ -54,6 +54,9 @@ type Provisioner struct { // nodeAffinity specifies requirements for scheduling NFS Server nodeAffinity NodeAffinity + + // pvTracker to track in-progress provisioning request + pvTracker ProvisioningTracker } //VolumeConfig struct contains the merged configuration of the PVC diff --git a/tests/garbage_collector_test.go b/tests/garbage_collector_test.go new file mode 100644 index 0000000..4943633 --- /dev/null +++ b/tests/garbage_collector_test.go @@ -0,0 +1,245 @@ +/* +Copyright 2021 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package tests + +import ( + "fmt" + "time" + + "github.com/ghodss/yaml" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + mayav1alpha1 "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" + corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + pvc "github.com/openebs/dynamic-nfs-provisioner/pkg/kubernetes/api/core/v1/persistentvolumeclaim" + provisioner "github.com/openebs/dynamic-nfs-provisioner/provisioner" +) + +var _ = Describe("TEST GARBAGE COLLECTION OF NFS RESOURCES", func() { + var ( + // application parameters + applicationNamespace = "default" + + // pvc values + accessModes = []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany} + capacity = "2Gi" + pvcName = "reclaim-released-pv" + pvcUID = "" + + // nfs provisioner values + nfsProvisionerName = "openebs-nfs-provisioner" + nfsProvisionerLabel = "openebs.io/component-name=openebs-nfs-provisioner" + openebsNamespace = "openebs" + scName = "nfs-sc" + backendScName = "nfs-invalid-backend-sc" + scNfsServerType = "kernel" + ) + + When("create nfs storageclass with invalid backend storageclass", func() { + It("should create storageclass", func() { + By("creating storageclass") + + casObj := []mayav1alpha1.Config{ + { + Name: provisioner.KeyPVNFSServerType, + Value: scNfsServerType, + }, + { + Name: provisioner.KeyPVBackendStorageClass, + Value: backendScName, + }, + } + + casObjStr, err := yaml.Marshal(casObj) + Expect(err).To(BeNil(), "while marshaling cas object") + + err = Client.createStorageClass(&storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: scName, + Annotations: map[string]string{ + string(mayav1alpha1.CASTypeKey): "nfsrwx", + string(mayav1alpha1.CASConfigKey): string(casObjStr), + }, + }, + Provisioner: "openebs.io/nfsrwx", + }) + Expect(err).To(BeNil(), "while creating SC{%s}", scName) + }) + }) + + When(fmt.Sprintf("pvc with storageclass %s is created", scName), func() { + It("should create a pvc ", func() { + By("building a pvc") + pvcObj, err := pvc.NewBuilder(). + WithName(pvcName). + WithNamespace(applicationNamespace). + WithStorageClass(scName). + WithAccessModes(accessModes). 
+				WithCapacity(capacity).Build()
+			Expect(err).To(BeNil(), "while building pvc %s/%s object", applicationNamespace, pvcName)
+
+			By("creating above pvc")
+			err = Client.createPVC(pvcObj)
+			Expect(err).To(BeNil(), "while creating pvc %s/%s", applicationNamespace, pvcName)
+
+			pvcObj, err = Client.getPVC(applicationNamespace, pvcName)
+			Expect(err).To(BeNil(), "while fetching pvc %s/%s", applicationNamespace, pvcName)
+
+			pvcUID = string(pvcObj.UID)
+
+			By("wait till nfs-server deployment gets created")
+			var nfsDeploymentCreated bool
+			maxRetryCount := 10
+
+			nfsDeploymentName := "nfs-pvc-" + pvcUID
+			for maxRetryCount != 0 {
+				_, err := Client.getDeployment(openebsNamespace, nfsDeploymentName)
+				if err == nil {
+					nfsDeploymentCreated = true
+					break
+				}
+
+				if !k8serrors.IsNotFound(err) {
+					fmt.Printf("error fetching nfs-server deployment resource, err=%v\n", err)
+				}
+
+				time.Sleep(5 * time.Second)
+				maxRetryCount--
+			}
+			Expect(nfsDeploymentCreated).Should(BeTrue(), "while checking nfs-server deployment creation")
+		})
+	})
+
+	When("nfs-provisioner is scaled down", func() {
+		It("should scale down the provisioner", func() {
+			By("scale down provisioner")
+			deployObj, err := Client.getDeployment(openebsNamespace, nfsProvisionerName)
+			Expect(err).To(BeNil(), "while fetching deployment %s/%s", openebsNamespace, nfsProvisionerName)
+
+			replicaCount := int32(0)
+			deployObj.Spec.Replicas = &replicaCount
+
+			_, err = Client.updateDeployment(deployObj)
+			Expect(err).To(BeNil(), "while updating the deployment %s/%s with replicaCount=%d", openebsNamespace, nfsProvisionerName, replicaCount)
+
+			By("verifying pod count as 0")
+			err = Client.waitForPods(openebsNamespace, nfsProvisionerLabel, corev1.PodRunning, 0)
+			Expect(err).To(BeNil(), "while verifying pod count")
+		})
+	})
+
+	When(fmt.Sprintf("pvc with storageclass %s is deleted", scName), func() {
+		It("should delete the pvc", func() {
+			Expect(pvcUID).NotTo(BeEmpty(), "PVC UID should not be empty")
+
+			By(fmt.Sprintf("pvc with storageclass %s is deleted", scName))
+			err := Client.deletePVC(applicationNamespace, pvcName)
+			Expect(err).To(BeNil(), "while deleting pvc %s/%s", applicationNamespace, pvcName)
+
+			maxRetryCount := 5
+			isPvcDeleted := false
+			for retries := 0; retries < maxRetryCount; retries++ {
+				_, err := Client.getPVC(applicationNamespace, pvcName)
+				if err != nil && k8serrors.IsNotFound(err) {
+					isPvcDeleted = true
+					break
+				}
+				time.Sleep(time.Second * 5)
+			}
+			Expect(isPvcDeleted).To(BeTrue(), "pvc should be deleted")
+
+			By("checking backend PVC")
+			backendPvcName := "nfs-pvc-" + pvcUID
+			pvcObj, err := Client.getPVC(openebsNamespace, backendPvcName)
+			Expect(err).To(BeNil(), "while fetching backend pvc")
+			Expect(pvcObj.Status.Phase).To(Equal(corev1.ClaimPending), "while verifying backend PVC claim phase")
+
+			By("checking nfs-server deployment")
+			nfsDeploymentName := "nfs-pvc-" + pvcUID
+			deployObj, err := Client.getDeployment(openebsNamespace, nfsDeploymentName)
+			Expect(err).To(BeNil(), "while fetching nfs-server deployment")
+			Expect(deployObj.Status.UnavailableReplicas == 1).To(BeTrue(), "nfs-server pod should not be in ready state")
+		})
+	})
+
+	When("nfs-provisioner is scaled-up", func() {
+		It("should scale up nfs-provisioner", func() {
+			deployObj, err := Client.getDeployment(openebsNamespace, nfsProvisionerName)
+			Expect(err).To(BeNil(), "while fetching deployment %s/%s", openebsNamespace, nfsProvisionerName)
+
+			replicaCount := int32(1)
+			deployObj.Spec.Replicas = &replicaCount
+
+			_, err = 
Client.updateDeployment(deployObj) + Expect(err).To(BeNil(), "while updating the deployment %s/%s with replicaCount=%d", openebsNamespace, nfsProvisionerName, replicaCount) + + By("verifying pod count as 1") + err = Client.waitForPods(openebsNamespace, nfsProvisionerLabel, corev1.PodRunning, 1) + Expect(err).To(BeNil(), "while verifying pod count") + }) + }) + + When("nfs-pv stale resources are cleaned-up", func() { + It("should not find backend PVC and nfs-server deployment", func() { + Expect(pvcUID).NotTo(BeEmpty(), "PVC UID should not be empty") + + By("checking backend PVC") + var backendPvcDeleted bool + backendPvcName := "nfs-pvc-" + pvcUID + maxRetryCount := 10 + + for maxRetryCount != 0 { + _, err := Client.getPVC(openebsNamespace, backendPvcName) + if err != nil && k8serrors.IsNotFound(err) { + backendPvcDeleted = true + break + } + time.Sleep(time.Second * 5) + maxRetryCount-- + } + Expect(backendPvcDeleted).To(BeTrue(), "backend pvc should be deleted") + + By("checking nfs-server deployment") + var nfsDeploymentDeleted bool + nfsDeploymentName := "nfs-pvc-" + pvcUID + maxRetryCount = 10 + + for maxRetryCount != 0 { + _, err := Client.getDeployment(openebsNamespace, nfsDeploymentName) + if err != nil && k8serrors.IsNotFound(err) { + nfsDeploymentDeleted = true + break + } + time.Sleep(time.Second * 5) + maxRetryCount-- + } + Expect(nfsDeploymentDeleted).To(BeTrue(), "nfs-server deployment should be deleted") + }) + }) + + When(fmt.Sprintf("StorageClass %s is deleted", scName), func() { + It("should delete the SC", func() { + By("deleting SC") + err := Client.deleteStorageClass(scName) + Expect(err).To(BeNil(), "while deleting sc {%s}", scName) + }) + }) +}) diff --git a/tests/nfs_server_invalid_ns_test.go b/tests/nfs_server_invalid_ns_test.go index d6cbdb0..8271769 100644 --- a/tests/nfs_server_invalid_ns_test.go +++ b/tests/nfs_server_invalid_ns_test.go @@ -86,7 +86,7 @@ var _ = Describe("TEST INVALID NAMESPACE FOR NFS SERVER", func() { When("verifying application PVC state", func() { It("should have PVC in pending state", func() { pvcObj, err := Client.getPVC(applicationNamespace, pvcName) - Expect(err).To(BeNil(), "while fetching pvc {%s} in namespace {%s}", pvcName, applicationNamespace) + Expect(err).To(BeNil(), "while fetching pvc %s/%s", applicationNamespace, pvcName) Expect(pvcObj.Status.Phase).To(Equal(corev1.ClaimPending), "while verifying PVC claim phase") }) diff --git a/tests/provisioner_with_invalid_backend_sc.go b/tests/provisioner_with_invalid_backend_sc.go index 7ce5a32..2109158 100644 --- a/tests/provisioner_with_invalid_backend_sc.go +++ b/tests/provisioner_with_invalid_backend_sc.go @@ -19,6 +19,7 @@ package tests import ( "fmt" "strings" + "time" "github.com/ghodss/yaml" . "github.com/onsi/ginkgo" @@ -42,11 +43,10 @@ var _ = Describe("TEST NFS PROVISIONER WITH INVALID BACKEND SC", func() { pvcName = "pvc-invalid-backend-sc" // nfs provisioner values - openebsNamespace = "openebs" - nfsServerLabel = "openebs.io/nfs-server" scName = "nfs-server-invalid-sc" backendScName = "nfs-invalid-backend-sc" scNfsServerType = "kernel" + openebsNamespace = "openebs" ) When("create storageclass with nfs configuration", func() { @@ -89,48 +89,43 @@ var _ = Describe("TEST NFS PROVISIONER WITH INVALID BACKEND SC", func() { WithStorageClass(scName). WithAccessModes(accessModes). 
WithCapacity(capacity).Build()
-			Expect(err).To(BeNil(), "while building pvc {%s} in namespace {%s}", pvcName, applicationNamespace)
+			Expect(err).To(BeNil(), "while building pvc object for %s/%s", applicationNamespace, pvcName)
 
 			By("creating above pvc")
 			err = Client.createPVC(pvcObj)
-			Expect(err).To(BeNil(), "while creating pvc {%s} in namespace {%s}", pvcName, applicationNamespace)
-
-			pvcPhase, err := Client.waitForPVCBound(applicationNamespace, pvcName)
-			Expect(err).To(BeNil(), "while waiting for pvc %s/%s bound phase", applicationNamespace, pvcName)
-			Expect(pvcPhase).To(Equal(corev1.ClaimBound), "pvc %s/%s should be in bound phase", applicationNamespace, pvcName)
+			Expect(err).To(BeNil(), "while creating pvc %s/%s", applicationNamespace, pvcName)
 		})
 	})
 
-	When("verifying nfs-server state", func() {
-		It("should have nfs-server in pending state", func() {
-			By("fetching nfs-server deployment name")
+	When("verifying NFS PVC state", func() {
+		It("should have NFS PVC in pending state", func() {
 			pvcObj, err := Client.getPVC(applicationNamespace, pvcName)
-			Expect(err).To(BeNil(), "while fetching pvc {%s} in namespace {%s}", pvcName, applicationNamespace)
-
-			nfsDeployment := fmt.Sprintf("nfs-%s", pvcObj.Spec.VolumeName)
-			podList, err := Client.listPods(openebsNamespace, fmt.Sprintf("%s=%s", nfsServerLabel, nfsDeployment))
-			Expect(err).To(BeNil(), "while fetching nfs-server pod")
-			Expect(podList.Items[0].Status.Phase).To(Equal(corev1.PodPending), "while verifying nfs-server pod state")
-
-			var unboundPVCCondFound bool
-			for _, v := range podList.Items[0].Status.Conditions {
-				if strings.Contains(v.Message, "pod has unbound immediate PersistentVolumeClaims") {
-					unboundPVCCondFound = true
+			Expect(err).To(BeNil(), "while fetching pvc %s/%s", applicationNamespace, pvcName)
+			Expect(pvcObj.Status.Phase).To(Equal(corev1.ClaimPending), "while verifying NFS PVC claim phase")
+
+			var isExpectedEventExist bool
+			maxRetryCount := 15
+			backendPvcName := "nfs-pvc-" + pvcObj.UID
+			for retries := 0; retries < maxRetryCount; retries++ {
+				// Verify for provision failure events on PVC
+				eventList, err := Client.getEvents(pvcObj)
+				Expect(err).To(BeNil(), "while fetching PVC %s/%s", pvcObj.Namespace, pvcObj.Name)
+
+				for _, event := range eventList.Items {
+					if event.Reason == "ProvisioningFailed" &&
+						strings.Contains(event.Message,
+							fmt.Sprintf("timed out waiting for PVC{%s/%s} to be bound", openebsNamespace, backendPvcName)) {
+						isExpectedEventExist = true
+						break
+					}
+				}
+				if isExpectedEventExist {
+					break
 				}
+				// the failure event is generated only after the 60-second bound timeout
+				time.Sleep(time.Second * 10)
 			}
-			Expect(unboundPVCCondFound).Should(BeTrue(), "while checking unbound PVC condition for nfs-server pod")
-		})
-	})
-
-	When("verifying backend PVC state", func() {
-		It("should have backend in pending state", func() {
-			pvcObj, err := Client.getPVC(applicationNamespace, pvcName)
-			Expect(err).To(BeNil(), "while fetching pvc {%s} in namespace {%s}", pvcName, applicationNamespace)
-
-			backendPVCName := "nfs-" + pvcObj.Spec.VolumeName
-			backendPvcObj, err := Client.getPVC(openebsNamespace, backendPVCName)
-			Expect(err).To(BeNil(), "while fetching backend pvc {%s} in namespace {%s}", backendPVCName, openebsNamespace)
-			Expect(backendPvcObj.Status.Phase).To(Equal(corev1.ClaimPending), "while verifying backed PVC claim phase")
+			Expect(isExpectedEventExist).To(BeTrue(), "ProvisioningFailed event with the PVC bound-timeout message should exist")
 		})
 	})
 
@@ -138,8 +133,7 @@ var _ = Describe("TEST NFS PROVISIONER WITH INVALID BACKEND 
SC", func() { It("should delete the pvc", func() { By("deleting above pvc") err := Client.deletePVC(applicationNamespace, pvcName) - Expect(err).To(BeNil(), "while deleting pvc {%s} in namespace {%s}", pvcName, applicationNamespace) - + Expect(err).To(BeNil(), "while deleting pvc %s/%s", applicationNamespace, pvcName) }) })