From 14e1a78c04919afe24bffed017751fc694f32652 Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Wed, 30 Aug 2023 09:46:01 +0200 Subject: [PATCH 1/3] cephfs: block creation of ROX clone from ROX volume As there is no use case currently, blocking the creation of a ROX clone from a ROX volume. Signed-off-by: Madhu Rajanna --- internal/cephfs/controllerserver.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/internal/cephfs/controllerserver.go b/internal/cephfs/controllerserver.go index 873b8175c95..ce4c19dc550 100644 --- a/internal/cephfs/controllerserver.go +++ b/internal/cephfs/controllerserver.go @@ -214,6 +214,7 @@ func checkValidCreateVolumeRequest( sID *store.SnapshotIdentifier, req *csi.CreateVolumeRequest, ) error { + volCaps := req.GetVolumeCapabilities() switch { case pvID != nil: if vol.Size < parentVol.Size { @@ -224,12 +225,12 @@ func checkValidCreateVolumeRequest( vol.Size) } - if vol.BackingSnapshot { - return errors.New("cloning snapshot-backed volumes is currently not supported") + if parentVol.BackingSnapshot && store.IsVolumeCreateRO(volCaps) { + return errors.New("creating read-only clone from a snapshot-backed volume is not supported") } + case sID != nil: if vol.BackingSnapshot { - volCaps := req.GetVolumeCapabilities() isRO := store.IsVolumeCreateRO(volCaps) if !isRO { return errors.New("backingSnapshot may be used only with read-only access modes") From f9283192012734234a7d76319a23178b78362f52 Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Wed, 30 Aug 2023 11:13:03 +0200 Subject: [PATCH 2/3] cephfs: add support for RWX clone from ROX Add support to create a RWX clone from a ROX clone. In Ceph, no subvolume clone is created when a ROX clone is created from a snapshot; just an internal ref counter is added. This PR allows creating a RWX clone from a ROX clone, which allows users to create a RW copy of a PVC; cephcsi will identify the snapshot created for the ROX volume and create a subvolume from the CephFS snapshot. 
updates: #3603 Signed-off-by: Madhu Rajanna --- internal/cephfs/controllerserver.go | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/internal/cephfs/controllerserver.go b/internal/cephfs/controllerserver.go index ce4c19dc550..d1c126fc3d1 100644 --- a/internal/cephfs/controllerserver.go +++ b/internal/cephfs/controllerserver.go @@ -299,6 +299,23 @@ func (cs *ControllerServer) CreateVolume( return nil, status.Error(codes.InvalidArgument, err.Error()) } + // As we are trying to create RWX volume from backing snapshot, we need to + // retrieve the snapshot details from the backing snapshot and create a + // subvolume clone from the snapshot. + if parentVol != nil && parentVol.BackingSnapshot && !store.IsVolumeCreateRO(req.VolumeCapabilities) { + // unset pvID as we dont have real subvolume for the parent volumeID as its a backing snapshot + pvID = nil + parentVol, _, sID, err = store.NewSnapshotOptionsFromID(ctx, parentVol.BackingSnapshotID, cr, + req.GetSecrets(), cs.ClusterName, cs.SetMetadata) + if err != nil { + if errors.Is(err, cerrors.ErrSnapNotFound) { + return nil, status.Error(codes.NotFound, err.Error()) + } + + return nil, status.Error(codes.Internal, err.Error()) + } + } + vID, err := store.CheckVolExists(ctx, volOptions, parentVol, pvID, sID, cr, cs.ClusterName, cs.SetMetadata) if err != nil { if cerrors.IsCloneRetryError(err) { From c5fd40a2431a357762805425b42552cc18b11d36 Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Wed, 30 Aug 2023 12:18:29 +0200 Subject: [PATCH 3/3] e2e: add e2e test for RWX from ROX added an e2e test case to create RWX clone from ROX and also try to write extra data in the RWX cloned PVC. 
Signed-off-by: Madhu Rajanna --- e2e/cephfs.go | 161 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 161 insertions(+) diff --git a/e2e/cephfs.go b/e2e/cephfs.go index 53fb03eb5a7..7e596c1c94a 100644 --- a/e2e/cephfs.go +++ b/e2e/cephfs.go @@ -2030,6 +2030,167 @@ var _ = Describe(cephfsType, func() { } }) + By("create RWX clone from ROX PVC", func() { + pvc, err := loadPVC(pvcPath) + if err != nil { + framework.Failf("failed to load PVC: %v", err) + } + pvc.Namespace = f.UniqueName + err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout) + if err != nil { + framework.Failf("failed to create PVC: %v", err) + } + + _, pv, err := getPVCAndPV(f.ClientSet, pvc.Name, pvc.Namespace) + if err != nil { + framework.Failf("failed to get PV object for %s: %v", pvc.Name, err) + } + + app, err := loadApp(appPath) + if err != nil { + framework.Failf("failed to load application: %v", err) + } + app.Namespace = f.UniqueName + app.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvc.Name + appLabels := map[string]string{ + appKey: appLabel, + } + app.Labels = appLabels + optApp := metav1.ListOptions{ + LabelSelector: fmt.Sprintf("%s=%s", appKey, appLabels[appKey]), + } + err = writeDataInPod(app, &optApp, f) + if err != nil { + framework.Failf("failed to write data: %v", err) + } + + appTestFilePath := app.Spec.Containers[0].VolumeMounts[0].MountPath + "/test" + + err = appendToFileInContainer(f, app, appTestFilePath, "hello", &optApp) + if err != nil { + framework.Failf("failed to append data: %v", err) + } + + parentFileSum, err := calculateSHA512sum(f, app, appTestFilePath, &optApp) + if err != nil { + framework.Failf("failed to get SHA512 sum for file: %v", err) + } + + snap := getSnapshot(snapshotPath) + snap.Namespace = f.UniqueName + snap.Spec.Source.PersistentVolumeClaimName = &pvc.Name + err = createSnapshot(&snap, deployTimeout) + if err != nil { + framework.Failf("failed to create snapshot: %v", err) + } + validateCephFSSnapshotCount(f, 1, 
subvolumegroup, pv) + + pvcClone, err := loadPVC(pvcClonePath) + if err != nil { + framework.Failf("failed to load PVC: %v", err) + } + // Snapshot-backed volumes support read-only access modes only. + pvcClone.Spec.DataSource.Name = snap.Name + pvcClone.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany} + + pvcClone.Namespace = f.UniqueName + err = createPVCAndvalidatePV(c, pvcClone, deployTimeout) + if err != nil { + framework.Failf("failed to create PVC: %v", err) + } + + validateSubvolumeCount(f, 1, fileSystemName, subvolumegroup) + + // create RWX clone from ROX PVC + pvcRWXClone, err := loadPVC(pvcSmartClonePath) + if err != nil { + framework.Failf("failed to load PVC: %v", err) + } + pvcRWXClone.Spec.DataSource.Name = pvcClone.Name + pvcRWXClone.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadWriteMany} + pvcRWXClone.Namespace = f.UniqueName + + appClone, err := loadApp(appPath) + if err != nil { + framework.Failf("failed to load application: %v", err) + } + appCloneLabels := map[string]string{ + appKey: appCloneLabel, + } + appClone.Name = f.UniqueName + "-app" + appClone.Namespace = f.UniqueName + appClone.Labels = appCloneLabels + appClone.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvcRWXClone.Name + optAppClone := metav1.ListOptions{ + LabelSelector: fmt.Sprintf("%s=%s", appKey, appCloneLabels[appKey]), + } + + err = createPVCAndApp("", f, pvcRWXClone, appClone, deployTimeout) + if err != nil { + framework.Failf("failed to create PVC and app: %v", err) + } + // 2 subvolumes should be created 1 for parent PVC and 1 for + // RWX clone PVC. 
+ validateSubvolumeCount(f, 2, fileSystemName, subvolumegroup) + + appCloneTestFilePath := appClone.Spec.Containers[0].VolumeMounts[0].MountPath + "/test" + + cloneFileSum, err := calculateSHA512sum(f, appClone, appCloneTestFilePath, &optAppClone) + if err != nil { + framework.Failf("failed to get SHA512 sum for file: %v", err) + } + + if parentFileSum != cloneFileSum { + framework.Failf( + "SHA512 sums of files in parent and ROX should not differ. parentFileSum: %s cloneFileSum: %s", + parentFileSum, + cloneFileSum) + } + + // Now try to write to the PVC as its a RWX PVC + err = appendToFileInContainer(f, app, appCloneTestFilePath, "testing", &optApp) + if err != nil { + framework.Failf("failed to append data: %v", err) + } + + // Deleting snapshot before deleting pvcClone should succeed. It will be + // deleted once all volumes that are backed by this snapshot are gone. + err = deleteSnapshot(&snap, deployTimeout) + if err != nil { + framework.Failf("failed to delete snapshot: %v", err) + } + + // delete parent pvc and app + err = deletePVCAndApp("", f, pvc, app) + if err != nil { + framework.Failf("failed to delete PVC or application: %v", err) + } + + // delete ROX clone PVC + err = deletePVCAndValidatePV(c, pvcClone, deployTimeout) + if err != nil { + framework.Failf("failed to delete PVC or application: %v", err) + } + // delete RWX clone PVC and app + err = deletePVCAndApp("", f, pvcRWXClone, appClone) + if err != nil { + framework.Failf("failed to delete PVC or application: %v", err) + } + + validateSubvolumeCount(f, 0, fileSystemName, subvolumegroup) + validateOmapCount(f, 0, cephfsType, metadataPool, volumesType) + + err = deleteResource(cephFSExamplePath + "storageclass.yaml") + if err != nil { + framework.Failf("failed to delete CephFS storageclass: %v", err) + } + + err = createCephfsStorageClass(f.ClientSet, f, false, nil) + if err != nil { + framework.Failf("failed to create CephFS storageclass: %v", err) + } + }) + if testCephFSFscrypt { kmsToTest 
:= map[string]kmsConfig{ "secrets-metadata-test": secretsMetadataKMS,