From fe85b8a90e1b3e1b422ee3760179710013c80de4 Mon Sep 17 00:00:00 2001
From: Pawan
Date: Wed, 24 Jun 2020 23:59:55 +0530
Subject: [PATCH] feat(zfspv): adding backup and restore support for ZFSPV

This adds the Backup and Restore controllers, which watch for ZFSBackup and
ZFSRestore events. The velero plugin creates a ZFSBackup CR carrying the
remote location information, and the backup controller sends the volume data
to that remote location. In the same way, the velero plugin creates a
ZFSRestore CR to restore the volume from the remote location, and the restore
controller restores the data.

Steps to use the velero plugin for ZFS-LocalPV:

1. Install velero.

2. Add the openebs plugin:

velero plugin add openebs/velero-plugin:latest

3. Create the volume snapshot location:

```yaml
apiVersion: velero.io/v1
kind: VolumeSnapshotLocation
metadata:
  name: default
  namespace: velero
spec:
  provider: openebs.io/zfspv-blockstore
  config:
    bucket: velero
    prefix: zfs
    namespace: openebs
    provider: aws
    region: minio
    s3ForcePathStyle: "true"
    s3Url: http://minio.velero.svc:9000
```

4. Create a backup:

velero backup create my-backup --snapshot-volumes --include-namespaces=velero-ns --volume-snapshot-locations=aws-cloud-default --storage-location=default

5. Create a schedule:

velero create schedule newschedule --schedule="*/1 * * * *" --snapshot-volumes --include-namespaces=velero-ns --volume-snapshot-locations=aws-local-default --storage-location=default

6. Restore from a backup:

velero restore create --from-backup my-backup --restore-volumes=true --namespace-mappings velero-ns:ns1

Signed-off-by: Pawan
---
 buildscripts/generate-manifests.sh | 38 ++
 buildscripts/zfs-driver/Dockerfile | 2 +-
 deploy/operators/centos7/zfs-operator.yaml | 189 ++++++++
 deploy/operators/centos8/zfs-operator.yaml | 189 ++++++++
 deploy/sample/zfsbackup.yaml | 12 +
 deploy/sample/zfsrestore.yaml | 11 +
 deploy/yamls/ubuntu/zfs-driver.yaml | 4 +-
 deploy/yamls/zfsbackup-crd.yaml | 104 +++++
 deploy/yamls/zfsrestore-crd.yaml | 85 ++++
 deploy/zfs-operator.yaml | 193 +++++-
 pkg/apis/openebs.io/zfs/v1/register.go | 4 +
 pkg/apis/openebs.io/zfs/v1/zfsbackup.go | 98 ++++
 pkg/apis/openebs.io/zfs/v1/zfsrestore.go | 88 ++++
 .../zfs/v1/zz_generated.deepcopy.go | 152 +++++++
 pkg/builder/bkpbuilder/backup.go | 117 +++++
 pkg/builder/bkpbuilder/build.go | 184 ++++++++
 pkg/builder/bkpbuilder/buildlist.go | 72 +++
 pkg/builder/bkpbuilder/kubernetes.go | 427 ++++++++++++++++++
 pkg/builder/restorebuilder/build.go | 169 +++++++
 pkg/builder/restorebuilder/buildlist.go | 72 +++
 pkg/builder/restorebuilder/kubernetes.go | 427 ++++++++++++++++++
 pkg/builder/restorebuilder/restore.go | 117 +++++
 pkg/driver/agent.go | 18 +
 .../typed/zfs/v1/fake/fake_zfs_client.go | 8 +
 .../typed/zfs/v1/fake/fake_zfsbackup.go | 140 ++++++
 .../typed/zfs/v1/fake/fake_zfsrestore.go | 140 ++++++
 .../typed/zfs/v1/generated_expansion.go | 4 +
 .../typed/zfs/v1/zfs_client.go | 10 +
 .../typed/zfs/v1/zfsbackup.go | 191 ++++++++
 .../typed/zfs/v1/zfsrestore.go | 191 ++++++++
 .../informer/externalversions/generic.go | 4 +
 .../externalversions/zfs/v1/interface.go | 14 +
 .../externalversions/zfs/v1/zfsbackup.go | 89 ++++
 .../externalversions/zfs/v1/zfsrestore.go | 89 ++++
 .../lister/zfs/v1/expansion_generated.go | 16 +
 pkg/generated/lister/zfs/v1/zfsbackup.go | 94 ++++
 pkg/generated/lister/zfs/v1/zfsrestore.go | 94 ++++
 pkg/mgmt/backup/backup.go | 250 ++++++++++
 pkg/mgmt/backup/builder.go | 136 ++++++
 pkg/mgmt/backup/start.go | 107 +++++
 pkg/mgmt/restore/builder.go | 136 ++++++
 pkg/mgmt/restore/restore.go | 246
++++++++++ pkg/mgmt/restore/start.go | 107 +++++ pkg/zfs/volume.go | 47 +- pkg/zfs/zfs_util.go | 141 +++++- 45 files changed, 5019 insertions(+), 7 deletions(-) create mode 100644 deploy/sample/zfsbackup.yaml create mode 100644 deploy/sample/zfsrestore.yaml create mode 100644 deploy/yamls/zfsbackup-crd.yaml create mode 100644 deploy/yamls/zfsrestore-crd.yaml create mode 100644 pkg/apis/openebs.io/zfs/v1/zfsbackup.go create mode 100644 pkg/apis/openebs.io/zfs/v1/zfsrestore.go create mode 100644 pkg/builder/bkpbuilder/backup.go create mode 100644 pkg/builder/bkpbuilder/build.go create mode 100644 pkg/builder/bkpbuilder/buildlist.go create mode 100644 pkg/builder/bkpbuilder/kubernetes.go create mode 100644 pkg/builder/restorebuilder/build.go create mode 100644 pkg/builder/restorebuilder/buildlist.go create mode 100644 pkg/builder/restorebuilder/kubernetes.go create mode 100644 pkg/builder/restorebuilder/restore.go create mode 100644 pkg/generated/clientset/internalclientset/typed/zfs/v1/fake/fake_zfsbackup.go create mode 100644 pkg/generated/clientset/internalclientset/typed/zfs/v1/fake/fake_zfsrestore.go create mode 100644 pkg/generated/clientset/internalclientset/typed/zfs/v1/zfsbackup.go create mode 100644 pkg/generated/clientset/internalclientset/typed/zfs/v1/zfsrestore.go create mode 100644 pkg/generated/informer/externalversions/zfs/v1/zfsbackup.go create mode 100644 pkg/generated/informer/externalversions/zfs/v1/zfsrestore.go create mode 100644 pkg/generated/lister/zfs/v1/zfsbackup.go create mode 100644 pkg/generated/lister/zfs/v1/zfsrestore.go create mode 100644 pkg/mgmt/backup/backup.go create mode 100644 pkg/mgmt/backup/builder.go create mode 100644 pkg/mgmt/backup/start.go create mode 100644 pkg/mgmt/restore/builder.go create mode 100644 pkg/mgmt/restore/restore.go create mode 100644 pkg/mgmt/restore/start.go diff --git a/buildscripts/generate-manifests.sh b/buildscripts/generate-manifests.sh index ebc574aa2..dbb33c2e9 100755 --- a/buildscripts/generate-manifests.sh +++ b/buildscripts/generate-manifests.sh @@ -62,6 +62,38 @@ echo ' cat deploy/yamls/zfs.openebs.io_zfssnapshots.yaml >> deploy/yamls/zfssnapshot-crd.yaml rm deploy/yamls/zfs.openebs.io_zfssnapshots.yaml + +echo ' + +############################################## +########### ############ +########### ZFSBackup CRD ############ +########### ############ +############################################## + +# ZFSBackups CRD is autogenerated via `make manifests` command. +# Do the modification in the code and run the `make manifests` command +# to generate the CRD definition' > deploy/yamls/zfsbackup-crd.yaml + +cat deploy/yamls/zfs.openebs.io_zfsbackups.yaml >> deploy/yamls/zfsbackup-crd.yaml +rm deploy/yamls/zfs.openebs.io_zfsbackups.yaml + + +echo ' + +############################################## +########### ############ +########### ZFSRestore CRD ############ +########### ############ +############################################## + +# ZFSRestores CRD is autogenerated via `make manifests` command. 
+# Do the modification in the code and run the `make manifests` command +# to generate the CRD definition' > deploy/yamls/zfsrestore-crd.yaml + +cat deploy/yamls/zfs.openebs.io_zfsrestores.yaml >> deploy/yamls/zfsrestore-crd.yaml +rm deploy/yamls/zfs.openebs.io_zfsrestores.yaml + ## create the operator file using all the yamls echo '# This manifest is autogenerated via `make manifests` command @@ -81,6 +113,12 @@ cat deploy/yamls/zfsvolume-crd.yaml >> deploy/zfs-operator.yaml # Add ZFSSnapshot v1alpha1 and v1 CRDs to the Operator yaml cat deploy/yamls/zfssnapshot-crd.yaml >> deploy/zfs-operator.yaml +# Add ZFSBackup v1 CRDs to the Operator yaml +cat deploy/yamls/zfsbackup-crd.yaml >> deploy/zfs-operator.yaml + +# Add ZFSRestore v1 CRDs to the Operator yaml +cat deploy/yamls/zfsrestore-crd.yaml >> deploy/zfs-operator.yaml + # Copy the base Operator yaml to CentOS 7 and 8 Operator yamls cp deploy/zfs-operator.yaml deploy/operators/centos7/zfs-operator.yaml cp deploy/zfs-operator.yaml deploy/operators/centos8/zfs-operator.yaml diff --git a/buildscripts/zfs-driver/Dockerfile b/buildscripts/zfs-driver/Dockerfile index 10466c55c..40f05e5ba 100644 --- a/buildscripts/zfs-driver/Dockerfile +++ b/buildscripts/zfs-driver/Dockerfile @@ -16,7 +16,7 @@ FROM ubuntu:19.10 RUN apt-get clean && rm -rf /var/lib/apt/lists/* RUN apt-get update; exit 0 RUN apt-get -y install rsyslog libssl-dev xfsprogs ca-certificates -RUN apt-get -y install btrfs-progs +RUN apt-get -y install btrfs-progs netcat ARG ARCH ARG DBUILD_DATE diff --git a/deploy/operators/centos7/zfs-operator.yaml b/deploy/operators/centos7/zfs-operator.yaml index fde6e05ee..a24ac46ee 100644 --- a/deploy/operators/centos7/zfs-operator.yaml +++ b/deploy/operators/centos7/zfs-operator.yaml @@ -829,6 +829,195 @@ status: conditions: [] storedVersions: [] + +############################################## +########### ############ +########### ZFSBackup CRD ############ +########### ############ +############################################## + +# ZFSBackups CRD is autogenerated via `make manifests` command. +# Do the modification in the code and run the `make manifests` command +# to generate the CRD definition + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.2.8 + creationTimestamp: null + name: zfsbackups.zfs.openebs.io +spec: + additionalPrinterColumns: + - JSONPath: .spec.prevSnapName + description: Previous snapshot for backup + name: PrevSnap + type: string + - JSONPath: .status + description: Backup status + name: Status + type: string + - JSONPath: .metadata.creationTimestamp + description: Age of the volume + name: Age + type: date + group: zfs.openebs.io + names: + kind: ZFSBackup + listKind: ZFSBackupList + plural: zfsbackups + shortNames: + - zb + singular: zfsbackup + preserveUnknownFields: false + scope: Namespaced + subresources: {} + validation: + openAPIV3Schema: + description: ZFSBackup describes a zfs backup resource created as a custom resource + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. 
Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ZFSBackupSpec is the spec for a ZFSBackup resource + properties: + backupDest: + description: BackupDest is the remote address for backup transfer + minLength: 1 + type: string + ownerNodeID: + description: OwnerNodeID is a name of the nodes where the source volume + is + minLength: 1 + type: string + prevSnapName: + description: PrevSnapName is the last completed-backup's snapshot name + type: string + volumeName: + description: VolumeName is a name of the volume for which this backup + is destined + minLength: 1 + type: string + required: + - backupDest + - ownerNodeID + - volumeName + type: object + status: + description: ZFSBackupStatus is to hold status of backup + type: string + required: + - spec + - status + type: object + version: v1 + versions: + - name: v1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + + +############################################## +########### ############ +########### ZFSRestore CRD ############ +########### ############ +############################################## + +# ZFSRestores CRD is autogenerated via `make manifests` command. +# Do the modification in the code and run the `make manifests` command +# to generate the CRD definition + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.2.8 + creationTimestamp: null + name: zfsrestores.zfs.openebs.io +spec: + group: zfs.openebs.io + names: + kind: ZFSRestore + listKind: ZFSRestoreList + plural: zfsrestores + singular: zfsrestore + preserveUnknownFields: false + scope: Namespaced + validation: + openAPIV3Schema: + description: ZFSRestore describes a cstor restore resource created as a custom + resource + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ZFSRestoreSpec is the spec for a ZFSRestore resource + properties: + ownerNodeID: + description: owner node name where restore volume is present + minLength: 1 + type: string + restoreSrc: + description: it can be ip:port in case of restore from remote or volumeName + in case of local restore + minLength: 1 + type: string + volumeName: + description: volume name to where restore has to be performed + minLength: 1 + type: string + required: + - ownerNodeID + - restoreSrc + - volumeName + type: object + status: + description: ZFSRestoreStatus is to hold result of action. 
+ type: string + required: + - spec + - status + type: object + version: v1 + versions: + - name: v1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + --- # Create the CSI Driver object diff --git a/deploy/operators/centos8/zfs-operator.yaml b/deploy/operators/centos8/zfs-operator.yaml index 92635ff03..34ac0fbb3 100644 --- a/deploy/operators/centos8/zfs-operator.yaml +++ b/deploy/operators/centos8/zfs-operator.yaml @@ -829,6 +829,195 @@ status: conditions: [] storedVersions: [] + +############################################## +########### ############ +########### ZFSBackup CRD ############ +########### ############ +############################################## + +# ZFSBackups CRD is autogenerated via `make manifests` command. +# Do the modification in the code and run the `make manifests` command +# to generate the CRD definition + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.2.8 + creationTimestamp: null + name: zfsbackups.zfs.openebs.io +spec: + additionalPrinterColumns: + - JSONPath: .spec.prevSnapName + description: Previous snapshot for backup + name: PrevSnap + type: string + - JSONPath: .status + description: Backup status + name: Status + type: string + - JSONPath: .metadata.creationTimestamp + description: Age of the volume + name: Age + type: date + group: zfs.openebs.io + names: + kind: ZFSBackup + listKind: ZFSBackupList + plural: zfsbackups + shortNames: + - zb + singular: zfsbackup + preserveUnknownFields: false + scope: Namespaced + subresources: {} + validation: + openAPIV3Schema: + description: ZFSBackup describes a zfs backup resource created as a custom resource + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ZFSBackupSpec is the spec for a ZFSBackup resource + properties: + backupDest: + description: BackupDest is the remote address for backup transfer + minLength: 1 + type: string + ownerNodeID: + description: OwnerNodeID is a name of the nodes where the source volume + is + minLength: 1 + type: string + prevSnapName: + description: PrevSnapName is the last completed-backup's snapshot name + type: string + volumeName: + description: VolumeName is a name of the volume for which this backup + is destined + minLength: 1 + type: string + required: + - backupDest + - ownerNodeID + - volumeName + type: object + status: + description: ZFSBackupStatus is to hold status of backup + type: string + required: + - spec + - status + type: object + version: v1 + versions: + - name: v1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + + +############################################## +########### ############ +########### ZFSRestore CRD ############ +########### ############ +############################################## + +# ZFSRestores CRD is autogenerated via `make manifests` command. +# Do the modification in the code and run the `make manifests` command +# to generate the CRD definition + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.2.8 + creationTimestamp: null + name: zfsrestores.zfs.openebs.io +spec: + group: zfs.openebs.io + names: + kind: ZFSRestore + listKind: ZFSRestoreList + plural: zfsrestores + singular: zfsrestore + preserveUnknownFields: false + scope: Namespaced + validation: + openAPIV3Schema: + description: ZFSRestore describes a cstor restore resource created as a custom + resource + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ZFSRestoreSpec is the spec for a ZFSRestore resource + properties: + ownerNodeID: + description: owner node name where restore volume is present + minLength: 1 + type: string + restoreSrc: + description: it can be ip:port in case of restore from remote or volumeName + in case of local restore + minLength: 1 + type: string + volumeName: + description: volume name to where restore has to be performed + minLength: 1 + type: string + required: + - ownerNodeID + - restoreSrc + - volumeName + type: object + status: + description: ZFSRestoreStatus is to hold result of action. 
+ type: string + required: + - spec + - status + type: object + version: v1 + versions: + - name: v1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + --- # Create the CSI Driver object diff --git a/deploy/sample/zfsbackup.yaml b/deploy/sample/zfsbackup.yaml new file mode 100644 index 000000000..1c06e49ae --- /dev/null +++ b/deploy/sample/zfsbackup.yaml @@ -0,0 +1,12 @@ +apiVersion: zfs.openebs.io/v1 +kind: ZFSBackup +metadata: + name: testbkp + namespace: openebs +spec: + volumeName: "pvc-55e3ec53-a82c-4753-9037-9a610ea8b5d6" + ownerNodeID: pawan-node-1 + snapName: "snap1" + backupDest: "10.41.10.61:1234" +status: "Init" + diff --git a/deploy/sample/zfsrestore.yaml b/deploy/sample/zfsrestore.yaml new file mode 100644 index 000000000..97bd41ade --- /dev/null +++ b/deploy/sample/zfsrestore.yaml @@ -0,0 +1,11 @@ +apiVersion: zfs.openebs.io/v1 +kind: ZFSRestore +metadata: + name: testrestore + namespace: openebs +spec: + volumeName: "pvc-794118b5-143b-49a1-99e8-4c2b80a74776" + ownerNodeID: pawan-node-1 + restoreSrc: "10.41.10.61:1234" +status: "Init" + diff --git a/deploy/yamls/ubuntu/zfs-driver.yaml b/deploy/yamls/ubuntu/zfs-driver.yaml index 05aeb4c6e..6f25a3c14 100644 --- a/deploy/yamls/ubuntu/zfs-driver.yaml +++ b/deploy/yamls/ubuntu/zfs-driver.yaml @@ -478,7 +478,7 @@ rules: resources: ["nodes"] verbs: ["get", "list", "watch"] - apiGroups: ["*"] - resources: ["zfsvolumes", "zfssnapshots"] + resources: ["zfsvolumes", "zfssnapshots", "zfsbackups", "zfsrestores"] verbs: ["*"] --- @@ -681,7 +681,7 @@ rules: resources: ["persistentvolumes", "nodes", "services"] verbs: ["get", "list"] - apiGroups: ["*"] - resources: ["zfsvolumes", "zfssnapshots"] + resources: ["zfsvolumes", "zfssnapshots", "zfsbackups", "zfsrestores"] verbs: ["get", "list", "watch", "create", "update", "patch"] --- diff --git a/deploy/yamls/zfsbackup-crd.yaml b/deploy/yamls/zfsbackup-crd.yaml new file mode 100644 index 000000000..506f31aff --- /dev/null +++ b/deploy/yamls/zfsbackup-crd.yaml @@ -0,0 +1,104 @@ + + +############################################## +########### ############ +########### ZFSBackup CRD ############ +########### ############ +############################################## + +# ZFSBackups CRD is autogenerated via `make manifests` command. +# Do the modification in the code and run the `make manifests` command +# to generate the CRD definition + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.2.8 + creationTimestamp: null + name: zfsbackups.zfs.openebs.io +spec: + additionalPrinterColumns: + - JSONPath: .spec.prevSnapName + description: Previous snapshot for backup + name: PrevSnap + type: string + - JSONPath: .status + description: Backup status + name: Status + type: string + - JSONPath: .metadata.creationTimestamp + description: Age of the volume + name: Age + type: date + group: zfs.openebs.io + names: + kind: ZFSBackup + listKind: ZFSBackupList + plural: zfsbackups + shortNames: + - zb + singular: zfsbackup + preserveUnknownFields: false + scope: Namespaced + subresources: {} + validation: + openAPIV3Schema: + description: ZFSBackup describes a zfs backup resource created as a custom resource + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ZFSBackupSpec is the spec for a ZFSBackup resource + properties: + backupDest: + description: BackupDest is the remote address for backup transfer + minLength: 1 + type: string + ownerNodeID: + description: OwnerNodeID is a name of the nodes where the source volume + is + minLength: 1 + type: string + prevSnapName: + description: PrevSnapName is the last completed-backup's snapshot name + type: string + volumeName: + description: VolumeName is a name of the volume for which this backup + is destined + minLength: 1 + type: string + required: + - backupDest + - ownerNodeID + - volumeName + type: object + status: + description: ZFSBackupStatus is to hold status of backup + type: string + required: + - spec + - status + type: object + version: v1 + versions: + - name: v1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/deploy/yamls/zfsrestore-crd.yaml b/deploy/yamls/zfsrestore-crd.yaml new file mode 100644 index 000000000..4726206d8 --- /dev/null +++ b/deploy/yamls/zfsrestore-crd.yaml @@ -0,0 +1,85 @@ + + +############################################## +########### ############ +########### ZFSRestore CRD ############ +########### ############ +############################################## + +# ZFSRestores CRD is autogenerated via `make manifests` command. +# Do the modification in the code and run the `make manifests` command +# to generate the CRD definition + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.2.8 + creationTimestamp: null + name: zfsrestores.zfs.openebs.io +spec: + group: zfs.openebs.io + names: + kind: ZFSRestore + listKind: ZFSRestoreList + plural: zfsrestores + singular: zfsrestore + preserveUnknownFields: false + scope: Namespaced + validation: + openAPIV3Schema: + description: ZFSRestore describes a cstor restore resource created as a custom + resource + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ZFSRestoreSpec is the spec for a ZFSRestore resource + properties: + ownerNodeID: + description: owner node name where restore volume is present + minLength: 1 + type: string + restoreSrc: + description: it can be ip:port in case of restore from remote or volumeName + in case of local restore + minLength: 1 + type: string + volumeName: + description: volume name to where restore has to be performed + minLength: 1 + type: string + required: + - ownerNodeID + - restoreSrc + - volumeName + type: object + status: + description: ZFSRestoreStatus is to hold result of action. + type: string + required: + - spec + - status + type: object + version: v1 + versions: + - name: v1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/deploy/zfs-operator.yaml b/deploy/zfs-operator.yaml index 1428d3347..00ae75b6f 100644 --- a/deploy/zfs-operator.yaml +++ b/deploy/zfs-operator.yaml @@ -829,6 +829,195 @@ status: conditions: [] storedVersions: [] + +############################################## +########### ############ +########### ZFSBackup CRD ############ +########### ############ +############################################## + +# ZFSBackups CRD is autogenerated via `make manifests` command. +# Do the modification in the code and run the `make manifests` command +# to generate the CRD definition + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.2.8 + creationTimestamp: null + name: zfsbackups.zfs.openebs.io +spec: + additionalPrinterColumns: + - JSONPath: .spec.prevSnapName + description: Previous snapshot for backup + name: PrevSnap + type: string + - JSONPath: .status + description: Backup status + name: Status + type: string + - JSONPath: .metadata.creationTimestamp + description: Age of the volume + name: Age + type: date + group: zfs.openebs.io + names: + kind: ZFSBackup + listKind: ZFSBackupList + plural: zfsbackups + shortNames: + - zb + singular: zfsbackup + preserveUnknownFields: false + scope: Namespaced + subresources: {} + validation: + openAPIV3Schema: + description: ZFSBackup describes a zfs backup resource created as a custom resource + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ZFSBackupSpec is the spec for a ZFSBackup resource + properties: + backupDest: + description: BackupDest is the remote address for backup transfer + minLength: 1 + type: string + ownerNodeID: + description: OwnerNodeID is a name of the nodes where the source volume + is + minLength: 1 + type: string + prevSnapName: + description: PrevSnapName is the last completed-backup's snapshot name + type: string + volumeName: + description: VolumeName is a name of the volume for which this backup + is destined + minLength: 1 + type: string + required: + - backupDest + - ownerNodeID + - volumeName + type: object + status: + description: ZFSBackupStatus is to hold status of backup + type: string + required: + - spec + - status + type: object + version: v1 + versions: + - name: v1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + + +############################################## +########### ############ +########### ZFSRestore CRD ############ +########### ############ +############################################## + +# ZFSRestores CRD is autogenerated via `make manifests` command. +# Do the modification in the code and run the `make manifests` command +# to generate the CRD definition + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.2.8 + creationTimestamp: null + name: zfsrestores.zfs.openebs.io +spec: + group: zfs.openebs.io + names: + kind: ZFSRestore + listKind: ZFSRestoreList + plural: zfsrestores + singular: zfsrestore + preserveUnknownFields: false + scope: Namespaced + validation: + openAPIV3Schema: + description: ZFSRestore describes a cstor restore resource created as a custom + resource + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ZFSRestoreSpec is the spec for a ZFSRestore resource + properties: + ownerNodeID: + description: owner node name where restore volume is present + minLength: 1 + type: string + restoreSrc: + description: it can be ip:port in case of restore from remote or volumeName + in case of local restore + minLength: 1 + type: string + volumeName: + description: volume name to where restore has to be performed + minLength: 1 + type: string + required: + - ownerNodeID + - restoreSrc + - volumeName + type: object + status: + description: ZFSRestoreStatus is to hold result of action. 
+ type: string + required: + - spec + - status + type: object + version: v1 + versions: + - name: v1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + --- # Create the CSI Driver object @@ -1308,7 +1497,7 @@ rules: resources: ["nodes"] verbs: ["get", "list", "watch"] - apiGroups: ["*"] - resources: ["zfsvolumes", "zfssnapshots"] + resources: ["zfsvolumes", "zfssnapshots", "zfsbackups", "zfsrestores"] verbs: ["*"] --- @@ -1511,7 +1700,7 @@ rules: resources: ["persistentvolumes", "nodes", "services"] verbs: ["get", "list"] - apiGroups: ["*"] - resources: ["zfsvolumes", "zfssnapshots"] + resources: ["zfsvolumes", "zfssnapshots", "zfsbackups", "zfsrestores"] verbs: ["get", "list", "watch", "create", "update", "patch"] --- diff --git a/pkg/apis/openebs.io/zfs/v1/register.go b/pkg/apis/openebs.io/zfs/v1/register.go index 97bd9aacb..544b4d034 100644 --- a/pkg/apis/openebs.io/zfs/v1/register.go +++ b/pkg/apis/openebs.io/zfs/v1/register.go @@ -73,6 +73,10 @@ func addKnownTypes(scheme *runtime.Scheme) error { &ZFSVolumeList{}, &ZFSSnapshot{}, &ZFSSnapshotList{}, + &ZFSBackup{}, + &ZFSBackupList{}, + &ZFSRestore{}, + &ZFSRestoreList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/pkg/apis/openebs.io/zfs/v1/zfsbackup.go b/pkg/apis/openebs.io/zfs/v1/zfsbackup.go new file mode 100644 index 000000000..9357fce37 --- /dev/null +++ b/pkg/apis/openebs.io/zfs/v1/zfsbackup.go @@ -0,0 +1,98 @@ +/* +Copyright 2020 The OpenEBS Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +resource:path=zfsbackup + +// ZFSBackup describes a zfs backup resource created as a custom resource +// +kubebuilder:object:root=true +// +kubebuilder:storageversion +// +kubebuilder:resource:scope=Namespaced,shortName=zb +// +kubebuilder:printcolumn:name="PrevSnap",type=string,JSONPath=`.spec.prevSnapName`,description="Previous snapshot for backup" +// +kubebuilder:printcolumn:name="Status",type=string,JSONPath=`.status`,description="Backup status" +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`,description="Age of the volume" +type ZFSBackup struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec ZFSBackupSpec `json:"spec"` + Status ZFSBackupStatus `json:"status"` +} + +// ZFSBackupSpec is the spec for a ZFSBackup resource +type ZFSBackupSpec struct { + // VolumeName is a name of the volume for which this backup is destined + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + VolumeName string `json:"volumeName"` + + // OwnerNodeID is a name of the nodes where the source volume is + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + OwnerNodeID string `json:"ownerNodeID"` + + // PrevSnapName is the last completed-backup's snapshot name + PrevSnapName string `json:"prevSnapName,omitempty"` + + // BackupDest is the remote address for backup transfer + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + BackupDest string `json:"backupDest"` +} + +// ZFSBackupStatus is to hold status of backup +type ZFSBackupStatus string + +// Status written onto ZFSBackup objects. +const ( + // BKPZFSStatusEmpty ensures the create operation is to be done, if import fails. + BKPZFSStatusEmpty ZFSBackupStatus = "" + + // BKPZFSStatusDone , backup is completed. + BKPZFSStatusDone ZFSBackupStatus = "Done" + + // BKPZFSStatusFailed , backup is failed. + BKPZFSStatusFailed ZFSBackupStatus = "Failed" + + // BKPZFSStatusInit , backup is initialized. + BKPZFSStatusInit ZFSBackupStatus = "Init" + + // BKPZFSStatusPending , backup is pending. + BKPZFSStatusPending ZFSBackupStatus = "Pending" + + // BKPZFSStatusInProgress , backup is in progress. + BKPZFSStatusInProgress ZFSBackupStatus = "InProgress" + + // BKPZFSStatusInvalid , backup operation is invalid. + BKPZFSStatusInvalid ZFSBackupStatus = "Invalid" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +resource:path=zfsbackups + +// ZFSBackupList is a list of ZFSBackup resources +type ZFSBackupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []ZFSBackup `json:"items"` +} diff --git a/pkg/apis/openebs.io/zfs/v1/zfsrestore.go b/pkg/apis/openebs.io/zfs/v1/zfsrestore.go new file mode 100644 index 000000000..003739562 --- /dev/null +++ b/pkg/apis/openebs.io/zfs/v1/zfsrestore.go @@ -0,0 +1,88 @@ +/* +Copyright 2020 The OpenEBS Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +resource:path=zfsrestore + +// ZFSRestore describes a cstor restore resource created as a custom resource +type ZFSRestore struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` // set name to restore name + volume name + something like csp tag + Spec ZFSRestoreSpec `json:"spec"` + Status ZFSRestoreStatus `json:"status"` +} + +// ZFSRestoreSpec is the spec for a ZFSRestore resource +type ZFSRestoreSpec struct { + // volume name to where restore has to be performed + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + VolumeName string `json:"volumeName"` + // owner node name where restore volume is present + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + OwnerNodeID string `json:"ownerNodeID"` + + // it can be ip:port in case of restore from remote or volumeName in case of local restore + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + RestoreSrc string `json:"restoreSrc"` +} + +// ZFSRestoreStatus is to hold result of action. +type ZFSRestoreStatus string + +// Status written onto CStrorRestore object. +const ( + // RSTZFSStatusEmpty ensures the create operation is to be done, if import fails. + RSTZFSStatusEmpty ZFSRestoreStatus = "" + + // RSTZFSStatusDone , restore operation is completed. + RSTZFSStatusDone ZFSRestoreStatus = "Done" + + // RSTZFSStatusFailed , restore operation is failed. + RSTZFSStatusFailed ZFSRestoreStatus = "Failed" + + // RSTZFSStatusInit , restore operation is initialized. + RSTZFSStatusInit ZFSRestoreStatus = "Init" + + // RSTZFSStatusPending , restore operation is pending. + RSTZFSStatusPending ZFSRestoreStatus = "Pending" + + // RSTZFSStatusInProgress , restore operation is in progress. + RSTZFSStatusInProgress ZFSRestoreStatus = "InProgress" + + // RSTZFSStatusInvalid , restore operation is invalid. + RSTZFSStatusInvalid ZFSRestoreStatus = "Invalid" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +resource:path=zfsrestores + +// ZFSRestoreList is a list of ZFSRestore resources +type ZFSRestoreList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []ZFSRestore `json:"items"` +} diff --git a/pkg/apis/openebs.io/zfs/v1/zz_generated.deepcopy.go b/pkg/apis/openebs.io/zfs/v1/zz_generated.deepcopy.go index 9b5419aee..a6ac5dbf0 100644 --- a/pkg/apis/openebs.io/zfs/v1/zz_generated.deepcopy.go +++ b/pkg/apis/openebs.io/zfs/v1/zz_generated.deepcopy.go @@ -98,6 +98,158 @@ func (in *VolumeInfo) DeepCopy() *VolumeInfo { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ZFSBackup) DeepCopyInto(out *ZFSBackup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZFSBackup. +func (in *ZFSBackup) DeepCopy() *ZFSBackup { + if in == nil { + return nil + } + out := new(ZFSBackup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ZFSBackup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZFSBackupList) DeepCopyInto(out *ZFSBackupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ZFSBackup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZFSBackupList. +func (in *ZFSBackupList) DeepCopy() *ZFSBackupList { + if in == nil { + return nil + } + out := new(ZFSBackupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ZFSBackupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZFSBackupSpec) DeepCopyInto(out *ZFSBackupSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZFSBackupSpec. +func (in *ZFSBackupSpec) DeepCopy() *ZFSBackupSpec { + if in == nil { + return nil + } + out := new(ZFSBackupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZFSRestore) DeepCopyInto(out *ZFSRestore) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZFSRestore. +func (in *ZFSRestore) DeepCopy() *ZFSRestore { + if in == nil { + return nil + } + out := new(ZFSRestore) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ZFSRestore) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZFSRestoreList) DeepCopyInto(out *ZFSRestoreList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ZFSRestore, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZFSRestoreList. 
+func (in *ZFSRestoreList) DeepCopy() *ZFSRestoreList { + if in == nil { + return nil + } + out := new(ZFSRestoreList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ZFSRestoreList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZFSRestoreSpec) DeepCopyInto(out *ZFSRestoreSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZFSRestoreSpec. +func (in *ZFSRestoreSpec) DeepCopy() *ZFSRestoreSpec { + if in == nil { + return nil + } + out := new(ZFSRestoreSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ZFSSnapshot) DeepCopyInto(out *ZFSSnapshot) { *out = *in diff --git a/pkg/builder/bkpbuilder/backup.go b/pkg/builder/bkpbuilder/backup.go new file mode 100644 index 000000000..108e38442 --- /dev/null +++ b/pkg/builder/bkpbuilder/backup.go @@ -0,0 +1,117 @@ +// Copyright © 2020 The OpenEBS Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package bkpbuilder
+
+import (
+    apis "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/zfs/v1"
+)
+
+// ZFSBackup is a wrapper over
+// ZFSBackup API instance
+type ZFSBackup struct {
+    // ZFSBackup object
+    Object *apis.ZFSBackup
+}
+
+// From returns a new instance of
+// ZFSBackup
+func From(bkp *apis.ZFSBackup) *ZFSBackup {
+    return &ZFSBackup{
+        Object: bkp,
+    }
+}
+
+// Predicate defines an abstraction
+// to determine conditional checks
+// against the provided ZFSBackup instance
+type Predicate func(*ZFSBackup) bool
+
+// predicateList holds a list of predicates
+type predicateList []Predicate
+
+// ZFSBackupList holds the list
+// of zfs backup instances
+type ZFSBackupList struct {
+    // List contains list of backups
+    List apis.ZFSBackupList
+}
+
+// Len returns the number of items present
+// in the ZFSBackupList
+func (bkpList *ZFSBackupList) Len() int {
+    return len(bkpList.List.Items)
+}
+
+// all returns true if all the predicates
+// succeed against the provided ZFSBackup
+// instance
+func (l predicateList) all(bkp *ZFSBackup) bool {
+    for _, pred := range l {
+        if !pred(bkp) {
+            return false
+        }
+    }
+    return true
+}
+
+// HasLabels returns true if provided labels
+// are present in the provided ZFSBackup instance
+func HasLabels(keyValuePair map[string]string) Predicate {
+    return func(bkp *ZFSBackup) bool {
+        for key, value := range keyValuePair {
+            if !bkp.HasLabel(key, value) {
+                return false
+            }
+        }
+        return true
+    }
+}
+
+// HasLabel returns true if provided label
+// is present in the provided ZFSBackup instance
+func (bkp *ZFSBackup) HasLabel(key, value string) bool {
+    val, ok := bkp.Object.GetLabels()[key]
+    if ok {
+        return val == value
+    }
+    return false
+}
+
+// HasLabel returns true if provided label
+// is present in the provided ZFSBackup instance
+func HasLabel(key, value string) Predicate {
+    return func(bkp *ZFSBackup) bool {
+        return bkp.HasLabel(key, value)
+    }
+}
+
+// IsNil returns true if the ZFSBackup instance
+// is nil
+func (bkp *ZFSBackup) IsNil() bool {
+    return bkp.Object == nil
+}
+
+// IsNil is a predicate to filter out nil ZFSBackup
+// instances
+func IsNil() Predicate {
+    return func(bkp *ZFSBackup) bool {
+        return bkp.IsNil()
+    }
+}
+
+// GetAPIObject returns the ZFSBackup's API instance
+func (bkp *ZFSBackup) GetAPIObject() *apis.ZFSBackup {
+    return bkp.Object
+}
diff --git a/pkg/builder/bkpbuilder/build.go b/pkg/builder/bkpbuilder/build.go
new file mode 100644
index 000000000..3a80b67ad
--- /dev/null
+++ b/pkg/builder/bkpbuilder/build.go
@@ -0,0 +1,184 @@
+/*
+Copyright 2020 The OpenEBS Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package bkpbuilder
+
+import (
+    apis "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/zfs/v1"
+    "github.com/openebs/zfs-localpv/pkg/common/errors"
+)
+
+// Builder is the builder object for ZFSBackup
+type Builder struct {
+    bkp  *ZFSBackup
+    errs []error
+}
+
+// NewBuilder returns new instance of Builder
+func NewBuilder() *Builder {
+    return &Builder{
+        bkp: &ZFSBackup{
+            Object: &apis.ZFSBackup{},
+        },
+    }
+}
+
+// BuildFrom returns new instance of Builder
+// from the provided api instance
+func BuildFrom(bkp *apis.ZFSBackup) *Builder {
+    if bkp == nil {
+        b := NewBuilder()
+        b.errs = append(
+            b.errs,
+            errors.New("failed to build bkp object: nil bkp"),
+        )
+        return b
+    }
+    return &Builder{
+        bkp: &ZFSBackup{
+            Object: bkp,
+        },
+    }
+}
+
+// WithNamespace sets the namespace of ZFSBackup
+func (b *Builder) WithNamespace(namespace string) *Builder {
+    if namespace == "" {
+        b.errs = append(
+            b.errs,
+            errors.New(
+                "failed to build bkp object: missing namespace",
+            ),
+        )
+        return b
+    }
+    b.bkp.Object.Namespace = namespace
+    return b
+}
+
+// WithName sets the name of ZFSBackup
+func (b *Builder) WithName(name string) *Builder {
+    if name == "" {
+        b.errs = append(
+            b.errs,
+            errors.New(
+                "failed to build bkp object: missing name",
+            ),
+        )
+        return b
+    }
+    b.bkp.Object.Name = name
+    return b
+}
+
+// WithPrevSnap sets the previous snapshot for ZFSBackup
+func (b *Builder) WithPrevSnap(snap string) *Builder {
+    b.bkp.Object.Spec.PrevSnapName = snap
+    return b
+}
+
+// WithVolume sets the volume name of ZFSBackup
+func (b *Builder) WithVolume(volume string) *Builder {
+    if volume == "" {
+        b.errs = append(
+            b.errs,
+            errors.New(
+                "failed to build bkp object: missing volume name",
+            ),
+        )
+        return b
+    }
+    b.bkp.Object.Spec.VolumeName = volume
+    return b
+}
+
+// WithNode sets the owner node for the ZFSBackup
+func (b *Builder) WithNode(node string) *Builder {
+    if node == "" {
+        b.errs = append(
+            b.errs,
+            errors.New(
+                "failed to build bkp object: missing node id",
+            ),
+        )
+        return b
+    }
+    b.bkp.Object.Spec.OwnerNodeID = node
+    return b
+}
+
+// WithStatus sets the status of the Backup progress
+func (b *Builder) WithStatus(status apis.ZFSBackupStatus) *Builder {
+    if status == "" {
+        b.errs = append(
+            b.errs,
+            errors.New(
+                "failed to build bkp object: missing status",
+            ),
+        )
+        return b
+    }
+    b.bkp.Object.Status = status
+    return b
+}
+
+// WithRemote sets the remote address for the ZFSBackup
+func (b *Builder) WithRemote(server string) *Builder {
+    if server == "" {
+        b.errs = append(
+            b.errs,
+            errors.New(
+                "failed to build bkp object: missing remote",
+            ),
+        )
+        return b
+    }
+    b.bkp.Object.Spec.BackupDest = server
+    return b
+}
+
+// WithLabels merges existing labels if any
+// with the ones that are provided here
+func (b *Builder) WithLabels(labels map[string]string) *Builder {
+    if len(labels) == 0 {
+        return b
+    }
+
+    if b.bkp.Object.Labels == nil {
+        b.bkp.Object.Labels = map[string]string{}
+    }
+
+    for key, value := range labels {
+        b.bkp.Object.Labels[key] = value
+    }
+    return b
+}
+
+// WithFinalizer merges existing finalizers if any
+// with the ones that are provided here
+func (b *Builder) WithFinalizer(finalizer []string) *Builder {
+    b.bkp.Object.Finalizers = append(b.bkp.Object.Finalizers, finalizer...)
+ return b +} + +// Build returns ZFSBackup API object +func (b *Builder) Build() (*apis.ZFSBackup, error) { + if len(b.errs) > 0 { + return nil, errors.Errorf("%+v", b.errs) + } + + return b.bkp.Object, nil +} diff --git a/pkg/builder/bkpbuilder/buildlist.go b/pkg/builder/bkpbuilder/buildlist.go new file mode 100644 index 000000000..64d19d318 --- /dev/null +++ b/pkg/builder/bkpbuilder/buildlist.go @@ -0,0 +1,72 @@ +/* +Copyright 2020 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bkpbuilder + +import ( + apis "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/zfs/v1" +) + +// ListBuilder enables building an instance of +// ZFSBackupList +type ListBuilder struct { + list *apis.ZFSBackupList + filters predicateList +} + +// NewListBuilder returns a new instance of ListBuilder +func NewListBuilder() *ListBuilder { + return &ListBuilder{ + list: &apis.ZFSBackupList{}, + } +} + +// ListBuilderFrom returns a new instance of +// ListBuilder from API list instance +func ListBuilderFrom(bkps apis.ZFSBackupList) *ListBuilder { + b := &ListBuilder{list: &apis.ZFSBackupList{}} + if len(bkps.Items) == 0 { + return b + } + + b.list.Items = append(b.list.Items, bkps.Items...) + return b +} + +// List returns the list of pod +// instances that was built by this +// builder +func (b *ListBuilder) List() *apis.ZFSBackupList { + if b.filters == nil || len(b.filters) == 0 { + return b.list + } + + filtered := &apis.ZFSBackupList{} + for _, bkp := range b.list.Items { + bkp := bkp // pin it + if b.filters.all(From(&bkp)) { + filtered.Items = append(filtered.Items, bkp) + } + } + return filtered +} + +// WithFilter add filters on which the pod +// has to be filtered +func (b *ListBuilder) WithFilter(pred ...Predicate) *ListBuilder { + b.filters = append(b.filters, pred...) + return b +} diff --git a/pkg/builder/bkpbuilder/kubernetes.go b/pkg/builder/bkpbuilder/kubernetes.go new file mode 100644 index 000000000..cc680ea9e --- /dev/null +++ b/pkg/builder/bkpbuilder/kubernetes.go @@ -0,0 +1,427 @@ +// Copyright © 2020 The OpenEBS Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package bkpbuilder
+
+import (
+    "encoding/json"
+
+    apis "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/zfs/v1"
+    client "github.com/openebs/zfs-localpv/pkg/common/kubernetes/client"
+    clientset "github.com/openebs/zfs-localpv/pkg/generated/clientset/internalclientset"
+    "github.com/pkg/errors"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// getClientsetFn is a typed function that
+// abstracts fetching of internal clientset
+type getClientsetFn func() (clientset *clientset.Clientset, err error)
+
+// getClientsetForPathFn is a typed function that
+// abstracts fetching of clientset from kubeConfigPath
+type getClientsetForPathFn func(kubeConfigPath string) (
+    clientset *clientset.Clientset,
+    err error,
+)
+
+// createFn is a typed function that abstracts
+// creating a zfs backup instance
+type createFn func(
+    cs *clientset.Clientset,
+    upgradeResultObj *apis.ZFSBackup,
+    namespace string,
+) (*apis.ZFSBackup, error)
+
+// getFn is a typed function that abstracts
+// fetching a zfs backup instance
+type getFn func(
+    cli *clientset.Clientset,
+    name,
+    namespace string,
+    opts metav1.GetOptions,
+) (*apis.ZFSBackup, error)
+
+// listFn is a typed function that abstracts
+// listing of zfs backup instances
+type listFn func(
+    cli *clientset.Clientset,
+    namespace string,
+    opts metav1.ListOptions,
+) (*apis.ZFSBackupList, error)
+
+// delFn is a typed function that abstracts
+// deleting a zfs backup instance
+type delFn func(
+    cli *clientset.Clientset,
+    name,
+    namespace string,
+    opts *metav1.DeleteOptions,
+) error
+
+// updateFn is a typed function that abstracts
+// updating a zfs backup instance
+type updateFn func(
+    cs *clientset.Clientset,
+    bkp *apis.ZFSBackup,
+    namespace string,
+) (*apis.ZFSBackup, error)
+
+// Kubeclient enables kubernetes API operations
+// on zfs backup instances
+type Kubeclient struct {
+    // clientset refers to the zfs backup's
+    // clientset that will be responsible to
+    // make kubernetes API calls
+    clientset *clientset.Clientset
+
+    kubeConfigPath string
+
+    // namespace holds the namespace on which
+    // kubeclient has to operate
+    namespace string
+
+    // functions useful during mocking
+    getClientset        getClientsetFn
+    getClientsetForPath getClientsetForPathFn
+    get                 getFn
+    list                listFn
+    del                 delFn
+    create              createFn
+    update              updateFn
+}
+
+// KubeclientBuildOption defines the abstraction
+// to build a kubeclient instance
+type KubeclientBuildOption func(*Kubeclient)
+
+// defaultGetClientset is the default implementation to
+// get kubernetes clientset instance
+func defaultGetClientset() (clients *clientset.Clientset, err error) {
+
+    config, err := client.GetConfig(client.New())
+    if err != nil {
+        return nil, err
+    }
+
+    return clientset.NewForConfig(config)
+
+}
+
+// defaultGetClientsetForPath is the default implementation to
+// get kubernetes clientset instance based on the given
+// kubeconfig path
+func defaultGetClientsetForPath(
+    kubeConfigPath string,
+) (clients *clientset.Clientset, err error) {
+    config, err := client.GetConfig(
+        client.New(client.WithKubeConfigPath(kubeConfigPath)))
+    if err != nil {
+        return nil, err
+    }
+
+    return clientset.NewForConfig(config)
+}
+
+// defaultGet is the default implementation to get
+// a zfs backup instance in kubernetes cluster
+func defaultGet(
+    cli *clientset.Clientset,
+    name, namespace string,
+    opts metav1.GetOptions,
+) (*apis.ZFSBackup, error) {
+    return cli.ZfsV1().
+        ZFSBackups(namespace).
+ Get(name, opts) +} + +// defaultList is the default implementation to list +// zfsbkp bkpume instances in kubernetes cluster +func defaultList( + cli *clientset.Clientset, + namespace string, + opts metav1.ListOptions, +) (*apis.ZFSBackupList, error) { + return cli.ZfsV1(). + ZFSBackups(namespace). + List(opts) +} + +// defaultCreate is the default implementation to delete +// a zfsbkp bkpume instance in kubernetes cluster +func defaultDel( + cli *clientset.Clientset, + name, namespace string, + opts *metav1.DeleteOptions, +) error { + deletePropagation := metav1.DeletePropagationForeground + opts.PropagationPolicy = &deletePropagation + err := cli.ZfsV1(). + ZFSBackups(namespace). + Delete(name, opts) + return err +} + +// defaultCreate is the default implementation to create +// a zfsbkp bkpume instance in kubernetes cluster +func defaultCreate( + cli *clientset.Clientset, + bkp *apis.ZFSBackup, + namespace string, +) (*apis.ZFSBackup, error) { + return cli.ZfsV1(). + ZFSBackups(namespace). + Create(bkp) +} + +// defaultUpdate is the default implementation to update +// a zfsbkp bkpume instance in kubernetes cluster +func defaultUpdate( + cli *clientset.Clientset, + bkp *apis.ZFSBackup, + namespace string, +) (*apis.ZFSBackup, error) { + return cli.ZfsV1(). + ZFSBackups(namespace). + Update(bkp) +} + +// withDefaults sets the default options +// of kubeclient instance +func (k *Kubeclient) withDefaults() { + if k.getClientset == nil { + k.getClientset = defaultGetClientset + } + if k.getClientsetForPath == nil { + k.getClientsetForPath = defaultGetClientsetForPath + } + if k.get == nil { + k.get = defaultGet + } + if k.list == nil { + k.list = defaultList + } + if k.del == nil { + k.del = defaultDel + } + if k.create == nil { + k.create = defaultCreate + } + if k.update == nil { + k.update = defaultUpdate + } +} + +// WithClientSet sets the kubernetes client against +// the kubeclient instance +func WithClientSet(c *clientset.Clientset) KubeclientBuildOption { + return func(k *Kubeclient) { + k.clientset = c + } +} + +// WithNamespace sets the kubernetes client against +// the provided namespace +func WithNamespace(namespace string) KubeclientBuildOption { + return func(k *Kubeclient) { + k.namespace = namespace + } +} + +// WithNamespace sets the provided namespace +// against this Kubeclient instance +func (k *Kubeclient) WithNamespace(namespace string) *Kubeclient { + k.namespace = namespace + return k +} + +// WithKubeConfigPath sets the kubernetes client +// against the provided path +func WithKubeConfigPath(path string) KubeclientBuildOption { + return func(k *Kubeclient) { + k.kubeConfigPath = path + } +} + +// NewKubeclient returns a new instance of +// kubeclient meant for zfsbkp bkpume operations +func NewKubeclient(opts ...KubeclientBuildOption) *Kubeclient { + k := &Kubeclient{} + for _, o := range opts { + o(k) + } + + k.withDefaults() + return k +} + +func (k *Kubeclient) getClientsetForPathOrDirect() ( + *clientset.Clientset, + error, +) { + if k.kubeConfigPath != "" { + return k.getClientsetForPath(k.kubeConfigPath) + } + + return k.getClientset() +} + +// getClientOrCached returns either a new instance +// of kubernetes client or its cached copy +func (k *Kubeclient) getClientOrCached() (*clientset.Clientset, error) { + if k.clientset != nil { + return k.clientset, nil + } + + c, err := k.getClientsetForPathOrDirect() + if err != nil { + return nil, + errors.Wrapf( + err, + "failed to get clientset", + ) + } + + k.clientset = c + return k.clientset, nil +} + +// Create 
creates a zfsbkp bkpume instance +// in kubernetes cluster +func (k *Kubeclient) Create(bkp *apis.ZFSBackup) (*apis.ZFSBackup, error) { + if bkp == nil { + return nil, + errors.New( + "failed to create csibkpume: nil bkp object", + ) + } + cs, err := k.getClientOrCached() + if err != nil { + return nil, errors.Wrapf( + err, + "failed to create zfsbkp bkpume {%s} in namespace {%s}", + bkp.Name, + k.namespace, + ) + } + + return k.create(cs, bkp, k.namespace) +} + +// Get returns zfsbkp bkpume object for given name +func (k *Kubeclient) Get( + name string, + opts metav1.GetOptions, +) (*apis.ZFSBackup, error) { + if name == "" { + return nil, + errors.New( + "failed to get zfsbkp bkpume: missing zfsbkp bkpume name", + ) + } + + cli, err := k.getClientOrCached() + if err != nil { + return nil, errors.Wrapf( + err, + "failed to get zfsbkp bkpume {%s} in namespace {%s}", + name, + k.namespace, + ) + } + + return k.get(cli, name, k.namespace, opts) +} + +// GetRaw returns zfsbkp bkpume instance +// in bytes +func (k *Kubeclient) GetRaw( + name string, + opts metav1.GetOptions, +) ([]byte, error) { + if name == "" { + return nil, errors.New( + "failed to get raw zfsbkp bkpume: missing bkp name", + ) + } + csiv, err := k.Get(name, opts) + if err != nil { + return nil, errors.Wrapf( + err, + "failed to get zfsbkp bkpume {%s} in namespace {%s}", + name, + k.namespace, + ) + } + + return json.Marshal(csiv) +} + +// List returns a list of zfsbkp bkpume +// instances present in kubernetes cluster +func (k *Kubeclient) List(opts metav1.ListOptions) (*apis.ZFSBackupList, error) { + cli, err := k.getClientOrCached() + if err != nil { + return nil, errors.Wrapf( + err, + "failed to list zfsbkp bkpumes in namespace {%s}", + k.namespace, + ) + } + + return k.list(cli, k.namespace, opts) +} + +// Delete deletes the zfsbkp bkpume from +// kubernetes +func (k *Kubeclient) Delete(name string) error { + if name == "" { + return errors.New( + "failed to delete csibkpume: missing bkp name", + ) + } + cli, err := k.getClientOrCached() + if err != nil { + return errors.Wrapf( + err, + "failed to delete csibkpume {%s} in namespace {%s}", + name, + k.namespace, + ) + } + + return k.del(cli, name, k.namespace, &metav1.DeleteOptions{}) +} + +// Update updates this zfsbkp bkpume instance +// against kubernetes cluster +func (k *Kubeclient) Update(bkp *apis.ZFSBackup) (*apis.ZFSBackup, error) { + if bkp == nil { + return nil, + errors.New( + "failed to update csibkpume: nil bkp object", + ) + } + + cs, err := k.getClientOrCached() + if err != nil { + return nil, errors.Wrapf( + err, + "failed to update csibkpume {%s} in namespace {%s}", + bkp.Name, + bkp.Namespace, + ) + } + + return k.update(cs, bkp, k.namespace) +} diff --git a/pkg/builder/restorebuilder/build.go b/pkg/builder/restorebuilder/build.go new file mode 100644 index 000000000..d552d1a5f --- /dev/null +++ b/pkg/builder/restorebuilder/build.go @@ -0,0 +1,169 @@ +/* +Copyright 2020 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package restorebuilder
+
+import (
+ apis "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/zfs/v1"
+ "github.com/openebs/zfs-localpv/pkg/common/errors"
+)
+
+// Builder is the builder object for ZFSRestore
+type Builder struct {
+ rstr *ZFSRestore
+ errs []error
+}
+
+// NewBuilder returns a new instance of Builder
+func NewBuilder() *Builder {
+ return &Builder{
+ rstr: &ZFSRestore{
+ Object: &apis.ZFSRestore{},
+ },
+ }
+}
+
+// BuildFrom returns a new instance of Builder
+// from the provided API instance
+func BuildFrom(rstr *apis.ZFSRestore) *Builder {
+ if rstr == nil {
+ b := NewBuilder()
+ b.errs = append(
+ b.errs,
+ errors.New("failed to build restore object: nil ZFSRestore"),
+ )
+ return b
+ }
+ return &Builder{
+ rstr: &ZFSRestore{
+ Object: rstr,
+ },
+ }
+}
+
+// WithNamespace sets the namespace of ZFSRestore
+func (b *Builder) WithNamespace(namespace string) *Builder {
+ if namespace == "" {
+ b.errs = append(
+ b.errs,
+ errors.New(
+ "failed to build restore object: missing namespace",
+ ),
+ )
+ return b
+ }
+ b.rstr.Object.Namespace = namespace
+ return b
+}
+
+// WithName sets the name of ZFSRestore
+func (b *Builder) WithName(name string) *Builder {
+ if name == "" {
+ b.errs = append(
+ b.errs,
+ errors.New(
+ "failed to build restore object: missing name",
+ ),
+ )
+ return b
+ }
+ b.rstr.Object.Name = name
+ return b
+}
+
+// WithVolume sets the volume name for ZFSRestore
+func (b *Builder) WithVolume(name string) *Builder {
+ if name == "" {
+ b.errs = append(
+ b.errs,
+ errors.New(
+ "failed to build restore object: missing volume name",
+ ),
+ )
+ return b
+ }
+ b.rstr.Object.Spec.VolumeName = name
+ return b
+}
+
+// WithNode sets the node id for ZFSRestore
+func (b *Builder) WithNode(node string) *Builder {
+ if node == "" {
+ b.errs = append(
+ b.errs,
+ errors.New(
+ "failed to build restore object: missing node name",
+ ),
+ )
+ return b
+ }
+ b.rstr.Object.Spec.OwnerNodeID = node
+ return b
+}
+
+// WithStatus sets the status for ZFSRestore
+func (b *Builder) WithStatus(status apis.ZFSRestoreStatus) *Builder {
+ b.rstr.Object.Status = status
+ return b
+}
+
+// WithRemote sets the restore source (remote location) for ZFSRestore
+func (b *Builder) WithRemote(server string) *Builder {
+ if server == "" {
+ b.errs = append(
+ b.errs,
+ errors.New(
+ "failed to build restore object: missing restore source",
+ ),
+ )
+ return b
+ }
+ b.rstr.Object.Spec.RestoreSrc = server
+ return b
+}
+
+// WithLabels merges existing labels if any
+// with the ones that are provided here
+func (b *Builder) WithLabels(labels map[string]string) *Builder {
+ if len(labels) == 0 {
+ return b
+ }
+
+ if b.rstr.Object.Labels == nil {
+ b.rstr.Object.Labels = map[string]string{}
+ }
+
+ for key, value := range labels {
+ b.rstr.Object.Labels[key] = value
+ }
+ return b
+}
+
+// WithFinalizer merges existing finalizers if any
+// with the ones that are provided here
+func (b *Builder) WithFinalizer(finalizer []string) *Builder {
+ b.rstr.Object.Finalizers = append(b.rstr.Object.Finalizers, finalizer...)
+ return b +} + +// Build returns ZFSRestore API object +func (b *Builder) Build() (*apis.ZFSRestore, error) { + if len(b.errs) > 0 { + return nil, errors.Errorf("%+v", b.errs) + } + + return b.rstr.Object, nil +} diff --git a/pkg/builder/restorebuilder/buildlist.go b/pkg/builder/restorebuilder/buildlist.go new file mode 100644 index 000000000..e15384aff --- /dev/null +++ b/pkg/builder/restorebuilder/buildlist.go @@ -0,0 +1,72 @@ +/* +Copyright 2020 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package restorebuilder + +import ( + apis "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/zfs/v1" +) + +// ListBuilder enables building an instance of +// ZFSRestoreList +type ListBuilder struct { + list *apis.ZFSRestoreList + filters predicateList +} + +// NewListBuilder returns a new instance of ListBuilder +func NewListBuilder() *ListBuilder { + return &ListBuilder{ + list: &apis.ZFSRestoreList{}, + } +} + +// ListBuilderFrom returns a new instance of +// ListBuilder from API list instance +func ListBuilderFrom(bkps apis.ZFSRestoreList) *ListBuilder { + b := &ListBuilder{list: &apis.ZFSRestoreList{}} + if len(bkps.Items) == 0 { + return b + } + + b.list.Items = append(b.list.Items, bkps.Items...) + return b +} + +// List returns the list of pod +// instances that was built by this +// builder +func (b *ListBuilder) List() *apis.ZFSRestoreList { + if b.filters == nil || len(b.filters) == 0 { + return b.list + } + + filtered := &apis.ZFSRestoreList{} + for _, rstr := range b.list.Items { + rstr := rstr // pin it + if b.filters.all(From(&rstr)) { + filtered.Items = append(filtered.Items, rstr) + } + } + return filtered +} + +// WithFilter add filters on which the pod +// has to be filtered +func (b *ListBuilder) WithFilter(pred ...Predicate) *ListBuilder { + b.filters = append(b.filters, pred...) + return b +} diff --git a/pkg/builder/restorebuilder/kubernetes.go b/pkg/builder/restorebuilder/kubernetes.go new file mode 100644 index 000000000..379387833 --- /dev/null +++ b/pkg/builder/restorebuilder/kubernetes.go @@ -0,0 +1,427 @@ +// Copyright © 2020 The OpenEBS Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
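+
+// A rough usage sketch combining the Kubeclient defined in this file with the
+// ListBuilder and label predicate from this package (illustrative only; the
+// namespace and the label key/value are placeholders):
+//
+//	list, err := restorebuilder.NewKubeclient().
+//		WithNamespace("openebs").
+//		List(metav1.ListOptions{})
+//	if err == nil {
+//		filtered := restorebuilder.ListBuilderFrom(*list).
+//			WithFilter(restorebuilder.HasLabel("app", "demo")).
+//			List()
+//		_ = filtered
+//	}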
+ +package restorebuilder + +import ( + "encoding/json" + + apis "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/zfs/v1" + client "github.com/openebs/zfs-localpv/pkg/common/kubernetes/client" + clientset "github.com/openebs/zfs-localpv/pkg/generated/clientset/internalclientset" + "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// getClientsetFn is a typed function that +// abstracts fetching of internal clientset +type getClientsetFn func() (clientset *clientset.Clientset, err error) + +// getClientsetFromPathFn is a typed function that +// abstracts fetching of clientset from kubeConfigPath +type getClientsetForPathFn func(kubeConfigPath string) ( + clientset *clientset.Clientset, + err error, +) + +// createFn is a typed function that abstracts +// creating zfsrstr rstrume instance +type createFn func( + cs *clientset.Clientset, + upgradeResultObj *apis.ZFSRestore, + namespace string, +) (*apis.ZFSRestore, error) + +// getFn is a typed function that abstracts +// fetching a zfsrstr rstrume instance +type getFn func( + cli *clientset.Clientset, + name, + namespace string, + opts metav1.GetOptions, +) (*apis.ZFSRestore, error) + +// listFn is a typed function that abstracts +// listing of zfsrstr rstrume instances +type listFn func( + cli *clientset.Clientset, + namespace string, + opts metav1.ListOptions, +) (*apis.ZFSRestoreList, error) + +// delFn is a typed function that abstracts +// deleting a zfsrstr rstrume instance +type delFn func( + cli *clientset.Clientset, + name, + namespace string, + opts *metav1.DeleteOptions, +) error + +// updateFn is a typed function that abstracts +// updating zfsrstr rstrume instance +type updateFn func( + cs *clientset.Clientset, + rstr *apis.ZFSRestore, + namespace string, +) (*apis.ZFSRestore, error) + +// Kubeclient enables kubernetes API operations +// on zfsrstr rstrume instance +type Kubeclient struct { + // clientset refers to zfsrstr rstrume's + // clientset that will be responsible to + // make kubernetes API calls + clientset *clientset.Clientset + + kubeConfigPath string + + // namespace holds the namespace on which + // kubeclient has to operate + namespace string + + // functions useful during mocking + getClientset getClientsetFn + getClientsetForPath getClientsetForPathFn + get getFn + list listFn + del delFn + create createFn + update updateFn +} + +// KubeclientBuildOption defines the abstraction +// to build a kubeclient instance +type KubeclientBuildOption func(*Kubeclient) + +// defaultGetClientset is the default implementation to +// get kubernetes clientset instance +func defaultGetClientset() (clients *clientset.Clientset, err error) { + + config, err := client.GetConfig(client.New()) + if err != nil { + return nil, err + } + + return clientset.NewForConfig(config) + +} + +// defaultGetClientsetForPath is the default implementation to +// get kubernetes clientset instance based on the given +// kubeconfig path +func defaultGetClientsetForPath( + kubeConfigPath string, +) (clients *clientset.Clientset, err error) { + config, err := client.GetConfig( + client.New(client.WithKubeConfigPath(kubeConfigPath))) + if err != nil { + return nil, err + } + + return clientset.NewForConfig(config) +} + +// defaultGet is the default implementation to get +// a zfsrstr rstrume instance in kubernetes cluster +func defaultGet( + cli *clientset.Clientset, + name, namespace string, + opts metav1.GetOptions, +) (*apis.ZFSRestore, error) { + return cli.ZfsV1(). + ZFSRestores(namespace). 
+ Get(name, opts) +} + +// defaultList is the default implementation to list +// zfsrstr rstrume instances in kubernetes cluster +func defaultList( + cli *clientset.Clientset, + namespace string, + opts metav1.ListOptions, +) (*apis.ZFSRestoreList, error) { + return cli.ZfsV1(). + ZFSRestores(namespace). + List(opts) +} + +// defaultCreate is the default implementation to delete +// a zfsrstr rstrume instance in kubernetes cluster +func defaultDel( + cli *clientset.Clientset, + name, namespace string, + opts *metav1.DeleteOptions, +) error { + deletePropagation := metav1.DeletePropagationForeground + opts.PropagationPolicy = &deletePropagation + err := cli.ZfsV1(). + ZFSRestores(namespace). + Delete(name, opts) + return err +} + +// defaultCreate is the default implementation to create +// a zfsrstr rstrume instance in kubernetes cluster +func defaultCreate( + cli *clientset.Clientset, + rstr *apis.ZFSRestore, + namespace string, +) (*apis.ZFSRestore, error) { + return cli.ZfsV1(). + ZFSRestores(namespace). + Create(rstr) +} + +// defaultUpdate is the default implementation to update +// a zfsrstr rstrume instance in kubernetes cluster +func defaultUpdate( + cli *clientset.Clientset, + rstr *apis.ZFSRestore, + namespace string, +) (*apis.ZFSRestore, error) { + return cli.ZfsV1(). + ZFSRestores(namespace). + Update(rstr) +} + +// withDefaults sets the default options +// of kubeclient instance +func (k *Kubeclient) withDefaults() { + if k.getClientset == nil { + k.getClientset = defaultGetClientset + } + if k.getClientsetForPath == nil { + k.getClientsetForPath = defaultGetClientsetForPath + } + if k.get == nil { + k.get = defaultGet + } + if k.list == nil { + k.list = defaultList + } + if k.del == nil { + k.del = defaultDel + } + if k.create == nil { + k.create = defaultCreate + } + if k.update == nil { + k.update = defaultUpdate + } +} + +// WithClientSet sets the kubernetes client against +// the kubeclient instance +func WithClientSet(c *clientset.Clientset) KubeclientBuildOption { + return func(k *Kubeclient) { + k.clientset = c + } +} + +// WithNamespace sets the kubernetes client against +// the provided namespace +func WithNamespace(namespace string) KubeclientBuildOption { + return func(k *Kubeclient) { + k.namespace = namespace + } +} + +// WithNamespace sets the provided namespace +// against this Kubeclient instance +func (k *Kubeclient) WithNamespace(namespace string) *Kubeclient { + k.namespace = namespace + return k +} + +// WithKubeConfigPath sets the kubernetes client +// against the provided path +func WithKubeConfigPath(path string) KubeclientBuildOption { + return func(k *Kubeclient) { + k.kubeConfigPath = path + } +} + +// NewKubeclient returns a new instance of +// kubeclient meant for zfsrstr rstrume operations +func NewKubeclient(opts ...KubeclientBuildOption) *Kubeclient { + k := &Kubeclient{} + for _, o := range opts { + o(k) + } + + k.withDefaults() + return k +} + +func (k *Kubeclient) getClientsetForPathOrDirect() ( + *clientset.Clientset, + error, +) { + if k.kubeConfigPath != "" { + return k.getClientsetForPath(k.kubeConfigPath) + } + + return k.getClientset() +} + +// getClientOrCached returns either a new instance +// of kubernetes client or its cached copy +func (k *Kubeclient) getClientOrCached() (*clientset.Clientset, error) { + if k.clientset != nil { + return k.clientset, nil + } + + c, err := k.getClientsetForPathOrDirect() + if err != nil { + return nil, + errors.Wrapf( + err, + "failed to get clientset", + ) + } + + k.clientset = c + return 
k.clientset, nil +} + +// Create creates a zfsrstr rstrume instance +// in kubernetes cluster +func (k *Kubeclient) Create(rstr *apis.ZFSRestore) (*apis.ZFSRestore, error) { + if rstr == nil { + return nil, + errors.New( + "failed to create csirstrume: nil rstr object", + ) + } + cs, err := k.getClientOrCached() + if err != nil { + return nil, errors.Wrapf( + err, + "failed to create zfsrstr rstrume {%s} in namespace {%s}", + rstr.Name, + k.namespace, + ) + } + + return k.create(cs, rstr, k.namespace) +} + +// Get returns zfsrstr rstrume object for given name +func (k *Kubeclient) Get( + name string, + opts metav1.GetOptions, +) (*apis.ZFSRestore, error) { + if name == "" { + return nil, + errors.New( + "failed to get zfsrstr rstrume: missing zfsrstr rstrume name", + ) + } + + cli, err := k.getClientOrCached() + if err != nil { + return nil, errors.Wrapf( + err, + "failed to get zfsrstr rstrume {%s} in namespace {%s}", + name, + k.namespace, + ) + } + + return k.get(cli, name, k.namespace, opts) +} + +// GetRaw returns zfsrstr rstrume instance +// in bytes +func (k *Kubeclient) GetRaw( + name string, + opts metav1.GetOptions, +) ([]byte, error) { + if name == "" { + return nil, errors.New( + "failed to get raw zfsrstr rstrume: missing rstr name", + ) + } + csiv, err := k.Get(name, opts) + if err != nil { + return nil, errors.Wrapf( + err, + "failed to get zfsrstr rstrume {%s} in namespace {%s}", + name, + k.namespace, + ) + } + + return json.Marshal(csiv) +} + +// List returns a list of zfsrstr rstrume +// instances present in kubernetes cluster +func (k *Kubeclient) List(opts metav1.ListOptions) (*apis.ZFSRestoreList, error) { + cli, err := k.getClientOrCached() + if err != nil { + return nil, errors.Wrapf( + err, + "failed to list zfsrstr rstrumes in namespace {%s}", + k.namespace, + ) + } + + return k.list(cli, k.namespace, opts) +} + +// Delete deletes the zfsrstr rstrume from +// kubernetes +func (k *Kubeclient) Delete(name string) error { + if name == "" { + return errors.New( + "failed to delete csirstrume: missing rstr name", + ) + } + cli, err := k.getClientOrCached() + if err != nil { + return errors.Wrapf( + err, + "failed to delete csirstrume {%s} in namespace {%s}", + name, + k.namespace, + ) + } + + return k.del(cli, name, k.namespace, &metav1.DeleteOptions{}) +} + +// Update updates this zfsrstr rstrume instance +// against kubernetes cluster +func (k *Kubeclient) Update(rstr *apis.ZFSRestore) (*apis.ZFSRestore, error) { + if rstr == nil { + return nil, + errors.New( + "failed to update csirstrume: nil rstr object", + ) + } + + cs, err := k.getClientOrCached() + if err != nil { + return nil, errors.Wrapf( + err, + "failed to update csirstrume {%s} in namespace {%s}", + rstr.Name, + rstr.Namespace, + ) + } + + return k.update(cs, rstr, k.namespace) +} diff --git a/pkg/builder/restorebuilder/restore.go b/pkg/builder/restorebuilder/restore.go new file mode 100644 index 000000000..ce3998150 --- /dev/null +++ b/pkg/builder/restorebuilder/restore.go @@ -0,0 +1,117 @@ +// Copyright © 2020 The OpenEBS Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package restorebuilder + +import ( + apis "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/zfs/v1" +) + +// ZFSRestore is a wrapper over +// ZFSRestore API instance +type ZFSRestore struct { + // ZFSSnap object + Object *apis.ZFSRestore +} + +// From returns a new instance of +// zfsrstr rstrume +func From(rstr *apis.ZFSRestore) *ZFSRestore { + return &ZFSRestore{ + Object: rstr, + } +} + +// Predicate defines an abstraction +// to determine conditional checks +// against the provided pod instance +type Predicate func(*ZFSRestore) bool + +// PredicateList holds a list of predicate +type predicateList []Predicate + +// ZFSRestoreList holds the list +// of zfs restore instances +type ZFSRestoreList struct { + // List contains list of restore + List apis.ZFSRestoreList +} + +// Len returns the number of items present +// in the ZFSRestoreList +func (rstrList *ZFSRestoreList) Len() int { + return len(rstrList.List.Items) +} + +// all returns true if all the predicates +// succeed against the provided ZFSRestore +// instance +func (l predicateList) all(rstr *ZFSRestore) bool { + for _, pred := range l { + if !pred(rstr) { + return false + } + } + return true +} + +// HasLabels returns true if provided labels +// are present in the provided ZFSRestore instance +func HasLabels(keyValuePair map[string]string) Predicate { + return func(rstr *ZFSRestore) bool { + for key, value := range keyValuePair { + if !rstr.HasLabel(key, value) { + return false + } + } + return true + } +} + +// HasLabel returns true if provided label +// is present in the provided ZFSRestore instance +func (rstr *ZFSRestore) HasLabel(key, value string) bool { + val, ok := rstr.Object.GetLabels()[key] + if ok { + return val == value + } + return false +} + +// HasLabel returns true if provided label +// is present in the provided ZFSRestore instance +func HasLabel(key, value string) Predicate { + return func(rstr *ZFSRestore) bool { + return rstr.HasLabel(key, value) + } +} + +// IsNil returns true if the zfsrstr rstrume instance +// is nil +func (rstr *ZFSRestore) IsNil() bool { + return rstr.Object == nil +} + +// IsNil is predicate to filter out nil zfsrstr rstrume +// instances +func IsNil() Predicate { + return func(rstr *ZFSRestore) bool { + return rstr.IsNil() + } +} + +// GetAPIObject returns zfsrstr rstrume's API instance +func (rstr *ZFSRestore) GetAPIObject() *apis.ZFSRestore { + return rstr.Object +} diff --git a/pkg/driver/agent.go b/pkg/driver/agent.go index ed7ecf683..54ea992f7 100644 --- a/pkg/driver/agent.go +++ b/pkg/driver/agent.go @@ -23,6 +23,8 @@ import ( apis "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/zfs/v1" "github.com/openebs/zfs-localpv/pkg/builder/volbuilder" k8sapi "github.com/openebs/zfs-localpv/pkg/client/k8s/v1alpha1" + "github.com/openebs/zfs-localpv/pkg/mgmt/backup" + "github.com/openebs/zfs-localpv/pkg/mgmt/restore" "github.com/openebs/zfs-localpv/pkg/mgmt/snapshot" "github.com/openebs/zfs-localpv/pkg/mgmt/volume" "github.com/openebs/zfs-localpv/pkg/zfs" @@ -65,6 +67,22 @@ func NewNode(d *CSIDriver) csi.NodeServer { } }() + // start the backup controller + go func() { + err := backup.Start(&ControllerMutex, stopCh) + if err != nil { + klog.Fatalf("Failed to start ZFS volume snapshot management controller: %s", err.Error()) + } + }() + + // start the restore controller + go func() { + err := restore.Start(&ControllerMutex, stopCh) + if err != nil { + klog.Fatalf("Failed to 
start ZFS volume snapshot management controller: %s", err.Error()) + } + }() + return &node{ driver: d, } diff --git a/pkg/generated/clientset/internalclientset/typed/zfs/v1/fake/fake_zfs_client.go b/pkg/generated/clientset/internalclientset/typed/zfs/v1/fake/fake_zfs_client.go index 5ad5582f0..21cb4018f 100644 --- a/pkg/generated/clientset/internalclientset/typed/zfs/v1/fake/fake_zfs_client.go +++ b/pkg/generated/clientset/internalclientset/typed/zfs/v1/fake/fake_zfs_client.go @@ -28,6 +28,14 @@ type FakeZfsV1 struct { *testing.Fake } +func (c *FakeZfsV1) ZFSBackups(namespace string) v1.ZFSBackupInterface { + return &FakeZFSBackups{c, namespace} +} + +func (c *FakeZfsV1) ZFSRestores(namespace string) v1.ZFSRestoreInterface { + return &FakeZFSRestores{c, namespace} +} + func (c *FakeZfsV1) ZFSSnapshots(namespace string) v1.ZFSSnapshotInterface { return &FakeZFSSnapshots{c, namespace} } diff --git a/pkg/generated/clientset/internalclientset/typed/zfs/v1/fake/fake_zfsbackup.go b/pkg/generated/clientset/internalclientset/typed/zfs/v1/fake/fake_zfsbackup.go new file mode 100644 index 000000000..a46952efd --- /dev/null +++ b/pkg/generated/clientset/internalclientset/typed/zfs/v1/fake/fake_zfsbackup.go @@ -0,0 +1,140 @@ +/* +Copyright 2019 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + zfsv1 "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/zfs/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeZFSBackups implements ZFSBackupInterface +type FakeZFSBackups struct { + Fake *FakeZfsV1 + ns string +} + +var zfsbackupsResource = schema.GroupVersionResource{Group: "zfs.openebs.io", Version: "v1", Resource: "zfsbackups"} + +var zfsbackupsKind = schema.GroupVersionKind{Group: "zfs.openebs.io", Version: "v1", Kind: "ZFSBackup"} + +// Get takes name of the zFSBackup, and returns the corresponding zFSBackup object, and an error if there is any. +func (c *FakeZFSBackups) Get(name string, options v1.GetOptions) (result *zfsv1.ZFSBackup, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(zfsbackupsResource, c.ns, name), &zfsv1.ZFSBackup{}) + + if obj == nil { + return nil, err + } + return obj.(*zfsv1.ZFSBackup), err +} + +// List takes label and field selectors, and returns the list of ZFSBackups that match those selectors. +func (c *FakeZFSBackups) List(opts v1.ListOptions) (result *zfsv1.ZFSBackupList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(zfsbackupsResource, zfsbackupsKind, c.ns, opts), &zfsv1.ZFSBackupList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &zfsv1.ZFSBackupList{ListMeta: obj.(*zfsv1.ZFSBackupList).ListMeta} + for _, item := range obj.(*zfsv1.ZFSBackupList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested zFSBackups. +func (c *FakeZFSBackups) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(zfsbackupsResource, c.ns, opts)) + +} + +// Create takes the representation of a zFSBackup and creates it. Returns the server's representation of the zFSBackup, and an error, if there is any. +func (c *FakeZFSBackups) Create(zFSBackup *zfsv1.ZFSBackup) (result *zfsv1.ZFSBackup, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(zfsbackupsResource, c.ns, zFSBackup), &zfsv1.ZFSBackup{}) + + if obj == nil { + return nil, err + } + return obj.(*zfsv1.ZFSBackup), err +} + +// Update takes the representation of a zFSBackup and updates it. Returns the server's representation of the zFSBackup, and an error, if there is any. +func (c *FakeZFSBackups) Update(zFSBackup *zfsv1.ZFSBackup) (result *zfsv1.ZFSBackup, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(zfsbackupsResource, c.ns, zFSBackup), &zfsv1.ZFSBackup{}) + + if obj == nil { + return nil, err + } + return obj.(*zfsv1.ZFSBackup), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeZFSBackups) UpdateStatus(zFSBackup *zfsv1.ZFSBackup) (*zfsv1.ZFSBackup, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(zfsbackupsResource, "status", c.ns, zFSBackup), &zfsv1.ZFSBackup{}) + + if obj == nil { + return nil, err + } + return obj.(*zfsv1.ZFSBackup), err +} + +// Delete takes name of the zFSBackup and deletes it. Returns an error if one occurs. +func (c *FakeZFSBackups) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(zfsbackupsResource, c.ns, name), &zfsv1.ZFSBackup{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeZFSBackups) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(zfsbackupsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &zfsv1.ZFSBackupList{}) + return err +} + +// Patch applies the patch and returns the patched zFSBackup. +func (c *FakeZFSBackups) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *zfsv1.ZFSBackup, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(zfsbackupsResource, c.ns, name, pt, data, subresources...), &zfsv1.ZFSBackup{}) + + if obj == nil { + return nil, err + } + return obj.(*zfsv1.ZFSBackup), err +} diff --git a/pkg/generated/clientset/internalclientset/typed/zfs/v1/fake/fake_zfsrestore.go b/pkg/generated/clientset/internalclientset/typed/zfs/v1/fake/fake_zfsrestore.go new file mode 100644 index 000000000..864525d17 --- /dev/null +++ b/pkg/generated/clientset/internalclientset/typed/zfs/v1/fake/fake_zfsrestore.go @@ -0,0 +1,140 @@ +/* +Copyright 2019 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + zfsv1 "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/zfs/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeZFSRestores implements ZFSRestoreInterface +type FakeZFSRestores struct { + Fake *FakeZfsV1 + ns string +} + +var zfsrestoresResource = schema.GroupVersionResource{Group: "zfs.openebs.io", Version: "v1", Resource: "zfsrestores"} + +var zfsrestoresKind = schema.GroupVersionKind{Group: "zfs.openebs.io", Version: "v1", Kind: "ZFSRestore"} + +// Get takes name of the zFSRestore, and returns the corresponding zFSRestore object, and an error if there is any. +func (c *FakeZFSRestores) Get(name string, options v1.GetOptions) (result *zfsv1.ZFSRestore, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(zfsrestoresResource, c.ns, name), &zfsv1.ZFSRestore{}) + + if obj == nil { + return nil, err + } + return obj.(*zfsv1.ZFSRestore), err +} + +// List takes label and field selectors, and returns the list of ZFSRestores that match those selectors. +func (c *FakeZFSRestores) List(opts v1.ListOptions) (result *zfsv1.ZFSRestoreList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(zfsrestoresResource, zfsrestoresKind, c.ns, opts), &zfsv1.ZFSRestoreList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &zfsv1.ZFSRestoreList{ListMeta: obj.(*zfsv1.ZFSRestoreList).ListMeta} + for _, item := range obj.(*zfsv1.ZFSRestoreList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested zFSRestores. +func (c *FakeZFSRestores) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(zfsrestoresResource, c.ns, opts)) + +} + +// Create takes the representation of a zFSRestore and creates it. Returns the server's representation of the zFSRestore, and an error, if there is any. 
+func (c *FakeZFSRestores) Create(zFSRestore *zfsv1.ZFSRestore) (result *zfsv1.ZFSRestore, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(zfsrestoresResource, c.ns, zFSRestore), &zfsv1.ZFSRestore{}) + + if obj == nil { + return nil, err + } + return obj.(*zfsv1.ZFSRestore), err +} + +// Update takes the representation of a zFSRestore and updates it. Returns the server's representation of the zFSRestore, and an error, if there is any. +func (c *FakeZFSRestores) Update(zFSRestore *zfsv1.ZFSRestore) (result *zfsv1.ZFSRestore, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(zfsrestoresResource, c.ns, zFSRestore), &zfsv1.ZFSRestore{}) + + if obj == nil { + return nil, err + } + return obj.(*zfsv1.ZFSRestore), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeZFSRestores) UpdateStatus(zFSRestore *zfsv1.ZFSRestore) (*zfsv1.ZFSRestore, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(zfsrestoresResource, "status", c.ns, zFSRestore), &zfsv1.ZFSRestore{}) + + if obj == nil { + return nil, err + } + return obj.(*zfsv1.ZFSRestore), err +} + +// Delete takes name of the zFSRestore and deletes it. Returns an error if one occurs. +func (c *FakeZFSRestores) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(zfsrestoresResource, c.ns, name), &zfsv1.ZFSRestore{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeZFSRestores) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(zfsrestoresResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &zfsv1.ZFSRestoreList{}) + return err +} + +// Patch applies the patch and returns the patched zFSRestore. +func (c *FakeZFSRestores) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *zfsv1.ZFSRestore, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(zfsrestoresResource, c.ns, name, pt, data, subresources...), &zfsv1.ZFSRestore{}) + + if obj == nil { + return nil, err + } + return obj.(*zfsv1.ZFSRestore), err +} diff --git a/pkg/generated/clientset/internalclientset/typed/zfs/v1/generated_expansion.go b/pkg/generated/clientset/internalclientset/typed/zfs/v1/generated_expansion.go index 3c3bef919..bdfa5a5a4 100644 --- a/pkg/generated/clientset/internalclientset/typed/zfs/v1/generated_expansion.go +++ b/pkg/generated/clientset/internalclientset/typed/zfs/v1/generated_expansion.go @@ -18,6 +18,10 @@ limitations under the License. 
package v1 +type ZFSBackupExpansion interface{} + +type ZFSRestoreExpansion interface{} + type ZFSSnapshotExpansion interface{} type ZFSVolumeExpansion interface{} diff --git a/pkg/generated/clientset/internalclientset/typed/zfs/v1/zfs_client.go b/pkg/generated/clientset/internalclientset/typed/zfs/v1/zfs_client.go index 5be6062b8..e2f4daf94 100644 --- a/pkg/generated/clientset/internalclientset/typed/zfs/v1/zfs_client.go +++ b/pkg/generated/clientset/internalclientset/typed/zfs/v1/zfs_client.go @@ -26,6 +26,8 @@ import ( type ZfsV1Interface interface { RESTClient() rest.Interface + ZFSBackupsGetter + ZFSRestoresGetter ZFSSnapshotsGetter ZFSVolumesGetter } @@ -35,6 +37,14 @@ type ZfsV1Client struct { restClient rest.Interface } +func (c *ZfsV1Client) ZFSBackups(namespace string) ZFSBackupInterface { + return newZFSBackups(c, namespace) +} + +func (c *ZfsV1Client) ZFSRestores(namespace string) ZFSRestoreInterface { + return newZFSRestores(c, namespace) +} + func (c *ZfsV1Client) ZFSSnapshots(namespace string) ZFSSnapshotInterface { return newZFSSnapshots(c, namespace) } diff --git a/pkg/generated/clientset/internalclientset/typed/zfs/v1/zfsbackup.go b/pkg/generated/clientset/internalclientset/typed/zfs/v1/zfsbackup.go new file mode 100644 index 000000000..6187753bb --- /dev/null +++ b/pkg/generated/clientset/internalclientset/typed/zfs/v1/zfsbackup.go @@ -0,0 +1,191 @@ +/* +Copyright 2019 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/zfs/v1" + scheme "github.com/openebs/zfs-localpv/pkg/generated/clientset/internalclientset/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ZFSBackupsGetter has a method to return a ZFSBackupInterface. +// A group's client should implement this interface. +type ZFSBackupsGetter interface { + ZFSBackups(namespace string) ZFSBackupInterface +} + +// ZFSBackupInterface has methods to work with ZFSBackup resources. 
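+//
+// As an illustration, a caller holding the generated clientset (client below)
+// could create a backup and then poll it roughly as follows; the namespace
+// "openebs" is only an example value:
+//
+//	created, err := client.ZfsV1().ZFSBackups("openebs").Create(bkp)
+//	if err == nil {
+//		created, err = client.ZfsV1().ZFSBackups("openebs").Get(created.Name, metav1.GetOptions{})
+//	}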
+type ZFSBackupInterface interface { + Create(*v1.ZFSBackup) (*v1.ZFSBackup, error) + Update(*v1.ZFSBackup) (*v1.ZFSBackup, error) + UpdateStatus(*v1.ZFSBackup) (*v1.ZFSBackup, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.ZFSBackup, error) + List(opts metav1.ListOptions) (*v1.ZFSBackupList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ZFSBackup, err error) + ZFSBackupExpansion +} + +// zFSBackups implements ZFSBackupInterface +type zFSBackups struct { + client rest.Interface + ns string +} + +// newZFSBackups returns a ZFSBackups +func newZFSBackups(c *ZfsV1Client, namespace string) *zFSBackups { + return &zFSBackups{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the zFSBackup, and returns the corresponding zFSBackup object, and an error if there is any. +func (c *zFSBackups) Get(name string, options metav1.GetOptions) (result *v1.ZFSBackup, err error) { + result = &v1.ZFSBackup{} + err = c.client.Get(). + Namespace(c.ns). + Resource("zfsbackups"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ZFSBackups that match those selectors. +func (c *zFSBackups) List(opts metav1.ListOptions) (result *v1.ZFSBackupList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.ZFSBackupList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("zfsbackups"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested zFSBackups. +func (c *zFSBackups) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("zfsbackups"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a zFSBackup and creates it. Returns the server's representation of the zFSBackup, and an error, if there is any. +func (c *zFSBackups) Create(zFSBackup *v1.ZFSBackup) (result *v1.ZFSBackup, err error) { + result = &v1.ZFSBackup{} + err = c.client.Post(). + Namespace(c.ns). + Resource("zfsbackups"). + Body(zFSBackup). + Do(). + Into(result) + return +} + +// Update takes the representation of a zFSBackup and updates it. Returns the server's representation of the zFSBackup, and an error, if there is any. +func (c *zFSBackups) Update(zFSBackup *v1.ZFSBackup) (result *v1.ZFSBackup, err error) { + result = &v1.ZFSBackup{} + err = c.client.Put(). + Namespace(c.ns). + Resource("zfsbackups"). + Name(zFSBackup.Name). + Body(zFSBackup). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *zFSBackups) UpdateStatus(zFSBackup *v1.ZFSBackup) (result *v1.ZFSBackup, err error) { + result = &v1.ZFSBackup{} + err = c.client.Put(). + Namespace(c.ns). + Resource("zfsbackups"). + Name(zFSBackup.Name). 
+ SubResource("status"). + Body(zFSBackup). + Do(). + Into(result) + return +} + +// Delete takes name of the zFSBackup and deletes it. Returns an error if one occurs. +func (c *zFSBackups) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("zfsbackups"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *zFSBackups) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("zfsbackups"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched zFSBackup. +func (c *zFSBackups) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ZFSBackup, err error) { + result = &v1.ZFSBackup{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("zfsbackups"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/pkg/generated/clientset/internalclientset/typed/zfs/v1/zfsrestore.go b/pkg/generated/clientset/internalclientset/typed/zfs/v1/zfsrestore.go new file mode 100644 index 000000000..1df659367 --- /dev/null +++ b/pkg/generated/clientset/internalclientset/typed/zfs/v1/zfsrestore.go @@ -0,0 +1,191 @@ +/* +Copyright 2019 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/zfs/v1" + scheme "github.com/openebs/zfs-localpv/pkg/generated/clientset/internalclientset/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ZFSRestoresGetter has a method to return a ZFSRestoreInterface. +// A group's client should implement this interface. +type ZFSRestoresGetter interface { + ZFSRestores(namespace string) ZFSRestoreInterface +} + +// ZFSRestoreInterface has methods to work with ZFSRestore resources. 
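+//
+// As an illustration, a controller could publish progress by updating the
+// status sub-resource; rstr, status and client come from the caller and are
+// not prescribed by this generated client:
+//
+//	rstr.Status = status // some apis.ZFSRestoreStatus value
+//	_, err := client.ZfsV1().ZFSRestores(rstr.Namespace).UpdateStatus(rstr)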
+type ZFSRestoreInterface interface { + Create(*v1.ZFSRestore) (*v1.ZFSRestore, error) + Update(*v1.ZFSRestore) (*v1.ZFSRestore, error) + UpdateStatus(*v1.ZFSRestore) (*v1.ZFSRestore, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.ZFSRestore, error) + List(opts metav1.ListOptions) (*v1.ZFSRestoreList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ZFSRestore, err error) + ZFSRestoreExpansion +} + +// zFSRestores implements ZFSRestoreInterface +type zFSRestores struct { + client rest.Interface + ns string +} + +// newZFSRestores returns a ZFSRestores +func newZFSRestores(c *ZfsV1Client, namespace string) *zFSRestores { + return &zFSRestores{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the zFSRestore, and returns the corresponding zFSRestore object, and an error if there is any. +func (c *zFSRestores) Get(name string, options metav1.GetOptions) (result *v1.ZFSRestore, err error) { + result = &v1.ZFSRestore{} + err = c.client.Get(). + Namespace(c.ns). + Resource("zfsrestores"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ZFSRestores that match those selectors. +func (c *zFSRestores) List(opts metav1.ListOptions) (result *v1.ZFSRestoreList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.ZFSRestoreList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("zfsrestores"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested zFSRestores. +func (c *zFSRestores) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("zfsrestores"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a zFSRestore and creates it. Returns the server's representation of the zFSRestore, and an error, if there is any. +func (c *zFSRestores) Create(zFSRestore *v1.ZFSRestore) (result *v1.ZFSRestore, err error) { + result = &v1.ZFSRestore{} + err = c.client.Post(). + Namespace(c.ns). + Resource("zfsrestores"). + Body(zFSRestore). + Do(). + Into(result) + return +} + +// Update takes the representation of a zFSRestore and updates it. Returns the server's representation of the zFSRestore, and an error, if there is any. +func (c *zFSRestores) Update(zFSRestore *v1.ZFSRestore) (result *v1.ZFSRestore, err error) { + result = &v1.ZFSRestore{} + err = c.client.Put(). + Namespace(c.ns). + Resource("zfsrestores"). + Name(zFSRestore.Name). + Body(zFSRestore). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *zFSRestores) UpdateStatus(zFSRestore *v1.ZFSRestore) (result *v1.ZFSRestore, err error) { + result = &v1.ZFSRestore{} + err = c.client.Put(). + Namespace(c.ns). 
+ Resource("zfsrestores"). + Name(zFSRestore.Name). + SubResource("status"). + Body(zFSRestore). + Do(). + Into(result) + return +} + +// Delete takes name of the zFSRestore and deletes it. Returns an error if one occurs. +func (c *zFSRestores) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("zfsrestores"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *zFSRestores) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("zfsrestores"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched zFSRestore. +func (c *zFSRestores) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ZFSRestore, err error) { + result = &v1.ZFSRestore{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("zfsrestores"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/pkg/generated/informer/externalversions/generic.go b/pkg/generated/informer/externalversions/generic.go index dae80baf2..5798a31fa 100644 --- a/pkg/generated/informer/externalversions/generic.go +++ b/pkg/generated/informer/externalversions/generic.go @@ -53,6 +53,10 @@ func (f *genericInformer) Lister() cache.GenericLister { func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { switch resource { // Group=zfs.openebs.io, Version=v1 + case v1.SchemeGroupVersion.WithResource("zfsbackups"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Zfs().V1().ZFSBackups().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("zfsrestores"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Zfs().V1().ZFSRestores().Informer()}, nil case v1.SchemeGroupVersion.WithResource("zfssnapshots"): return &genericInformer{resource: resource.GroupResource(), informer: f.Zfs().V1().ZFSSnapshots().Informer()}, nil case v1.SchemeGroupVersion.WithResource("zfsvolumes"): diff --git a/pkg/generated/informer/externalversions/zfs/v1/interface.go b/pkg/generated/informer/externalversions/zfs/v1/interface.go index 8327982a6..e466bae8d 100644 --- a/pkg/generated/informer/externalversions/zfs/v1/interface.go +++ b/pkg/generated/informer/externalversions/zfs/v1/interface.go @@ -24,6 +24,10 @@ import ( // Interface provides access to all the informers in this group version. type Interface interface { + // ZFSBackups returns a ZFSBackupInformer. + ZFSBackups() ZFSBackupInformer + // ZFSRestores returns a ZFSRestoreInformer. + ZFSRestores() ZFSRestoreInformer // ZFSSnapshots returns a ZFSSnapshotInformer. ZFSSnapshots() ZFSSnapshotInformer // ZFSVolumes returns a ZFSVolumeInformer. @@ -41,6 +45,16 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } +// ZFSBackups returns a ZFSBackupInformer. 
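+// As an example, a controller could obtain the shared informer through the
+// informer factory and register its event handlers (factory, onAdd and
+// onUpdate are caller-side placeholders):
+//
+//	informer := factory.Zfs().V1().ZFSBackups().Informer()
+//	informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
+//		AddFunc:    onAdd,
+//		UpdateFunc: onUpdate,
+//	})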
+func (v *version) ZFSBackups() ZFSBackupInformer { + return &zFSBackupInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// ZFSRestores returns a ZFSRestoreInformer. +func (v *version) ZFSRestores() ZFSRestoreInformer { + return &zFSRestoreInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + // ZFSSnapshots returns a ZFSSnapshotInformer. func (v *version) ZFSSnapshots() ZFSSnapshotInformer { return &zFSSnapshotInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} diff --git a/pkg/generated/informer/externalversions/zfs/v1/zfsbackup.go b/pkg/generated/informer/externalversions/zfs/v1/zfsbackup.go new file mode 100644 index 000000000..b4b1e7f97 --- /dev/null +++ b/pkg/generated/informer/externalversions/zfs/v1/zfsbackup.go @@ -0,0 +1,89 @@ +/* +Copyright 2019 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + zfsv1 "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/zfs/v1" + internalclientset "github.com/openebs/zfs-localpv/pkg/generated/clientset/internalclientset" + internalinterfaces "github.com/openebs/zfs-localpv/pkg/generated/informer/externalversions/internalinterfaces" + v1 "github.com/openebs/zfs-localpv/pkg/generated/lister/zfs/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ZFSBackupInformer provides access to a shared informer and lister for +// ZFSBackups. +type ZFSBackupInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ZFSBackupLister +} + +type zFSBackupInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewZFSBackupInformer constructs a new informer for ZFSBackup type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewZFSBackupInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredZFSBackupInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredZFSBackupInformer constructs a new informer for ZFSBackup type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredZFSBackupInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ZfsV1().ZFSBackups(namespace).List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ZfsV1().ZFSBackups(namespace).Watch(options) + }, + }, + &zfsv1.ZFSBackup{}, + resyncPeriod, + indexers, + ) +} + +func (f *zFSBackupInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredZFSBackupInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *zFSBackupInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&zfsv1.ZFSBackup{}, f.defaultInformer) +} + +func (f *zFSBackupInformer) Lister() v1.ZFSBackupLister { + return v1.NewZFSBackupLister(f.Informer().GetIndexer()) +} diff --git a/pkg/generated/informer/externalversions/zfs/v1/zfsrestore.go b/pkg/generated/informer/externalversions/zfs/v1/zfsrestore.go new file mode 100644 index 000000000..6f8dacf5c --- /dev/null +++ b/pkg/generated/informer/externalversions/zfs/v1/zfsrestore.go @@ -0,0 +1,89 @@ +/* +Copyright 2019 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + zfsv1 "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/zfs/v1" + internalclientset "github.com/openebs/zfs-localpv/pkg/generated/clientset/internalclientset" + internalinterfaces "github.com/openebs/zfs-localpv/pkg/generated/informer/externalversions/internalinterfaces" + v1 "github.com/openebs/zfs-localpv/pkg/generated/lister/zfs/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ZFSRestoreInformer provides access to a shared informer and lister for +// ZFSRestores. +type ZFSRestoreInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ZFSRestoreLister +} + +type zFSRestoreInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewZFSRestoreInformer constructs a new informer for ZFSRestore type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewZFSRestoreInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredZFSRestoreInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredZFSRestoreInformer constructs a new informer for ZFSRestore type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredZFSRestoreInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ZfsV1().ZFSRestores(namespace).List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ZfsV1().ZFSRestores(namespace).Watch(options) + }, + }, + &zfsv1.ZFSRestore{}, + resyncPeriod, + indexers, + ) +} + +func (f *zFSRestoreInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredZFSRestoreInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *zFSRestoreInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&zfsv1.ZFSRestore{}, f.defaultInformer) +} + +func (f *zFSRestoreInformer) Lister() v1.ZFSRestoreLister { + return v1.NewZFSRestoreLister(f.Informer().GetIndexer()) +} diff --git a/pkg/generated/lister/zfs/v1/expansion_generated.go b/pkg/generated/lister/zfs/v1/expansion_generated.go index f8951ab9d..4842b2a5e 100644 --- a/pkg/generated/lister/zfs/v1/expansion_generated.go +++ b/pkg/generated/lister/zfs/v1/expansion_generated.go @@ -18,6 +18,22 @@ limitations under the License. package v1 +// ZFSBackupListerExpansion allows custom methods to be added to +// ZFSBackupLister. +type ZFSBackupListerExpansion interface{} + +// ZFSBackupNamespaceListerExpansion allows custom methods to be added to +// ZFSBackupNamespaceLister. +type ZFSBackupNamespaceListerExpansion interface{} + +// ZFSRestoreListerExpansion allows custom methods to be added to +// ZFSRestoreLister. +type ZFSRestoreListerExpansion interface{} + +// ZFSRestoreNamespaceListerExpansion allows custom methods to be added to +// ZFSRestoreNamespaceLister. +type ZFSRestoreNamespaceListerExpansion interface{} + // ZFSSnapshotListerExpansion allows custom methods to be added to // ZFSSnapshotLister. type ZFSSnapshotListerExpansion interface{} diff --git a/pkg/generated/lister/zfs/v1/zfsbackup.go b/pkg/generated/lister/zfs/v1/zfsbackup.go new file mode 100644 index 000000000..38f114ccb --- /dev/null +++ b/pkg/generated/lister/zfs/v1/zfsbackup.go @@ -0,0 +1,94 @@ +/* +Copyright 2019 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/zfs/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ZFSBackupLister helps list ZFSBackups. +type ZFSBackupLister interface { + // List lists all ZFSBackups in the indexer. + List(selector labels.Selector) (ret []*v1.ZFSBackup, err error) + // ZFSBackups returns an object that can list and get ZFSBackups. + ZFSBackups(namespace string) ZFSBackupNamespaceLister + ZFSBackupListerExpansion +} + +// zFSBackupLister implements the ZFSBackupLister interface. +type zFSBackupLister struct { + indexer cache.Indexer +} + +// NewZFSBackupLister returns a new ZFSBackupLister. +func NewZFSBackupLister(indexer cache.Indexer) ZFSBackupLister { + return &zFSBackupLister{indexer: indexer} +} + +// List lists all ZFSBackups in the indexer. +func (s *zFSBackupLister) List(selector labels.Selector) (ret []*v1.ZFSBackup, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ZFSBackup)) + }) + return ret, err +} + +// ZFSBackups returns an object that can list and get ZFSBackups. +func (s *zFSBackupLister) ZFSBackups(namespace string) ZFSBackupNamespaceLister { + return zFSBackupNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ZFSBackupNamespaceLister helps list and get ZFSBackups. +type ZFSBackupNamespaceLister interface { + // List lists all ZFSBackups in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.ZFSBackup, err error) + // Get retrieves the ZFSBackup from the indexer for a given namespace and name. + Get(name string) (*v1.ZFSBackup, error) + ZFSBackupNamespaceListerExpansion +} + +// zFSBackupNamespaceLister implements the ZFSBackupNamespaceLister +// interface. +type zFSBackupNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ZFSBackups in the indexer for a given namespace. +func (s zFSBackupNamespaceLister) List(selector labels.Selector) (ret []*v1.ZFSBackup, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ZFSBackup)) + }) + return ret, err +} + +// Get retrieves the ZFSBackup from the indexer for a given namespace and name. +func (s zFSBackupNamespaceLister) Get(name string) (*v1.ZFSBackup, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("zfsbackup"), name) + } + return obj.(*v1.ZFSBackup), nil +} diff --git a/pkg/generated/lister/zfs/v1/zfsrestore.go b/pkg/generated/lister/zfs/v1/zfsrestore.go new file mode 100644 index 000000000..ecf6fc0f9 --- /dev/null +++ b/pkg/generated/lister/zfs/v1/zfsrestore.go @@ -0,0 +1,94 @@ +/* +Copyright 2019 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/zfs/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ZFSRestoreLister helps list ZFSRestores. +type ZFSRestoreLister interface { + // List lists all ZFSRestores in the indexer. + List(selector labels.Selector) (ret []*v1.ZFSRestore, err error) + // ZFSRestores returns an object that can list and get ZFSRestores. + ZFSRestores(namespace string) ZFSRestoreNamespaceLister + ZFSRestoreListerExpansion +} + +// zFSRestoreLister implements the ZFSRestoreLister interface. +type zFSRestoreLister struct { + indexer cache.Indexer +} + +// NewZFSRestoreLister returns a new ZFSRestoreLister. +func NewZFSRestoreLister(indexer cache.Indexer) ZFSRestoreLister { + return &zFSRestoreLister{indexer: indexer} +} + +// List lists all ZFSRestores in the indexer. +func (s *zFSRestoreLister) List(selector labels.Selector) (ret []*v1.ZFSRestore, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ZFSRestore)) + }) + return ret, err +} + +// ZFSRestores returns an object that can list and get ZFSRestores. +func (s *zFSRestoreLister) ZFSRestores(namespace string) ZFSRestoreNamespaceLister { + return zFSRestoreNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ZFSRestoreNamespaceLister helps list and get ZFSRestores. +type ZFSRestoreNamespaceLister interface { + // List lists all ZFSRestores in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.ZFSRestore, err error) + // Get retrieves the ZFSRestore from the indexer for a given namespace and name. + Get(name string) (*v1.ZFSRestore, error) + ZFSRestoreNamespaceListerExpansion +} + +// zFSRestoreNamespaceLister implements the ZFSRestoreNamespaceLister +// interface. +type zFSRestoreNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ZFSRestores in the indexer for a given namespace. +func (s zFSRestoreNamespaceLister) List(selector labels.Selector) (ret []*v1.ZFSRestore, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ZFSRestore)) + }) + return ret, err +} + +// Get retrieves the ZFSRestore from the indexer for a given namespace and name. +func (s zFSRestoreNamespaceLister) Get(name string) (*v1.ZFSRestore, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("zfsrestore"), name) + } + return obj.(*v1.ZFSRestore), nil +} diff --git a/pkg/mgmt/backup/backup.go b/pkg/mgmt/backup/backup.go new file mode 100644 index 000000000..e94c5a6e4 --- /dev/null +++ b/pkg/mgmt/backup/backup.go @@ -0,0 +1,250 @@ +/* +Copyright 2020 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package backup + +import ( + "fmt" + "k8s.io/klog" + "time" + + apis "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/zfs/v1" + zfs "github.com/openebs/zfs-localpv/pkg/zfs" + k8serror "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/cache" +) + +// isDeletionCandidate checks if a zfs backup is a deletion candidate. +func (c *BkpController) isDeletionCandidate(bkp *apis.ZFSBackup) bool { + return bkp.ObjectMeta.DeletionTimestamp != nil +} + +// syncHandler compares the actual state with the desired, and attempts to +// converge the two. +func (c *BkpController) syncHandler(key string) error { + // Convert the namespace/name string into a distinct namespace and name + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + runtime.HandleError(fmt.Errorf("invalid resource key: %s", key)) + return nil + } + + // Get the bkp resource with this namespace/name + bkp, err := c.bkpLister.ZFSBackups(namespace).Get(name) + if k8serror.IsNotFound(err) { + runtime.HandleError(fmt.Errorf("zfs backup '%s' has been deleted", key)) + return nil + } + if err != nil { + return err + } + bkpCopy := bkp.DeepCopy() + err = c.syncBkp(bkpCopy) + return err +} + +// enqueueBkp takes a ZFSBackup resource and converts it into a namespace/name +// string which is then put onto the work queue. This method should *not* be +// passed resources of any type other than ZFSBackup. +func (c *BkpController) enqueueBkp(obj interface{}) { + var key string + var err error + if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil { + runtime.HandleError(err) + return + } + c.workqueue.Add(key) +} + +// synBkp is the function which tries to converge to a desired state for the +// ZFSBackup +func (c *BkpController) syncBkp(bkp *apis.ZFSBackup) error { + var err error = nil + // ZFSBackup should be deleted. Check if deletion timestamp is set + if c.isDeletionCandidate(bkp) { + // reconcile for the Destroy error + err = zfs.DestoryBackup(bkp) + if err == nil { + err = zfs.RemoveBkpFinalizer(bkp) + } + } else { + // if status is init then it means we are creating the zfs backup. 
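+		// A ZFSBackup with Status Init is picked up here: CreateBackup snapshots
+		// the volume and streams it to the remote location, after which the
+		// Status is updated to Done on success or Failed on error.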
+ if bkp.Status == apis.BKPZFSStatusInit { + err = zfs.CreateBackup(bkp) + if err == nil { + klog.Infof("backup %s done %s@%s prevsnap [%s]", bkp.Name, bkp.Spec.VolumeName, bkp.Name, bkp.Spec.PrevSnapName) + err = zfs.UpdateBkpInfo(bkp, apis.BKPZFSStatusDone) + } else { + klog.Errorf("backup %s failed %s@%s", bkp.Name, bkp.Spec.VolumeName, bkp.Name) + err = zfs.UpdateBkpInfo(bkp, apis.BKPZFSStatusFailed) + } + } + } + return err +} + +// addBkp is the add event handler for ZFSBackup +func (c *BkpController) addBkp(obj interface{}) { + bkp, ok := obj.(*apis.ZFSBackup) + if !ok { + runtime.HandleError(fmt.Errorf("Couldn't get bkp object %#v", obj)) + return + } + + if zfs.NodeID != bkp.Spec.OwnerNodeID { + return + } + klog.Infof("Got add event for Bkp %s snap %s@%s", bkp.Name, bkp.Spec.VolumeName, bkp.Name) + c.enqueueBkp(bkp) +} + +// updateBkp is the update event handler for ZFSBackup +func (c *BkpController) updateBkp(oldObj, newObj interface{}) { + + newBkp, ok := newObj.(*apis.ZFSBackup) + if !ok { + runtime.HandleError(fmt.Errorf("Couldn't get bkp object %#v", newBkp)) + return + } + + if zfs.NodeID != newBkp.Spec.OwnerNodeID { + return + } + + if c.isDeletionCandidate(newBkp) { + klog.Infof("Got update event for Bkp %s snap %s@%s", newBkp.Name, newBkp.Spec.VolumeName, newBkp.Name) + c.enqueueBkp(newBkp) + } +} + +// deleteBkp is the delete event handler for ZFSBackup +func (c *BkpController) deleteBkp(obj interface{}) { + bkp, ok := obj.(*apis.ZFSBackup) + if !ok { + tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + runtime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", obj)) + return + } + bkp, ok = tombstone.Obj.(*apis.ZFSBackup) + if !ok { + runtime.HandleError(fmt.Errorf("Tombstone contained object that is not a zfsbackup %#v", obj)) + return + } + } + + if zfs.NodeID != bkp.Spec.OwnerNodeID { + return + } + + klog.Infof("Got delete event for Bkp %s snap %s@%s", bkp.Name, bkp.Spec.VolumeName, bkp.Name) + c.enqueueBkp(bkp) +} + +// Run will set up the event handlers for types we are interested in, as well +// as syncing informer caches and starting workers. It will block until stopCh +// is closed, at which point it will shutdown the workqueue and wait for +// workers to finish processing their current work items. +func (c *BkpController) Run(threadiness int, stopCh <-chan struct{}) error { + defer runtime.HandleCrash() + defer c.workqueue.ShutDown() + + // Start the informer factories to begin populating the informer caches + klog.Info("Starting Bkp controller") + + // Wait for the k8s caches to be synced before starting workers + klog.Info("Waiting for informer caches to sync") + if ok := cache.WaitForCacheSync(stopCh, c.bkpSynced); !ok { + return fmt.Errorf("failed to wait for caches to sync") + } + klog.Info("Starting Bkp workers") + // Launch worker to process Bkp resources + // Threadiness will decide the number of workers you want to launch to process work items from queue + for i := 0; i < threadiness; i++ { + go wait.Until(c.runWorker, time.Second, stopCh) + } + + klog.Info("Started Bkp workers") + <-stopCh + klog.Info("Shutting down Bkp workers") + + return nil +} + +// runWorker is a long-running function that will continually call the +// processNextWorkItem function in order to read and process a message on the +// workqueue. +func (c *BkpController) runWorker() { + for c.processNextWorkItem() { + } +} + +// processNextWorkItem will read a single work item off the workqueue and +// attempt to process it, by calling the syncHandler. 
+func (c *BkpController) processNextWorkItem() bool { + obj, shutdown := c.workqueue.Get() + + if shutdown { + return false + } + + // We wrap this block in a func so we can defer c.workqueue.Done. + err := func(obj interface{}) error { + // We call Done here so the workqueue knows we have finished + // processing this item. We also must remember to call Forget if we + // do not want this work item being re-queued. For example, we do + // not call Forget if a transient error occurs, instead the item is + // put back on the workqueue and attempted again after a back-off + // period. + defer c.workqueue.Done(obj) + var key string + var ok bool + // We expect strings to come off the workqueue. These are of the + // form namespace/name. We do this as the delayed nature of the + // workqueue means the items in the informer cache may actually be + // more up to date that when the item was initially put onto the + // workqueue. + if key, ok = obj.(string); !ok { + // As the item in the workqueue is actually invalid, we call + // Forget here else we'd go into a loop of attempting to + // process a work item that is invalid. + c.workqueue.Forget(obj) + runtime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj)) + return nil + } + // Run the syncHandler, passing it the namespace/name string of the + // Bkp resource to be synced. + if err := c.syncHandler(key); err != nil { + // Put the item back on the workqueue to handle any transient errors. + c.workqueue.AddRateLimited(key) + return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error()) + } + // Finally, if no error occurs we Forget this item so it does not + // get queued again until another change happens. + c.workqueue.Forget(obj) + klog.Infof("Successfully synced '%s'", key) + return nil + }(obj) + + if err != nil { + runtime.HandleError(err) + return true + } + + return true +} diff --git a/pkg/mgmt/backup/builder.go b/pkg/mgmt/backup/builder.go new file mode 100644 index 000000000..e65c371b6 --- /dev/null +++ b/pkg/mgmt/backup/builder.go @@ -0,0 +1,136 @@ +/* +Copyright 2020 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package backup + +import ( + "k8s.io/klog" + + clientset "github.com/openebs/zfs-localpv/pkg/generated/clientset/internalclientset" + openebsScheme "github.com/openebs/zfs-localpv/pkg/generated/clientset/internalclientset/scheme" + informers "github.com/openebs/zfs-localpv/pkg/generated/informer/externalversions" + listers "github.com/openebs/zfs-localpv/pkg/generated/lister/zfs/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" +) + +const controllerAgentName = "zfsbackup-controller" + +// BkpController is the controller implementation for Bkp resources +type BkpController struct { + // kubeclientset is a standard kubernetes clientset + kubeclientset kubernetes.Interface + + // clientset is a openebs custom resource package generated for custom API group. + clientset clientset.Interface + + bkpLister listers.ZFSBackupLister + + // backupSynced is used for caches sync to get populated + bkpSynced cache.InformerSynced + + // workqueue is a rate limited work queue. This is used to queue work to be + // processed instead of performing it as soon as a change happens. This + // means we can ensure we only process a fixed amount of resources at a + // time, and makes it easy to ensure we are never processing the same item + // simultaneously in two different workers. + workqueue workqueue.RateLimitingInterface + + // recorder is an event recorder for recording Event resources to the + // Kubernetes API. + recorder record.EventRecorder +} + +// BkpControllerBuilder is the builder object for controller. +type BkpControllerBuilder struct { + BkpController *BkpController +} + +// NewBkpControllerBuilder returns an empty instance of controller builder. +func NewBkpControllerBuilder() *BkpControllerBuilder { + return &BkpControllerBuilder{ + BkpController: &BkpController{}, + } +} + +// withKubeClient fills kube client to controller object. +func (cb *BkpControllerBuilder) withKubeClient(ks kubernetes.Interface) *BkpControllerBuilder { + cb.BkpController.kubeclientset = ks + return cb +} + +// withOpenEBSClient fills openebs client to controller object. +func (cb *BkpControllerBuilder) withOpenEBSClient(cs clientset.Interface) *BkpControllerBuilder { + cb.BkpController.clientset = cs + return cb +} + +// withBkpLister fills bkp lister to controller object. +func (cb *BkpControllerBuilder) withBkpLister(sl informers.SharedInformerFactory) *BkpControllerBuilder { + bkpInformer := sl.Zfs().V1().ZFSBackups() + cb.BkpController.bkpLister = bkpInformer.Lister() + return cb +} + +// withBkpSynced adds object sync information in cache to controller object. +func (cb *BkpControllerBuilder) withBkpSynced(sl informers.SharedInformerFactory) *BkpControllerBuilder { + bkpInformer := sl.Zfs().V1().ZFSBackups() + cb.BkpController.bkpSynced = bkpInformer.Informer().HasSynced + return cb +} + +// withWorkqueue adds workqueue to controller object. +func (cb *BkpControllerBuilder) withWorkqueueRateLimiting() *BkpControllerBuilder { + cb.BkpController.workqueue = workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "Bkp") + return cb +} + +// withRecorder adds recorder to controller object. 
+func (cb *BkpControllerBuilder) withRecorder(ks kubernetes.Interface) *BkpControllerBuilder { + klog.Infof("Creating event broadcaster") + eventBroadcaster := record.NewBroadcaster() + eventBroadcaster.StartLogging(klog.Infof) + eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: ks.CoreV1().Events("")}) + recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName}) + cb.BkpController.recorder = recorder + return cb +} + +// withEventHandler adds event handlers controller object. +func (cb *BkpControllerBuilder) withEventHandler(cvcInformerFactory informers.SharedInformerFactory) *BkpControllerBuilder { + cvcInformer := cvcInformerFactory.Zfs().V1().ZFSBackups() + // Set up an event handler for when Bkp resources change + cvcInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: cb.BkpController.addBkp, + UpdateFunc: cb.BkpController.updateBkp, + DeleteFunc: cb.BkpController.deleteBkp, + }) + return cb +} + +// Build returns a controller instance. +func (cb *BkpControllerBuilder) Build() (*BkpController, error) { + err := openebsScheme.AddToScheme(scheme.Scheme) + if err != nil { + return nil, err + } + return cb.BkpController, nil +} diff --git a/pkg/mgmt/backup/start.go b/pkg/mgmt/backup/start.go new file mode 100644 index 000000000..114281e20 --- /dev/null +++ b/pkg/mgmt/backup/start.go @@ -0,0 +1,107 @@ +/* +Copyright 2020 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package backup + +import ( + "sync" + + "github.com/pkg/errors" + "k8s.io/klog" + + "time" + + clientset "github.com/openebs/zfs-localpv/pkg/generated/clientset/internalclientset" + informers "github.com/openebs/zfs-localpv/pkg/generated/informer/externalversions" + kubeinformers "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" +) + +var ( + masterURL string + kubeconfig string +) + +// Start starts the zfsbackup controller. +func Start(controllerMtx *sync.RWMutex, stopCh <-chan struct{}) error { + + // Get in cluster config + cfg, err := getClusterConfig(kubeconfig) + if err != nil { + return errors.Wrap(err, "error building kubeconfig") + } + + // Building Kubernetes Clientset + kubeClient, err := kubernetes.NewForConfig(cfg) + if err != nil { + return errors.Wrap(err, "error building kubernetes clientset") + } + + // Building OpenEBS Clientset + openebsClient, err := clientset.NewForConfig(cfg) + if err != nil { + return errors.Wrap(err, "error building openebs clientset") + } + + kubeInformerFactory := kubeinformers.NewSharedInformerFactory(kubeClient, time.Second*30) + bkpInformerFactory := informers.NewSharedInformerFactory(openebsClient, time.Second*30) + // Build() fn of all controllers calls AddToScheme to adds all types of this + // clientset into the given scheme. + // If multiple controllers happen to call this AddToScheme same time, + // it causes panic with error saying concurrent map access. 
+ // This lock is used to serialize the AddToScheme call of all controllers. + controllerMtx.Lock() + + controller, err := NewBkpControllerBuilder(). + withKubeClient(kubeClient). + withOpenEBSClient(openebsClient). + withBkpSynced(bkpInformerFactory). + withBkpLister(bkpInformerFactory). + withRecorder(kubeClient). + withEventHandler(bkpInformerFactory). + withWorkqueueRateLimiting().Build() + + // blocking call, can't use defer to release the lock + controllerMtx.Unlock() + + if err != nil { + return errors.Wrapf(err, "error building controller instance") + } + + go kubeInformerFactory.Start(stopCh) + go bkpInformerFactory.Start(stopCh) + + // Threadiness defines the number of workers to be launched in Run function + return controller.Run(2, stopCh) +} + +// GetClusterConfig return the config for k8s. +func getClusterConfig(kubeconfig string) (*rest.Config, error) { + cfg, err := rest.InClusterConfig() + if err != nil { + klog.Errorf("Failed to get k8s Incluster config. %+v", err) + if kubeconfig == "" { + return nil, errors.Wrap(err, "kubeconfig is empty") + } + cfg, err = clientcmd.BuildConfigFromFlags(masterURL, kubeconfig) + if err != nil { + return nil, errors.Wrap(err, "error building kubeconfig") + } + } + return cfg, err +} diff --git a/pkg/mgmt/restore/builder.go b/pkg/mgmt/restore/builder.go new file mode 100644 index 000000000..ea25f6f6f --- /dev/null +++ b/pkg/mgmt/restore/builder.go @@ -0,0 +1,136 @@ +/* +Copyright 2020 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package restore + +import ( + "k8s.io/klog" + + clientset "github.com/openebs/zfs-localpv/pkg/generated/clientset/internalclientset" + openebsScheme "github.com/openebs/zfs-localpv/pkg/generated/clientset/internalclientset/scheme" + informers "github.com/openebs/zfs-localpv/pkg/generated/informer/externalversions" + listers "github.com/openebs/zfs-localpv/pkg/generated/lister/zfs/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" +) + +const controllerAgentName = "zfsrestore-controller" + +// RstrController is the controller implementation for Restore resources +type RstrController struct { + // kubeclientset is a standard kubernetes clientset + kubeclientset kubernetes.Interface + + // clientset is a openebs custom resource package generated for custom API group. + clientset clientset.Interface + + rstrLister listers.ZFSRestoreLister + + // backupSynced is used for caches sync to get populated + rstrSynced cache.InformerSynced + + // workqueue is a rate limited work queue. This is used to queue work to be + // processed instead of performing it as soon as a change happens. This + // means we can ensure we only process a fixed amount of resources at a + // time, and makes it easy to ensure we are never processing the same item + // simultaneously in two different workers. 
+ workqueue workqueue.RateLimitingInterface + + // recorder is an event recorder for recording Event resources to the + // Kubernetes API. + recorder record.EventRecorder +} + +// RstrControllerBuilder is the builder object for controller. +type RstrControllerBuilder struct { + RstrController *RstrController +} + +// NewRstrControllerBuilder returns an empty instance of controller builder. +func NewRstrControllerBuilder() *RstrControllerBuilder { + return &RstrControllerBuilder{ + RstrController: &RstrController{}, + } +} + +// withKubeClient fills kube client to controller object. +func (cb *RstrControllerBuilder) withKubeClient(ks kubernetes.Interface) *RstrControllerBuilder { + cb.RstrController.kubeclientset = ks + return cb +} + +// withOpenEBSClient fills openebs client to controller object. +func (cb *RstrControllerBuilder) withOpenEBSClient(cs clientset.Interface) *RstrControllerBuilder { + cb.RstrController.clientset = cs + return cb +} + +// withRestoreLister fills rstr lister to controller object. +func (cb *RstrControllerBuilder) withRestoreLister(sl informers.SharedInformerFactory) *RstrControllerBuilder { + rstrInformer := sl.Zfs().V1().ZFSRestores() + cb.RstrController.rstrLister = rstrInformer.Lister() + return cb +} + +// withRestoreSynced adds object sync information in cache to controller object. +func (cb *RstrControllerBuilder) withRestoreSynced(sl informers.SharedInformerFactory) *RstrControllerBuilder { + rstrInformer := sl.Zfs().V1().ZFSRestores() + cb.RstrController.rstrSynced = rstrInformer.Informer().HasSynced + return cb +} + +// withWorkqueue adds workqueue to controller object. +func (cb *RstrControllerBuilder) withWorkqueueRateLimiting() *RstrControllerBuilder { + cb.RstrController.workqueue = workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "Restore") + return cb +} + +// withRecorder adds recorder to controller object. +func (cb *RstrControllerBuilder) withRecorder(ks kubernetes.Interface) *RstrControllerBuilder { + klog.Infof("Creating event broadcaster") + eventBroadcaster := record.NewBroadcaster() + eventBroadcaster.StartLogging(klog.Infof) + eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: ks.CoreV1().Events("")}) + recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName}) + cb.RstrController.recorder = recorder + return cb +} + +// withEventHandler adds event handlers controller object. +func (cb *RstrControllerBuilder) withEventHandler(cvcInformerFactory informers.SharedInformerFactory) *RstrControllerBuilder { + cvcInformer := cvcInformerFactory.Zfs().V1().ZFSRestores() + // Set up an event handler for when Restore resources change + cvcInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: cb.RstrController.addRestore, + UpdateFunc: cb.RstrController.updateRestore, + DeleteFunc: cb.RstrController.deleteRestore, + }) + return cb +} + +// Build returns a controller instance. +func (cb *RstrControllerBuilder) Build() (*RstrController, error) { + err := openebsScheme.AddToScheme(scheme.Scheme) + if err != nil { + return nil, err + } + return cb.RstrController, nil +} diff --git a/pkg/mgmt/restore/restore.go b/pkg/mgmt/restore/restore.go new file mode 100644 index 000000000..858464db0 --- /dev/null +++ b/pkg/mgmt/restore/restore.go @@ -0,0 +1,246 @@ +/* +Copyright 2020 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package restore + +import ( + "fmt" + "time" + + "k8s.io/klog" + + apis "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/zfs/v1" + zfs "github.com/openebs/zfs-localpv/pkg/zfs" + k8serror "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/cache" +) + +// isDeletionCandidate checks if a zfs backup is a deletion candidate. +func (c *RstrController) isDeletionCandidate(rstr *apis.ZFSRestore) bool { + return rstr.ObjectMeta.DeletionTimestamp != nil +} + +// syncHandler compares the actual state with the desired, and attempts to +// converge the two. +func (c *RstrController) syncHandler(key string) error { + // Convert the namespace/name string into a distinct namespace and name + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + runtime.HandleError(fmt.Errorf("invalid resource key: %s", key)) + return nil + } + + // Get the rstr resource with this namespace/name + rstr, err := c.rstrLister.ZFSRestores(namespace).Get(name) + if k8serror.IsNotFound(err) { + runtime.HandleError(fmt.Errorf("zfs restore '%s' has been deleted", key)) + return nil + } + if err != nil { + return err + } + rstrCopy := rstr.DeepCopy() + err = c.syncRestore(rstrCopy) + return err +} + +// enqueueRestore takes a ZFSRestore resource and converts it into a namespace/name +// string which is then put onto the work queue. This method should *not* be +// passed resources of any type other than ZFSRestore. +func (c *RstrController) enqueueRestore(obj interface{}) { + var key string + var err error + if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil { + runtime.HandleError(err) + return + } + c.workqueue.Add(key) +} + +// synRestore is the function which tries to converge to a desired state for the +// ZFSRestore +func (c *RstrController) syncRestore(rstr *apis.ZFSRestore) error { + var err error = nil + // ZFSRestore should be deleted. Check if deletion timestamp is set + if !c.isDeletionCandidate(rstr) { + // if finalizer is not set then it means we are creating + // the zfs backup. 
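+		// A ZFSRestore with Status Init is picked up here: CreateRestore receives
+		// the data from the remote location into the volume, after which the
+		// Status is updated to Done on success or Failed on error.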
+ if rstr.Status == apis.RSTZFSStatusInit { + err = zfs.CreateRestore(rstr) + if err == nil { + klog.Infof("restore %s done %s", rstr.Name, rstr.Spec.VolumeName) + err = zfs.UpdateRestoreInfo(rstr, apis.RSTZFSStatusDone) + } else { + klog.Errorf("restore %s failed %s", rstr.Name, rstr.Spec.VolumeName) + err = zfs.UpdateRestoreInfo(rstr, apis.RSTZFSStatusFailed) + } + } + } + return err +} + +// addRestore is the add event handler for ZFSRestore +func (c *RstrController) addRestore(obj interface{}) { + rstr, ok := obj.(*apis.ZFSRestore) + if !ok { + runtime.HandleError(fmt.Errorf("Couldn't get rstr object %#v", obj)) + return + } + + if zfs.NodeID != rstr.Spec.OwnerNodeID { + return + } + klog.Infof("Got add event for Restore %s", rstr.Name) + c.enqueueRestore(rstr) +} + +// updateRestore is the update event handler for ZFSRestore +func (c *RstrController) updateRestore(oldObj, newObj interface{}) { + + newRestore, ok := newObj.(*apis.ZFSRestore) + if !ok { + runtime.HandleError(fmt.Errorf("Couldn't get rstr object %#v", newRestore)) + return + } + + if zfs.NodeID != newRestore.Spec.OwnerNodeID { + return + } + + if c.isDeletionCandidate(newRestore) { + klog.Infof("Got update event for Restore %s", newRestore.Name) + c.enqueueRestore(newRestore) + } +} + +// deleteRestore is the delete event handler for ZFSRestore +func (c *RstrController) deleteRestore(obj interface{}) { + rstr, ok := obj.(*apis.ZFSRestore) + if !ok { + tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + runtime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", obj)) + return + } + rstr, ok = tombstone.Obj.(*apis.ZFSRestore) + if !ok { + runtime.HandleError(fmt.Errorf("Tombstone contained object that is not a zfsbackup %#v", obj)) + return + } + } + + if zfs.NodeID != rstr.Spec.OwnerNodeID { + return + } + + klog.Infof("Got delete event for Restore %s", rstr.Spec.VolumeName) + c.enqueueRestore(rstr) +} + +// Run will set up the event handlers for types we are interested in, as well +// as syncing informer caches and starting workers. It will block until stopCh +// is closed, at which point it will shutdown the workqueue and wait for +// workers to finish processing their current work items. +func (c *RstrController) Run(threadiness int, stopCh <-chan struct{}) error { + defer runtime.HandleCrash() + defer c.workqueue.ShutDown() + + // Start the informer factories to begin populating the informer caches + klog.Info("Starting Restore controller") + + // Wait for the k8s caches to be synced before starting workers + klog.Info("Waiting for informer caches to sync") + if ok := cache.WaitForCacheSync(stopCh, c.rstrSynced); !ok { + return fmt.Errorf("failed to wait for caches to sync") + } + klog.Info("Starting Restore workers") + // Launch worker to process Restore resources + // Threadiness will decide the number of workers you want to launch to process work items from queue + for i := 0; i < threadiness; i++ { + go wait.Until(c.runWorker, time.Second, stopCh) + } + + klog.Info("Started Restore workers") + <-stopCh + klog.Info("Shutting down Restore workers") + + return nil +} + +// runWorker is a long-running function that will continually call the +// processNextWorkItem function in order to read and process a message on the +// workqueue. +func (c *RstrController) runWorker() { + for c.processNextWorkItem() { + } +} + +// processNextWorkItem will read a single work item off the workqueue and +// attempt to process it, by calling the syncHandler. 
+func (c *RstrController) processNextWorkItem() bool { + obj, shutdown := c.workqueue.Get() + + if shutdown { + return false + } + + // We wrap this block in a func so we can defer c.workqueue.Done. + err := func(obj interface{}) error { + // We call Done here so the workqueue knows we have finished + // processing this item. We also must remember to call Forget if we + // do not want this work item being re-queued. For example, we do + // not call Forget if a transient error occurs, instead the item is + // put back on the workqueue and attempted again after a back-off + // period. + defer c.workqueue.Done(obj) + var key string + var ok bool + // We expect strings to come off the workqueue. These are of the + // form namespace/name. We do this as the delayed nature of the + // workqueue means the items in the informer cache may actually be + // more up to date that when the item was initially put onto the + // workqueue. + if key, ok = obj.(string); !ok { + // As the item in the workqueue is actually invalid, we call + // Forget here else we'd go into a loop of attempting to + // process a work item that is invalid. + c.workqueue.Forget(obj) + runtime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj)) + return nil + } + // Run the syncHandler, passing it the namespace/name string of the + // Restore resource to be synced. + if err := c.syncHandler(key); err != nil { + // Put the item back on the workqueue to handle any transient errors. + c.workqueue.AddRateLimited(key) + return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error()) + } + // Finally, if no error occurs we Forget this item so it does not + // get queued again until another change happens. + c.workqueue.Forget(obj) + klog.Infof("Successfully synced '%s'", key) + return nil + }(obj) + + if err != nil { + runtime.HandleError(err) + return true + } + + return true +} diff --git a/pkg/mgmt/restore/start.go b/pkg/mgmt/restore/start.go new file mode 100644 index 000000000..af9e5f9dd --- /dev/null +++ b/pkg/mgmt/restore/start.go @@ -0,0 +1,107 @@ +/* +Copyright 2020 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package restore + +import ( + "sync" + + "github.com/pkg/errors" + "k8s.io/klog" + + "time" + + clientset "github.com/openebs/zfs-localpv/pkg/generated/clientset/internalclientset" + informers "github.com/openebs/zfs-localpv/pkg/generated/informer/externalversions" + kubeinformers "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" +) + +var ( + masterURL string + kubeconfig string +) + +// Start starts the zfsbackup controller. 
+func Start(controllerMtx *sync.RWMutex, stopCh <-chan struct{}) error { + + // Get in cluster config + cfg, err := getClusterConfig(kubeconfig) + if err != nil { + return errors.Wrap(err, "error building kubeconfig") + } + + // Building Kubernetes Clientset + kubeClient, err := kubernetes.NewForConfig(cfg) + if err != nil { + return errors.Wrap(err, "error building kubernetes clientset") + } + + // Building OpenEBS Clientset + openebsClient, err := clientset.NewForConfig(cfg) + if err != nil { + return errors.Wrap(err, "error building openebs clientset") + } + + kubeInformerFactory := kubeinformers.NewSharedInformerFactory(kubeClient, time.Second*30) + bkpInformerFactory := informers.NewSharedInformerFactory(openebsClient, time.Second*30) + // Build() fn of all controllers calls AddToScheme to adds all types of this + // clientset into the given scheme. + // If multiple controllers happen to call this AddToScheme same time, + // it causes panic with error saying concurrent map access. + // This lock is used to serialize the AddToScheme call of all controllers. + controllerMtx.Lock() + + controller, err := NewRstrControllerBuilder(). + withKubeClient(kubeClient). + withOpenEBSClient(openebsClient). + withRestoreSynced(bkpInformerFactory). + withRestoreLister(bkpInformerFactory). + withRecorder(kubeClient). + withEventHandler(bkpInformerFactory). + withWorkqueueRateLimiting().Build() + + // blocking call, can't use defer to release the lock + controllerMtx.Unlock() + + if err != nil { + return errors.Wrapf(err, "error building controller instance") + } + + go kubeInformerFactory.Start(stopCh) + go bkpInformerFactory.Start(stopCh) + + // Threadiness defines the number of workers to be launched in Run function + return controller.Run(2, stopCh) +} + +// GetClusterConfig return the config for k8s. +func getClusterConfig(kubeconfig string) (*rest.Config, error) { + cfg, err := rest.InClusterConfig() + if err != nil { + klog.Errorf("Failed to get k8s Incluster config. 
%+v", err) + if kubeconfig == "" { + return nil, errors.Wrap(err, "kubeconfig is empty") + } + cfg, err = clientcmd.BuildConfigFromFlags(masterURL, kubeconfig) + if err != nil { + return nil, errors.Wrap(err, "error building kubeconfig") + } + } + return cfg, err +} diff --git a/pkg/zfs/volume.go b/pkg/zfs/volume.go index 0843915ec..9a63d7767 100644 --- a/pkg/zfs/volume.go +++ b/pkg/zfs/volume.go @@ -19,6 +19,8 @@ import ( "strconv" apis "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/zfs/v1" + "github.com/openebs/zfs-localpv/pkg/builder/bkpbuilder" + "github.com/openebs/zfs-localpv/pkg/builder/restorebuilder" "github.com/openebs/zfs-localpv/pkg/builder/snapbuilder" "github.com/openebs/zfs-localpv/pkg/builder/volbuilder" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -45,6 +47,8 @@ const ( ZFSTopologyKey string = "openebs.io/nodename" // ZFSStatusPending shows object has not handled yet ZFSStatusPending string = "Pending" + // ZFSStatusFailed shows object operation has failed + ZFSStatusFailed string = "Failed" // ZFSStatusReady shows object has been processed ZFSStatusReady string = "Ready" ) @@ -63,7 +67,7 @@ var ( func init() { OpenEBSNamespace = os.Getenv(OpenEBSNamespaceKey) - if OpenEBSNamespace == "" { + if OpenEBSNamespace == "" && os.Getenv("OPENEBS_NODE_DRIVER") != "" { klog.Fatalf("OPENEBS_NAMESPACE environment variable not set") } NodeID = os.Getenv("OPENEBS_NODE_ID") @@ -256,3 +260,44 @@ func RemoveSnapFinalizer(snap *apis.ZFSSnapshot) error { _, err := snapbuilder.NewKubeclient().WithNamespace(OpenEBSNamespace).Update(snap) return err } + +// RemoveBkpFinalizer removes finalizer from ZFSBackup CR +func RemoveBkpFinalizer(bkp *apis.ZFSBackup) error { + bkp.Finalizers = nil + + _, err := bkpbuilder.NewKubeclient().WithNamespace(OpenEBSNamespace).Update(bkp) + return err +} + +// UpdateBkpInfo updates the backup info with the status +func UpdateBkpInfo(bkp *apis.ZFSBackup, status apis.ZFSBackupStatus) error { + finalizers := []string{ZFSFinalizer} + newBkp, err := bkpbuilder.BuildFrom(bkp).WithFinalizer(finalizers).Build() + + // set the status + newBkp.Status = status + + if err != nil { + klog.Errorf("Update snapshot failed %s err: %s", bkp.Spec.VolumeName, err.Error()) + return err + } + + _, err = bkpbuilder.NewKubeclient().WithNamespace(OpenEBSNamespace).Update(newBkp) + return err +} + +// UpdateRestoreInfo updates the rstr info with the status +func UpdateRestoreInfo(rstr *apis.ZFSRestore, status apis.ZFSRestoreStatus) error { + newRstr, err := restorebuilder.BuildFrom(rstr).Build() + + // set the status + newRstr.Status = status + + if err != nil { + klog.Errorf("Update snapshot failed %s err: %s", rstr.Spec.VolumeName, err.Error()) + return err + } + + _, err = restorebuilder.NewKubeclient().WithNamespace(OpenEBSNamespace).Update(newRstr) + return err +} diff --git a/pkg/zfs/zfs_util.go b/pkg/zfs/zfs_util.go index 753f9b3bd..498967afe 100644 --- a/pkg/zfs/zfs_util.go +++ b/pkg/zfs/zfs_util.go @@ -24,6 +24,7 @@ import ( apis "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/zfs/v1" "k8s.io/klog" + "strings" ) // zfs related constants @@ -42,6 +43,8 @@ const ( ZFSGetArg = "get" ZFSListArg = "list" ZFSSnapshotArg = "snapshot" + ZFSSendArg = "send" + ZFSRecvArg = "recv" ) // constants to define volume type @@ -292,13 +295,56 @@ func buildVolumeResizeArgs(vol *apis.ZFSVolume) []string { return ZFSVolArg } +// builldVolumeBackupArgs returns volume send command for sending the zfs volume +func buildVolumeBackupArgs(bkp *apis.ZFSBackup, vol *apis.ZFSVolume) []string { + 
var ZFSVolArg []string + backupDest := bkp.Spec.BackupDest + + bkpAddr := strings.Split(backupDest, ":") + + curSnap := vol.Spec.PoolName + "/" + vol.Name + "@" + bkp.Name + + remote := " | nc -w 3 " + bkpAddr[0] + " " + bkpAddr[1] + + cmd := ZFSVolCmd + " " + + if len(bkp.Spec.PrevSnapName) > 0 { + prevSnap := vol.Spec.PoolName + "/" + vol.Name + "@" + bkp.Spec.PrevSnapName + // do incremental send + cmd += ZFSSendArg + " -i " + prevSnap + " " + curSnap + " " + remote + } else { + cmd += ZFSSendArg + " " + curSnap + remote + } + + ZFSVolArg = append(ZFSVolArg, "-c", cmd) + + return ZFSVolArg +} + +// builldVolumeRestoreArgs returns volume recv command for receiving the zfs volume +func buildVolumeRestoreArgs(rstr *apis.ZFSRestore, vol *apis.ZFSVolume) []string { + var ZFSVolArg []string + restoreSrc := rstr.Spec.RestoreSrc + + volume := vol.Spec.PoolName + "/" + vol.Name + + rstrAddr := strings.Split(restoreSrc, ":") + source := "nc -w 3 " + rstrAddr[0] + " " + rstrAddr[1] + " | " + + cmd := source + ZFSVolCmd + " " + ZFSRecvArg + " -F " + volume + + ZFSVolArg = append(ZFSVolArg, "-c", cmd) + + return ZFSVolArg +} + // builldVolumeDestroyArgs returns volume destroy command along with attributes as a string array func buildVolumeDestroyArgs(vol *apis.ZFSVolume) []string { var ZFSVolArg []string volume := vol.Spec.PoolName + "/" + vol.Name - ZFSVolArg = append(ZFSVolArg, ZFSDestroyArg, volume) + ZFSVolArg = append(ZFSVolArg, ZFSDestroyArg, "-r", volume) return ZFSVolArg } @@ -620,3 +666,96 @@ func ResizeZFSVolume(vol *apis.ZFSVolume, mountpath string) error { err = handleVolResize(vol, mountpath) return err } + +// CreateBackup creates the backup +func CreateBackup(bkp *apis.ZFSBackup) error { + vol, err := GetZFSVolume(bkp.Spec.VolumeName) + if err != nil { + return err + } + + volume := vol.Spec.PoolName + "/" + vol.Name + + /* create the snapshot for the backup */ + snap := &apis.ZFSSnapshot{} + snap.Name = bkp.Name + snap.Spec.PoolName = vol.Spec.PoolName + snap.Labels = map[string]string{ZFSVolKey: vol.Name} + + err = CreateSnapshot(snap) + + if err != nil { + klog.Errorf( + "zfs: could not create snapshot for the backup vol %s snap %s err %v", volume, snap.Name, err, + ) + return err + } + + args := buildVolumeBackupArgs(bkp, vol) + cmd := exec.Command("bash", args...) + out, err := cmd.CombinedOutput() + + if err != nil { + klog.Errorf( + "zfs: could not backup the volume %v cmd %v error: %s", volume, args, string(out), + ) + } + + return err +} + +// DestoryBackup deletes the snapshot created +func DestoryBackup(bkp *apis.ZFSBackup) error { + vol, err := GetZFSVolume(bkp.Spec.VolumeName) + if err != nil { + return err + } + + volume := vol.Spec.PoolName + "/" + vol.Name + + /* create the snapshot for the backup */ + snap := &apis.ZFSSnapshot{} + snap.Name = bkp.Name + snap.Spec.PoolName = vol.Spec.PoolName + snap.Labels = map[string]string{ZFSVolKey: vol.Name} + + err = DestroySnapshot(snap) + + if err != nil { + klog.Errorf( + "zfs: could not destroy snapshot for the backup vol %s snap %s err %v", volume, snap.Name, err, + ) + } + return err +} + +// CreateRestore creates the restore +func CreateRestore(rstr *apis.ZFSRestore) error { + vol, err := GetZFSVolume(rstr.Spec.VolumeName) + if err != nil { + return err + } + volume := vol.Spec.PoolName + "/" + vol.Name + args := buildVolumeRestoreArgs(rstr, vol) + cmd := exec.Command("bash", args...) 
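+	// args is {"-c", "nc -w 3 <host> <port> | zfs recv -F <pool>/<volume>"},
+	// so the restore pipeline runs as `bash -c ...` with nc feeding zfs recv.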
+ out, err := cmd.CombinedOutput() + + if err != nil { + klog.Errorf( + "zfs: could not restore the volume %v cmd %v error: %s", volume, args, string(out), + ) + } + + /* + * need to generate a new uuid for zfs and btrfs volumes + * so that we can mount it. + */ + if vol.Spec.FsType == "xfs" { + return xfsGenerateUUID(volume) + } + if vol.Spec.FsType == "btrfs" { + return btrfsGenerateUUID(volume) + } + + return err +}
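
For illustration only, below is a minimal sketch of creating a ZFSBackup CR through the generated clientset so that the backup controller added in this patch can reconcile it. The kubeconfig path, object name, namespace, volume name, node ID and destination address are hypothetical, and the Create call assumes the context-free client-go style used by the rest of this patch:

```go
package main

import (
	"fmt"

	apis "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/zfs/v1"
	clientset "github.com/openebs/zfs-localpv/pkg/generated/clientset/internalclientset"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Hypothetical kubeconfig; in-cluster config would normally be used.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}

	client, err := clientset.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Hypothetical ZFSBackup: the controller on the owning node picks it up
	// while Status is Init and streams the volume data to BackupDest.
	bkp := &apis.ZFSBackup{}
	bkp.Name = "backup-pvc-example"
	bkp.Namespace = "openebs"
	bkp.Spec.VolumeName = "pvc-example"
	bkp.Spec.OwnerNodeID = "node-1"
	bkp.Spec.BackupDest = "192.168.1.10:9010"
	bkp.Status = apis.BKPZFSStatusInit

	created, err := client.ZfsV1().ZFSBackups("openebs").Create(bkp)
	if err != nil {
		panic(err)
	}
	fmt.Println("created ZFSBackup", created.Name)
}
```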