diff --git a/api/crds/recovery.yaml b/api/crds/recovery.yaml index c119d27ef..fcc89473e 100644 --- a/api/crds/recovery.yaml +++ b/api/crds/recovery.yaml @@ -9,10 +9,10 @@ metadata: spec: additionalPrinterColumns: - JSONPath: .spec.repository.namespace - name: RepositoryNamespace + name: Repository-Namespace type: string - JSONPath: .spec.repository.name - name: RepositoryName + name: Repository-Name type: string - JSONPath: .spec.snapshot name: Snapshot diff --git a/api/crds/repository.yaml b/api/crds/repository.yaml index 5c004f3cc..e6bd4fde3 100644 --- a/api/crds/repository.yaml +++ b/api/crds/repository.yaml @@ -9,11 +9,11 @@ metadata: spec: additionalPrinterColumns: - JSONPath: .status.backupCount - name: BackupCount + name: Backup-Count type: integer - JSONPath: .status.lastBackupTime format: date-time - name: LastSuccessfulBackup + name: Last-Successful-Backup type: date - JSONPath: .metadata.creationTimestamp name: Age diff --git a/api/crds/restic.yaml b/api/crds/restic.yaml index 6029c8fa1..d2cb325e2 100644 --- a/api/crds/restic.yaml +++ b/api/crds/restic.yaml @@ -15,7 +15,7 @@ spec: name: Schedule type: string - JSONPath: .spec.type - name: BackupType + name: Backup-Type priority: 10 type: string - JSONPath: .spec.paused diff --git a/apis/stash/v1alpha1/crd.go b/apis/stash/v1alpha1/crd.go index 3b834bbdc..3a15a4930 100644 --- a/apis/stash/v1alpha1/crd.go +++ b/apis/stash/v1alpha1/crd.go @@ -44,7 +44,7 @@ func (c Restic) CustomResourceDefinition() *apiextensions.CustomResourceDefiniti JSONPath: ".spec.schedule", }, { - Name: "BackupType", + Name: "Backup-Type", Type: "string", JSONPath: ".spec.type", Priority: 10, @@ -88,12 +88,12 @@ func (c Recovery) CustomResourceDefinition() *apiextensions.CustomResourceDefini EnableStatusSubresource: EnableStatusSubresource, AdditionalPrinterColumns: []apiextensions.CustomResourceColumnDefinition{ { - Name: "RepositoryNamespace", + Name: "Repository-Namespace", Type: "string", JSONPath: ".spec.repository.namespace", }, { - Name: "RepositoryName", + Name: "Repository-Name", Type: "string", JSONPath: ".spec.repository.name", }, @@ -141,12 +141,12 @@ func (c Repository) CustomResourceDefinition() *apiextensions.CustomResourceDefi EnableStatusSubresource: EnableStatusSubresource, AdditionalPrinterColumns: []apiextensions.CustomResourceColumnDefinition{ { - Name: "BackupCount", + Name: "Backup-Count", Type: "integer", JSONPath: ".status.backupCount", }, { - Name: "LastSuccessfulBackup", + Name: "Last-Successful-Backup", Type: "date", JSONPath: ".status.lastBackupTime", Format: "date-time", diff --git a/docs/concepts/crds/recovery.md b/docs/concepts/crds/recovery.md index b3dabc3da..5cf1619c1 100644 --- a/docs/concepts/crds/recovery.md +++ b/docs/concepts/crds/recovery.md @@ -85,6 +85,6 @@ Stash operator updates `.status` of a Recovery CRD when the recovery operation i - To run backup in offline mode see [here](/docs/guides/offline_backup.md) - See the list of supported backends and how to configure them [here](/docs/guides/backends/overview.md). - See working examples for supported workload types [here](/docs/guides/workloads.md). -- Thinking about monitoring your backup operations? Stash works [out-of-the-box with Prometheus](/docs/guides/monitoring.md). +- Thinking about monitoring your backup operations? Stash works [out-of-the-box with Prometheus](/docs/guides/monitoring/overview.md). - Learn about how to configure [RBAC roles](/docs/guides/rbac.md). - Want to hack on Stash? Check our [contribution guidelines](/docs/CONTRIBUTING.md). 
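With the hyphenated column names above, `kubectl get` prints more readable headers for these custom resources. A minimal sketch of the expected output after this change (the repository name, count, and ages below are hypothetical): ```console $ kubectl get repository deployment.stash-demo NAME BACKUP-COUNT LAST-SUCCESSFUL-BACKUP AGE deployment.stash-demo 4 23s 4m ```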
diff --git a/docs/concepts/crds/repository.md b/docs/concepts/crds/repository.md index 205b470fb..c8db688a2 100644 --- a/docs/concepts/crds/repository.md +++ b/docs/concepts/crds/repository.md @@ -123,7 +123,7 @@ $ kubectl get repository -l node-name=minikube ## Deleting Repository -Stash allows the users to delete **only `Repository` crd** or **`Repository` crd with respective restic repository**. Here, we will show how to perform these delete operations. +Stash allows the users to delete **only `Repository` crd** or **`Repository` crd with respective restic repository**. Here, we are going to show how to perform these delete operations. ### Delete only Repository crd @@ -215,6 +215,6 @@ If everything goes well, respective restic repository will be deleted from the b - To run backup in offline mode see [here](/docs/guides/offline_backup.md) - See the list of supported backends and how to configure them [here](/docs/guides/backends/overview.md). - See working examples for supported workload types [here](/docs/guides/workloads.md). -- Thinking about monitoring your backup operations? Stash works [out-of-the-box with Prometheus](/docs/guides/monitoring.md). +- Thinking about monitoring your backup operations? Stash works [out-of-the-box with Prometheus](/docs/guides/monitoring/overview.md). - Learn about how to configure [RBAC roles](/docs/guides/rbac.md). - Want to hack on Stash? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/concepts/crds/restic.md b/docs/concepts/crds/restic.md index 0c032e9f2..448447ef3 100644 --- a/docs/concepts/crds/restic.md +++ b/docs/concepts/crds/restic.md @@ -156,6 +156,6 @@ For more details about how to disable and resume Restic see [here](/docs/guides/ - To run backup in offline mode see [here](/docs/guides/offline_backup.md) - See the list of supported backends and how to configure them [here](/docs/guides/backends/overview.md). - See working examples for supported workload types [here](/docs/guides/workloads.md). -- Thinking about monitoring your backup operations? Stash works [out-of-the-box with Prometheus](/docs/guides/monitoring.md). +- Thinking about monitoring your backup operations? Stash works [out-of-the-box with Prometheus](/docs/guides/monitoring/overview.md). - Learn about how to configure [RBAC roles](/docs/guides/rbac.md). - Want to hack on Stash? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/concepts/crds/snapshot.md b/docs/concepts/crds/snapshot.md index b94202b6a..a5e8c68a4 100644 --- a/docs/concepts/crds/snapshot.md +++ b/docs/concepts/crds/snapshot.md @@ -172,6 +172,6 @@ snapshot "statefulset.stash-demo-0-d690726d" deleted - To run backup in offline mode see [here](/docs/guides/offline_backup.md) - See the list of supported backends and how to configure them [here](/docs/guides/backends/overview.md). - See working examples for supported workload types [here](/docs/guides/workloads.md). -- Thinking about monitoring your backup operations? Stash works [out-of-the-box with Prometheus](/docs/guides/monitoring.md). +- Thinking about monitoring your backup operations? Stash works [out-of-the-box with Prometheus](/docs/guides/monitoring/overview.md). - Learn about how to configure [RBAC roles](/docs/guides/rbac.md). - Want to hack on Stash? Check our [contribution guidelines](/docs/CONTRIBUTING.md). 
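For quick reference, the `Snapshot` listing and deletion flows that these concept pages describe follow the usual kubectl pattern; a brief sketch (the repository label and snapshot name are illustrative): ```console # list snapshots of a particular repository using the repository label $ kubectl get snapshots -l repository=deployment.stash-demo # delete a specific snapshot $ kubectl delete snapshot deployment.stash-demo-d690726d snapshot "deployment.stash-demo-d690726d" deleted ```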
diff --git a/docs/examples/backends/local/local-restic.yaml b/docs/examples/backends/local/local-restic-hostPath.yaml similarity index 100% rename from docs/examples/backends/local/local-restic.yaml rename to docs/examples/backends/local/local-restic-hostPath.yaml diff --git a/docs/examples/backends/local/local-restic-nfs.yaml b/docs/examples/backends/local/local-restic-nfs.yaml new file mode 100644 index 000000000..956d29405 --- /dev/null +++ b/docs/examples/backends/local/local-restic-nfs.yaml @@ -0,0 +1,27 @@ +apiVersion: stash.appscode.com/v1alpha1 +kind: Restic +metadata: + name: local-restic + namespace: default +spec: + selector: + matchLabels: + app: stash-demo + fileGroups: + - path: /source/data + retentionPolicyName: 'keep-last-5' + backend: + local: + mountPath: /safe/data + nfs: + server: "nfs-service.storage.svc.cluster.local" # use your own NFS server address + path: "/" # this path is relative to "/exports" path of NFS server + storageSecretName: local-secret + schedule: '@every 1m' + volumeMounts: + - mountPath: /source/data + name: source-data + retentionPolicies: + - name: 'keep-last-5' + keepLast: 5 + prune: true \ No newline at end of file diff --git a/docs/examples/backends/minio/minio-deployment.yaml b/docs/examples/backends/minio/minio-deployment.yaml deleted file mode 100644 index 88a46d8d0..000000000 --- a/docs/examples/backends/minio/minio-deployment.yaml +++ /dev/null @@ -1,57 +0,0 @@ -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - # This name uniquely identifies the Deployment - name: minio-deployment - labels: - app: minio -spec: - strategy: - type: Recreate # If pod fail, we want to recreate pod rather than restarting it. - template: - metadata: - labels: - # Label is used as a selector in the service. - app: minio-server - spec: - volumes: - # Refer to the PVC have created earlier - - name: storage - persistentVolumeClaim: - # Name of the PVC created earlier - claimName: minio-pvc - - name: minio-certs - secret: - secretName: minio-server-secret - items: - - key: public.crt - path: public.crt - - key: private.key - path: private.key - - key: public.crt - path: CAs/public.crt # mark self signed certificate as trusted - containers: - - name: minio - # Pulls the default Minio image from Docker Hub - image: minio/minio - args: - - server - - --address - - ":443" - - /storage - env: - # Minio access key and secret key - - name: MINIO_ACCESS_KEY - value: "" - - name: MINIO_SECRET_KEY - value: "" - ports: - - containerPort: 443 - # This ensures containers are allocated on separate hosts. Remove hostPort to allow multiple Minio containers on one host - hostPort: 443 - # Mount the volumes into the pod - volumeMounts: - - name: storage # must match the volume name, above - mountPath: "/storage" - - name: minio-certs - mountPath: "/root/.minio/certs" \ No newline at end of file diff --git a/docs/examples/backends/minio/minio-pvc.yaml b/docs/examples/backends/minio/minio-pvc.yaml deleted file mode 100644 index bb67767e3..000000000 --- a/docs/examples/backends/minio/minio-pvc.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - # This name uniquely identifies the PVC. Will be used in minio deployment. - name: minio-pvc - labels: - app: minio -spec: - storageClassName: standard - accessModes: - - ReadWriteOnce - resources: - # This is the request for storage. Should be available in the cluster.
- requests: - storage: 2Gi \ No newline at end of file diff --git a/docs/examples/backends/minio/minio-recovery.yaml b/docs/examples/backends/minio/minio-recovery.yaml deleted file mode 100644 index b0e50d5a4..000000000 --- a/docs/examples/backends/minio/minio-recovery.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: stash.appscode.com/v1alpha1 -kind: Recovery -metadata: - name: minio-recovery - namespace: default -spec: - repository: - name: deployment.stash-demo - namespace: default - paths: - - /source/data - recoveredVolumes: - - mountPath: /source/data # where the volume will be mounted - name: stash-recovered-volume - hostPath: # volume source, where the recovered data will be stored. - path: /data/stash-recovered/ # directory in volume source where recovered data will be stored \ No newline at end of file diff --git a/docs/examples/backends/minio/minio-restic.yaml b/docs/examples/backends/minio/minio-restic.yaml deleted file mode 100644 index 6623d88c2..000000000 --- a/docs/examples/backends/minio/minio-restic.yaml +++ /dev/null @@ -1,26 +0,0 @@ -apiVersion: stash.appscode.com/v1alpha1 -kind: Restic -metadata: - name: minio-restic - namespace: default -spec: - selector: - matchLabels: - app: stash-demo # Must match with the label of busybox pod we have created before. - fileGroups: - - path: /source/data - retentionPolicyName: 'keep-last-5' - backend: - s3: - endpoint: 'https://minio-service.default.svc' # Use your own Minio server address. - bucket: stash-qa # Give a name of the bucket where you want to backup. - prefix: demo # . Path prefix into bucket where repository will be created.(optional). - storageSecretName: minio-restic-secret - schedule: '@every 1m' - volumeMounts: - - mountPath: /source/data - name: source-data - retentionPolicies: - - name: 'keep-last-5' - keepLast: 5 - prune: true \ No newline at end of file diff --git a/docs/examples/backends/minio/minio-service.yaml b/docs/examples/backends/minio/minio-service.yaml deleted file mode 100644 index b4fd79cc7..000000000 --- a/docs/examples/backends/minio/minio-service.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: minio-service - labels: - app: minio -spec: - type: LoadBalancer - ports: - - port: 443 - targetPort: 443 - protocol: TCP - selector: - app: minio-server # must match with the label used in the deployment \ No newline at end of file diff --git a/docs/examples/tutorial/busybox.yaml b/docs/examples/backup/deployment.yaml similarity index 71% rename from docs/examples/tutorial/busybox.yaml rename to docs/examples/backup/deployment.yaml index 60e32a14d..4d398cfb9 100644 --- a/docs/examples/tutorial/busybox.yaml +++ b/docs/examples/backup/deployment.yaml @@ -1,12 +1,15 @@ -apiVersion: apps/v1beta1 +apiVersion: apps/v1 kind: Deployment metadata: labels: app: stash-demo name: stash-demo - namespace: default + namespace: demo spec: replicas: 1 + selector: + matchLabels: + app: stash-demo template: metadata: labels: @@ -14,7 +17,7 @@ spec: name: busybox spec: containers: - - command: + - args: - sleep - "3600" image: busybox @@ -25,6 +28,6 @@ spec: name: source-data restartPolicy: Always volumes: - - gitRepo: - repository: https://github.com/appscode/stash-data.git - name: source-data + - name: source-data + configMap: + name: stash-sample-data diff --git a/docs/examples/tutorial/restic.yaml b/docs/examples/backup/restic.yaml similarity index 72% rename from docs/examples/tutorial/restic.yaml rename to docs/examples/backup/restic.yaml index e76ae9608..5771838d7 100644 --- 
a/docs/examples/tutorial/restic.yaml +++ b/docs/examples/backup/restic.yaml @@ -1,8 +1,8 @@ apiVersion: stash.appscode.com/v1alpha1 kind: Restic metadata: - name: stash-demo - namespace: default + name: local-restic + namespace: demo spec: selector: matchLabels: @@ -13,9 +13,10 @@ spec: backend: local: mountPath: /safe/data - hostPath: - path: /data/stash-test/restic-repo - storageSecretName: stash-demo + nfs: + server: "nfs-service.storage.svc.cluster.local" + path: "/" + storageSecretName: local-secret schedule: '@every 1m' volumeMounts: - mountPath: /source/data diff --git a/docs/examples/tutorial/restic_offline.yaml b/docs/examples/backup/restic_offline.yaml similarity index 72% rename from docs/examples/tutorial/restic_offline.yaml rename to docs/examples/backup/restic_offline.yaml index fb8289cd8..50b0fc1ac 100644 --- a/docs/examples/tutorial/restic_offline.yaml +++ b/docs/examples/backup/restic_offline.yaml @@ -1,8 +1,8 @@ apiVersion: stash.appscode.com/v1alpha1 kind: Restic metadata: - name: stash-demo - namespace: default + name: offline-restic + namespace: demo spec: selector: matchLabels: @@ -14,9 +14,10 @@ spec: backend: local: mountPath: /safe/data - hostPath: - path: /data/stash-test/restic-repo - storageSecretName: stash-demo + nfs: + server: "nfs-service.storage.svc.cluster.local" + path: "/" + storageSecretName: local-secret schedule: '@every 5m' volumeMounts: - mountPath: /source/data diff --git a/docs/examples/platforms/minio/deployment.yaml b/docs/examples/platforms/minio/deployment.yaml new file mode 100644 index 000000000..4d398cfb9 --- /dev/null +++ b/docs/examples/platforms/minio/deployment.yaml @@ -0,0 +1,33 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: stash-demo + name: stash-demo + namespace: demo +spec: + replicas: 1 + selector: + matchLabels: + app: stash-demo + template: + metadata: + labels: + app: stash-demo + name: busybox + spec: + containers: + - args: + - sleep + - "3600" + image: busybox + imagePullPolicy: IfNotPresent + name: busybox + volumeMounts: + - mountPath: /source/data + name: source-data + restartPolicy: Always + volumes: + - name: source-data + configMap: + name: stash-sample-data diff --git a/docs/examples/platforms/minio/pvc.yaml b/docs/examples/platforms/minio/pvc.yaml new file mode 100644 index 000000000..42110cc4f --- /dev/null +++ b/docs/examples/platforms/minio/pvc.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: stash-recovered + namespace: demo + labels: + app: stash-demo +spec: + storageClassName: standard + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 50Mi diff --git a/docs/examples/backends/rook/restored-deployment.yaml b/docs/examples/platforms/minio/recovered-deployment.yaml similarity index 80% rename from docs/examples/backends/rook/restored-deployment.yaml rename to docs/examples/platforms/minio/recovered-deployment.yaml index f8c169d0d..8c78b17f2 100644 --- a/docs/examples/backends/rook/restored-deployment.yaml +++ b/docs/examples/platforms/minio/recovered-deployment.yaml @@ -1,12 +1,15 @@ -apiVersion: apps/v1beta1 +apiVersion: apps/v1 kind: Deployment metadata: labels: app: stash-demo name: stash-demo - namespace: default + namespace: demo spec: replicas: 1 + selector: + matchLabels: + app: stash-demo template: metadata: labels: @@ -27,4 +30,4 @@ spec: volumes: - name: source-data persistentVolumeClaim: - claimName: stash-recovered \ No newline at end of file + claimName: stash-recovered diff --git a/docs/examples/tutorial/recovery.yaml 
b/docs/examples/platforms/minio/recovery.yaml similarity index 61% rename from docs/examples/tutorial/recovery.yaml rename to docs/examples/platforms/minio/recovery.yaml index 86ca120ca..caafaba50 100644 --- a/docs/examples/tutorial/recovery.yaml +++ b/docs/examples/platforms/minio/recovery.yaml @@ -1,15 +1,15 @@ apiVersion: stash.appscode.com/v1alpha1 kind: Recovery metadata: - name: stash-demo - namespace: default + name: minio-recovery + namespace: demo spec: repository: name: deployment.stash-demo - namespace: default + namespace: demo paths: - /source/data recoveredVolumes: - mountPath: /source/data - hostPath: - path: /data/stash-test/restic-restored \ No newline at end of file + persistentVolumeClaim: + claimName: stash-recovered diff --git a/docs/examples/platforms/minio/restic.yaml b/docs/examples/platforms/minio/restic.yaml new file mode 100644 index 000000000..0c989ae8c --- /dev/null +++ b/docs/examples/platforms/minio/restic.yaml @@ -0,0 +1,26 @@ +apiVersion: stash.appscode.com/v1alpha1 +kind: Restic +metadata: + name: minio-restic + namespace: demo +spec: + selector: + matchLabels: + app: stash-demo + fileGroups: + - path: /source/data + retentionPolicyName: 'keep-last-5' + backend: + s3: + endpoint: 'https://minio.storage.svc' + bucket: stash-repo + prefix: demo + storageSecretName: minio-secret + schedule: '@every 1m' + volumeMounts: + - mountPath: /source/data + name: source-data + retentionPolicies: + - name: 'keep-last-5' + keepLast: 5 + prune: true diff --git a/docs/examples/platforms/rook/deployment.yaml b/docs/examples/platforms/rook/deployment.yaml new file mode 100644 index 000000000..4d398cfb9 --- /dev/null +++ b/docs/examples/platforms/rook/deployment.yaml @@ -0,0 +1,33 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: stash-demo + name: stash-demo + namespace: demo +spec: + replicas: 1 + selector: + matchLabels: + app: stash-demo + template: + metadata: + labels: + app: stash-demo + name: busybox + spec: + containers: + - args: + - sleep + - "3600" + image: busybox + imagePullPolicy: IfNotPresent + name: busybox + volumeMounts: + - mountPath: /source/data + name: source-data + restartPolicy: Always + volumes: + - name: source-data + configMap: + name: stash-sample-data diff --git a/docs/examples/platforms/rook/recovered-deployment.yaml b/docs/examples/platforms/rook/recovered-deployment.yaml new file mode 100644 index 000000000..8c78b17f2 --- /dev/null +++ b/docs/examples/platforms/rook/recovered-deployment.yaml @@ -0,0 +1,33 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: stash-demo + name: stash-demo + namespace: demo +spec: + replicas: 1 + selector: + matchLabels: + app: stash-demo + template: + metadata: + labels: + app: stash-demo + name: busybox + spec: + containers: + - args: + - sleep + - "3600" + image: busybox + imagePullPolicy: IfNotPresent + name: busybox + volumeMounts: + - mountPath: /source/data + name: source-data + restartPolicy: Always + volumes: + - name: source-data + persistentVolumeClaim: + claimName: stash-recovered diff --git a/docs/examples/backends/rook/rook-recovery.yaml b/docs/examples/platforms/rook/recovery.yaml similarity index 86% rename from docs/examples/backends/rook/rook-recovery.yaml rename to docs/examples/platforms/rook/recovery.yaml index 99e4c7a22..20151e7e2 100644 --- a/docs/examples/backends/rook/rook-recovery.yaml +++ b/docs/examples/platforms/rook/recovery.yaml @@ -2,11 +2,11 @@ apiVersion: stash.appscode.com/v1alpha1 kind: Recovery metadata: name: rook-recovery - namespace: 
default + namespace: demo spec: repository: name: deployment.stash-demo - namespace: default + namespace: demo paths: - /source/data recoveredVolumes: diff --git a/docs/examples/backends/rook/rook-restic.yaml b/docs/examples/platforms/rook/restic.yaml similarity index 55% rename from docs/examples/backends/rook/rook-restic.yaml rename to docs/examples/platforms/rook/restic.yaml index 5e3694703..7dc63bc0c 100644 --- a/docs/examples/backends/rook/rook-restic.yaml +++ b/docs/examples/platforms/rook/restic.yaml @@ -2,20 +2,20 @@ apiVersion: stash.appscode.com/v1alpha1 kind: Restic metadata: name: rook-restic - namespace: default + namespace: demo spec: selector: matchLabels: - app: stash-demo # Must match with the label of busybox pod we have created before. + app: stash-demo # Must match with the label of the pod we want to back up. fileGroups: - path: /source/data retentionPolicyName: 'keep-last-5' backend: s3: - endpoint: 'http://rook-ceph-rgw-my-store.rook' # Use your own rook object storage end point. + endpoint: 'http://rook-ceph-rgw-my-store.rook-ceph.svc' # Use your own rook object storage endpoint. bucket: stash-backup # Give a name of the bucket where you want to backup. - prefix: demo # . Path prefix into bucket where repository will be created.(optional). - storageSecretName: rook-restic-secret + prefix: demo # A prefix for the directory where the repository will be created (optional). + storageSecretName: rook-secret schedule: '@every 1m' volumeMounts: - mountPath: /source/data @@ -23,4 +23,4 @@ spec: retentionPolicies: - name: 'keep-last-5' keepLast: 5 - prune: true \ No newline at end of file + prune: true diff --git a/docs/examples/platforms/rook/rook-pvc.yaml b/docs/examples/platforms/rook/rook-pvc.yaml new file mode 100644 index 000000000..9a515898e --- /dev/null +++ b/docs/examples/platforms/rook/rook-pvc.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: stash-recovered + namespace: demo + labels: + app: stash-demo +spec: + storageClassName: rook-ceph-block + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 50Mi diff --git a/docs/examples/backends/rook/rook-pvc.yaml b/docs/examples/recovery/pvc.yaml similarity index 72% rename from docs/examples/backends/rook/rook-pvc.yaml rename to docs/examples/recovery/pvc.yaml index 8387b903f..a9d5088e3 100644 --- a/docs/examples/backends/rook/rook-pvc.yaml +++ b/docs/examples/recovery/pvc.yaml @@ -2,12 +2,13 @@ apiVersion: v1 kind: PersistentVolumeClaim metadata: name: stash-recovered + namespace: demo labels: app: stash-demo spec: - storageClassName: rook-block + storageClassName: standard accessModes: - ReadWriteOnce resources: requests: - storage: 2Gi \ No newline at end of file + storage: 50Mi \ No newline at end of file diff --git a/docs/examples/recovery/recovered-deployment.yaml b/docs/examples/recovery/recovered-deployment.yaml new file mode 100644 index 000000000..8c78b17f2 --- /dev/null +++ b/docs/examples/recovery/recovered-deployment.yaml @@ -0,0 +1,33 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: stash-demo + name: stash-demo + namespace: demo +spec: + replicas: 1 + selector: + matchLabels: + app: stash-demo + template: + metadata: + labels: + app: stash-demo + name: busybox + spec: + containers: + - args: + - sleep + - "3600" + image: busybox + imagePullPolicy: IfNotPresent + name: busybox + volumeMounts: + - mountPath: /source/data + name: source-data + restartPolicy: Always + volumes: + - name: source-data + persistentVolumeClaim: + claimName:
stash-recovered diff --git a/docs/examples/recovery/recovery-specific-snapshot.yaml b/docs/examples/recovery/recovery-specific-snapshot.yaml new file mode 100644 index 000000000..58d60d94a --- /dev/null +++ b/docs/examples/recovery/recovery-specific-snapshot.yaml @@ -0,0 +1,16 @@ +apiVersion: stash.appscode.com/v1alpha1 +kind: Recovery +metadata: + name: local-recovery-specific-snapshot + namespace: demo +spec: + repository: + name: deployment.stash-demo + namespace: demo + snapshot: deployment.stash-demo-baff6c47 + paths: + - /source/data + recoveredVolumes: + - mountPath: /source/data + persistentVolumeClaim: + claimName: stash-recovered diff --git a/docs/examples/tutorial/recovery-specific-snapshot.yaml b/docs/examples/recovery/recovery.yaml similarity index 53% rename from docs/examples/tutorial/recovery-specific-snapshot.yaml rename to docs/examples/recovery/recovery.yaml index b50e2cf8a..2719dbb85 100644 --- a/docs/examples/tutorial/recovery-specific-snapshot.yaml +++ b/docs/examples/recovery/recovery.yaml @@ -1,16 +1,15 @@ apiVersion: stash.appscode.com/v1alpha1 kind: Recovery metadata: - name: stash-demo - namespace: default + name: local-recovery + namespace: demo spec: repository: name: deployment.stash-demo - namespace: default - snapshot: deployment.stash-demo-d3050010 + namespace: demo paths: - /source/data recoveredVolumes: - mountPath: /source/data - hostPath: - path: /data/stash-test/restic-restored \ No newline at end of file + persistentVolumeClaim: + claimName: stash-recovered diff --git a/docs/examples/tutorial/secret.yaml b/docs/examples/tutorial/secret.yaml deleted file mode 100644 index 7ba8e30ae..000000000 --- a/docs/examples/tutorial/secret.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: v1 -data: - RESTIC_PASSWORD: Y2hhbmdlaXQ= -kind: Secret -metadata: - name: stash-demo - namespace: default -type: Opaque diff --git a/docs/guides/README.md b/docs/guides/README.md index 691fd041c..baf2d872b 100644 --- a/docs/guides/README.md +++ b/docs/guides/README.md @@ -26,5 +26,5 @@ Guides show you how to perform tasks with Stash. - Learn to use Stash with self-hosted TLS secured Minio Server [here](/docs/guides/minio_server.md). - Learn to use Stash in Rook Storage System [here](/docs/guides/rook.md) - See working examples for supported workload types [here](/docs/guides/workloads.md). -- Thinking about monitoring your backup operations? Stash works [out-of-the-box with Prometheus](/docs/guides/monitoring.md). +- Thinking about monitoring your backup operations? Stash works [out-of-the-box with Prometheus](/docs/guides/monitoring/overview.md). - Learn about how to configure [RBAC roles](/docs/guides/rbac.md). diff --git a/docs/guides/backends/_index.md b/docs/guides/backends/_index.md index c7cd9d8f9..597976ef0 100644 --- a/docs/guides/backends/_index.md +++ b/docs/guides/backends/_index.md @@ -5,6 +5,6 @@ menu: identifier: backend name: Supported Backends parent: guides - weight: 10 + weight: 30 menu_name: product_stash_0.7.0 ---- \ No newline at end of file +--- diff --git a/docs/guides/backends/local.md b/docs/guides/backends/local.md index 618b77404..2778217c6 100644 --- a/docs/guides/backends/local.md +++ b/docs/guides/backends/local.md @@ -61,7 +61,9 @@ Following parameters are available for `Local` backend. | `local.subPath` | `Optional`. Sub-path inside the referenced volume instead of its root. | | `local.VolumeSource` | `Required`. Any Kubernetes volume. Can be specified inlined. Example: `hostPath` | -Below, the YAML for Restic crd configured to use Local backend. 
+**Sample Restic for `hostPath` as Backend:** + +Below, the YAML for Restic crd configured to use `hostPath` as a Local backend. ```yaml apiVersion: stash.appscode.com/v1alpha1 @@ -92,14 +94,55 @@ spec: prune: true ``` -Now, create the Restic we have configured above for `local` backend, +Now, create the `Restic` we have configured above for `local` backend, + +```console +$ kubectl apply -f ./docs/examples/backends/local/local-restic-hostPath.yaml +restic "local-restic" created +``` + +**Sample Restic for `NFS` Server as Backend:** + +Below, the YAML for Restic crd configured to use an `NFS` server as a Local backend. + +```yaml +apiVersion: stash.appscode.com/v1alpha1 +kind: Restic +metadata: + name: local-restic + namespace: default +spec: + selector: + matchLabels: + app: stash-demo + fileGroups: + - path: /source/data + retentionPolicyName: 'keep-last-5' + backend: + local: + mountPath: /safe/data + nfs: + server: "nfs-service.storage.svc.cluster.local" # use your own NFS server address + path: "/" # this path is relative to "/exports" path of NFS server + storageSecretName: local-secret + schedule: '@every 1m' + volumeMounts: + - mountPath: /source/data + name: source-data + retentionPolicies: + - name: 'keep-last-5' + keepLast: 5 + prune: true +``` + +Now, create the `Restic` we have configured above for `local` backend, ```console -$ kubectl apply -f ./docs/examples/backends/local/local-restic.yaml +$ kubectl apply -f ./docs/examples/backends/local/local-restic-nfs.yaml restic "local-restic" created ``` ## Next Steps - Learn how to use Stash to backup a Kubernetes deployment from [here](/docs/guides/backup.md). -- Learn how to recover from backed up snapshot from [here](/docs/guides/restore.md). \ No newline at end of file +- To learn how to recover from a backed up snapshot, visit [here](/docs/guides/restore.md). diff --git a/docs/guides/backup.md b/docs/guides/backup.md index 15af7949e..e2444032f 100644 --- a/docs/guides/backup.md +++ b/docs/guides/backup.md @@ -14,29 +14,81 @@ section_menu_id: guides > New to Stash? Please start [here](/docs/concepts/README.md). -# Backup +# Backup Volumes using Stash -This tutorial will show you how to use Stash to backup a Kubernetes deployment. At first, you need to have a Kubernetes cluster, -and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, -you can create one by using [Minikube](https://github.com/kubernetes/minikube). Now, install Stash in your cluster following the steps [here](/docs/setup/install.md). +This tutorial will show you how to use Stash to back up a Kubernetes volume. Here, we are going to back up the `/source/data` folder of a busybox pod into an [NFS](https://kubernetes.io/docs/concepts/storage/volumes/#nfs) volume. The NFS volume is configured as a [Local](/docs/guides/backends/local.md) backend of Stash. -In this tutorial, we are going to backup the `/source/data` folder of a `busybox` pod into a local backend. First deploy the following `busybox` Deployment in your cluster. Here we are using a git repository as source volume for demonstration purpose. +## Before You Begin + +At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [Minikube](https://github.com/kubernetes/minikube). + +- Install `Stash` in your cluster following the steps [here](/docs/setup/install.md).
+ +- You should be familiar with the following Stash concepts: + + - [Restic](/docs/concepts/crds/restic.md) + - [Repository](/docs/concepts/crds/repository.md) + - [Snapshot](/docs/concepts/crds/snapshot.md) + +- You will need an NFS server to store backed up data. If you do not already have an NFS server running, deploy one following the tutorial from [here](https://github.com/appscode/third-party-tools/blob/master/storage/nfs/README.md). For this tutorial, we have deployed the NFS server in the `storage` namespace and it is accessible through the `nfs-service.storage.svc.cluster.local` DNS name. + +To keep things isolated, we are going to use a separate namespace called `demo` throughout this tutorial. ```console -$ kubectl apply -f ./docs/examples/tutorial/busybox.yaml -deployment "stash-demo" created +$ kubectl create ns demo +namespace/demo created ``` +>Note: YAML files used in this tutorial are stored in the [/docs/examples/backup](/docs/examples/backup) directory of the [appscode/stash](https://github.com/appscode/stash) repository. + +## Overview + +The following diagram shows how Stash takes backup of a Kubernetes volume. Open the image in a new tab to see the enlarged image. +
+[Figure: Stash Backup Flow]
+ +The backup process consists of the following steps: + +1. At first, a user creates a `Secret`. This secret holds the credentials to access the backend where backed up data will be stored. It also holds a password (`RESTIC_PASSWORD`) that will be used to encrypt the backed up data. +2. Then, the user creates a `Restic` crd which specifies the targeted workload for backup. It also specifies the backend information where the backed up data will be stored. +3. Stash operator watches for `Restic` crds. Once it finds a `Restic` crd, it identifies the targeted workloads that match the `Restic`'s selector. +4. Then, Stash operator injects a sidecar container named `stash` and mounts the target volume into it. +5. Finally, the `stash` sidecar container takes periodic backups of the volume to the specified backend. It also creates a `Repository` crd on the first backup, which represents the original repository in the backend in a Kubernetes-native way. + +## Backup + +In order to take a backup, we need some sample data. Stash has some sample data in the [appscode/stash-data](https://github.com/appscode/stash-data) repository. As the [gitRepo](https://kubernetes.io/docs/concepts/storage/volumes/#gitrepo) volume has been deprecated, we are not going to use this repository volume directly. Instead, we are going to create a [configMap](https://kubernetes.io/docs/concepts/storage/volumes/#configmap) from this data and use that ConfigMap as the data source. + +Let's create a ConfigMap from this sample data, + +```console +$ kubectl create configmap -n demo stash-sample-data \ + --from-literal=LICENSE="$(curl -fsSL https://raw.githubusercontent.com/appscode/stash-data/master/LICENSE)" \ + --from-literal=README.md="$(curl -fsSL https://raw.githubusercontent.com/appscode/stash-data/master/README.md)" +configmap/stash-sample-data created +``` + +**Deploy Workload:** + +Now, deploy the following Deployment. Here, we have mounted the ConfigMap `stash-sample-data` as the data source volume. + +Below, the YAML for the Deployment we are going to create. + ```yaml -apiVersion: apps/v1beta1 +apiVersion: apps/v1 kind: Deployment metadata: labels: app: stash-demo name: stash-demo - namespace: default + namespace: demo spec: replicas: 1 + selector: + matchLabels: + app: stash-demo template: metadata: labels: @@ -44,7 +96,7 @@ spec: name: busybox spec: containers: - - command: + - args: - sleep - "3600" image: busybox @@ -55,58 +107,87 @@ spec: name: source-data restartPolicy: Always volumes: - - gitRepo: - repository: https://github.com/appscode/stash-data.git - name: source-data + - name: source-data + configMap: + name: stash-sample-data ``` -Run the following command to confirm that `busybox` pods are running. +Let's create the deployment we have shown above, ```console -$ kubectl get pods -l app=stash-demo -NAME READY STATUS RESTARTS AGE -stash-demo-788ffcf9c6-6t6lj 1/1 Running 0 12s +$ kubectl apply -f ./docs/examples/backup/deployment.yaml +deployment.apps/stash-demo created ``` -Now, create a `Secret` that contains the key `RESTIC_PASSWORD`. This will be used as the password for your restic repository. +Now, wait for the deployment's pod to go into the `Running` state.
```console -$ kubectl create secret generic stash-demo --from-literal=RESTIC_PASSWORD=changeit -secret "stash-demo" created +$ kubectl get pod -n demo -l app=stash-demo +NAME READY STATUS RESTARTS AGE +stash-demo-7ccd56bf5d-4x27d 1/1 Running 0 21s ``` -You can check that the secret was created like this: +You can check that the `/source/data/` directory of this pod is populated with data from the `stash-sample-data` ConfigMap using this command, -```yaml -$ kubectl get secret stash-demo -o yaml +```console +$ kubectl exec -n demo stash-demo-7ccd56bf5d-4x27d -- ls -R /source/data +/source/data: +LICENSE +README.md +``` + +Now, we are ready to backup `/source/data` directory into an NFS backend. +**Create Secret:** + +At first, we need to create a storage secret. To configure this backend, the following secret keys are needed: + +| Key | Description | +| ----------------- | ---------------------------------------------------------- | +| `RESTIC_PASSWORD` | `Required`. Password used to encrypt snapshots by `restic` | + +Create the secret as below, + +```console +$ echo -n 'changeit' > RESTIC_PASSWORD +$ kubectl create secret generic -n demo local-secret \ + --from-file=./RESTIC_PASSWORD +secret/local-secret created +``` + +Verify that the secret has been created successfully, + +```console +$ kubectl get secret -n demo local-secret -o yaml +``` + +```yaml apiVersion: v1 data: RESTIC_PASSWORD: Y2hhbmdlaXQ= kind: Secret metadata: - creationTimestamp: 2017-12-04T05:24:22Z - name: stash-demo - namespace: default - resourceVersion: "22328" - selfLink: /api/v1/namespaces/default/secrets/stash-demo - uid: 62aa8ef8-d8b3-11e7-be92-0800277f19c0 + creationTimestamp: 2018-12-07T06:04:56Z + name: local-secret + namespace: demo + resourceVersion: "6049" + selfLink: /api/v1/namespaces/demo/secrets/local-secret + uid: 05a8d2a3-f9e6-11e8-8905-0800277ca39d type: Opaque ``` -Now, create a `Restic` CRD with selectors matching the labels of the `busybox` Deployment. +**Create Restic:** -```console -$ kubectl apply -f ./docs/examples/tutorial/restic.yaml -restic "stash-demo" created -``` +Now, we are going to create a `Restic` crd to back up `/source/data` directory of `stash-demo` deployment. This will create a repository in the directory of NFS server specified by `local.nfs.path` field and start taking periodic backup of `/source/data` directory. + +Below, the YAML for Restic crd we are going to create, ```yaml apiVersion: stash.appscode.com/v1alpha1 kind: Restic metadata: - name: stash-demo - namespace: default + name: local-restic + namespace: demo spec: selector: matchLabels: @@ -117,9 +198,10 @@ spec: backend: local: mountPath: /safe/data - hostPath: - path: /data/stash-test/restic-repo - storageSecretName: stash-demo + nfs: + server: "nfs-service.storage.svc.cluster.local" + path: "/" + storageSecretName: local-secret schedule: '@every 1m' volumeMounts: - mountPath: /source/data @@ -132,226 +214,261 @@ spec: Here, - - `spec.selector` is used to select workloads upon which this `Restic` configuration will be applied. `Restic` always selects workloads in the same Kubernetes namespace. In this tutorial, labels of `busybox` Deployment match this `Restic`'s selectors. If multiple `Restic` objects are matched to a given workload, Stash operator will error out and avoid adding sidecar container. + - `spec.selector` is used to select workloads upon which this `Restic` configuration will be applied. `Restic` always selects workloads in the same Kubernetes namespace. 
In this tutorial, labels of the `stash-demo` Deployment match this `Restic`'s selectors. If multiple `Restic` objects are matched to a given workload, Stash operator will error out and avoid adding the sidecar container. - `spec.retentionPolicies` defines an array of retention policies, which can be used in `fileGroups` using `retentionPolicyName`. - `spec.fileGroups` indicates an array of local paths that will be backed up using restic. For each path, users can also specify the retention policy for old snapshots using `retentionPolicyName`, which must be defined in `spec.retentionPolicies`. Here, we are backing up the `/source/data` folder and only keeping the last 5 snapshots. - - `spec.backend.local` indicates that restic will store the snapshots in a local path `/safe/data`. For the purpose of this tutorial, we are using an `hostPath` to store the snapshots. But any Kubernetes volume that can be mounted locally can be used as a backend (example, NFS, Ceph, etc). Stash can also store snapshots in cloud storage solutions like S3, GCS, Azure, etc. To use a remote backend, you need to configure the storage secret to include your cloud provider credentials and set one of `spec.backend.(s3|gcs|azure|swift|b2)`. Please visit [here](/docs/guides/backends.md) for more detailed examples. + - `spec.backend.local` indicates that restic will store the snapshots in a local path `/safe/data`. For the purpose of this tutorial, we are using an `NFS` server to store the snapshots. But any Kubernetes volume that can be mounted locally can be used as a backend (e.g. `hostPath`, `Ceph`, etc.). Stash can also store snapshots in cloud storage solutions like S3, GCS, Azure, etc. To use a remote backend, you need to configure the storage secret to include your cloud provider credentials and set one of `spec.backend.(s3|gcs|azure|swift|b2)`. Please visit [here](/docs/guides/backends/overview.md) for more detailed examples. - `spec.backend.storageSecretName` points to the Kubernetes secret created earlier in this tutorial. `Restic` always points to secrets in its own namespace. This secret is used to pass restic repository password and other cloud provider secrets to `restic` binary. - `spec.schedule` is a [cron expression](https://github.com/robfig/cron/blob/v2/doc.go#L26) that indicates that file groups will be backed up every 1 minute. - `spec.volumeMounts` refers to volumes to be mounted in `stash` sidecar to get access to fileGroup path `/source/data`. -Stash operator watches for `Restic` objects using Kubernetes api. Stash operator will notice that the `busybox` Deployment matches the selector for `stash-demo` Restic object. So, it will add a sidecar container named `stash` to `busybox` Deployment and restart the running `busybox` pods. Since a local backend is used in `stash-demo` Restic, sidecar container will mount the corresponding persistent volume. +Let's create the `Restic` we have shown above, ```console -$ kubectl get pods -l app=stash-demo -NAME READY STATUS RESTARTS AGE -stash-demo-788ffcf9c6-6t6lj 0/1 Terminating 0 3m -stash-demo-79554ff97b-wsdx2 2/2 Running 0 49s +$ kubectl apply -f ./docs/examples/backup/restic.yaml +restic.stash.appscode.com/local-restic created ``` -```yaml -$ kubectl get deployment stash-demo -o yaml +If everything goes well, Stash will inject a sidecar container into the `stash-demo` deployment to take periodic backups.
Let's check that the sidecar has been injected successfully, -apiVersion: extensions/v1beta1 -kind: Deployment +```console +$ kubectl get pod -n demo -l app=stash-demo +NAME READY STATUS RESTARTS AGE +stash-demo-7ffdb5d7fd-5x8l6 2/2 Running 0 37s +``` + +Look at the pod. It now has 2 containers. If you view the resource definition of this pod, you will see that there is a container named `stash` which is running the `backup` command. + +```console +$ kubectl get pod -n demo stash-demo-7ffdb5d7fd-5x8l6 -o yaml +``` + +```yaml +apiVersion: v1 +kind: Pod metadata: annotations: - deployment.kubernetes.io/revision: "2" - restic.appscode.com/last-applied-configuration: | - {"kind":"Restic","apiVersion":"stash.appscode.com/v1alpha1","metadata":{"name":"stash-demo","namespace":"default","selfLink":"/apis/stash.appscode.com/v1alpha1/namespaces/default/restics/stash-demo","uid":"d8768901-d8b9-11e7-be92-0800277f19c0","resourceVersion":"27379","creationTimestamp":"2017-12-04T06:10:37Z"},"spec":{"selector":{"matchLabels":{"app":"stash-demo"}},"fileGroups":[{"path":"/source/data","retentionPolicyName":"keep-last-5"}],"backend":{"storageSecretName":"stash-demo","local":{"volumeSource":{"hostPath":{"path":"/data/stash-test/restic-repo"}},"path":"/safe/data"}},"schedule":"@every 1m","volumeMounts":[{"name":"source-data","mountPath":"/source/data"}],"resources":{},"retentionPolicies":[{"name":"keep-last-5","keepLast":5,"prune":true}]},"status":{}} - restic.appscode.com/tag: canary - creationTimestamp: 2017-12-04T06:08:55Z - generation: 2 + restic.appscode.com/resource-hash: "7515193209300432018" + creationTimestamp: 2018-12-07T06:23:00Z + generateName: stash-demo-7ffdb5d7fd- labels: app: stash-demo - name: stash-demo - namespace: default - resourceVersion: "27401" - selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/stash-demo - uid: 9c2bf209-d8b9-11e7-be92-0800277f19c0 + pod-template-hash: 7ffdb5d7fd + name: stash-demo-7ffdb5d7fd-5x8l6 + namespace: demo + ownerReferences: + - apiVersion: apps/v1 + blockOwnerDeletion: true + controller: true + kind: ReplicaSet + name: stash-demo-7ffdb5d7fd + uid: 8bbc5b0e-f9e8-11e8-8905-0800277ca39d + resourceVersion: "7496" + selfLink: /api/v1/namespaces/demo/pods/stash-demo-7ffdb5d7fd-5x8l6 + uid: 8bc19dc8-f9e8-11e8-8905-0800277ca39d spec: - progressDeadlineSeconds: 600 - replicas: 1 - revisionHistoryLimit: 2 - selector: - matchLabels: - app: stash-demo - strategy: - rollingUpdate: - maxSurge: 25% - maxUnavailable: 25% - type: RollingUpdate - template: - metadata: - creationTimestamp: null - labels: - app: stash-demo - name: busybox - spec: - containers: - - command: - - sleep - - "3600" - image: busybox - imagePullPolicy: IfNotPresent - name: busybox - resources: {} - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - volumeMounts: - - mountPath: /source/data - name: source-data - - args: - - backup - - --restic-name=stash-demo - - --workload-kind=Deployment - - --workload-name=stash-demo - - --run-via-cron=true - - --v=3 - env: - - name: NODE_NAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: spec.nodeName - - name: POD_NAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: metadata.name - image: appscode/stash:0.7.0 - imagePullPolicy: IfNotPresent - name: stash - resources: {} - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - volumeMounts: - - mountPath: /tmp - name: stash-scratchdir - - mountPath: /etc/stash - name: stash-podinfo - - mountPath: /source/data - name: source-data - readOnly:
true - - mountPath: /safe/data - name: stash-local - dnsPolicy: ClusterFirst - restartPolicy: Always - schedulerName: default-scheduler - securityContext: {} - terminationGracePeriodSeconds: 30 - volumes: - - gitRepo: - repository: https://github.com/appscode/stash-data.git - name: source-data - - emptyDir: {} - name: stash-scratchdir - - downwardAPI: - defaultMode: 420 - items: - - fieldRef: - apiVersion: v1 - fieldPath: metadata.labels - path: labels - name: stash-podinfo - - hostPath: - path: /data/stash-test/restic-repo - type: "" - name: stash-local + containers: + - args: + - sleep + - "3600" + image: busybox + imagePullPolicy: IfNotPresent + name: busybox + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /source/data + name: source-data + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: default-token-6dqgm + readOnly: true + - args: + - backup + - --restic-name=local-restic + - --workload-kind=Deployment + - --workload-name=stash-demo + - --docker-registry=appscodeci + - --image-tag=e3 + - --run-via-cron=true + - --pushgateway-url=http://stash-operator.kube-system.svc:56789 + - --enable-status-subresource=true + - --use-kubeapiserver-fqdn-for-aks=true + - --enable-analytics=true + - --enable-rbac=true + - --logtostderr=true + - --alsologtostderr=false + - --v=3 + - --stderrthreshold=0 + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: APPSCODE_ANALYTICS_CLIENT_ID + value: 90b12fedfef2068a5f608219d5e7904a + image: appscodeci/stash:e3 + imagePullPolicy: IfNotPresent + name: stash + resources: {} + securityContext: + procMount: Default + runAsUser: 0 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /tmp + name: stash-scratchdir + - mountPath: /etc/stash + name: stash-podinfo + - mountPath: /source/data + name: source-data + readOnly: true + - mountPath: /safe/data + name: stash-local + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: default-token-6dqgm + readOnly: true + dnsPolicy: ClusterFirst + nodeName: minikube + priority: 0 + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 30 + tolerations: + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 300 + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 300 + volumes: + - configMap: + defaultMode: 420 + name: stash-sample-data + name: source-data + - emptyDir: {} + name: stash-scratchdir + - downwardAPI: + defaultMode: 420 + items: + - fieldRef: + apiVersion: v1 + fieldPath: metadata.labels + path: labels + name: stash-podinfo + - name: stash-local + nfs: + path: / + server: nfs-service.storage.svc.cluster.local + - name: default-token-6dqgm + secret: + defaultMode: 420 + secretName: default-token-6dqgm status: conditions: - - lastTransitionTime: 2017-12-04T06:10:37Z - lastUpdateTime: 2017-12-04T06:10:37Z - message: Deployment does not have minimum availability. - reason: MinimumReplicasUnavailable - status: "False" - type: Available - - lastTransitionTime: 2017-12-04T06:08:55Z - lastUpdateTime: 2017-12-04T06:10:37Z - message: ReplicaSet "stash-demo-79554ff97b" is progressing. 
- reason: ReplicaSetUpdated + lastProbeTime: null + lastTransitionTime: 2018-12-07T06:23:00Z status: "True" + type: Initialized + - lastProbeTime: null + lastTransitionTime: 2018-12-07T06:23:02Z status: "True" - type: Progressing - observedGeneration: 2 - replicas: 2 - unavailableReplicas: 2 - updatedReplicas: 1 + type: Ready + - lastProbeTime: null + lastTransitionTime: 2018-12-07T06:23:02Z + status: "True" + type: ContainersReady + - lastProbeTime: null + lastTransitionTime: 2018-12-07T06:23:00Z + status: "True" + type: PodScheduled + containerStatuses: + - containerID: docker://ba9282c73548f2c7e9e34313198c17814cfceaa60f2712547dfd8bcb40f8d4dc + image: busybox:latest + imageID: docker-pullable://busybox@sha256:2a03a6059f21e150ae84b0973863609494aad70f0a80eaeb64bddd8d92465812 + lastState: {} + name: busybox + ready: true + restartCount: 0 + state: + running: + startedAt: 2018-12-07T06:23:01Z + - containerID: docker://81afe30d602fa1a39d33bef894d7f4c67386d4c2a5c09afcfb8d1f10c6f63bf5 + image: appscodeci/stash:e3 + imageID: docker-pullable://appscodeci/stash@sha256:1e965663d00280a14cebb926f29d95547b746e6060c9aaaef649664f4600ffbe + lastState: {} + name: stash + ready: true + restartCount: 0 + state: + running: + startedAt: 2018-12-07T06:23:01Z + hostIP: 10.0.2.15 + phase: Running + podIP: 172.17.0.7 + qosClass: BestEffort + startTime: 2018-12-07T06:23:00Z ``` -Now, wait until the deployment is in running state. Once the pod with sidecar is in running state, it will create a `Repository` CRD for this deployment with name `deployment.stash-demo`. Check the `Repository` CRD object has been created successfully by, +**Verify Backup:** -```console -$ kubectl get repository -l workload-name=stash-demo -NAME AGE -deployment.stash-demo 3m -``` -`Restic` will take backup of the volume periodically with a 1-minute interval. You can verify that backup is taking successfully by, +Stash will create a `Repository` crd with the name `deployment.stash-demo` for the respective repository in the local backend at the first backup schedule. To verify, run the following command, -```console -$ kubectl get snapshots -l repository=deployment.stash-demo -NAME AGE -deployment.stash-demo-c1014ca6 10s +```console +$ kubectl get repository deployment.stash-demo -n demo NAME BACKUPCOUNT LASTSUCCESSFULBACKUP AGE deployment.stash-demo 4 23s 4m ``` -Here, `deployment.stash-demo-c1014ca6` represents the name of the successful backup [Snapshot](/docs/concepts/crds/snapshot.md) taken by Stash in `deployment.stash-demo` repository. +Here, the `BACKUPCOUNT` field indicates the number of backup snapshots taken in this repository. - You can also check the `status.backupCount` of `deployment.stash-demo` Repository CRD to see number of successful backup taken in this repository. +`Restic` will take backups of the volume periodically with a 1-minute interval.
You can verify that backup snapshots have been created successfully by running the following command: ```console -$ kubectl get repository deployment.stash-demo -o yaml +$ kubectl get snapshots -n demo -l repository=deployment.stash-demo +NAME AGE +deployment.stash-demo-9a6e6b78 3m18s +deployment.stash-demo-2da5b6bc 2m18s +deployment.stash-demo-0f89f60e 78s +deployment.stash-demo-f9c704e4 18s ``` -```yaml -apiVersion: stash.appscode.com/v1alpha1 -kind: Repository -metadata: - clusterName: "" - creationTimestamp: 2018-03-29T08:28:10Z - generation: 0 - labels: - restic: stash-demo - workload-kind: Deployment - workload-name: stash-demo - name: deployment.stash-demo - namespace: default - resourceVersion: "999" - selfLink: /apis/stash.appscode.com/v1alpha1/namespaces/default/repositories/deployment.stash-demo - uid: 1d814fdb-332b-11e8-94e6-08002792cb23 -spec: - backend: - local: - hostPath: - path: /data/stash-test/restic-repo - mountPath: /safe/data - storageSecretName: local-secret - backupPath: deployment/stash-demo -status: - backupCount: 1 - firstBackupTime: 2018-03-29T08:29:10Z - lastBackupDuration: 2.105757874s - lastBackupTime: 2018-03-29T08:29:10Z -``` +Here, we can see the last 4 successful backup [Snapshots](/docs/concepts/crds/snapshot.md) taken by Stash in the `deployment.stash-demo` repository. ## Disable Backup -To stop Restic from taking backup, you can do following things: -* Set `spec.paused: true` in Restic `yaml` and then apply the update. This means: +To stop Stash from taking backup, you can do the following things: + +- Set `spec.paused: true` in Restic `yaml` and then apply the update. +This means: - Paused Restic CRDs will not be applied to newly created workloads. - Stash sidecar containers will not be removed from existing workloads but the sidecar will stop taking backup. ```command -$ kubectl patch restic stash-demo --type="merge" --patch='{"spec": {"paused": true}}' -restic "stash-demo" patched +$ kubectl patch restic -n demo local-restic --type="merge" --patch='{"spec": {"paused": true}}' +restic.stash.appscode.com/local-restic patched ``` ```yaml apiVersion: stash.appscode.com/v1alpha1 kind: Restic metadata: - name: stash-demo - namespace: default + name: local-restic + namespace: demo spec: selector: matchLabels: @@ -362,9 +479,10 @@ spec: backend: local: mountPath: /safe/data - hostPath: - path: /data/stash-test/restic-repo - storageSecretName: stash-demo + nfs: + server: "nfs-service.storage.svc.cluster.local" + path: "/" + storageSecretName: local-secret schedule: '@every 1m' paused: true volumeMounts: @@ -376,29 +494,35 @@ spec: prune: true ``` -* Delete the Restic CRD. Stash operator will remove the sidecar container from all matching workloads. +- Delete the Restic CRD. Stash operator will remove the sidecar container from all matching workloads. -```commands -$ kubectl delete restic stash-demo -restic "stash-demo" deleted -``` -* Change the labels of a workload. Stash operator will remove sidecar container from that workload. This way you can selectively stop backup of a Deployment, ReplicaSet etc. + ```commands + $ kubectl delete restic -n demo local-restic + restic.stash.appscode.com/local-restic deleted + ``` + +- Change the labels of a workload. Stash operator will remove the sidecar container from that workload. This way you can selectively stop backup of a Deployment, ReplicaSet, etc., as sketched below.
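A minimal sketch of that label-change approach (assuming, as in this tutorial, that the Restic selects workloads by the `app: stash-demo` label; the replacement label value is arbitrary): ```console # Overwrite the matched label so the Restic selector no longer matches this Deployment; # the Stash operator should then remove the stash sidecar container from it. $ kubectl label -n demo deployment stash-demo app=stash-demo-paused --overwrite deployment.apps/stash-demo labeled ```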
### Resume Backup + You can resume Restic to backup by setting `spec.paused: false` in Restic `yaml` and applying the update or you can patch Restic using, + ```command -$ kubectl patch restic stash-demo --type="merge" --patch='{"spec": {"paused": false}}' +$ kubectl patch restic -n demo local-restic --type="merge" --patch='{"spec": {"paused": false}}' +restic.stash.appscode.com/local-restic patched ``` - ## Cleaning up To cleanup the Kubernetes resources created by this tutorial, run: + ```console -$ kubectl delete deployment stash-demo -$ kubectl delete secret stash-demo -$ kubectl delete restic stash-demo -$ kubectl delete repository deployment.stash-demo +$ kubectl delete -n demo deployment stash-demo +$ kubectl delete -n demo secret local-secret +$ kubectl delete -n demo restic local-restic +$ kubectl delete -n demo repository deployment.stash-demo + +$ kubectl delete ns demo ``` If you would like to uninstall Stash operator, please follow the steps [here](/docs/setup/uninstall.md). @@ -411,6 +535,6 @@ If you would like to uninstall Stash operator, please follow the steps [here](/d - To run backup in offline mode see [here](/docs/guides/offline_backup.md) - See the list of supported backends and how to configure them [here](/docs/guides/backends/overview.md). - See working examples for supported workload types [here](/docs/guides/workloads.md). -- Thinking about monitoring your backup operations? Stash works [out-of-the-box with Prometheus](/docs/guides/monitoring.md). +- Thinking about monitoring your backup operations? Stash works [out-of-the-box with Prometheus](/docs/guides/monitoring/overview.md). - Learn about how to configure [RBAC roles](/docs/guides/rbac.md). - Want to hack on Stash? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/minio_server.md b/docs/guides/minio_server.md deleted file mode 100644 index dd9cdbb3c..000000000 --- a/docs/guides/minio_server.md +++ /dev/null @@ -1,549 +0,0 @@ ---- -title: Minio | Stash -description: Using Stash with TLS secured Minio Server -menu: - product_stash_0.7.0: - identifier: minio-stash - name: Backup to Minio - parent: guides - weight: 45 -product_name: stash -menu_name: product_stash_0.7.0 -section_menu_id: guides ---- - -> New to Stash? Please start [here](/docs/concepts/README.md). - -# Using Stash with TLS secured Minio Server - -Minio is an open source object storage server compatible with Amazon S3 cloud storage service. You can deploy Minio server in docker container locally, in a Kubernetes cluster, Microsoft Azure, GCP etc. You can find a guide for Minio server [here](https://docs.minio.io/). This tutorial will show you how to use [Stash](/docs/concepts/what-is-stash/overview.md) to backup a Kubernetes `Deployment` in a TLS secure [Minio](https://docs.minio.io/) Server. It will also show you how to recover this backed up data. - -## Before You Begin - -At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [Minikube](https://github.com/kubernetes/minikube). Now, install `Stash` in your cluster following the steps [here](/docs/setup/install.md). 
- -You should have understanding the following Stash terms: - -- [Restic](/docs/concepts/crds/restic.md) -- [Repository](/docs/concepts/crds/repository.md) -- [Recovery](/docs/concepts/crds/recovery.md) -- [Snapshot](/docs/concepts/crds/snapshot.md) - -Then, you will need a TLS secure [Minio](https://docs.minio.io/) server to store backed up data. You can deploy a TLS secure Minio server in your cluster by following this [official guide](https://github.com/minio/minio/tree/master/docs/tls/kubernetes) or these steps below: - -### Create self-signed SSl certificate - -A Certificate is used to verify the identity of server or client. Usually, a certificate issued by trusted third party is used to verify identity. We can also use a self-signed certificate. In this tutorial, we will use a self-signed certificate to verify the identity of Minio server. - -You can generate self-signed certificate easily with our [onessl](https://github.com/kubepack/onessl) tool. - -Here is an example how we can generate a self-signed certificate using `onessl` tool. - -First install onessl by, - -```console -$ curl -fsSL -o onessl curl -fsSL -o onessl https://github.com/kubepack/onessl/releases/download/0.3.0/onessl-linux-amd64 \ - && chmod +x onessl \ - && sudo mv onessl /usr/local/bin/ -``` - -Now generate CA's root certificate, - -```console -$ onessl create ca-cert -``` - -This will create two files `ca.crt` and `ca.key`. - -Now, generate certificate for server, - -```console -$ onessl create server-cert --domains minio-service.default.svc -``` - -This will generate two files `server.crt` and `server.key`. - -Minio server will start TLS secure service if it find `public.crt` and `private.key` files in `/root/.minio/certs/` directory of the docker container. The `public.crt` file is concatenation of `server.crt` and `ca.crt` where `private.key` file is only the `server.key` file. - -Let's generate `public.crt` and `private.key` file, - -```console -$ cat {server.crt,ca.crt} > public.crt -$ cat server.key > private.key -``` - -Be sure about the order of `server.crt` and `ca.crt`. The order will be `server's certificate`, any `intermediate certificates` and finally the `CA's root certificate`. The intermediate certificates are required if the server certificate is created using a certificate which is not the root certificate but signed by the root certificate. [onessl](https://github.com/appscode/onessl) use root certificate by default to generate server certificate if no certificate path is specified by `--cert-dir` flag. Hence, the intermediate certificates are not required here. - -We will create a Kubernetes secret with this `public.crt` and `private.key` files and mount the secret to `/root/.minio/certs/` directory of minio container. - -> Minio server will not trust a self-signed certificate by default. We can mark the self-signed certificate as a trusted certificate by adding `public.crt` file in `/root/.minio/certs/CAs` directory. 
- -### Create Secret - -Now, let's create a secret from `public.crt` and `private.key` files, - -```console -$ kubectl create secret generic minio-server-secret \ - --from-file=./public.crt \ - --from-file=./private.key -secret "minio-server-secret" created - -$ kubectl label secret minio-server-secret app=minio -n default -``` - -Now, verify that the secret is created successfully - -```console -$ kubectl get secret minio-server-secret -o yaml -``` - -If secret is created successfully then you will see output like this, - -```yaml -apiVersion: v1 -data: - private.key: - public.crt: -kind: Secret -metadata: - creationTimestamp: 2018-01-26T12:02:09Z - name: minio-server-secret - namespace: default - resourceVersion: "40701" - selfLink: /api/v1/namespaces/default/secrets/minio-server-secret - uid: bc57add7-0290-11e8-9a26-080027b344c9 - labels: - app: minio -type: Opaque -``` - -### Create Persistent Volume Claim - -Minio server needs a Persistent Volume to store data. Let's create a `Persistent Volume Claim` to request Persistent Volume from the cluster. - -```console -$ kubectl apply -f ./docs/examples/backends/minio/minio-pvc.yaml -persistentvolumeclaim "minio-pvc" created -``` - -YAML for PersistentVolumeClaim, - -```yaml -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - # This name uniquely identifies the PVC. Will be used in minio deployment. - name: minio-pvc - labels: - app: minio -spec: - storageClassName: standard - accessModes: - - ReadWriteOnce - resources: - # This is the request for storage. Should be available in the cluster. - requests: - storage: 2Gi -``` - -### Create Deployment - -Minio deployment creates pod where the Minio server will run. Let's create a deployment for minio server by, - -```console -$ kubectl apply -f ./docs/examples/backends/minio/minio-deployment.yaml -deployment "minio-deployment" created -``` - -YAML for minio-deployment - -```yaml -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - # This name uniquely identifies the Deployment - name: minio-deployment - labels: - app: minio -spec: - strategy: - type: Recreate # If pod fail, we want to recreate pod rather than restarting it. - template: - metadata: - labels: - # Label is used as a selector in the service. - app: minio-server - spec: - volumes: - # Refer to the PVC have created earlier - - name: storage - persistentVolumeClaim: - # Name of the PVC created earlier - claimName: minio-pvc - - name: minio-certs - secret: - secretName: minio-server-secret - items: - - key: public.crt - path: public.crt - - key: private.key - path: private.key - - key: public.crt - path: CAs/public.crt # mark self signed certificate as trusted - containers: - - name: minio - # Pulls the default Minio image from Docker Hub - image: minio/minio - args: - - server - - --address - - ":443" - - /storage - env: - # Minio access key and secret key - - name: MINIO_ACCESS_KEY - value: "" - - name: MINIO_SECRET_KEY - value: "" - ports: - - containerPort: 443 - # This ensures containers are allocated on separate hosts. Remove hostPort to allow multiple Minio containers on one host - hostPort: 443 - # Mount the volumes into the pod - volumeMounts: - - name: storage # must match the volume name, above - mountPath: "/storage" - - name: minio-certs - mountPath: "/root/.minio/certs" -``` - -### Create Service - -Now, the final touch. Minio server is running in the cluster. Let's create a service so that other pods can access the server. 
- -```console -$ kubectl apply -f ./docs/examples/backends/minio/minio-service.yaml -service "minio-service" created -``` - -YAML for minio-service - -```yaml -apiVersion: v1 -kind: Service -metadata: - name: minio-service - labels: - app: minio -spec: - type: LoadBalancer - ports: - - port: 443 - targetPort: 443 - protocol: TCP - selector: - app: minio-server # must match with the label used in the deployment -``` - -Verify that the service is created successfully, - -```console -$ kubectl get service minio-service -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -minio-service LoadBalancer 10.106.121.137 443:30722/TCP 49s -``` - -## Overview - -In this tutorial, we are going to backup the `/source/data` folder of a `busybox` pod into a `Minio` backend. Then, we will recover the data to another `HostPath` volume form backed up snapshots. - -## Backup - -First, deploy the following `busybox` Deployment in your cluster. Here we are using a git repository as a source volume for demonstration purpose. - -```console -$ kubectl apply -f ./docs/examples/tutorial/busybox.yaml -deployment "stash-demo" created -``` - -YAML for `busybox` deplyment, - -```yaml -apiVersion: apps/v1beta1 -kind: Deployment -metadata: - labels: - app: stash-demo - name: stash-demo - namespace: default -spec: - replicas: 1 - template: - metadata: - labels: - app: stash-demo - name: busybox - spec: - containers: - - command: - - sleep - - "3600" - image: busybox - imagePullPolicy: IfNotPresent - name: busybox - volumeMounts: - - mountPath: /source/data - name: source-data - restartPolicy: Always - volumes: - - gitRepo: - repository: https://github.com/appscode/stash-data.git - name: source-data -``` - -Run the following command to confirm that `busybox` pods are running. - -```console -$ kubectl get pods -l app=stash-demo -NAME READY STATUS RESTARTS AGE -stash-demo-69d9dd8d76-bz2bz 1/1 Running 0 12s -``` - -You can check that the `/source/data/` directory of pod is populated with data from the volume source using this command, - -```console -$ kubectl exec stash-demo-69d9dd8d76-bz2bz -- ls -R /source/data/ -/source/data/: -stash-data - -/source/data/stash-data: -Eureka-by-EdgarAllanPoe.txt -LICENSE -README.md -``` - -Now, let's backup the directory into a Minio server. - -At first, we need to create a secret for `Restic` crd. To configure this backend, following secret keys are needed: - -| Key | Description | -|-------------------------|-------------------------------------------------------------------------| -| `RESTIC_PASSWORD` | `Required`. Password used to encrypt snapshots by `restic` | -| `AWS_ACCESS_KEY_ID` | `Required`. Minio access key ID | -| `AWS_SECRET_ACCESS_KEY` | `Required`. Minio secret access key | -| `CA_CERT_DATA` |`Required`. 
Root certificate by which Minio server certificate is signed | - -Create secret for `Restic` crd, - -```console -$ echo -n 'changeit' > RESTIC_PASSWORD -$ echo -n '' > AWS_ACCESS_KEY_ID -$ echo -n '' > AWS_SECRET_ACCESS_KEY -$ cat ./directory/of/root/certificate/ca.crt > CA_CERT_DATA -$ kubectl create secret generic minio-restic-secret \ - --from-file=./RESTIC_PASSWORD \ - --from-file=./AWS_ACCESS_KEY_ID \ - --from-file=./AWS_SECRET_ACCESS_KEY \ - --from-file=./CA_CERT_DATA -secret "minio-restic-secret" created -``` - -Verify that the secret has been created successfully, - -```console -$ kubectl get secret minio-restic-secret -o yaml -``` - -```yaml -apiVersion: v1 -data: - AWS_ACCESS_KEY_ID: PGVtcnV6Pg== - AWS_SECRET_ACCESS_KEY: PDEyMzQ1Njc4OTA+ - CA_CERT_DATA: - RESTIC_PASSWORD: ZW1ydXo= -kind: Secret -metadata: - creationTimestamp: 2018-01-29T11:20:35Z - name: minio-restic-secret - namespace: default - resourceVersion: "7773" - selfLink: /api/v1/namespaces/default/secrets/minio-restic-secret - uid: 6d70a2c1-04e6-11e8-b4cd-0800279de528 -type: Opaque -``` - -Now, we can create `Restic` crd. This will create a repository in Minio server and start taking periodic backup of `/source/data/` folder. - -```console -$ kubectl apply -f ./docs/examples/backends/minio/minio-restic.yaml -restic "minio-restic" created -``` - -YAML of `Restic` crd for Minio backend, - -```yaml -apiVersion: stash.appscode.com/v1alpha1 -kind: Restic -metadata: - name: minio-restic - namespace: default -spec: - selector: - matchLabels: - app: stash-demo # Must match with the label of busybox pod we have created before. - fileGroups: - - path: /source/data - retentionPolicyName: 'keep-last-5' - backend: - s3: - endpoint: 'https://minio-service.default.svc' # Use your own Minio server address. - bucket: stash-qa # Give a name of the bucket where you want to backup. - prefix: demo # . Path prefix into bucket where repository will be created.(optional). - storageSecretName: minio-restic-secret - schedule: '@every 1m' - volumeMounts: - - mountPath: /source/data - name: source-data - retentionPolicies: - - name: 'keep-last-5' - keepLast: 5 - prune: true -``` - -If everything goes well, a `Repository` crd with name `deployment.stash-demo` will be created for the respective repository in Minio backend. Verify that, `Repository` is created successfully using this command, - -```console -$ kubectl get repository deployment.stash-demo -NAME AGE -deployment.stash-demo 1m -``` - -`Restic` will take backup of the volume periodically with a 1-minute interval. You can verify that backup is taking successfully by, - -```console -$ kubectl get snapshots -l repository=deployment.stash-demo -NAME AGE -deployment.stash-demo-c1014ca6 10s -``` - -Here, `deployment.stash-demo-c1014ca6` represents the name of the successful backup [Snapshot](/docs/concepts/crds/snapshot.md) taken by Stash in `deployment.stash-demo` repository. - -## Recovery - -Now, it is time to recover the backed up data. At first, delete `Restic` crd so that it does not lock the restic repository while we are trying to recover from it. - -```console -$ kubectl delete restic minio-restic -restic "minio-restic" deleted -``` - -Now, create a `Recovery` crd. 
- -```console -$ kubectl apply -f ./docs/examples/backends/minio/minio-recovery.yaml -recovery "minio-recovery" created -``` - -YAML for `Recovery` crd - -```yaml -apiVersion: stash.appscode.com/v1alpha1 -kind: Recovery -metadata: - name: minio-recovery - namespace: default -spec: - repository: - name: deployment.stash-demo - namespace: default - paths: - - /source/data - recoveredVolumes: - - mountPath: /source/data # where the volume will be mounted - name: stash-recovered-volume - hostPath: # volume source, where the recovered data will be stored. - path: /data/stash-recovered/ # directory in volume source where recovered data will be stored -``` - -Wait until `Recovery` job completed its task. To verify that recovery completed successfully run, - -```console -$ kubectl get recovery minio-recovery -o yaml -``` - -```yaml -apiVersion: stash.appscode.com/v1alpha1 -kind: Recovery -metadata: - annotations: - kubectl.kubernetes.io/last-applied-configuration: | - {"apiVersion":"stash.appscode.com/v1alpha1","kind":"Recovery","metadata":{"name":"minio-recovery","namespace":"default"},"spec":{"repository":"deployment.stash-demo","paths":["/source/data"],"recoveredVolumes":[{"mountPath":"/source/data","name":"stash-recovered-volume","hostPath":{"path":"/data/stash-recovered/"}}]}} - clusterName: "" - creationTimestamp: 2018-01-30T06:54:18Z - generation: 0 - name: minio-recovery - namespace: default - resourceVersion: "10060" - selfLink: /apis/stash.appscode.com/v1alpha1/namespaces/default/recoveries/minio-recovery - uid: 64c12ff7-058a-11e8-9976-08002750604b -spec: - repository: - name: deployment.stash-demo - namespace: default - paths: - - /source/data - recoveredVolumes: - - hostPath: - path: /data/stash-recovered/ - mountPath: /source/data - name: stash-recovered-volume -status: - phase: Succeeded -``` - -`status.phase: Succeeded` indicates that the recovery was successful. Now, we can check `/data/stash-recovered/` directory of `HostPath` to see the recovered data. If you are using `minikube` for cluster then you can check by, - -```console -$ minikube ssh - _ _ - _ _ ( ) ( ) - ___ ___ (_) ___ (_)| |/') _ _ | |_ __ -/' _ ` _ `\| |/' _ `\| || , < ( ) ( )| '_`\ /'__`\ -| ( ) ( ) || || ( ) || || |\`\ | (_) || |_) )( ___/ -(_) (_) (_)(_)(_) (_)(_)(_) (_)`\___/'(_,__/'`\____) - -$ sudo su -$ cd / -$ ls -R /data/stash-recovered/ -/data/stash-recovered/: -data - -/data/stash-recovered/data: -stash-data - -/data/stash-recovered/data/stash-data: -Eureka-by-EdgarAllanPoe.txt LICENSE README.md -``` - -You can mount this recovered volume into any pod. 
- -## Cleanup - -To cleanup the Kubernetes resources created by this tutorial, run: - -```console -$ kubectl delete deployment stash-demo -$ kubectl delete restic minio-restic -$ kubectl delete recovery minio-recovery -$ kubectl delete secret minio-restic-secret -$ kubectl delete repository deployment.stash-demo -``` - -To cleanup the minio server, run: - -```console -$ kubectl delete deployment minio-deployment -$ kubectl delete service minio-service -$ kubectl delete pvc minio-pvc -$ kubectl delete secret minio-server-secret -``` \ No newline at end of file diff --git a/docs/guides/monitoring/_index.md b/docs/guides/monitoring/_index.md index 505275cd7..eba90d87b 100644 --- a/docs/guides/monitoring/_index.md +++ b/docs/guides/monitoring/_index.md @@ -5,6 +5,6 @@ menu: identifier: monitoring name: Monitoring parent: guides - weight: 60 + weight: 40 menu_name: product_stash_0.7.0 ---- \ No newline at end of file +--- diff --git a/docs/guides/monitoring/builtin.md b/docs/guides/monitoring/builtin.md index 907635f6e..c1d1dc608 100644 --- a/docs/guides/monitoring/builtin.md +++ b/docs/guides/monitoring/builtin.md @@ -19,7 +19,7 @@ This tutorial will show you how to configure builtin [Prometheus](https://github At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [Minikube](https://github.com/kubernetes/minikube). -To keep Prometheus resources isolated, we will use a separate namespace to deploy Prometheus server. +To keep Prometheus resources isolated, we are going to use a separate namespace to deploy the Prometheus server. ```console $ kubectl create ns demo @@ -104,7 +104,7 @@ Now, we are ready to configure our Prometheus server to scrap those metrics. ## Deploy Prometheus Server -We have deployed Stash in `kube-system` namespace. Stash exports operator metrics via TLS secured `api` endpoint. So, Prometheus server need to provide certificate while scrapping metrics from this endpoint. Stash has created a secret named `stash-apiserver-certs` with this certificate in `demo` namespace as we have specified that we will deploy Prometheus in that namespace through `--prometheus-namespace` flag. We have to mount this secret in Prometheus deployment. +We have deployed Stash in the `kube-system` namespace. Stash exports operator metrics via a TLS secured `api` endpoint, so the Prometheus server needs to provide a certificate while scraping metrics from this endpoint. Stash has created a secret named `stash-apiserver-certs` with this certificate in the `demo` namespace, as we have specified through the `--prometheus-namespace` flag that we are going to deploy Prometheus in that namespace. We have to mount this secret in the Prometheus deployment. Let's check `stash-apiserver-cert` certificate has been created in `demo` namespace. @@ -308,7 +308,7 @@ deployment.apps/stash-prometheus-server created ### Verify Monitoring Metrics -Prometheus server is running on port `9090`. We will use [port forwarding](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/) to access Prometheus dashboard. Run following command on a separate terminal, +Prometheus server is running on port `9090`. We are going to use [port forwarding](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/) to access the Prometheus dashboard.
Run the following command in a separate terminal, ```console $ kubectl port-forward -n demo stash-prometheus-server-9ddbf79b6-8l6hk 9090 diff --git a/docs/guides/monitoring/coreos.md b/docs/guides/monitoring/coreos.md index b0f2bf8cb..58e8c436c 100644 --- a/docs/guides/monitoring/coreos.md +++ b/docs/guides/monitoring/coreos.md @@ -19,7 +19,7 @@ CoreOS [prometheus-operator](https://github.com/coreos/prometheus-operator) prov - At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [Minikube](https://github.com/kubernetes/minikube). -- To keep Prometheus resources isolated, we will use a separate namespace to deploy Prometheus operator and respective resources. +- To keep Prometheus resources isolated, we are going to use a separate namespace to deploy the Prometheus operator and its respective resources. ```console $ kubectl create ns demo @@ -84,7 +84,7 @@ spec: Here, we have two endpoints at `spec.endpoints` field. One is `pushgateway` that exports backup and recovery metrics and another is `api` which exports operator metrics. -Stash exports operator metrics via TLS secured `api` endpoint. So, Prometheus server need to provide certificate while scrapping metrics from this endpoint. Stash has created a secret named `stash-apiserver-certs` with this certificate in `demo` namespace as we have specified that we will deploy Prometheus in that namespace through `--prometheus-namespace` flag. We have to specify this secret in Prometheus crd through `spec.secrets` field. Prometheus operator will mount this secret at `/etc/prometheus/secrets/stash-apiserver-cert` directory of respective Prometheus pod. So, we need to configure `tlsConfig` field to use that certificate. Here, `caFile` indicates the certificate to use and `serverName` is used to verify hostname. In our case, the certificate is valid for hostname `server` and `stash-operator.kube-system.svc`. +Stash exports operator metrics via a TLS secured `api` endpoint, so the Prometheus server needs to provide a certificate while scraping metrics from this endpoint. Stash has created a secret named `stash-apiserver-certs` with this certificate in the `demo` namespace, as we have specified through the `--prometheus-namespace` flag that we are going to deploy Prometheus in that namespace. We have to specify this secret in the Prometheus crd through the `spec.secrets` field. Prometheus operator will mount this secret at the `/etc/prometheus/secrets/stash-apiserver-cert` directory of the respective Prometheus pod. So, we need to configure the `tlsConfig` field to use that certificate. Here, `caFile` indicates the certificate to use and `serverName` is used to verify the hostname. In our case, the certificate is valid for the hostnames `server` and `stash-operator.kube-system.svc`. Let's check secret `stash-apiserver-cert` has been created in demo namespace. @@ -160,7 +160,7 @@ Now, we are ready to access Prometheus dashboard. ### Verify Monitoring Metrics -Prometheus server is running on port `9090`. We will use [port forwarding](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/) to access Prometheus dashboard. Run following command on a separate terminal, +Prometheus server is running on port `9090`. We are going to use [port forwarding](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/) to access the Prometheus dashboard.
Run the following command in a separate terminal, ```console $ kubectl port-forward -n demo prometheus-prometheus-0 9090 diff --git a/docs/guides/monitoring/grafana.md b/docs/guides/monitoring/grafana.md index 91b5e5520..da329f94c 100644 --- a/docs/guides/monitoring/grafana.md +++ b/docs/guides/monitoring/grafana.md @@ -25,7 +25,7 @@ Grafana provides an elegant graphical user interface to visualize data. You can ## Add Prometheus Data Source -We have to add our Prometheus server `prometheus-prometheus-0` as data source of grafana. We will use a `ClusterIP` service to connect Prometheus server with grafana. Let's create a service to select Prometheus server `prometheus-prometheus-0`, +We have to add our Prometheus server `prometheus-prometheus-0` as a data source of Grafana. We are going to use a `ClusterIP` service to connect the Prometheus server with Grafana. Let's create a service to select Prometheus server `prometheus-prometheus-0`, ```console $ kubectl apply -f https://raw.githubusercontent.com/appscode/stash/0.7.0/docs/examples/monitoring/coreos/prometheus-service.yaml diff --git a/docs/guides/offline_backup.md b/docs/guides/offline_backup.md index f29b9d109..77b4e8932 100644 --- a/docs/guides/offline_backup.md +++ b/docs/guides/offline_backup.md @@ -16,22 +16,87 @@ section_menu_id: guides # Offline Backup -This tutorial will show you how to backup a Kubernetes deployment using Stash in offline mode. By default, stash takes backup in [online](/docs/guides/backup.md) mode where sidecar container is added to take periodic backups and check backups. But sometimes you need to ensure that source data is not being modified while taking the backup, that means running backup while keeping workload pod stopped. In such case, you can run the backup in offline mode. To do this you need to specify `spec.type: offline` in `Restic` CRD. +This tutorial will show you how to backup a Kubernetes deployment using Stash in offline mode. By default, Stash takes backup in [online](/docs/guides/backup.md) mode, where a sidecar container is added to take periodic backups and to check backups. But sometimes you need to ensure that the source data is not being modified while taking the backup, which means running the backup while the workload pod is stopped. In such cases, you can run the backup in offline mode. To do this, you need to specify `spec.type: offline` in the `Restic` crd. -At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [Minikube](https://github.com/kubernetes/minikube). Now, install Stash in your cluster following the steps [here](/docs/setup/install.md). +## Before You Begin -In this tutorial, we are going to backup the `/source/data` folder of a `busybox` pod into a local backend. First, deploy the following `busybox` Deployment in your cluster. Here we are using a git repository as a source volume for demonstration purpose. +At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [Minikube](https://github.com/kubernetes/minikube). + +- Install `Stash` in your cluster following the steps [here](/docs/setup/install.md).
+ +- You should be familiar with the following Stash concepts: + - [Restic](/docs/concepts/crds/restic.md) + - [Repository](/docs/concepts/crds/repository.md) + - [Snapshot](/docs/concepts/crds/snapshot.md) + +- You will need an NFS server to store backed up data. If you do not already have an NFS server running, deploy one following the tutorial from [here](https://github.com/appscode/third-party-tools/blob/master/storage/nfs/README.md). For this tutorial, we have deployed an NFS server in the `storage` namespace and it is accessible through the `nfs-service.storage.svc.cluster.local` DNS name. + +To keep things isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```console +$ kubectl create ns demo +namespace/demo created +``` + +>Note: YAML files used in this tutorial are stored in [/docs/examples/backup](/docs/examples/backup) directory of [appscode/stash](https://github.com/appscode/stash) repository. + +## Overview + +The following diagram shows how Stash takes offline backup of a Kubernetes volume. Open the image in a new tab to see the enlarged image. + +

+  Stash Offline Backup Flow +

+ + +The offline backup process consists of the following steps: + +1. At first, a user creates a `Secret`. This secret holds the credentials to access the backend where backed up data will be stored. It also holds a password (`RESTIC_PASSWORD`) that will be used to encrypt the backed up data. +2. Then, the user creates a `Restic` crd which specifies the targeted workload for backup. It also specifies the backend information where the backed up data will be stored. +3. Stash operator watches for `Restic` crds. Once it sees a `Restic` crd, it identifies the targeted workload that matches the selector of this `Restic`. +4. Then, Stash operator injects an [init-container](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) named `stash` and mounts the target volume in it. +5. Stash operator creates a [CronJob](https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/) with the name `stash-scaledown-cron-{restic-name}`. +6. The `CronJob` restarts the workload at the scheduled interval. +7. Finally, the `stash` init-container takes backup of the volume to the specified backend when the pod restarts. It also creates a `Repository` crd during the first backup, which represents the backend in a Kubernetes native way. + +The `CronJob` restarts workloads according to the following rules: + +1. If the workload is a [StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) or a [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/), it will delete all pods of the workload. The workload will automatically re-create the pods, and each pod will take backup with its `init-container`. +2. If the workload is a [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/), [ReplicaSet](https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/) or [ReplicationController](https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/), it will scale down the workload to 0 replicas. When all pods are terminated, it will scale up the workload to 1 replica. This single replica will take backup with its `init-container`. When the backup is complete, the `init-container` will scale up the workload to the original number of replicas. The rest of the replicas will not take backup even though they have the `init-container`. + +## Backup + +In order to take backup, we need some sample data. Stash has some sample data in [appscode/stash-data](https://github.com/appscode/stash-data) repository. As [gitRepo](https://kubernetes.io/docs/concepts/storage/volumes/#gitrepo) volume has been deprecated, we are not going to use this repository as volume directly. Instead, we are going to create a [configMap](https://kubernetes.io/docs/concepts/storage/volumes/#configmap) from the stash-data repository and use that ConfigMap as data source. + +Let's create a ConfigMap from these sample data, + +```console +$ kubectl create configmap -n demo stash-sample-data \ + --from-literal=LICENSE="$(curl -fsSL https://raw.githubusercontent.com/appscode/stash-data/master/LICENSE)" \ + --from-literal=README.md="$(curl -fsSL https://raw.githubusercontent.com/appscode/stash-data/master/README.md)" +configmap/stash-sample-data created +``` + +Here, we are going to backup the `/source/data` folder of a `busybox` pod into an [NFS](https://kubernetes.io/docs/concepts/storage/volumes/#nfs) volume. NFS volume is a type of [local](/docs/guides/backends/local.md) backend for Stash. + +**Deploy Workload:** + +Now, deploy the following Deployment.
Here, we have mounted the ConfigMap `stash-sample-data` as the data source volume. + +Below, the YAML for the Deployment we are going to create. ```yaml -apiVersion: apps/v1beta1 +apiVersion: apps/v1 kind: Deployment metadata: labels: app: stash-demo name: stash-demo - namespace: default + namespace: demo spec: replicas: 1 + selector: + matchLabels: + app: stash-demo template: metadata: labels: @@ -39,7 +104,7 @@ spec: name: busybox spec: containers: - - command: + - args: - sleep - "3600" image: busybox @@ -50,63 +115,87 @@ spec: name: source-data restartPolicy: Always volumes: - - gitRepo: - repository: https://github.com/appscode/stash-data.git - name: source-data + - name: source-data + configMap: + name: stash-sample-data ``` +Let's create the deployment we have shown above, + ```console -$ kubectl apply -f ./docs/examples/tutorial/busybox.yaml -deployment "stash-demo" created +$ kubectl apply -f ./docs/examples/backup/deployment.yaml +deployment.apps/stash-demo created ``` -Run the following command to confirm that `busybox` pods are running. +Now, wait for the deployment's pod to go into the `Running` state. ```console -$ kubectl get pods -l app=stash-demo -NAME READY STATUS RESTARTS AGE -stash-demo-788ffcf9c6-p5kxc 1/1 Running 0 12s +$ kubectl get pod -n demo -l app=stash-demo +NAME READY STATUS RESTARTS AGE +stash-demo-7ccd56bf5d-p9p2p 1/1 Running 0 2m29s ``` -Now, create a `Secret` that contains the key `RESTIC_PASSWORD`. This will be used as the password for your restic repository. +You can check that the `/source/data/` directory of this pod is populated with data from the `stash-sample-data` ConfigMap using this command, ```console -$ kubectl create secret generic stash-demo --from-literal=RESTIC_PASSWORD=changeit -secret "stash-demo" created +$ kubectl exec -n demo stash-demo-7ccd56bf5d-p9p2p -- ls -R /source/data +/source/data: +LICENSE +README.md ``` -You can check that the secret was created like this: +Now, we are ready to backup the `/source/data` directory into an NFS backend. -```yaml -$ kubectl get secret stash-demo -o yaml +**Create Secret:** +At first, we need to create a storage secret. To configure this backend, the following secret keys are needed: + +| Key | Description | +| ----------------- | ---------------------------------------------------------- | +| `RESTIC_PASSWORD` | `Required`. Password used to encrypt snapshots by `restic` | + +Create the secret as below, + +```console +$ echo -n 'changeit' > RESTIC_PASSWORD +$ kubectl create secret generic -n demo local-secret \ + --from-file=./RESTIC_PASSWORD +secret/local-secret created +``` + +Verify that the secret has been created successfully. + +```console +$ kubectl get secret -n demo local-secret -o yaml +``` + +```yaml apiVersion: v1 data: RESTIC_PASSWORD: Y2hhbmdlaXQ= kind: Secret metadata: - creationTimestamp: 2017-12-04T05:24:22Z - name: stash-demo - namespace: default - resourceVersion: "22328" - selfLink: /api/v1/namespaces/default/secrets/stash-demo - uid: 62aa8ef8-d8b3-11e7-be92-0800277f19c0 + creationTimestamp: 2018-12-07T11:44:09Z + name: local-secret + namespace: demo + resourceVersion: "36409" + selfLink: /api/v1/namespaces/demo/secrets/local-secret + uid: 68ab5960-fa15-11e8-8905-0800277ca39d type: Opaque ``` -Now, create a `Restic` CRD with selectors matching the labels of the `busybox` Deployment and `spec.type: offline`.
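+You can also decode the stored password to confirm it is the one you set. This is an optional check using plain kubectl jsonpath and `base64`, not a Stash feature:
+
+```console
+$ kubectl get secret -n demo local-secret -o jsonpath='{.data.RESTIC_PASSWORD}' | base64 -d
+changeit
+```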
+**Create Restic:** -```console -$ kubectl apply -f ./docs/examples/tutorial/restic_offline.yaml -restic "stash-demo" created -``` +Now, we are going to create a `Restic` crd to take backup of the `/source/data` directory of the `stash-demo` deployment in offline mode. + +Below, the YAML for the `Restic` crd we are going to create for offline backup, ```yaml apiVersion: stash.appscode.com/v1alpha1 kind: Restic metadata: - name: stash-demo - namespace: default + name: offline-restic + namespace: demo spec: selector: matchLabels: @@ -118,9 +207,10 @@ spec: backend: local: mountPath: /safe/data - hostPath: - path: /data/stash-test/restic-repo - storageSecretName: stash-demo + nfs: + server: "nfs-service.storage.svc.cluster.local" + path: "/" + storageSecretName: local-secret schedule: '@every 5m' volumeMounts: - mountPath: /source/data @@ -131,41 +221,47 @@ spec: prune: true ``` -When a `Restic` is created with `spec.type: offline`, stash operator adds an [init-container](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) instead of sidecar container to target workload pods. The init-container takes backup once. If the backup is successfully completed, then it creates a job to perform `restic check` and exits. The app container starts only after the init-container exits without any error. This ensures that the app container is not running while taking backup. -Stash operator also creates a cron-job that deletes the workload pods according to the `spec.schedule`. Thus the workload pods get restarted periodically and allow the init-container to take backup. +Here, we have set `spec.type: offline`. This tells Stash to take backup in offline mode. + +Let's create the `Restic` we have shown above, ```console -$ kubectl get pods -l app=stash-demo -w -NAME READY STATUS RESTARTS AGE -stash-demo-788ffcf9c6-p5kxc 1/1 Terminating 0 1m -stash-demo-7b4f6877dc-nhrz9 0/1 Init:0/1 0 4s -stash-demo-7b4f6877dc-nhrz9 1/1 Running 0 32s +$ kubectl apply -f ./docs/examples/backup/restic_offline.yaml +restic.stash.appscode.com/offline-restic created ``` -```yaml -$ kubectl get deployment stash-demo -o yaml +If everything goes well, Stash will inject an [init-container](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) into the `stash-demo` deployment to take backup when the pod starts.
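+
+If you want, you can watch the old pod terminate and the new pod come up with the injected init-container. The `-w` (watch) flag below is standard kubectl, and pod names will differ in your cluster:
+
+```console
+$ kubectl get pods -n demo -l app=stash-demo -w
+```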
+ +Let's check that `init-container` has been injected successfully, + +```console +$ kubectl get deployment -n demo stash-demo -o yaml +``` +```yaml apiVersion: extensions/v1beta1 kind: Deployment metadata: annotations: deployment.kubernetes.io/revision: "2" + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{},"labels":{"app":"stash-demo"},"name":"stash-demo","namespace":"demo"},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"stash-demo"}},"template":{"metadata":{"labels":{"app":"stash-demo"},"name":"busybox"},"spec":{"containers":[{"args":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox","volumeMounts":[{"mountPath":"/source/data","name":"source-data"}]}],"restartPolicy":"Always","volumes":[{"configMap":{"name":"stash-sample-data"},"name":"source-data"}]}}}} restic.appscode.com/last-applied-configuration: | - {"kind":"Restic","apiVersion":"stash.appscode.com/v1alpha1","metadata":{"name":"stash-demo","namespace":"default","selfLink":"/apis/stash.appscode.com/v1alpha1/namespaces/default/restics/stash-demo","uid":"c55d5918-d8da-11e7-be92-0800277f19c0","resourceVersion":"57719","creationTimestamp":"2017-12-04T10:06:18Z"},"spec":{"selector":{"matchLabels":{"app":"stash-demo"}},"fileGroups":[{"path":"/source/data","retentionPolicyName":"keep-last-5"}],"backend":{"storageSecretName":"stash-demo","local":{"volumeSource":{"hostPath":{"path":"/data/stash-test/restic-repo"}},"path":"/safe/data"}},"schedule":"@every 5m","volumeMounts":[{"name":"source-data","mountPath":"/source/data"}],"resources":{},"retentionPolicies":[{"name":"keep-last-5","keepLast":5,"prune":true}],"type":"offline"},"status":{}} - restic.appscode.com/tag: canary - creationTimestamp: 2017-12-04T10:04:11Z + {"kind":"Restic","apiVersion":"stash.appscode.com/v1alpha1","metadata":{"name":"offline-restic","namespace":"demo","selfLink":"/apis/stash.appscode.com/v1alpha1/namespaces/demo/restics/offline-restic","uid":"f5b3abe7-fa15-11e8-8905-0800277ca39d","resourceVersion":"36693","generation":1,"creationTimestamp":"2018-12-07T11:48:05Z","annotations":{"kubectl.kubernetes.io/last-applied-configuration":"{\"apiVersion\":\"stash.appscode.com/v1alpha1\",\"kind\":\"Restic\",\"metadata\":{\"annotations\":{},\"name\":\"offline-restic\",\"namespace\":\"demo\"},\"spec\":{\"backend\":{\"local\":{\"mountPath\":\"/safe/data\",\"nfs\":{\"path\":\"/\",\"server\":\"nfs-service.storage.svc.cluster.local\"}},\"storageSecretName\":\"local-secret\"},\"fileGroups\":[{\"path\":\"/source/data\",\"retentionPolicyName\":\"keep-last-5\"}],\"retentionPolicies\":[{\"keepLast\":5,\"name\":\"keep-last-5\",\"prune\":true}],\"schedule\":\"@every 5m\",\"selector\":{\"matchLabels\":{\"app\":\"stash-demo\"}},\"type\":\"offline\",\"volumeMounts\":[{\"mountPath\":\"/source/data\",\"name\":\"source-data\"}]}}\n"}},"spec":{"selector":{"matchLabels":{"app":"stash-demo"}},"fileGroups":[{"path":"/source/data","retentionPolicyName":"keep-last-5"}],"backend":{"storageSecretName":"local-secret","local":{"nfs":{"server":"nfs-service.storage.svc.cluster.local","path":"/"},"mountPath":"/safe/data"}},"schedule":"@every 5m","volumeMounts":[{"name":"source-data","mountPath":"/source/data"}],"resources":{},"retentionPolicies":[{"name":"keep-last-5","keepLast":5,"prune":true}],"type":"offline"}} + restic.appscode.com/tag: e3 + creationTimestamp: 2018-12-07T11:40:30Z generation: 2 labels: app: stash-demo name: stash-demo - namespace: default - resourceVersion: "57824" - 
selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/stash-demo - uid: 798d4e60-d8da-11e7-be92-0800277f19c0 + namespace: demo + resourceVersion: "36735" + selfLink: /apis/extensions/v1beta1/namespaces/demo/deployments/stash-demo + uid: e6996fbd-fa14-11e8-8905-0800277ca39d spec: progressDeadlineSeconds: 600 replicas: 1 - revisionHistoryLimit: 2 + revisionHistoryLimit: 10 selector: matchLabels: app: stash-demo @@ -176,13 +272,15 @@ spec: type: RollingUpdate template: metadata: + annotations: + restic.appscode.com/resource-hash: "16527601205197612609" creationTimestamp: null labels: app: stash-demo name: busybox spec: containers: - - command: + - args: - sleep - "3600" image: busybox @@ -198,10 +296,19 @@ spec: initContainers: - args: - backup - - --restic-name=stash-demo + - --restic-name=offline-restic - --workload-kind=Deployment - --workload-name=stash-demo - - --image-tag=canary + - --docker-registry=appscodeci + - --image-tag=e3 + - --pushgateway-url=http://stash-operator.kube-system.svc:56789 + - --enable-status-subresource=true + - --use-kubeapiserver-fqdn-for-aks=true + - --enable-analytics=true + - --logtostderr=true + - --alsologtostderr=false + - --v=3 + - --stderrthreshold=0 - --enable-rbac=true env: - name: NODE_NAME @@ -214,10 +321,15 @@ spec: fieldRef: apiVersion: v1 fieldPath: metadata.name - image: appscode/stash:0.7.0 + - name: APPSCODE_ANALYTICS_CLIENT_ID + value: 90b12fedfef2068a5f608219d5e7904a + image: appscodeci/stash:e3 imagePullPolicy: IfNotPresent name: stash resources: {} + securityContext: + procMount: Default + runAsUser: 0 terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: @@ -235,8 +347,9 @@ spec: securityContext: {} terminationGracePeriodSeconds: 30 volumes: - - gitRepo: - repository: https://github.com/appscode/stash-data.git + - configMap: + defaultMode: 420 + name: stash-sample-data name: source-data - emptyDir: {} name: stash-scratchdir @@ -248,22 +361,22 @@ spec: fieldPath: metadata.labels path: labels name: stash-podinfo - - hostPath: - path: /data/stash-test/restic-repo - type: "" - name: stash-local + - name: stash-local + nfs: + path: / + server: nfs-service.storage.svc.cluster.local status: availableReplicas: 1 conditions: - - lastTransitionTime: 2017-12-04T10:06:26Z - lastUpdateTime: 2017-12-04T10:06:26Z + - lastTransitionTime: 2018-12-07T11:42:45Z + lastUpdateTime: 2018-12-07T11:42:45Z message: Deployment has minimum availability. reason: MinimumReplicasAvailable status: "True" type: Available - - lastTransitionTime: 2017-12-04T10:04:11Z - lastUpdateTime: 2017-12-04T10:06:26Z - message: ReplicaSet "stash-demo-7b4f6877dc" has successfully progressed. + - lastTransitionTime: 2018-12-07T11:40:31Z + lastUpdateTime: 2018-12-07T11:48:08Z + message: ReplicaSet "stash-demo-684cd86f7b" has successfully progressed. reason: NewReplicaSetAvailable status: "True" type: Progressing @@ -273,42 +386,39 @@ status: updatedReplicas: 1 ``` -If everything goes well, A `Repository` crd with name `deployment.stash-demo` will be created for the respective repository in the specified backend. Verify that, `Repository` is created successfully using this command, - -```console -$ kubectl get repository deployment.stash-demo -NAME AGE -deployment.stash-demo 1m -``` +Notice that `stash-demo` deployment has an `init-container` named `stash` which is running `backup` command. -`Restic` will take backup of the volume periodically with a 1-minute interval. 
You can verify that backup is taking successfully by, +Stash operator has also created a [CronJob](https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/) with the name format `stash-scaledown-cron-{restic-name}`. Verify that the `CronJob` has been created successfully, -```console -$ kubectl get snapshots -l repository=deployment.stash-demo -NAME AGE -deployment.stash-demo-c1014ca6 10s +```console +$ kubectl get cronjob -n demo +NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE +stash-scaledown-cron-offline-restic @every 5m False 0 2m34s ``` -Here, `deployment.stash-demo-c1014ca6` represents the name of the successful backup [Snapshot](/docs/concepts/crds/snapshot.md) taken by Stash in `deployment.stash-demo` repository. +**Verify Backup:** -Stash operator also creates a cron job to periodically delete workload pods according to `spec.schedule`. Please note that Kubernetes cron jobs [do not support timezone](https://github.com/kubernetes/kubernetes/issues/47202). +Stash will create a `Repository` crd with the name `deployment.stash-demo` for the respective repository during the first backup run. To verify, run the following command, ```console -kubectl get cronjob -NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE -stash-kubectl-cron-stash-demo @every 5m False 0 +$ kubectl get repository deployment.stash-demo -n demo +NAME BACKUP-COUNT LAST-SUCCESSFUL-BACKUP AGE +deployment.stash-demo 1 2m 2m ``` -Note that offline backup is not supported for workload kind `Deployment`, `Replicaset` and `ReplicationController` with `replicas > 1`. +Here, the `BACKUP-COUNT` field indicates the number of backup snapshots taken in this repository. ## Cleaning up To cleanup the Kubernetes resources created by this tutorial, run: + ```console -$ kubectl delete deployment stash-demo -$ kubectl delete secret stash-demo -$ kubectl delete restic stash-demo -$ kubectl delete repository deployment.stash-demo +$ kubectl delete -n demo deployment stash-demo +$ kubectl delete -n demo secret local-secret +$ kubectl delete -n demo restic offline-restic +$ kubectl delete -n demo repository deployment.stash-demo + +$ kubectl delete namespace demo ``` If you would like to uninstall Stash operator, please follow the steps [here](/docs/setup/uninstall.md). @@ -321,6 +431,6 @@ If you would like to uninstall Stash operator, please follow the steps [here](/d - Learn about the details of Recovery CRD [here](/docs/concepts/crds/recovery.md). - See the list of supported backends and how to configure them [here](/docs/guides/backends/overview.md). - See working examples for supported workload types [here](/docs/guides/workloads.md). -- Thinking about monitoring your backup operations? Stash works [out-of-the-box with Prometheus](/docs/guides/monitoring.md). +- Thinking about monitoring your backup operations? Stash works [out-of-the-box with Prometheus](/docs/guides/monitoring/overview.md). - Learn about how to configure [RBAC roles](/docs/guides/rbac.md). -- Want to hack on Stash? Check our [contribution guidelines](/docs/CONTRIBUTING.md). \ No newline at end of file +- Want to hack on Stash? Check our [contribution guidelines](/docs/CONTRIBUTING.md).
diff --git a/docs/guides/platforms/_index.md b/docs/guides/platforms/_index.md index ece508a64..091d701e4 100644 --- a/docs/guides/platforms/_index.md +++ b/docs/guides/platforms/_index.md @@ -5,6 +5,6 @@ menu: identifier: platforms name: Platforms parent: guides - weight: 50 + weight: 35 menu_name: product_stash_0.7.0 ---- \ No newline at end of file +--- diff --git a/docs/guides/platforms/aks.md b/docs/guides/platforms/aks.md index 540a2dce0..5e5ee858e 100644 --- a/docs/guides/platforms/aks.md +++ b/docs/guides/platforms/aks.md @@ -15,22 +15,23 @@ menu_name: product_stash_0.7.0 # Using Stash with Azure Kubernetes Service (AKS) -This tutorial will show you how to use Stash to **backup** and **restore** a volume in [Azure Kubernetes Service](https://azure.microsoft.com/en-us/services/kubernetes-service/). Here, we are going to backup the `/source/data` folder of a busybox pod into [Azure Blob Storage](https://azure.microsoft.com/en-us/services/storage/blobs/). Then, we will show how to recover this data into a `PersistentVolumeClaim(PVC)`. We will also re-deploy deployment using this recovered volume. +This tutorial will show you how to use Stash to **backup** and **restore** a volume in [Azure Kubernetes Service](https://azure.microsoft.com/en-us/services/kubernetes-service/). Here, we are going to backup the `/source/data` folder of a busybox pod into [Azure Blob Storage](https://azure.microsoft.com/en-us/services/storage/blobs/). Then, we are going to show how to recover this data into a `PersistentVolumeClaim(PVC)`. We are also going to re-deploy the deployment using this recovered volume. ## Before You Begin -At first, you need to have a AKS cluster. If you don't already have a cluster, create one from [here](https://azure.microsoft.com/en-us/services/kubernetes-service/). Now, install Stash in your cluster following the steps [here](/docs/setup/install.md). +At first, you need to have an AKS cluster. If you don't already have a cluster, create one from [here](https://azure.microsoft.com/en-us/services/kubernetes-service/). -Then, you will need to have a [Azure Blob Storage](https://azure.microsoft.com/en-us/services/storage/blobs/) to store the backup snapshots. +- Install Stash in your cluster following the steps [here](/docs/setup/install.md). -You should have understanding the following Stash concepts: +- You should be familiar with the following Stash concepts: + - [Restic](/docs/concepts/crds/restic.md) + - [Repository](/docs/concepts/crds/repository.md) + - [Recovery](/docs/concepts/crds/recovery.md) + - [Snapshot](/docs/concepts/crds/snapshot.md) -- [Restic](/docs/concepts/crds/restic.md) -- [Repository](/docs/concepts/crds/repository.md) -- [Recovery](/docs/concepts/crds/recovery.md) -- [Snapshot](/docs/concepts/crds/snapshot.md) +- You will need an [Azure Blob Storage](https://azure.microsoft.com/en-us/services/storage/blobs/) container to store the backup snapshots. -To keep things isolated, we will use a separate namespace called `demo` throughout this tutorial. +To keep things isolated, we are going to use a separate namespace called `demo` throughout this tutorial. ```console $ kubectl create ns demo @@ -41,7 +42,7 @@ namespace/demo created ## Backup -In order to take backup, we need some sample data. Stash has some sample data in [stash-data](https://github.com/appscode/stash-data) repository. As [gitRepo](https://kubernetes.io/docs/concepts/storage/volumes/#gitrepo) volume has been deprecated, we will not use this repository as volume directly.
Instead, we will create a [configMap](https://kubernetes.io/docs/concepts/storage/volumes/#configmap) from these data and use that ConfigMap as data source. +In order to take backup, we need some sample data. Stash has some sample data in [stash-data](https://github.com/appscode/stash-data) repository. As [gitRepo](https://kubernetes.io/docs/concepts/storage/volumes/#gitrepo) volume has been deprecated, we are not going to use this repository as volume directly. Instead, we are going to create a [configMap](https://kubernetes.io/docs/concepts/storage/volumes/#configmap) from the stash-data repository and use that ConfigMap as data source. Let's create a ConfigMap from these sample data, @@ -120,9 +121,17 @@ README.md Now, we are ready to backup `/source/data` directory into [Azure Blob Container](https://azure.microsoft.com/en-us/services/storage/blobs/). -**Create Restic:** +**Create Secret:** + +At first, we need to create a storage secret that holds the credentials for the backend. To configure this backend, the following secret keys are needed: + +| Key | Description | +|-------------------------|------------------------------------------------------------| +| `RESTIC_PASSWORD` | `Required`. Password used to encrypt snapshots by `restic` | +| `AZURE_ACCOUNT_NAME` | `Required`. Azure Storage account name | +| `AZURE_ACCOUNT_KEY` | `Required`. Azure Storage account key | -At first, we need to create a secret for `Restic` crd. Create a secret for `Restic` using following commands, +Create the storage secret as below, ```console $ echo -n 'changeit' >RESTIC_PASSWORD @@ -159,7 +168,9 @@ type: Opaque ``` -Now, we will create `Restic` crd to take backup `/source/data` directory of `stash-demo` deployment. This will create a repository in the Azure blob container specified in `azure.container` field and start taking periodic backup of `/source/data` directory. +**Create Restic:** + +Now, we are going to create a `Restic` crd to take backup of the `/source/data` directory of the `stash-demo` deployment. This will create a repository in the Azure blob container specified in the `azure.container` field and start taking periodic backup of the `/source/data` directory. ```console $ kubectl apply -f ./docs/examples/platforms/aks/restic.yaml @@ -204,7 +215,7 @@ NAME READY STATUS RESTARTS AGE stash-demo-6b8c94cdd7-8jhtn 2/2 Running 1 1h ``` -Look at the pod. It now has 2 containers. If you view the YAML of this pod, you will see there is a container named `stash` which takes backup. +Look at the pod. It now has 2 containers. If you view the resource definition of this pod, you will see there is a container named `stash` which is running the `backup` command. **Verify Backup:** @@ -216,6 +227,8 @@ NAME BACKUPCOUNT LASTSUCCESSFULBACKUP AGE deployment.stash-demo 8 13s 8m ``` +Here, the `BACKUPCOUNT` field indicates the number of backup snapshots taken in this repository. + `Restic` will take backup of the volume periodically with a 1-minute interval. You can verify that backup snapshots are created successfully by, ```console @@ -331,7 +344,7 @@ spec: claimName: stash-recovered ``` -Wait until `Recovery` job completes its task. To verify that recovery is completed successfully run, +Wait until the `Recovery` job completes its task. To verify that the recovery has completed successfully, run, ```console $ kubectl get recovery -n demo azure-recovery @@ -341,11 +354,17 @@ azure-recovery demo deployment.stash-demo Succee Here, `PHASE` `Succeeded` indicates that our recovery has been completed successfully.
Backup data has been restored in the `stash-recovered` PVC. Now, we are ready to use this PVC to re-deploy the workload. +If you are using a Kubernetes version older than v1.11.0, run the following command and check the `status.phase` field to see whether the recovery succeeded or failed. + +```console +$ kubectl get recovery -n demo azure-recovery -o yaml +``` + **Re-deploy Workload:** -We have successfully restored backup data into `stash-recovered` PVC. Now, we will re-deploy our previous deployment `stash-demo`. This time, we will mount the `stash-recovered` PVC as `source-data` volume instead of ConfigMap `stash-sample-data`. +We have successfully restored backup data into the `stash-recovered` PVC. Now, we are going to re-deploy our previous deployment `stash-demo`. This time, we are going to mount the `stash-recovered` PVC as the `source-data` volume instead of the ConfigMap `stash-sample-data`. -Below, the YAML for `stash-demo` deployment with `stash-recovered` pvc as `source-data` volume. +Below, the YAML for `stash-demo` deployment with `stash-recovered` PVC as `source-data` volume. ```yaml apiVersion: apps/v1 @@ -386,7 +405,7 @@ spec: Let's create the deployment, ```console -$ kubectl apply -f ./docs/examples/platforms/aks/recovered-deployment.yaml +$ kubectl apply -f ./docs/examples/platforms/aks/recovered-deployment.yaml deployment.apps/stash-demo created ``` @@ -429,4 +448,4 @@ $ kubectl delete repository -n demo deployment.stash-demo $ kubectl delete ns demo ``` -To uninstall Stash from your cluster, follow the instructions from [here](/docs/setup/uninstall.md). +- To uninstall Stash from your cluster, follow the instructions from [here](/docs/setup/uninstall.md). diff --git a/docs/guides/platforms/eks.md b/docs/guides/platforms/eks.md index 8715adadd..ca911f9ad 100644 --- a/docs/guides/platforms/eks.md +++ b/docs/guides/platforms/eks.md @@ -15,22 +15,23 @@ menu_name: product_stash_0.7.0 # Using Stash with Amazon EKS -This tutorial will show you how to use Stash to **backup** and **restore** a volume in [Amazon Elastic Container Service for Kubernetes (EKS)](https://aws.amazon.com/eks/). Here, we are going to backup the `/source/data` folder of a busybox pod into [AWS S3 bucket](https://aws.amazon.com/s3/). Then, we will show how to recover this data into a `PersistentVolumeClaim(PVC)`. We will also re-deploy deployment using this recovered volume. +This tutorial will show you how to use Stash to **backup** and **restore** a volume in [Amazon Elastic Container Service for Kubernetes (EKS)](https://aws.amazon.com/eks/). Here, we are going to backup the `/source/data` folder of a busybox pod into [AWS S3 bucket](https://aws.amazon.com/s3/). Then, we are going to show how to recover this data into a `PersistentVolumeClaim(PVC)`. We are also going to re-deploy the deployment using this recovered volume. ## Before You Begin -At first, you need to have a EKS cluster. If you don't already have a cluster, create one from [here](https://aws.amazon.com/eks/). You can use [eksctl](https://github.com/weaveworks/eksctl) command line tool to create EKS cluster easily. Now, install Stash in your cluster following the steps [here](/docs/setup/install.md). +At first, you need to have an EKS cluster. If you don't already have a cluster, create one from [here](https://aws.amazon.com/eks/). You can use the [eksctl](https://github.com/weaveworks/eksctl) command line tool to create an EKS cluster easily. -Then, you will need to have a [AWS S3 Bucket](https://aws.amazon.com/s3/) to store the backup snapshots.
+- Install Stash in your cluster following the steps [here](/docs/setup/install.md). -You should have understanding the following Stash concepts: +- You should be familiar with the following Stash concepts: + - [Restic](/docs/concepts/crds/restic.md) + - [Repository](/docs/concepts/crds/repository.md) + - [Recovery](/docs/concepts/crds/recovery.md) + - [Snapshot](/docs/concepts/crds/snapshot.md) -- [Restic](/docs/concepts/crds/restic.md) -- [Repository](/docs/concepts/crds/repository.md) -- [Recovery](/docs/concepts/crds/recovery.md) -- [Snapshot](/docs/concepts/crds/snapshot.md) +- You will need a [AWS S3 Bucket](https://aws.amazon.com/s3/) to store the backup snapshots. -To keep things isolated, we will use a separate namespace called `demo` throughout this tutorial. +To keep things isolated, we are going to use a separate namespace called `demo` throughout this tutorial. ```console $ kubectl create ns demo @@ -41,7 +42,7 @@ namespace/demo created ## Backup -In order to take backup, we need some sample data. Stash has some sample data in [stash-data](https://github.com/appscode/stash-data) repository. As [gitRepo](https://kubernetes.io/docs/concepts/storage/volumes/#gitrepo) volume has been deprecated, we will not use this repository as volume directly. Instead, we will create a [configMap](https://kubernetes.io/docs/concepts/storage/volumes/#configmap) from these data and use that ConfigMap as data source. +In order to take backup, we need some sample data. Stash has some sample data in [stash-data](https://github.com/appscode/stash-data) repository. As [gitRepo](https://kubernetes.io/docs/concepts/storage/volumes/#gitrepo) volume has been deprecated, we are not going to use this repository as volume directly. Instead, we are going to create a [configMap](https://kubernetes.io/docs/concepts/storage/volumes/#configmap) from the stash-data repository and use that ConfigMap as data source. Let's create a ConfigMap from these sample data, @@ -120,9 +121,17 @@ README.md Now, we are ready to backup `/source/data` directory into [AWS S3 Bucket](https://aws.amazon.com/s3/). -**Create Restic:** +**Create Secret:** + +At first, we need to create a storage secret that hold the credentials for the backend. To configure this backend, the following secret keys are needed: + +| Key | Description | +| ----------------------- | ---------------------------------------------------------- | +| `RESTIC_PASSWORD` | `Required`. Password used to encrypt snapshots by `restic` | +| `AWS_ACCESS_KEY_ID` | `Required`. AWS access key ID for bucket | +| `AWS_SECRET_ACCESS_KEY` | `Required`. AWS secret access key for bucket | -At first, we need to create a secret for `Restic` crd. Create a secret for `Restic` using following commands, +Create a the storage secret as below, ```console $ echo -n 'changeit' > RESTIC_PASSWORD @@ -158,10 +167,12 @@ metadata: type: Opaque ``` -Now, we will create `Restic` crd to take backup `/source/data` directory of `stash-demo` deployment. This will create a repository in the S3 bucket specified in `s3.bucket` field and start taking periodic backup of `/source/data` directory. +**Create Restic:** + +Now, we are going to create `Restic` crd to take backup `/source/data` directory of `stash-demo` deployment. This will create a repository in the S3 bucket specified in `s3.bucket` field and start taking periodic backup of `/source/data` directory. 
```console -$ kubectl apply -f ./docs/examples/platforms/eks/restic.yaml +$ kubectl apply -f ./docs/examples/platforms/eks/restic.yaml restic.stash.appscode.com/s3-restic created ``` @@ -204,7 +215,7 @@ NAME READY STATUS RESTARTS AGE stash-demo-646c854778-t4d72 2/2 Running 0 1m ``` -Look at the pod. It now has 2 containers. If you view the YAML of this pod, you will see there is a container named `stash` which takes backup. +Look at the pod. It now has 2 containers. If you view the resource definition of this pod, you will see there is a container named `stash` which running `backup` command. **Verify Backup:** @@ -350,7 +361,7 @@ Look at the `STATUS` filed. `stash-recovered` PVC is bounded to volume `pvc-d86e Now, we have to create a `Recovery` crd to recover backed up data into this PVC. ```console -$ kubectl apply -f ./docs/examples/platforms/eks/recovery.yaml +$ kubectl apply -f ./docs/examples/platforms/eks/recovery.yaml recovery.stash.appscode.com/s3-recovery created ``` @@ -374,7 +385,7 @@ spec: claimName: stash-recovered ``` -Wait until `Recovery` job completes its task. To verify that recovery is completed successfully run, +Wait until `Recovery` job completes its task. To verify that recovery has completed successfully run, ```yaml $ kubectl get recovery -n demo s3-recovery -o yaml @@ -411,9 +422,9 @@ Here, `status.phase: Succeeded` indicate that our recovery has been completed su **Re-deploy Workload:** -We have successfully restored backup data into `stash-recovered` PVC. Now, we will re-deploy our previous deployment `stash-demo`. This time, we will mount the `stash-recovered` PVC as `source-data` volume instead of ConfigMap `stash-sample-data`. +We have successfully restored backup data into `stash-recovered` PVC. Now, we are going to re-deploy our previous deployment `stash-demo`. This time, we are going to mount the `stash-recovered` PVC as `source-data` volume instead of ConfigMap `stash-sample-data`. -Below, the YAML for `stash-demo` deployment with `stash-recovered` pvc as `source-data` volume. +Below, the YAML for `stash-demo` deployment with `stash-recovered` PVC as `source-data` volume. ```yaml apiVersion: apps/v1 @@ -454,7 +465,7 @@ spec: Let's create the deployment, ```console -$ kubectl apply -f ./docs/examples/platforms/eks/recovered-deployment.yaml +$ kubectl apply -f ./docs/examples/platforms/eks/recovered-deployment.yaml deployment.apps/stash-demo created ``` @@ -497,4 +508,4 @@ $ kubectl delete repository -n demo deployment.stash-demo $ kubectl delete ns demo ``` -To uninstall Stash from your cluster, follow the instructions from [here](/docs/setup/uninstall.md). +- To uninstall Stash from your cluster, follow the instructions from [here](/docs/setup/uninstall.md). diff --git a/docs/guides/platforms/gke.md b/docs/guides/platforms/gke.md index 817f012d8..ff70653cf 100644 --- a/docs/guides/platforms/gke.md +++ b/docs/guides/platforms/gke.md @@ -15,22 +15,23 @@ menu_name: product_stash_0.7.0 # Using Stash with Google Kubernetes Engine (GKE) -This tutorial will show you how to use Stash to **backup** and **restore** a Kubernetes deployment in [Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine/). Here, we are going to backup the `/source/data` folder of a busybox pod into [GCS bucket](/docs/guides/backends.md#google-cloud-storage-gcs). Then, we will show how to recover this data into a `gcePersistentDisk` and `PersistentVolumeClaim`. We will also re-deploy deployment using this recovered volume. 
+This tutorial will show you how to use Stash to **backup** and **restore** a Kubernetes deployment in [Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine/). Here, we are going to back up the `/source/data` folder of a busybox pod into [GCS bucket](/docs/guides/backends.md#google-cloud-storage-gcs). Then, we are going to show how to recover this data into a `gcePersistentDisk` and `PersistentVolumeClaim`. We are also going to re-deploy the deployment using this recovered volume.

## Before You Begin

-At first, you need to have a Kubernetes cluster in Google Cloud Platform. If you don't already have a cluster, you can create one from [here](https://console.cloud.google.com/kubernetes). Now, install Stash in your cluster following the steps [here](/docs/setup/install.md).
+At first, you need to have a Kubernetes cluster in Google Cloud Platform. If you don't already have a cluster, you can create one from [here](https://console.cloud.google.com/kubernetes).

-You should have understanding the following Stash concepts:
+- Install Stash in your cluster following the steps [here](/docs/setup/install.md).

-- [Restic](/docs/concepts/crds/restic.md)
-- [Repository](/docs/concepts/crds/repository.md)
-- [Recovery](/docs/concepts/crds/recovery.md)
-- [Snapshot](/docs/concepts/crds/snapshot.md)
+- You should be familiar with the following Stash concepts:
+ - [Restic](/docs/concepts/crds/restic.md)
+ - [Repository](/docs/concepts/crds/repository.md)
+ - [Recovery](/docs/concepts/crds/recovery.md)
+ - [Snapshot](/docs/concepts/crds/snapshot.md)

-Then, you will need to have a [GCS Bucket](https://console.cloud.google.com/storage) and [GCE persistent disk](https://console.cloud.google.com/compute/disks). GCE persistent disk must be in the same GCE project and zone as the cluster.
+- You will need a [GCS Bucket](https://console.cloud.google.com/storage) and [GCE persistent disk](https://console.cloud.google.com/compute/disks). The GCE persistent disk must be in the same GCE project and zone as the cluster.

-To keep things isolated, we will use a separate namespace called `demo` throughout this tutorial.
+To keep things isolated, we are going to use a separate namespace called `demo` throughout this tutorial.

```console
$ kubectl create ns demo
@@ -41,7 +42,7 @@ namespace/demo created

## Backup

-In order to take backup, we need some sample data. Stash has some sample data in [stash-data](https://github.com/appscode/stash-data) repository. As [gitRepo](https://kubernetes.io/docs/concepts/storage/volumes/#gitrepo) volume has been deprecated, we will not use this repository as volume directly. Instead, we will create a [configMap](https://kubernetes.io/docs/concepts/storage/volumes/#configmap) from these data and use that ConfigMap as data source.
+In order to take backup, we need some sample data. Stash has some sample data in [stash-data](https://github.com/appscode/stash-data) repository. As [gitRepo](https://kubernetes.io/docs/concepts/storage/volumes/#gitrepo) volume has been deprecated, we are not going to use this repository as volume directly. Instead, we are going to create a [configMap](https://kubernetes.io/docs/concepts/storage/volumes/#configmap) from the stash-data repository and use that ConfigMap as data source.
Let's create a ConfigMap from these sample data,

@@ -120,9 +121,17 @@ README.md

Now, we are ready backup `/source/data` directory into a [GCS bucket](/docs/guides/backends.md#google-cloud-storage-gcs),

-**Create Restic:**
+**Create Secret:**
+
+At first, we need to create a storage secret that holds the credentials for the backend. To configure this backend, the following secret keys are needed:
+
+| Key                               | Description                                                 |
+| --------------------------------- | ----------------------------------------------------------- |
+| `RESTIC_PASSWORD`                 | `Required`. Password used to encrypt snapshots by `restic`  |
+| `GOOGLE_PROJECT_ID`               | `Required`. Google Cloud project ID                         |
+| `GOOGLE_SERVICE_ACCOUNT_JSON_KEY` | `Required`. Google Cloud service account json key           |

-At first, we need to create a secret for `Restic` crd. Create a secret for `Restic` using following commands,
+Create the storage secret as below,

```console
$ echo -n 'changeit' > RESTIC_PASSWORD
@@ -158,6 +167,8 @@ metadata:
type: Opaque
```

+**Create Restic:**
+
Now, we can create `Restic` crd. This will create a repository in the GCS bucket specified in `gcs.bucket` field and start taking periodic backup of `/source/data` directory.

```console
@@ -203,7 +214,7 @@ NAME READY STATUS RESTARTS AGE
stash-demo-6b8c94cdd7-8jhtn 2/2 Running 1 1h
```

-Look at the pod. It now has 2 containers. If you view the YAML of this pod, you will see there is a container named `stash` which takes backup
+Look at the pod. It now has 2 containers. If you view the resource definition of this pod, you will see there is a container named `stash` which is running the `backup` command.

**Verify Backup:**

@@ -215,6 +226,8 @@ NAME BACKUPCOUNT LASTSUCCESSFULBACKUP AGE
deployment.stash-demo 1 13s 1m
```

+Here, `BACKUPCOUNT` field indicates the number of backup snapshots taken in this repository.
+
`Restic` will take backup of the volume periodically with a 1-minute interval. You can verify that backup is taking successfully by,

```console
@@ -241,7 +254,7 @@ To view the snapshot files, navigate to `snapshots` directory of the repository,

## Recovery

-Now, consider that we have lost our workload as well as data volume. We want to recover the data into a new volume and re-deploy the workload. In this section, we will see how to recover data into a [gcePersistentDisk](https://kubernetes.io/docs/concepts/storage/volumes/#gcepersistentdisk) and [persistentVolumeClaim](https://kubernetes.io/docs/concepts/storage/volumes/#persistentvolumeclaim).
+Now, consider that we have lost our workload as well as data volume. We want to recover the data into a new volume and re-deploy the workload. In this section, we are going to see how to recover data into a [gcePersistentDisk](https://kubernetes.io/docs/concepts/storage/volumes/#gcepersistentdisk) and [persistentVolumeClaim](https://kubernetes.io/docs/concepts/storage/volumes/#persistentvolumeclaim).

At first, let's delete `Restic` crd, `stash-demo` deployment and `stash-sample-data` ConfigMap.

@@ -262,7 +275,7 @@ In order to perform recovery, we need `Repository` crd `deployment.stah-demo` an

### Recover to GCE Persistent Disk

-Now, we will recover the backed up data into GCE Persistent Disk. At first, create a GCE disk named `stash-recovered` from [Google cloud console](https://console.cloud.google.com/compute/disks). Then create `Recovery` crd,
+Now, we are going to recover the backed up data into GCE Persistent Disk.
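+
+This step needs an existing GCE disk. You can create it from the console as described next, or with `gcloud` — a sketch, where the disk size and zone are assumptions and must match your cluster's project and zone:
+
+```console
+$ gcloud compute disks create stash-recovered --size=10GB --zone=us-central1-a
+```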
At first, create a GCE disk named `stash-recovered` from [Google cloud console](https://console.cloud.google.com/compute/disks). Then create `Recovery` crd,

```console
$ kubectl apply -f ./docs/examples/platforms/gke/recovery-gcePD.yaml
@@ -290,7 +303,7 @@ spec:
fsType: ext4
```

-Wait until `Recovery` job completes its task. To verify that recovery is completed successfully run,
+Wait until `Recovery` job completes its task. To verify that recovery has completed successfully run,

```console
$ kubectl get recovery -n demo gcs-recovery
@@ -300,9 +313,15 @@ gcs-recovery demo deployment.stash-demo Succee
Here, `PHASE` `Succeeded` indicate that our recovery has been completed successfully. Backup data has been restored in `stash-recovered` Persistent Disk. Now, we are ready to use this Persistent Disk to re-deploy workload.

+If you are using Kubernetes version older than v1.11.0 then run following command and check `status.phase` field to see whether the recovery succeeded or failed.
+
+```console
+$ kubectl get recovery -n demo gcs-recovery -o yaml
+```
+
**Re-deploy Workload:**

-We have successfully restored backup data into `stash-recovered` gcePersistentDisk. Now, we will re-deploy our previous deployment `stash-demo`. This time, we will mount the `stash-recovered` Persistent Disk as `source-data` volume instead of ConfigMap `stash-sample-data`.
+We have successfully restored backup data into `stash-recovered` gcePersistentDisk. Now, we are going to re-deploy our previous deployment `stash-demo`. This time, we are going to mount the `stash-recovered` Persistent Disk as `source-data` volume instead of ConfigMap `stash-sample-data`.

Below, the YAML for `stash-demo` deployment with `stash-recovered` persistent disk as `source-data` volume.

@@ -378,7 +397,7 @@ So, we can see that the data we had backed up from original deployment are now p

### Recover to `PersistentVolumeClaim`

-Here, we will show how to recover the backed up data into a PVC. If you have re-deployed `stash-demo` deployment by following previous tutorial on `gcePersistentDisk`, delete the deployment first,
+Here, we are going to show how to recover the backed up data into a PVC. If you have re-deployed `stash-demo` deployment by following previous tutorial on `gcePersistentDisk`, delete the deployment first,

```console
$ kubectl delete deployment -n demo stash-demo
@@ -450,7 +469,7 @@ spec:
claimName: stash-recovered
```

-Wait until `Recovery` job completes its task. To verify that recovery is completed successfully run,
+Wait until `Recovery` job completes its task. To verify that recovery has completed successfully run,

```console
$ kubectl get recovery -n demo gcs-recovery
@@ -462,9 +481,9 @@ Here, `PHASE` `Succeeded` indicate that our recovery has been completed successf

**Re-deploy Workload:**

-We have successfully restored backup data into `stash-recovered` PVC. Now, we will re-deploy our previous deployment `stash-demo`. This time, we will mount the `stash-recovered` PVC as `source-data` volume instead of ConfigMap `stash-sample-data`.
+We have successfully restored backup data into `stash-recovered` PVC. Now, we are going to re-deploy our previous deployment `stash-demo`. This time, we are going to mount the `stash-recovered` PVC as `source-data` volume instead of ConfigMap `stash-sample-data`.

-Below, the YAML for `stash-demo` deployment with `stash-recovered` pvc as `source-data` volume.
+Below, the YAML for `stash-demo` deployment with `stash-recovered` PVC as `source-data` volume.
```yaml apiVersion: apps/v1 @@ -548,4 +567,4 @@ $ kubectl delete repository -n demo deployment.stash-demo $ kubectl delete ns demo ``` -To uninstall Stash from your cluster, follow the instructions from [here](/docs/setup/uninstall.md). +- To uninstall Stash from your cluster, follow the instructions from [here](/docs/setup/uninstall.md). diff --git a/docs/guides/platforms/minio.md b/docs/guides/platforms/minio.md new file mode 100644 index 000000000..60a964ba1 --- /dev/null +++ b/docs/guides/platforms/minio.md @@ -0,0 +1,463 @@ +--- +title: Minio | Stash +description: Using Stash with TLS secured Minio Server +menu: + product_stash_0.7.0: + identifier: platforms-minio + name: Minio + parent: platforms + weight: 40 +product_name: stash +menu_name: product_stash_0.7.0 +--- + +> New to Stash? Please start [here](/docs/concepts/README.md). + +# Using Stash with TLS secured Minio Server + +[Minio](https://minio.io/) is an open source object storage server compatible with AWS S3 cloud storage service. This tutorial will show you how to use Stash to **backup** and **restore** a volume with a Minio backend. Here, we are going to backup the `/source/data` folder of a busybox pod into a Minio bucket. Then, we are going to show how to recover this data into a `PersistentVolumeClaim(PVC)`. We are going to also re-deploy deployment using this recovered volume. + +## Before You Begin + +At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [Minikube](https://github.com/kubernetes/minikube). + +- Install `Stash` in your cluster following the steps [here](/docs/setup/install.md). + +- You should be familiar with the following Stash concepts: + - [Restic](/docs/concepts/crds/restic.md) + - [Repository](/docs/concepts/crds/repository.md) + - [Recovery](/docs/concepts/crds/recovery.md) + - [Snapshot](/docs/concepts/crds/snapshot.md) + +- You will need a TLS secured [Minio](https://docs.minio.io/) server to store backed up data. If you already do not have a Minio server running, deploy one following the tutorial from [here](https://github.com/appscode/third-party-tools/blob/master/storage/minio/README.md). For this tutorial, we have deployed Minio server in `storage` namespace and it is accessible through `minio.storage.svc` dns. + +To keep things isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```console +$ kubectl create ns demo +namespace/demo created +``` + +>Note: YAML files used in this tutorial are stored in [/docs/examples/platforms/minio](/docs/examples/platforms/minio) directory of [appscode/stash](https://github.com/appscode/stash) repository. + +## Backup + +In order to take backup, we need some sample data. Stash has some sample data in [stash-data](https://github.com/appscode/stash-data) repository. As [gitRepo](https://kubernetes.io/docs/concepts/storage/volumes/#gitrepo) volume has been deprecated, we are not going to use this repository as volume directly. Instead, we are going to create a [configMap](https://kubernetes.io/docs/concepts/storage/volumes/#configmap) from the stash-data repository and use that ConfigMap as data source. 
+ +Let's create a ConfigMap from these sample data, + +```console +$ kubectl create configmap -n demo stash-sample-data \ + --from-literal=LICENSE="$(curl -fsSL https://raw.githubusercontent.com/appscode/stash-data/master/LICENSE)" \ + --from-literal=README.md="$(curl -fsSL https://raw.githubusercontent.com/appscode/stash-data/master/README.md)" +configmap/stash-sample-data created +``` + +**Deploy Workload:** + +Now, deploy the following Deployment. Here, we have mounted the ConfigMap `stash-sample-data` as data source volume. + +Below, the YAML for the Deployment we are going to create. + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: stash-demo + name: stash-demo + namespace: demo +spec: + replicas: 1 + selector: + matchLabels: + app: stash-demo + template: + metadata: + labels: + app: stash-demo + name: busybox + spec: + containers: + - args: + - sleep + - "3600" + image: busybox + imagePullPolicy: IfNotPresent + name: busybox + volumeMounts: + - mountPath: /source/data + name: source-data + restartPolicy: Always + volumes: + - name: source-data + configMap: + name: stash-sample-data +``` + +Let's create the deployment we have shown above, + +```console +$ kubectl apply -f ./docs/examples/platforms/minio/deployment.yaml +deployment.apps/stash-demo created +``` + +Now, wait for deployment's pod to go in `Running` state. + +```console +$ kubectl get pod -n demo -l app=stash-demo +NAME READY STATUS RESTARTS AGE +stash-demo-7ccd56bf5d-n24vl 1/1 Running 0 16s +``` + +You can check that the `/source/data/` directory of this pod is populated with data from the `stash-sample-data` ConfigMap using this command, + +```console +$ kubectl exec -n demo stash-demo-7ccd56bf5d-n24vl -- ls -R /source/data +/source/data: +LICENSE +README.md +``` + +Now, we are ready to backup `/source/data` directory into a Minio bucket. + +**Create Secret:** + +At first, we need to create a secret for `Restic` crd. To configure this backend, the following secret keys are needed: + +| Key | Description | +| ----------------------- | ----------------------------------------------------------------------------------------------------- | +| `RESTIC_PASSWORD` | `Required`. Password used to encrypt snapshots by `restic` | +| `AWS_ACCESS_KEY_ID` | `Required`. Minio access key | +| `AWS_SECRET_ACCESS_KEY` | `Required`. Minio secret access key | +| `CA_CERT_DATA` | `Required` for TLS secured Minio server. 
Root certificate by which Minio server certificate is signed | + +Create the secret as below, + +```console +$ echo -n 'changeit' > RESTIC_PASSWORD +$ echo -n '' > AWS_ACCESS_KEY_ID +$ echo -n '' > AWS_SECRET_ACCESS_KEY +$ cat ./directory/of/root/certificate/ca.crt > CA_CERT_DATA +$ kubectl create secret generic -n demo minio-secret \ + --from-file=./RESTIC_PASSWORD \ + --from-file=./AWS_ACCESS_KEY_ID \ + --from-file=./AWS_SECRET_ACCESS_KEY \ + --from-file=./CA_CERT_DATA +secret/minio-secret created +``` + +Verify that the secret has been created successfully, + +```console +$ kubectl get secret -n demo minio-secret -o yaml +``` + +```yaml +apiVersion: v1 +data: + AWS_ACCESS_KEY_ID: bXktYWNjZXNzLWtleQ== + AWS_SECRET_ACCESS_KEY: bXktc2NyZXQta2V5 + CA_CERT_DATA: + RESTIC_PASSWORD: Y2hhbmdlaXQ= +kind: Secret +metadata: + creationTimestamp: 2018-12-05T11:42:48Z + name: minio-secret + namespace: demo + resourceVersion: "36660" + selfLink: /api/v1/namespaces/demo/secrets/minio-secret + uid: e3a2f905-f882-11e8-9a81-0800272171a4 +type: Opaque +``` + +**Create Restic:** + +Now, we are going to create `Restic` crd to take backup `/source/data` directory of `stash-demo` deployment. This will create a repository in the Minio bucket specified by `s3.bucket` field and start taking periodic backup of `/source/data` directory. + +```console +$ kubectl apply -f ./docs/examples/platforms/minio/restic.yaml +restic.stash.appscode.com/minio-restic created +``` + +Below, the YAML for Restic crd we have created above, + +```yaml +apiVersion: stash.appscode.com/v1alpha1 +kind: Restic +metadata: + name: minio-restic + namespace: demo +spec: + selector: + matchLabels: + app: stash-demo + fileGroups: + - path: /source/data + retentionPolicyName: 'keep-last-5' + backend: + s3: + endpoint: 'https://minio.storage.svc' + bucket: stash-repo + prefix: demo + storageSecretName: minio-secret + schedule: '@every 1m' + volumeMounts: + - mountPath: /source/data + name: source-data + retentionPolicies: + - name: 'keep-last-5' + keepLast: 5 + prune: true +``` + +If everything goes well, Stash will inject a sidecar container into the `stash-demo` deployment to take periodic backup. Let's check that sidecar has been injected successfully, + +```console +$ kubectl get pod -n demo -l app=stash-demo +NAME READY STATUS RESTARTS AGE +stash-demo-57656f6d74-hmc9z 2/2 Running 0 46s +``` + +Look at the pod. It now has 2 containers. If you view the resource definition of this pod, you will see that there is a container named `stash` which running `backup` command. + +**Verify Backup:** + +Stash will create a `Repository` crd with name `deployment.stash-demo` for the respective repository in Minio backend at first backup schedule. To verify, run the following command, + +```console +$ kubectl get repository deployment.stash-demo -n demo +NAME BACKUPCOUNT LASTSUCCESSFULBACKUP AGE +deployment.stash-demo 1 14s 1m +``` + +Here, `BACKUPCOUNT` field indicates number of backup snapshot has taken in this repository. + +`Restic` will take backup of the volume periodically with a 1-minute interval. 
You can verify that backup snapshots have been created successfully by,
+
+```console
+$ kubectl get snapshots -n demo -l repository=deployment.stash-demo
+NAME                             AGE
+deployment.stash-demo-c588c67c   4m3s
+deployment.stash-demo-7dc0c17d   3m3s
+deployment.stash-demo-21228047   2m3s
+deployment.stash-demo-15873428   63s
+deployment.stash-demo-a29263b1   3s
+```
+
+Here, we can see the last 5 successful backup [Snapshots](/docs/concepts/crds/snapshot.md) taken by Stash in the `deployment.stash-demo` repository.
+
+If you navigate to the `/demo/deployment/stash-demo` directory in your Minio Web UI, you will see that a repository has been created there.
+
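+Each of the `Snapshot` objects listed above can also be inspected with `kubectl` to see its metadata, such as the backed up paths. For example, using one of the snapshot names from the sample listing above:
+
+```console
+$ kubectl get snapshot -n demo deployment.stash-demo-a29263b1 -o yaml
+```
+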

+  *Figure: Repository in Minio Backend*

+ +To view the snapshot files, navigate to `snapshots` directory of the repository, + +

+  *Figure: Snapshot in Minio Bucket*

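+Since a Stash repository is a standard `restic` repository, you can also list the snapshots with the `restic` CLI itself. A sketch, assuming the bucket and prefix used in this tutorial and the Minio CA certificate at `./ca.crt` — the access key, secret key and password placeholders must match the storage secret:
+
+```console
+$ export AWS_ACCESS_KEY_ID=<minio-access-key>
+$ export AWS_SECRET_ACCESS_KEY=<minio-secret-key>
+$ export RESTIC_PASSWORD=changeit
+$ restic -r s3:https://minio.storage.svc/stash-repo/demo/deployment/stash-demo --cacert ./ca.crt snapshots
+```
+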
+> Stash keeps all backup data encrypted. So, snapshot files in the bucket will not contain any meaningful data until they are decrypted.
+
+## Recovery
+
+Now, consider that we have lost our workload as well as data volume. We want to recover the data into a new volume and re-deploy the workload.
+
+At first, let's delete `Restic` crd, `stash-demo` deployment and `stash-sample-data` ConfigMap.
+
+```console
+$ kubectl delete deployment -n demo stash-demo
+deployment.extensions "stash-demo" deleted
+
+$ kubectl delete restic -n demo minio-restic
+restic.stash.appscode.com "minio-restic" deleted
+
+$ kubectl delete configmap -n demo stash-sample-data
+configmap "stash-sample-data" deleted
+```
+
+In order to perform recovery, we need `Repository` crd `deployment.stash-demo` and backend secret `minio-secret` to exist.
+
+>In case of cluster disaster, you might lose `Repository` crd and backend secret. In this scenario, you have to create the secret again and `Repository` crd manually. Follow the guide to understand `Repository` crd structure from [here](/docs/concepts/crds/repository.md).
+
+**Create PVC:**
+
+We will recover backed up data into a PVC. At first, we need to know available [StorageClass](https://kubernetes.io/docs/concepts/storage/storage-classes/) in our cluster.
+
+```console
+$ kubectl get storageclass
+NAME                 PROVISIONER                AGE
+standard (default)   k8s.io/minikube-hostpath   8h
+```
+
+Now, let's create a `PersistentVolumeClaim` where our recovered data will be stored.
+
+```console
+$ kubectl apply -f ./docs/examples/platforms/minio/pvc.yaml
+persistentvolumeclaim/stash-recovered created
+```
+
+Below, the YAML for `PersistentVolumeClaim` we have created above,
+
+```yaml
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: stash-recovered
+  namespace: demo
+  labels:
+    app: stash-demo
+spec:
+  storageClassName: standard
+  accessModes:
+  - ReadWriteOnce
+  resources:
+    requests:
+      storage: 50Mi
+```
+
+Check whether the cluster has provisioned the requested claim,
+
+```console
+$ kubectl get pvc -n demo -l app=stash-demo
+NAME              STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
+stash-recovered   Bound    pvc-3d3b6a58-f886-11e8-9a81-0800272171a4   50Mi       RWO            standard       13s
+```
+
+Look at the `STATUS` field. `stash-recovered` PVC is bound to volume `pvc-3d3b6a58-f886-11e8-9a81-0800272171a4`.
+
+**Create Recovery:**
+
+Now, we have to create a `Recovery` crd to recover backed up data into this PVC.
+
+```console
+$ kubectl apply -f ./docs/examples/platforms/minio/recovery.yaml
+recovery.stash.appscode.com/minio-recovery created
+```
+
+Below, the YAML for `Recovery` crd we have created above.
+
+```yaml
+apiVersion: stash.appscode.com/v1alpha1
+kind: Recovery
+metadata:
+  name: minio-recovery
+  namespace: demo
+spec:
+  repository:
+    name: deployment.stash-demo
+    namespace: demo
+  paths:
+  - /source/data
+  recoveredVolumes:
+  - mountPath: /source/data
+    persistentVolumeClaim:
+      claimName: stash-recovered
+```
+
+Wait until `Recovery` job completes its task. To verify that recovery has completed successfully run,
+
+```console
+$ kubectl get recovery -n demo minio-recovery
+NAME             REPOSITORY-NAMESPACE   REPOSITORY-NAME         SNAPSHOT   PHASE       AGE
+minio-recovery   demo                   deployment.stash-demo              Succeeded   26s
+```
+
+Here, `PHASE` `Succeeded` indicates that our recovery has been completed successfully. Backup data has been restored in `stash-recovered` PVC. Now, we are ready to use this PVC to re-deploy the workload.
+ +If you are using Kubernetes version older than v1.11.0 then run following command and check `status.phase` field to see whether the recovery succeeded or failed. + +```console +$ kubectl get recovery -n demo minio-recovery -o yaml +``` + +**Re-deploy Workload:** + +We have successfully restored backed up data into `stash-recovered` PVC. Now, we are going to re-deploy our previous deployment `stash-demo`. This time, we are going to mount the `stash-recovered` PVC as `source-data` volume instead of ConfigMap `stash-sample-data`. + +Below, the YAML for `stash-demo` deployment with `stash-recovered` PVC as `source-data` volume. + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: stash-demo + name: stash-demo + namespace: demo +spec: + replicas: 1 + selector: + matchLabels: + app: stash-demo + template: + metadata: + labels: + app: stash-demo + name: busybox + spec: + containers: + - args: + - sleep + - "3600" + image: busybox + imagePullPolicy: IfNotPresent + name: busybox + volumeMounts: + - mountPath: /source/data + name: source-data + restartPolicy: Always + volumes: + - name: source-data + persistentVolumeClaim: + claimName: stash-recovered +``` + +Let's create the deployment, + +```console +$ kubectl apply -f ./docs/examples/platforms/minio/recovered-deployment.yaml +deployment.apps/stash-demo created +``` + +**Verify Recovered Data:** + +We have re-deployed `stash-demo` deployment with recovered volume. Now, it is time to verify that the recovered data are present in `/source/data` directory. + +Get the pod of new deployment, + +```console +$ kubectl get pod -n demo -l app=stash-demo +NAME READY STATUS RESTARTS AGE +stash-demo-69694789df-wq9vc 1/1 Running 0 14s +``` + +Run following command to view data of `/source/data` directory of this pod, + +```console +$ kubectl exec -n demo stash-demo-69694789df-wq9vc -- ls -R /source/data +/source/data: +LICENSE +README.md +``` + +So, we can see that the data we had backed up from original deployment are now present in re-deployed deployment. + +## Cleanup + +To cleanup the resources created by this tutorial, run following commands: + +```console +$ kubectl delete recovery -n demo minio-recovery +$ kubectl delete secret -n demo minio-secret +$ kubectl delete deployment -n demo stash-demo +$ kubectl delete pvc -n demo stash-recovered +$ kubectl delete repository -n demo deployment.stash-demo + +$ kubectl delete ns demo +``` + +- To uninstall Stash from your cluster, follow the instructions from [here](/docs/setup/uninstall.md). +- If you have deployed Minio server by following the tutorial we have linked in [Before You Begin](/docs/guides/platforms/minio.md#before-you-begin) section, please clean-up Minio resources by following the [cleanup](https://github.com/appscode/third-party-tools/blob/master/storage/minio/README.md#cleanup) section of that tutorial. \ No newline at end of file diff --git a/docs/guides/platforms/rook.md b/docs/guides/platforms/rook.md new file mode 100644 index 000000000..e5ab01f8d --- /dev/null +++ b/docs/guides/platforms/rook.md @@ -0,0 +1,452 @@ +--- +title: Rook | Stash +description: Using Stash with Rook Storage Service +menu: + product_stash_0.7.0: + identifier: platforms-rook + name: Rook + parent: platforms + weight: 50 +product_name: stash +menu_name: product_stash_0.7.0 +--- + +> New to Stash? Please start [here](/docs/concepts/README.md). 
+ +# Using Stash with Rook Storage Service + +This tutorial will show you how to use Stash to **backup** and **restore** a Kubernetes volume in [Rook](https://rook.io/) storage service. Here, we are going to backup the `/source/data` folder of a busybox pod into [AWS S3](/docs/guides/backends.md#aws-s3) compatible [Rook Object Storage](https://rook.io/docs/rook/master/object.html). Then, we are going to show how to recover this data into a `PersistentVolumeClaim` of [Rook Block Storage](https://rook.io/docs/rook/master/block.html). We are going to also re-deploy deployment using this recovered volume. + +## Before You Begin + +At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [Minikube](https://github.com/kubernetes/minikube). + +- Install `Stash` in your cluster following the steps [here](/docs/setup/install.md). + +- You should be familiar with the following Stash concepts: + - [Restic](/docs/concepts/crds/restic.md) + - [Repository](/docs/concepts/crds/repository.md) + - [Recovery](/docs/concepts/crds/recovery.md) + - [Snapshot](/docs/concepts/crds/snapshot.md) + +- You will need a [Rook Storage Service](https://rook.io) with [Object Storage](https://rook.io/docs/rook/master/object.html) and [Block Storage](https://rook.io/docs/rook/master/block.html) configured. If you do not already have a **Rook Storage Service** configured, you can create one by following this [quickstart guide](https://rook.io/docs/rook/master/quickstart.html). + +To keep things isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```console +$ kubectl create ns demo +namespace/demo created +``` + +>Note: YAML files used in this tutorial are stored in [/docs/examples/platforms/rook](/docs/examples/platforms/rook) directory of [appscode/stash](https://github.com/appscode/stash) repository. + +## Backup + +In order to take backup, we need some sample data. Stash has some sample data in [stash-data](https://github.com/appscode/stash-data) repository. As [gitRepo](https://kubernetes.io/docs/concepts/storage/volumes/#gitrepo) volume has been deprecated, we are not going to use this repository as volume directly. Instead, we are going to create a [configMap](https://kubernetes.io/docs/concepts/storage/volumes/#configmap) from the stash-data repository and use that ConfigMap as data source. + +Let's create a ConfigMap from these sample data, + +```console +$ kubectl create configmap -n demo stash-sample-data \ + --from-literal=LICENSE="$(curl -fsSL https://raw.githubusercontent.com/appscode/stash-data/master/LICENSE)" \ + --from-literal=README.md="$(curl -fsSL https://raw.githubusercontent.com/appscode/stash-data/master/README.md)" +configmap/stash-sample-data created +``` + +**Deploy Workload:** + +Now, deploy the following Deployment. Here, we have mounted the ConfigMap `stash-sample-data` as data source volume. + +Below, the YAML for the Deployment we are going to create. 
+ +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: stash-demo + name: stash-demo + namespace: demo +spec: + replicas: 1 + selector: + matchLabels: + app: stash-demo + template: + metadata: + labels: + app: stash-demo + name: busybox + spec: + containers: + - args: + - sleep + - "3600" + image: busybox + imagePullPolicy: IfNotPresent + name: busybox + volumeMounts: + - mountPath: /source/data + name: source-data + restartPolicy: Always + volumes: + - name: source-data + configMap: + name: stash-sample-data +``` + +Let's create the deployment we have shown above, + +```console +$ kubectl apply -f ./docs/examples/platforms/rook/deployment.yaml +deployment.apps/stash-demo created +``` + +Now, wait for deployment's pod to go in `Running` state. + +```console +$ kubectl get pod -n demo -l app=stash-demo +NAME READY STATUS RESTARTS AGE +stash-demo-7ccd56bf5d-fm74f 1/1 Running 0 18s +``` + +You can check that the `/source/data/` directory of this pod is populated with data from the `stash-sample-data` ConfigMap using this command, + +```console +$ kubectl exec -n demo stash-demo-7ccd56bf5d-fm74f -- ls -R /source/data +/source/data: +LICENSE +README.md +``` + +Now, we are ready to backup `/source/data` directory into a Rook bucket. + +**Create Secret:** + +At first, we need to create a secret for `Restic` crd. To configure this backend, the following secret keys are needed: + +| Key | Description | +| ----------------------- | ---------------------------------------------------------- | +| `RESTIC_PASSWORD` | `Required`. Password used to encrypt snapshots by `restic` | +| `AWS_ACCESS_KEY_ID` | `Required`. Rook access key | +| `AWS_SECRET_ACCESS_KEY` | `Required`. Rook secret key | + +Create the secret as below, + +```console +$ echo -n 'changeit' > RESTIC_PASSWORD +$ echo -n '' > AWS_ACCESS_KEY_ID +$ echo -n '' > AWS_SECRET_ACCESS_KEY +$ kubectl create secret generic -n demo rook-secret \ + --from-file=./RESTIC_PASSWORD \ + --from-file=./AWS_ACCESS_KEY_ID \ + --from-file=./AWS_SECRET_ACCESS_KEY +secret/rook-secret created +``` + +Verify that the secret has been created successfully, + +```console +$ kubectl get secret -n demo rook-secret -o yaml +``` + +```yaml +apiVersion: v1 +data: + AWS_ACCESS_KEY_ID: UlhSQ0oyVjRZNlpFQUlBV0UyTEc= + AWS_SECRET_ACCESS_KEY: YWVtZG9IZ1g3UXBUSzF0VXpPZHVJcUNPb01sc1cwZlZES0RRaXM2MA== + RESTIC_PASSWORD: Y2hhbmdlaXQ= +kind: Secret +metadata: + creationTimestamp: 2018-12-06T07:24:58Z + name: rook-secret + namespace: demo + resourceVersion: "4680" + selfLink: /api/v1/namespaces/demo/secrets/rook-secret + uid: 0958c36c-f928-11e8-998e-080027a2d1ee +type: Opaque +``` + +**Create Restic:** + +Now, we are going to create `Restic` crd to take backup `/source/data` directory of `stash-demo` deployment. This will create a repository in the Rook bucket specified by `s3.bucket` field and start taking periodic backup of `/source/data` directory. + +```console +$ kubectl apply -f ./docs/examples/platforms/rook/restic.yaml +restic.stash.appscode.com/rook-restic created +``` + +Below, the YAML for Restic crd we have created above, + +```yaml +apiVersion: stash.appscode.com/v1alpha1 +kind: Restic +metadata: + name: rook-restic + namespace: demo +spec: + selector: + matchLabels: + app: stash-demo # Must match with the label of pod we want to backup. + fileGroups: + - path: /source/data + retentionPolicyName: 'keep-last-5' + backend: + s3: + endpoint: 'http://rook-ceph-rgw-my-store.rook-ceph.svc' # Use your own rook object storage endpoint. 
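+      # Note: Rook exposes an object store through a Service named
+      # rook-ceph-rgw-<store-name> in the Rook namespace, hence the endpoint
+      # above (assumption: a store named 'my-store' in namespace 'rook-ceph').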
+      bucket: stash-backup  # Name of the bucket where you want to store backed up data.
+      prefix: demo  # A prefix for the directory where the repository will be created (optional).
+    storageSecretName: rook-secret
+  schedule: '@every 1m'
+  volumeMounts:
+  - mountPath: /source/data
+    name: source-data
+  retentionPolicies:
+  - name: 'keep-last-5'
+    keepLast: 5
+    prune: true
+```
+
+If everything goes well, `Stash` will inject a sidecar container into the `stash-demo` deployment to take periodic backup. Let's check that sidecar has been injected successfully,
+
+```console
+$ kubectl get pod -n demo -l app=stash-demo
+NAME                          READY   STATUS    RESTARTS   AGE
+stash-demo-6c9cd4cf4c-bn5wm   2/2     Running   0          53s
+```
+
+Look at the pod. It now has 2 containers. If you view the resource definition of this pod, you will see that there is a container named `stash` which is running the `backup` command.
+
+**Verify Backup:**
+
+Stash will create a `Repository` crd with name `deployment.stash-demo` for the respective repository in Rook backend at first backup schedule. To verify, run the following command,
+
+```console
+$ kubectl get repository deployment.stash-demo -n demo
+NAME                    BACKUP-COUNT   LAST-SUCCESSFUL-BACKUP   AGE
+deployment.stash-demo   1              41s                      1m
+```
+
+Here, the `BACKUP-COUNT` field indicates the number of backup snapshots taken in this repository.
+
+`Restic` will take backup of the volume periodically with a 1-minute interval. You can verify that backup snapshots have been created successfully by,
+
+```console
+$ kubectl get snapshots -n demo -l repository=deployment.stash-demo
+NAME                             AGE
+deployment.stash-demo-2960b90e   4m3s
+deployment.stash-demo-79626d95   3m3s
+deployment.stash-demo-6c5eb448   2m3s
+deployment.stash-demo-05761ab3   63s
+deployment.stash-demo-f8937bdf   2s
+```
+
+Here, we can see the last 5 successful backup [Snapshots](/docs/concepts/crds/snapshot.md) taken by Stash in the `deployment.stash-demo` repository.
+
+## Recovery
+
+Now, consider that we have lost our workload as well as data volume. We want to recover the data into a new volume and re-deploy the workload.
+
+At first, let's delete `Restic` crd, `stash-demo` deployment and `stash-sample-data` ConfigMap.
+
+```console
+$ kubectl delete deployment -n demo stash-demo
+deployment.extensions "stash-demo" deleted
+
+$ kubectl delete restic -n demo rook-restic
+restic.stash.appscode.com "rook-restic" deleted
+
+$ kubectl delete configmap -n demo stash-sample-data
+configmap "stash-sample-data" deleted
+```
+
+In order to perform recovery, we need `Repository` crd `deployment.stash-demo` and backend secret `rook-secret` to exist.
+
+>In case of cluster disaster, you might lose `Repository` crd and backend secret. In this scenario, you have to create the secret again and `Repository` crd manually. Follow the guide to understand `Repository` crd structure from [here](/docs/concepts/crds/repository.md).
+
+**Create PVC:**
+
+We are going to recover our backed up data into a PVC. [Rook Block Storage](https://rook.io/docs/rook/master/block.html) allows mounting Rook storage into a pod using a `PersistentVolumeClaim`. At first, we need to know the respective [StorageClass](https://kubernetes.io/docs/concepts/storage/storage-classes/) for Rook Block Storage.
+
+```console
+$ kubectl get storageclass
+NAME                 PROVISIONER                AGE
+rook-ceph-block      ceph.rook.io/block         96m
+standard (default)   k8s.io/minikube-hostpath   124m
+```
+
+Here, `rook-ceph-block` storage class is responsible for provisioning the PVC from Rook Block Storage.
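+
+If your cluster does not have such a storage class yet, a typical definition from the Rook documentation of this generation looks like the sketch below — the pool name and cluster namespace are assumptions and must match your Rook setup:
+
+```yaml
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: rook-ceph-block
+provisioner: ceph.rook.io/block
+parameters:
+  blockPool: replicapool        # assumed Rook block pool name
+  clusterNamespace: rook-ceph   # assumed Rook cluster namespace
+  fstype: ext4
+```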
+
+Let's create a `PersistentVolumeClaim` with `rook-ceph-block` storage class where our recovered data will be stored.
+
+```console
+$ kubectl apply -f ./docs/examples/platforms/rook/rook-pvc.yaml
+persistentvolumeclaim/stash-recovered created
+```
+
+Below, the YAML for `PersistentVolumeClaim` we have created above,
+
+```yaml
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: stash-recovered
+  namespace: demo
+  labels:
+    app: stash-demo
+spec:
+  storageClassName: rook-ceph-block
+  accessModes:
+  - ReadWriteOnce
+  resources:
+    requests:
+      storage: 50Mi
+```
+
+Check whether the cluster has provisioned the requested claim,
+
+```console
+$ kubectl get pvc -n demo -l app=stash-demo
+NAME              STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS      AGE
+stash-recovered   Bound    pvc-dd0739b2-f934-11e8-998e-080027a2d1ee   50Mi       RWO            rook-ceph-block   46s
+```
+
+Look at the `STATUS` field. `stash-recovered` PVC is bound to volume `pvc-dd0739b2-f934-11e8-998e-080027a2d1ee`.
+
+**Create Recovery:**
+
+Now, we have to create a `Recovery` crd to recover backed up data into this PVC.
+
+```console
+$ kubectl apply -f ./docs/examples/platforms/rook/recovery.yaml
+recovery.stash.appscode.com/rook-recovery created
+```
+
+Below, the YAML for `Recovery` crd we have created above.
+
+```yaml
+apiVersion: stash.appscode.com/v1alpha1
+kind: Recovery
+metadata:
+  name: rook-recovery
+  namespace: demo
+spec:
+  repository:
+    name: deployment.stash-demo
+    namespace: demo
+  paths:
+  - /source/data
+  recoveredVolumes:
+  - mountPath: /source/data
+    persistentVolumeClaim:
+      claimName: stash-recovered
+```
+
+Wait until `Recovery` job completes its task. To verify that recovery has completed successfully run,
+
+```console
+$ kubectl get recovery -n demo rook-recovery
+NAME            REPOSITORY-NAMESPACE   REPOSITORY-NAME         SNAPSHOT   PHASE       AGE
+rook-recovery   demo                   deployment.stash-demo              Succeeded   26s
+```
+
+Here, `PHASE` `Succeeded` indicates that our recovery has been completed successfully. Backup data has been restored in `stash-recovered` PVC. Now, we are ready to use this PVC to re-deploy the workload.
+
+If you are using Kubernetes version older than v1.11.0 then run following command and check `status.phase` field to see whether the recovery succeeded or failed.
+
+```console
+$ kubectl get recovery -n demo rook-recovery -o yaml
+```
+
+**Re-deploy Workload:**
+
+We have successfully restored backed up data into `stash-recovered` PVC. Now, we are going to re-deploy our previous deployment `stash-demo`. This time, we are going to mount the `stash-recovered` PVC as `source-data` volume instead of ConfigMap `stash-sample-data`.
+
+Below, the YAML for `stash-demo` deployment with `stash-recovered` PVC as `source-data` volume.
+ +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: stash-demo + name: stash-demo + namespace: demo +spec: + replicas: 1 + selector: + matchLabels: + app: stash-demo + template: + metadata: + labels: + app: stash-demo + name: busybox + spec: + containers: + - args: + - sleep + - "3600" + image: busybox + imagePullPolicy: IfNotPresent + name: busybox + volumeMounts: + - mountPath: /source/data + name: source-data + restartPolicy: Always + volumes: + - name: source-data + persistentVolumeClaim: + claimName: stash-recovered +``` + +Let's create the deployment, + +```console +$ kubectl apply -f ./docs/examples/platforms/rook/recovered-deployment.yaml +deployment.apps/stash-demo created +``` + +**Verify Recovered Data:** + +We have re-deployed `stash-demo` deployment with recovered volume. Now, it is time to verify that the recovered data are present in `/source/data` directory. + +Get the pod of new deployment, + +```console +$ kubectl get pod -n demo -l app=stash-demo +NAME READY STATUS RESTARTS AGE +stash-demo-69694789df-rsrz6 1/1 Running 0 15s +``` + +Run following command to view data of `/source/data` directory of this pod, + +```console +$ kubectl exec -n demo stash-demo-69694789df-rsrz6 -- ls -R /source/data +source/data: +LICENSE +README.md +lost+found + +/source/data/lost+found: +``` + +So, we can see that the data we had backed up from original deployment are now present in re-deployed deployment. + +## Cleanup + +To cleanup the resources created by this tutorial, run following commands: + +```console +$ kubectl delete recovery -n demo rook-recovery +$ kubectl delete secret -n demo rook-secret +$ kubectl delete deployment -n demo stash-demo +$ kubectl delete pvc -n demo stash-recovered +$ kubectl delete repository -n demo deployment.stash-demo + +$ kubectl delete ns demo +``` + +- To uninstall Stash from your cluster, follow the instructions from [here](/docs/setup/uninstall.md). \ No newline at end of file diff --git a/docs/guides/rbac.md b/docs/guides/rbac.md index 94a48d8f8..ca03fcc54 100644 --- a/docs/guides/rbac.md +++ b/docs/guides/rbac.md @@ -6,7 +6,7 @@ menu: identifier: rbac-stash name: RBAC parent: guides - weight: 40 + weight: 45 product_name: stash menu_name: product_stash_0.7.0 section_menu_id: guides @@ -46,5 +46,5 @@ You can find full working examples [here](/docs/guides/workloads.md). - To run backup in offline mode see [here](/docs/guides/offline_backup.md) - See the list of supported backends and how to configure them [here](/docs/guides/backends/overview.md). - See working examples for supported workload types [here](/docs/guides/workloads.md). -- Thinking about monitoring your backup operations? Stash works [out-of-the-box with Prometheus](/docs/guides/monitoring.md). +- Thinking about monitoring your backup operations? Stash works [out-of-the-box with Prometheus](/docs/guides/monitoring/overview.md). - Want to hack on Stash? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/restore.md b/docs/guides/restore.md index dd61309e3..fadceebd4 100644 --- a/docs/guides/restore.md +++ b/docs/guides/restore.md @@ -6,7 +6,7 @@ menu: identifier: restore-stash name: Restore Volumes parent: guides - weight: 25 + weight: 20 product_name: stash menu_name: product_stash_0.7.0 section_menu_id: guides @@ -14,32 +14,138 @@ section_menu_id: guides > New to Stash? Please start [here](/docs/concepts/README.md). -# Restore Backup -This tutorial will show you how to restore a Stash backup. 
At first, backup a Kubernetes workload volume by following the steps [here](/docs/guides/backup.md).
+# Restore from Backup

-To restore a backup, you need to create a `Recovery` CRD by specifying `Repository`, `path` and volume where the backup will be restored. Here, is a sample `Recovery` to recover the latest snapshot.
+This tutorial will show you how to restore a backed up volume using Stash. Here, we are going to recover backed up data into a PVC. Then, we are going to re-deploy the workload using the recovered volume.
+
+## Before You Begin
+
+To proceed with this tutorial, you have to meet the following requirements:
+
+- At first, you need to have some backup taken by Stash. If you don't already have any backup repository, create one by following this [backup tutorial](/docs/guides/backup.md).
+
+- You need to have the storage `Secret` that was used to take backup. If you don't have the `Secret`, create one with valid credentials.
+
+- You need to have the `Repository` crd that was created for the respective backup. If you have lost the `Repository` crd, you have to create it manually with respective backend information. Follow [this guide](/docs/concepts/crds/repository.md) to understand the structure of the `Repository` crd.
+
+- You should be familiar with the following Stash concepts:
+ - [Repository](/docs/concepts/crds/repository.md)
+ - [Recovery](/docs/concepts/crds/recovery.md)
+ - [Snapshot](/docs/concepts/crds/snapshot.md)
+
+To keep things isolated, we are going to use a separate namespace called `demo` throughout this tutorial. Create the namespace if you haven't created it yet.
+
+```console
+$ kubectl create ns demo
+namespace/demo created
+```
+
+>Note: YAML files used in this tutorial are stored in [/docs/examples/recovery](/docs/examples/recovery) directory of [appscode/stash](https://github.com/appscode/stash) repository.
+
+## Overview
+
+The following diagram shows how Stash recovers backed up data from a backend. Open the image in a new tab to see the enlarged image.
+

+  *Figure: Stash Backup Flow*

+
+The volume recovery process consists of the following steps:
+
+1. A user creates a `Recovery` crd that specifies the target `Repository` from which they want to recover. It also specifies one or more volumes (`recoveredVolumes`) where the recovered data will be stored.
+2. Stash operator watches for new `Recovery` crds. If it sees one, it checks if the referred `Repository` crd exists or not.
+3. Then, Stash operator creates a `Job` to recover the backed up data.
+4. The recovery `Job` reads the backend information from `Repository` crd and the backend credentials from the storage `Secret`.
+5. Then, the recovery `Job` recovers data from the backend and stores it in the target volume.
+6. Finally, the user mounts this recovered volume into the original workload and re-deploys it.
+
+## Recovery
+
+Now, we are going to recover the backed up data from the `deployment.stash-demo` Repository that was created while taking backup, into a PVC named `stash-recovered`.
+
+At first, let's delete `Restic` crd so that it does not lock the repository while we are recovering from it. Also, delete `stash-demo` deployment and `stash-sample-data` ConfigMap if you followed our backup guide.

```console
-$ kubectl apply -f ./docs/examples/tutorial/recovery.yaml
-recovery "stash-demo" created
+$ kubectl delete deployment -n demo stash-demo
+deployment.extensions "stash-demo" deleted
+
+$ kubectl delete restic -n demo local-restic
+restic.stash.appscode.com "local-restic" deleted
+
+$ kubectl delete configmap -n demo stash-sample-data
+configmap "stash-sample-data" deleted
```

+>Note: In order to perform recovery, we need `Repository` crd (in our case `deployment.stash-demo`) and backend secret (in our case `local-secret`).
+
+**Create PVC:**
+
+We will recover backed up data into a PVC. At first, we need to know available [StorageClass](https://kubernetes.io/docs/concepts/storage/storage-classes/) in our cluster.
+
+```console
+$ kubectl get storageclass
+NAME                 PROVISIONER                AGE
+standard (default)   k8s.io/minikube-hostpath   8h
+```
+
+Now, let's create a `PersistentVolumeClaim` where our recovered data will be stored.
+
+```console
+$ kubectl apply -f ./docs/examples/recovery/pvc.yaml
+persistentvolumeclaim/stash-recovered created
+```
+
+Here is the definition of the `PersistentVolumeClaim` we have created above,
+
+```yaml
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: stash-recovered
+  namespace: demo
+  labels:
+    app: stash-demo
+spec:
+  storageClassName: standard
+  accessModes:
+  - ReadWriteOnce
+  resources:
+    requests:
+      storage: 50Mi
+```
+
+Check whether the cluster has provisioned the requested claim.
+
+```console
+$ kubectl get pvc -n demo -l app=stash-demo
+NAME              STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
+stash-recovered   Bound    pvc-e6ffface-fa01-11e8-8905-0800277ca39d   50Mi       RWO            standard       13s
+```
+
+Look at the `STATUS` field. `stash-recovered` PVC is bound to volume `pvc-e6ffface-fa01-11e8-8905-0800277ca39d`.
+
+**Create Recovery CRD:**
+
+Now, we have to create a `Recovery` crd to recover backed up data into this PVC.
+ +The resource definition of the `Recovery` crd we are going to create is below: + ```yaml apiVersion: stash.appscode.com/v1alpha1 kind: Recovery metadata: - name: stash-demo - namespace: default + name: local-recovery + namespace: demo spec: repository: name: deployment.stash-demo - namespace: default + namespace: demo paths: - /source/data recoveredVolumes: - mountPath: /source/data - hostPath: - path: /data/stash-test/restic-restored + persistentVolumeClaim: + claimName: stash-recovered ``` Here, @@ -49,95 +155,160 @@ Here, - `spec.paths` specifies the file-group paths that were backed up using `Restic`. - `spec.recoveredVolumes` indicates an array of volumes where snapshots will be recovered. Here, `mountPath` specifies where the volume will be mounted. Note that, `Recovery` recovers data in the same paths from where the backup was taken (specified in `spec.paths`). So, volumes must be mounted on those paths or their parent paths. ->Note that, here we have used `hostPath` as `recoveredVolumes` only for demonstration purpose. You can use `PVC`, `gcePersistentDisk` and other Kubernetes volumes to recover your data. Here are some examples, -> 1. [Recover to gcePersistentDisk](/docs/guides/gke.md#recover-to-gce-persistent-disk) -> 2. [Recover to PersistentVolumeClaim](/docs/guides/gke.md#recover-to-persistentvolumeclaim) -> 3. [Recover to Rook PVC](/docs/guides/rook.md#recover-to-persistentvolumeclaim) +Let's create the Recovery crd we have shown above, + +```console +$ kubectl apply -f ./docs/examples/recovery/recovery.yaml +recovery.stash.appscode.com/local-recovery created +``` -Stash operator watches for `Recovery` objects using Kubernetes api. It collects required snapshot information from the specified `Restic` object. Then it creates a recovery job that performs the recovery guides. On completion, job and associated pods are deleted by stash operator. To verify recovery, we can check the `Recovery` status. +Wait until `Recovery` job completes its task. To verify that recovery has completed successfully run, -```yaml -$ kubectl get recovery stash-demo -o yaml +```console +$ kubectl get recovery -n demo local-recovery +NAME REPOSITORY-NAMESPACE REPOSITORY-NAME SNAPSHOT PHASE AGE +local-recovery demo deployment.stash-demo Succeeded 54s +``` -apiVersion: stash.appscode.com/v1alpha1 -kind: Recovery +Here, `PHASE` `Succeeded` indicates that our recovery has been completed successfully. Backup data has been restored in `stash-recovered` PVC. Now, we are ready to use this PVC to re-deploy the workload. + +If you are using Kubernetes version older than v1.11.0 then run following command and check `status.phase` field to see whether the recovery succeeded or failed. + +```console +$ kubectl get recovery -n demo local-recovery -o yaml +``` + +**Re-deploy Workload:** + +We have successfully restored backed up data into `stash-recovered` PVC. Now, we are going to re-deploy our previous deployment `stash-demo`. This time, we are going to mount the `stash-recovered` PVC as `source-data` volume instead of ConfigMap `stash-sample-data`. + +Below is the YAML for `stash-demo` deployment with `stash-recovered` PVC as `source-data` volume. 
+ +```yaml +apiVersion: apps/v1 +kind: Deployment metadata: - clusterName: "" - creationTimestamp: 2017-12-04T06:27:16Z - deletionGracePeriodSeconds: null - deletionTimestamp: null - generation: 0 - initializers: null + labels: + app: stash-demo name: stash-demo - namespace: default - resourceVersion: "29671" - selfLink: /apis/stash.appscode.com/v1alpha1/namespaces/default/recoveries/stash-demo - uid: 2bf74432-d8bc-11e7-be92-0800277f19c0 + namespace: demo spec: - repository: - name: deployment.stash-demo - namespace: default - paths: - - /source/data - recoveredVolumes: - - mountPath: /source/data - hostPath: - path: /data/stash-test/restic-restored -status: - phase: Succeeded + replicas: 1 + selector: + matchLabels: + app: stash-demo + template: + metadata: + labels: + app: stash-demo + name: busybox + spec: + containers: + - args: + - sleep + - "3600" + image: busybox + imagePullPolicy: IfNotPresent + name: busybox + volumeMounts: + - mountPath: /source/data + name: source-data + restartPolicy: Always + volumes: + - name: source-data + persistentVolumeClaim: + claimName: stash-recovered ``` +Let's create the deployment, + +```console +$ kubectl apply -f ./docs/examples/recovery/recovered-deployment.yaml +deployment.apps/stash-demo created +``` + +**Verify Recovered Data:** + +We have re-deployed `stash-demo` deployment with recovered volume. Now, it is time to verify that the recovered data are present in `/source/data` directory. + +Get the pod of new deployment, + +```console +$ kubectl get pod -n demo -l app=stash-demo +NAME READY STATUS RESTARTS AGE +stash-demo-69694789df-kvcp5 1/1 Running 0 20s +``` + +Run following command to view data of `/source/data` directory of this pod, + +```console +$ kubectl exec -n demo stash-demo-69694789df-kvcp5 -- ls -R /source/data +/source/data: +LICENSE +README.md +``` + +So, we can see that the data we had backed up from original deployment are now present in re-deployed deployment. + ## Recover a specific snapshot -With the help of [Snapshot](/docs/concepts/crds/snapshot.md) object, stash allows the users to recover a particular snapshot. Now, the users can specify which snapshot to recover. Here, is an example of how to recover a specific snapshot. +With the help of [Snapshot](/docs/concepts/crds/snapshot.md) object, Stash allows users to recover from a particular snapshot. Here is an example of how to recover from a specific snapshot. First, list the available snapshots, ```console -$ kubectl get snapshots --all-namespaces +$ kubectl get snapshots -n demo -l repository=deployment.stash-demo NAME AGE -deployment.stash-demo-d3050010 4m -deployment.stash-demo-300d7c13 3m -deployment.stash-demo-c24f6d96 2m -deployment.stash-demo-80bcc7e3 1m -deployment.stash-demo-3e79020e 35s -``` +deployment.stash-demo-bd8db133 4m50s +deployment.stash-demo-b6e67dee 3m50s +deployment.stash-demo-10790cf0 2m50s +deployment.stash-demo-1ace430f 110s +deployment.stash-demo-baff6c47 50s +``` -Now, create a `Recovery` with specifying `Snapshot` name, +>Note: If you are using [Local](/docs/guides/backends/local.md) backend for storing backup snapshots, your workload must be running to be able to list snapshots. -```console -$ kubectl apply -f ./docs/examples/tutorial/recovery-specific-snapshot.yaml -recovery "stash-demo" created -``` +Below is the YAML for `Recovery` crd referring to a specific snapshot. 
+Below is the YAML for a `Recovery` crd referring to a specific snapshot.
+
 ```yaml
 apiVersion: stash.appscode.com/v1alpha1
 kind: Recovery
 metadata:
-  name: stash-demo
-  namespace: default
+  name: local-recovery-specific-snapshot
+  namespace: demo
 spec:
   repository:
     name: deployment.stash-demo
-    namespace: default
-  snapshot: deployment.stash-demo-d3050010
+    namespace: demo
+  snapshot: deployment.stash-demo-baff6c47
   paths:
   - /source/data
   recoveredVolumes:
   - mountPath: /source/data
-    hostPath:
-      path: /data/stash-test/restic-restored
+    persistentVolumeClaim:
+      claimName: stash-recovered
 ```
 
-## Cleaning up
-To cleanup the Kubernetes resources created by this tutorial, run:
+Now, create the `Recovery` crd shown above,
 
 ```console
-$ kubectl delete deployment stash-demo
-$ kubectl delete secret stash-demo
-$ kubectl delete restic stash-demo
-$ kubectl delete recovery stash-demo
-$ kubectl delete repository deployment.stash-demo
+$ kubectl apply -f ./docs/examples/recovery/recovery-specific-snapshot.yaml
+recovery.stash.appscode.com/local-recovery-specific-snapshot created
+```
+
+## Cleanup
+
+To clean up the resources created by this tutorial, run the following commands:
+
+```console
+$ kubectl delete recovery -n demo local-recovery
+$ kubectl delete recovery -n demo local-recovery-specific-snapshot
+$ kubectl delete secret -n demo local-secret
+$ kubectl delete deployment -n demo stash-demo
+$ kubectl delete pvc -n demo stash-recovered
+$ kubectl delete repository -n demo deployment.stash-demo
+
+$ kubectl delete ns demo
 ```
 
 If you would like to uninstall Stash operator, please follow the steps [here](/docs/setup/uninstall.md).
@@ -149,6 +320,6 @@ If you would like to uninstall Stash operator, please follow the steps [here](/d
 - To run backup in offline mode see [here](/docs/guides/offline_backup.md)
 - See the list of supported backends and how to configure them [here](/docs/guides/backends/overview.md).
 - See working examples for supported workload types [here](/docs/guides/workloads.md).
-- Thinking about monitoring your backup operations? Stash works [out-of-the-box with Prometheus](/docs/guides/monitoring.md).
+- Thinking about monitoring your backup operations? Stash works [out-of-the-box with Prometheus](/docs/guides/monitoring/overview.md).
 - Learn about how to configure [RBAC roles](/docs/guides/rbac.md).
-- Want to hack on Stash? Check our [contribution guidelines](/docs/CONTRIBUTING.md).
\ No newline at end of file
+- Want to hack on Stash? Check our [contribution guidelines](/docs/CONTRIBUTING.md).
diff --git a/docs/guides/rook.md b/docs/guides/rook.md
deleted file mode 100644
index 11f2b10a6..000000000
--- a/docs/guides/rook.md
+++ /dev/null
@@ -1,384 +0,0 @@
----
-title: Rook | Stash
-description: Using Stash with Rook Storage Service
-menu:
-  product_stash_0.7.0:
-    identifier: rook-stash
-    name: Backup to Rook
-    parent: guides
-    weight: 55
-product_name: stash
-menu_name: product_stash_0.7.0
-section_menu_id: guides
----
-
-> New to Stash? Please start [here](/docs/concepts/README.md).
-
-
-# Using Stash with Rook Storage Service
-
-This tutorial will show you how to use Stash to **backup** and **restore** a Kubernetes volume in [Rook](https://rook.io/) storage service. Here, we are going to backup the `/source/data` folder of a busybox pod into [AWS S3](/docs/guides/backends.md#aws-s3) compatible [Rook Object Storage](https://rook.io/docs/rook/master/object.html). Then, we will show how to recover this data into a `PersistentVolumeClaim` of [Rook Block Storage](https://rook.io/docs/rook/master/block.html).
We will also re-deploy deployment using this recovered volume. - -## Before You Begin - -At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [Minikube](https://github.com/kubernetes/minikube). Now, install `Stash` in your cluster following the steps [here](/docs/setup/install.md). - -You should have understanding the following Stash concepts: - -- [Restic](/docs/concepts/crds/restic.md) -- [Repository](/docs/concepts/crds/repository.md) -- [Recovery](/docs/concepts/crds/recovery.md) -- [Snapshot](/docs/concepts/crds/snapshot.md) - -Then, you will need to have a [Rook Storage Service](https://rook.io) with [Object Storage](https://rook.io/docs/rook/master/object.html) and [Block Storage](https://rook.io/docs/rook/master/block.html) configured. If you do not already have a **Rook Storage Service** configured, you can create one by following this [quickstart guide](https://rook.io/docs/rook/master/quickstart.html). - -## Backup - -First, deploy the following `busybox` Deployment in your cluster. Here we are using a git repository as a source volume for demonstration purpose. - -```console -$ kubectl apply -f ./docs/examples/tutorial/busybox.yaml -deployment "stash-demo" created -``` - -Definition of `busybox` deployment: - -```yaml -apiVersion: apps/v1beta1 -kind: Deployment -metadata: - labels: - app: stash-demo - name: stash-demo - namespace: default -spec: - replicas: 1 - template: - metadata: - labels: - app: stash-demo - name: busybox - spec: - containers: - - args: - - sleep - - "3600" - image: busybox - imagePullPolicy: IfNotPresent - name: busybox - volumeMounts: - - mountPath: /source/data - name: source-data - restartPolicy: Always - volumes: - - gitRepo: - repository: https://github.com/appscode/stash-data.git - name: source-data -``` - -Run the following command to confirm that `busybox` pods are running. - -```console -$ kubectl get pods -l app=stash-demo -NAME READY STATUS RESTARTS AGE -stash-demo-b66b9cdfd-j7rb5 1/1 Running 0 49s -``` - -You can check that the `/source/data/` directory of pod is populated with data from the volume source using this command, - -```console -$ kubectl exec stash-demo-b66b9cdfd-j7rb5 -- ls -R /source/data/ -/source/data/: -stash-data - -/source/data/stash-data: -Eureka-by-EdgarAllanPoe.txt -LICENSE -README.md -``` - -Now, let’s backup the directory into a [AWS S3](/docs/guides/backends.md#aws-s3) compatible Rook Object Storage. - -At first, we need to create a secret for `Restic` crd. Create secret for `Restic` using following command, - -```console -$ echo -n 'changeit' > RESTIC_PASSWORD -$ echo -n '' > AWS_ACCESS_KEY_ID -$ echo -n '' > AWS_SECRET_ACCESS_KEY -$ kubectl create secret generic rook-restic-secret \ - --from-file=./RESTIC_PASSWORD \ - --from-file=./AWS_ACCESS_KEY_ID \ - --from-file=./AWS_SECRET_ACCESS_KEY -secret "rook-restic-secret" created -``` - -Verify that the secret has been created successfully, - -```console -$ kubectl get secret rook-restic-secret -o yaml -``` - -```yaml -apiVersion: v1 -data: - AWS_ACCESS_KEY_ID: - AWS_SECRET_ACCESS_KEY: - RESTIC_PASSWORD: Y2hhbmdlaXQ= -kind: Secret -metadata: - creationTimestamp: 2018-04-12T10:32:14Z - name: rook-restic-secret - namespace: default - resourceVersion: "2414" - selfLink: /api/v1/namespaces/default/secrets/rook-restic-secret - uid: c454391b-3e3c-11e8-a7b6-080027672508 -type: Opaque -``` - -Now, we can create `Restic` crd. 
This will create a repository `stash-backup-repo` in **Rook Object Storage** bucket and start taking periodic backup of `/source/data/` folder. - -```console -$ kubectl apply -f ./docs/examples/backends/rook/rook-restic.yaml -restic "rook-restic" created -``` - -Definition of `Restic` crd for Rook Object Storage backend, - -```yaml -apiVersion: stash.appscode.com/v1alpha1 -kind: Restic -metadata: - name: rook-restic - namespace: default -spec: - selector: - matchLabels: - app: stash-demo # Must match with the label of busybox pod we have created before. - fileGroups: - - path: /source/data - retentionPolicyName: 'keep-last-5' - backend: - s3: - endpoint: 'http://rook-ceph-rgw-my-store.rook' # Use your own rook object storage end point. - bucket: stash-backup # Give a name of the bucket where you want to backup. - prefix: demo # . Path prefix into bucket where repository will be created.(optional). - storageSecretName: rook-restic-secret - schedule: '@every 1m' - volumeMounts: - - mountPath: /source/data - name: source-data - retentionPolicies: - - name: 'keep-last-5' - keepLast: 5 - prune: true -``` - -If everything goes well, A `Repository` crd with name `deployment.stash-demo` will be created for the respective repository in Rook Object Storage backend. Verify that, `Repository` is created successfully using this command, - -```console -$ kubectl get repository deployment.stash-demo -NAME AGE -deployment.stash-demo 1m -``` - -`Restic` will take backup of the volume periodically with a 1-minute interval. You can verify that backup is taking successfully by, - -```console -$ kubectl get snapshots -l repository=deployment.stash-demo -NAME AGE -deployment.stash-demo-c1014ca6 10s -``` - -Here, `deployment.stash-demo-c1014ca6` represents the name of the successful backup [Snapshot](/docs/concepts/crds/snapshot.md) taken by Stash in `deployment.stash-demo` repository. - -## Recover to `PersistentVolumeClaim` - -**Rook Block Storage** allow the users to mount persistent volume into pod using `PersistentVolumeClaim`. Here, we will recover our backed up data into a PVC. - -At first, delete `Restic` crd so that it does not lock the restic repository while we are trying to recover from it. - -```console -$ kubectl delete restic rook-restic -restic "rook-restic" deleted -``` - -Now, create a `PersistentVolumeClaim` for Rook Block Storage, - -```console -$ kubectl apply -f ./docs/examples/backends/rook/rook-pvc.yaml -persistentvolumeclaim "stash-recovered" created -``` - -Definition of `PersistentVolumeClaim`: - -```yaml -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: stash-recovered - labels: - app: stash-demo -spec: - storageClassName: rook-block - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 2Gi -``` - -Check cluster has provisioned the requested claim, - -```console -$ kubectl get pvc -l app=stash-demo -NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE -stash-recovered Bound pvc-a7aa73fa-3e3f-11e8-a7b6-080027672508 2Gi RWO rook-block 36s -``` - -Look at the `STATUS` filed. `stash-recovered` PVC is bounded to volume `pvc-a7aa73fa-3e3f-11e8-a7b6-080027672508`. - -Now, create a `Recovery` to recover backed up data in this PVC. 
- -```console -$ kubectl apply -f ./docs/examples/backends/rook/rook-recovery.yaml -recovery "rook-recovery" created -``` - -Definition of `Recovery` should look like below: - -```yaml -apiVersion: stash.appscode.com/v1alpha1 -kind: Recovery -metadata: - name: rook-recovery - namespace: default -spec: - repository: - name: deployment.stash-demo - namespace: default - paths: - - /source/data - recoveredVolumes: - - mountPath: /source/data - persistentVolumeClaim: - claimName: stash-recovered -``` - -Wait until `Recovery` job completed its task. To verify that recovery completed successfully run, - -```console -$ kubectl get recovery rook-recovery -o yaml -``` - -```yaml -apiVersion: stash.appscode.com/v1alpha1 -kind: Recovery -metadata: - annotations: - kubectl.kubernetes.io/last-applied-configuration: | - {"apiVersion":"stash.appscode.com/v1alpha1","kind":"Recovery","metadata":{"name":"rook-recovery","namespace":"default"},"spec":{"repository":"deployment.stash-demo","paths":["/source/data"],"recoveredVolumes":[{"mountPath":"/source/data","persistentVolumeClaim":{"claimName":"stash-recovered"}}]}} - clusterName: "" - creationTimestamp: 2018-04-12T12:57:54Z - generation: 0 - name: rook-recovery - namespace: default - resourceVersion: "2315" - selfLink: /apis/stash.appscode.com/v1alpha1/namespaces/default/recoveries/rook-recovery - uid: 1dbad356-3e51-11e8-b2bd-080027dbef96 -spec: - repository: - name: deployment.stash-demo - namespace: default - paths: - - /source/data - recoveredVolumes: - - mountPath: /source/data - persistentVolumeClaim: - claimName: stash-recovered -status: - phase: Succeeded -``` - -Now, let's re-deploy the `busybox` deployment using this recovered PVC. First, delete old deployment and recovery job. - -```console -$ kubectl delete deployment stash-demo -deployment "stash-demo" deleted - -$ kubectl delete recovery rook-recovery -recovery "rook-recovery" deleted -``` - -Now, mount the recovered `PersistentVolumeClaim` in `busybox` deployment instead of `gitRepo` we had mounted before then re-deploy it, - -```console -$ kubectl apply -f ./docs/examples/backends/rook/restored-deployment.yaml -deployment "stash-demo" created -``` - -```yaml -apiVersion: apps/v1beta1 -kind: Deployment -metadata: - labels: - app: stash-demo - name: stash-demo - namespace: default -spec: - replicas: 1 - template: - metadata: - labels: - app: stash-demo - name: busybox - spec: - containers: - - args: - - sleep - - "3600" - image: busybox - imagePullPolicy: IfNotPresent - name: busybox - volumeMounts: - - mountPath: /source/data - name: source-data - restartPolicy: Always - volumes: - - name: source-data - persistentVolumeClaim: - claimName: stash-recovered -``` - -Get the pod of new deployment, - -```console -$ kubectl get pod -l app=stash-demo -NAME READY STATUS RESTARTS AGE -stash-demo-5bc57fbcfb-b45k8 1/1 Running 0 1m -``` - -Check the backed up data is restored in `/source/data/` directory of `busybox` pod. - -```console -$ kubectl exec stash-demo-5bc57fbcfb-b45k8 -- ls -R /source/data/ -/source/data/: -lost+found -stash-data - -/source/data/lost+found: - -/source/data/stash-data: -Eureka-by-EdgarAllanPoe.txt -LICENSE -README.md -``` - -## Cleanup - -```console -$ kubectl delete pvc stash-recovered -$ kubectl delete deployment stash-demo -$ kubectl delete repository deployment.stash-demo -``` - -Uninstall Stash following the instructions [here](/docs/setup/uninstall.md). 
\ No newline at end of file
diff --git a/docs/guides/workloads.md b/docs/guides/workloads.md
index 3d6427d2c..dac3bce59 100644
--- a/docs/guides/workloads.md
+++ b/docs/guides/workloads.md
@@ -6,7 +6,7 @@ menu:
     identifier: workloads-stash
     name: Workloads
     parent: guides
-    weight: 20
+    weight: 25
 product_name: stash
 menu_name: product_stash_0.7.0
 section_menu_id: guides
@@ -132,7 +132,7 @@ To learn about the meaning of various flags, please visit [here](/docs/reference
 - Learn about the details of Recovery CRD [here](/docs/concepts/crds/recovery.md).
 - To run backup in offline mode see [here](/docs/guides/offline_backup.md)
 - See the list of supported backends and how to configure them [here](/docs/guides/backends/overview.md).
-- Thinking about monitoring your backup operations? Stash works [out-of-the-box with Prometheus](/docs/guides/monitoring.md).
+- Thinking about monitoring your backup operations? Stash works [out-of-the-box with Prometheus](/docs/guides/monitoring/overview.md).
 - Learn about how to configure [RBAC roles](/docs/guides/rbac.md).
 - Wondering what features are coming next? Please visit [here](/docs/roadmap.md).
 - Want to hack on Stash? Check our [contribution guidelines](/docs/CONTRIBUTING.md).
diff --git a/docs/images/platforms/minio/minio-repository.png b/docs/images/platforms/minio/minio-repository.png
new file mode 100644
index 000000000..190d2d373
Binary files /dev/null and b/docs/images/platforms/minio/minio-repository.png differ
diff --git a/docs/images/platforms/minio/minio-snapshots.png b/docs/images/platforms/minio/minio-snapshots.png
new file mode 100644
index 000000000..df6150339
Binary files /dev/null and b/docs/images/platforms/minio/minio-snapshots.png differ
diff --git a/docs/images/stash-backup.svg b/docs/images/stash-backup.svg
new file mode 100644
index 000000000..1ea054481
--- /dev/null
+++ b/docs/images/stash-backup.svg
@@ -0,0 +1,729 @@
+[729 lines of SVG markup (image/svg+xml) omitted: Stash backup flow diagram]
diff --git a/docs/images/stash-offline-backup.svg b/docs/images/stash-offline-backup.svg
new file mode 100644
index 000000000..60c2cf5fe
--- /dev/null
+++ b/docs/images/stash-offline-backup.svg
@@ -0,0 +1,812 @@
+[812 lines of SVG markup (image/svg+xml) omitted: Stash offline backup flow diagram]
diff --git a/docs/images/stash-recovery.svg b/docs/images/stash-recovery.svg
new file mode 100644
index 000000000..a79f7fef5
--- /dev/null
+++ b/docs/images/stash-recovery.svg
@@ -0,0 +1,851 @@
+[851 lines of SVG markup (image/svg+xml) omitted: Stash recovery flow diagram]