From 74dc27c3b5a22c6fe3d328b916332f8a58e7a11b Mon Sep 17 00:00:00 2001 From: Emruz Hossain Date: Tue, 25 Aug 2020 21:28:29 +0600 Subject: [PATCH] Add bash instead of console for better code highlight (#83) Signed-off-by: Emruz Hossain --- docs/addons/elasticsearch/setup/install.md | 12 +-- docs/addons/elasticsearch/setup/uninstall.md | 8 +- docs/addons/mongodb/setup/install.md | 12 +-- docs/addons/mongodb/setup/uninstall.md | 8 +- docs/addons/mysql/setup/install.md | 12 +-- docs/addons/mysql/setup/uninstall.md | 8 +- docs/addons/percona-xtradb/setup/install.md | 12 +-- docs/addons/percona-xtradb/setup/uninstall.md | 8 +- docs/addons/postgres/setup/install.md | 12 +-- docs/addons/postgres/setup/uninstall.md | 8 +- docs/concepts/crds/repository.md | 6 +- docs/concepts/crds/snapshot.md | 4 +- .../latest/advanced-use-case/clone-pvc.md | 68 ++++++++--------- .../advanced-use-case/instant-backup.md | 24 +++--- .../latest/advanced-use-case/pause-backup.md | 30 ++++---- docs/guides/latest/auto-backup/database.md | 46 +++++------ docs/guides/latest/auto-backup/pvc.md | 42 +++++----- docs/guides/latest/auto-backup/workload.md | 50 ++++++------ docs/guides/latest/backends/azure.md | 4 +- docs/guides/latest/backends/b2.md | 4 +- docs/guides/latest/backends/gcs.md | 4 +- docs/guides/latest/backends/local.md | 18 ++--- docs/guides/latest/backends/rest.md | 4 +- docs/guides/latest/backends/s3.md | 6 +- docs/guides/latest/backends/swift.md | 4 +- .../latest/batch-backup/batch-backup.md | 64 ++++++++-------- docs/guides/latest/cli/cli.md | 38 +++++----- .../latest/hooks/backup-and-restore-hooks.md | 70 ++++++++--------- .../guides/latest/hooks/batch-backup-hooks.md | 28 +++---- docs/guides/latest/monitoring/builtin.md | 16 ++-- docs/guides/latest/monitoring/coreos.md | 16 ++-- docs/guides/latest/monitoring/overview.md | 6 +- docs/guides/latest/platforms/aks.md | 44 +++++------ docs/guides/latest/platforms/eks.md | 44 +++++------ docs/guides/latest/platforms/gke.md | 44 +++++------ 
docs/guides/latest/platforms/minio.md | 44 +++++------ docs/guides/latest/platforms/rook.md | 44 +++++------ docs/guides/latest/volumes/pvc.md | 50 ++++++------ .../latest/volumesnapshot/deployment.md | 44 +++++------ docs/guides/latest/volumesnapshot/overview.md | 2 +- docs/guides/latest/volumesnapshot/pvc.md | 44 +++++------ .../latest/volumesnapshot/statefulset.md | 76 +++++++++---------- docs/guides/latest/workloads/daemonset.md | 38 +++++----- docs/guides/latest/workloads/deployment.md | 40 +++++----- docs/guides/latest/workloads/statefulset.md | 52 ++++++------- docs/guides/v1alpha1/backends/azure.md | 4 +- docs/guides/v1alpha1/backends/b2.md | 4 +- docs/guides/v1alpha1/backends/gcs.md | 4 +- docs/guides/v1alpha1/backends/local.md | 6 +- docs/guides/v1alpha1/backends/s3.md | 4 +- docs/guides/v1alpha1/backends/swift.md | 4 +- docs/guides/v1alpha1/backup.md | 26 +++---- docs/guides/v1alpha1/monitoring/builtin.md | 16 ++-- docs/guides/v1alpha1/monitoring/coreos.md | 16 ++-- docs/guides/v1alpha1/monitoring/grafana.md | 4 +- docs/guides/v1alpha1/monitoring/overview.md | 6 +- docs/guides/v1alpha1/offline_backup.md | 24 +++--- docs/guides/v1alpha1/platforms/aks.md | 42 +++++----- docs/guides/v1alpha1/platforms/eks.md | 38 +++++----- docs/guides/v1alpha1/platforms/gke.md | 54 ++++++------- docs/guides/v1alpha1/platforms/minio.md | 44 +++++------ docs/guides/v1alpha1/platforms/rook.md | 44 +++++------ docs/guides/v1alpha1/restore.md | 28 +++---- docs/setup/developer-guide/overview.md | 16 ++-- docs/setup/developer-guide/release.md | 2 +- docs/setup/install.md | 20 ++--- docs/setup/uninstall.md | 6 +- docs/setup/upgrade.md | 4 +- 68 files changed, 817 insertions(+), 817 deletions(-) diff --git a/docs/addons/elasticsearch/setup/install.md b/docs/addons/elasticsearch/setup/install.md index 211dddb3..5f86a507 100644 --- a/docs/addons/elasticsearch/setup/install.md +++ b/docs/addons/elasticsearch/setup/install.md @@ -36,7 +36,7 @@ You can install the addon either as a helm 
chart or you can create only the YAML Run the following script to install `stash-elasticsearch` addon as a Helm chart using Helm 3. -```console +```bash curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/deploy/helm3.sh | bash -s -- --catalog=stash-elasticsearch ``` @@ -47,7 +47,7 @@ curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/d Run the following script to install `stash-elasticsearch` addon as a Helm chart using Helm 2. -```console +```bash curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/deploy/helm2.sh | bash -s -- --catalog=stash-elasticsearch ``` @@ -58,7 +58,7 @@ curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/d Run the following script to install `stash-elasticsearch` addon as Kubernetes YAMLs. -```console +```bash curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/deploy/script.sh | bash -s -- --catalog=stash-elasticsearch ``` @@ -71,7 +71,7 @@ curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/d After installation is completed, this addon will create `elasticsearch-backup-*` and `elasticsearch-restore-*` Functions and Tasks for all supported Elasticsearch versions. To verify, run the following command: -```console +```bash $ kubectl get functions.stash.appscode.com NAME AGE elasticsearch-backup-7.2 20s @@ -95,7 +95,7 @@ update-status 7h6m Also, verify that the `Task` have been created. -```console +```bash $ kubectl get tasks.stash.appscode.com NAME AGE elasticsearch-backup-7.2 2m7s @@ -122,7 +122,7 @@ Now, Stash is ready to backup Elasticsearch databases. In order to install `Function` and `Task` only for a specific Elasticsearch version, use `--version` flag to specify the desired database version. 
-```console +```bash curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/deploy/helm3.sh | bash -s -- --catalog=stash-elasticsearch --version=6.5 ``` diff --git a/docs/addons/elasticsearch/setup/uninstall.md b/docs/addons/elasticsearch/setup/uninstall.md index dfde6374..b77f3b96 100644 --- a/docs/addons/elasticsearch/setup/uninstall.md +++ b/docs/addons/elasticsearch/setup/uninstall.md @@ -34,7 +34,7 @@ In order to uninstall Elasticsearch addon, follow the instruction given below. Run the following script to uninstall `stash-elasticsearch` addon that was installed as a Helm chart using Helm 3. -```console +```bash curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/deploy/helm3.sh | bash -s -- --uninstall --catalog=stash-elasticsearch ``` @@ -45,7 +45,7 @@ curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/d Run the following script to uninstall `stash-elasticsearch` addon that was installed as a Helm chart using Helm 2. -```console +```bash curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/deploy/helm2.sh | bash -s -- --uninstall --catalog=stash-elasticsearch ``` @@ -56,7 +56,7 @@ curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/d Run the following script to uninstall `stash-elasticsearch` addon that was installed as Kubernetes YAMLs. -```console +```bash curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/deploy/script.sh | bash -s -- --uninstall --catalog=stash-elasticsearch ``` @@ -67,6 +67,6 @@ curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/d In order to uninstall Elasticsearch addon only for a specific database version, use `--version` flag to specify the desired version. 
-```console +```bash curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/deploy/helm3.sh | bash -s -- --uninstall --catalog=stash-elasticsearch --version=6.5 ``` diff --git a/docs/addons/mongodb/setup/install.md b/docs/addons/mongodb/setup/install.md index 1540eeb6..4777790a 100644 --- a/docs/addons/mongodb/setup/install.md +++ b/docs/addons/mongodb/setup/install.md @@ -36,7 +36,7 @@ You can install the addon either as a helm chart or you can create only the YAML Run the following script to install `stash-mongodb` addon as a Helm chart using Helm 3. -```console +```bash curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/deploy/helm3.sh | bash -s -- --catalog=stash-mongodb ``` @@ -47,7 +47,7 @@ curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/d Run the following script to install `stash-mongodb` addon as a Helm chart using Helm 2. -```console +```bash curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/deploy/helm2.sh | bash -s -- --catalog=stash-mongodb ``` @@ -58,7 +58,7 @@ curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/d Run the following script to install `stash-mongodb` addon as Kubernetes YAMLs. -```console +```bash curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/deploy/script.sh | bash -s -- --catalog=stash-mongodb ``` @@ -71,7 +71,7 @@ curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/d After installation is completed, this addon will create `mongodb-backup-*` and `mongodb-restore-*` Functions and Tasks for all supported MongoDB versions. To verify, run the following command: -```console +```bash $ kubectl get functions.stash.appscode.com NAME AGE mongodb-backup-4.1 20s @@ -89,7 +89,7 @@ update-status 7h6m Also, verify that the `Task` have been created. 
-```console +```bash $ kubectl get tasks.stash.appscode.com NAME AGE mongodb-backup-4.1 2m7s @@ -110,7 +110,7 @@ Now, Stash is ready to backup MongoDB databases. In order to install `Function` and `Task` only for a specific MongoDB version, use `--version` flag to specify the desired database version. -```console +```bash curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/deploy/helm3.sh | bash -s -- --catalog=stash-mongodb --version=3.6 ``` diff --git a/docs/addons/mongodb/setup/uninstall.md b/docs/addons/mongodb/setup/uninstall.md index 785aadcb..c7d74924 100644 --- a/docs/addons/mongodb/setup/uninstall.md +++ b/docs/addons/mongodb/setup/uninstall.md @@ -34,7 +34,7 @@ In order to uninstall MongoDB addon, follow the instruction given below. Run the following script to uninstall `stash-mongodb` addon that was installed as a Helm chart using Helm 3. -```console +```bash curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/deploy/helm3.sh | bash -s -- --uninstall --catalog=stash-mongodb ``` @@ -45,7 +45,7 @@ curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/d Run the following script to uninstall `stash-mongodb` addon that was installed as a Helm chart using Helm 2. -```console +```bash curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/deploy/helm2.sh | bash -s -- --uninstall --catalog=stash-mongodb ``` @@ -56,7 +56,7 @@ curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/d Run the following script to uninstall `stash-mongodb` addon that was installed as Kubernetes YAMLs. 
-```console +```bash curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/deploy/script.sh | bash -s -- --uninstall --catalog=stash-mongodb ``` @@ -67,6 +67,6 @@ curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/d In order to uninstall MongoDB addon only for a specific database version, use `--version` flag to specify the desired version. -```console +```bash curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/deploy/helm3.sh | bash -s -- --uninstall --catalog=stash-mongodb --version=3.6 ``` diff --git a/docs/addons/mysql/setup/install.md b/docs/addons/mysql/setup/install.md index 598ffbe4..0bbdda4d 100644 --- a/docs/addons/mysql/setup/install.md +++ b/docs/addons/mysql/setup/install.md @@ -36,7 +36,7 @@ You can install the addon either as a helm chart or you can create only the YAML Run the following script to install `stash-mysql` addon as a Helm chart using Helm 3. -```console +```bash curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/deploy/helm3.sh | bash -s -- --catalog=stash-mysql ``` @@ -47,7 +47,7 @@ curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/d Run the following script to install `stash-mysql` addon as a Helm chart using Helm 2. -```console +```bash curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/deploy/helm2.sh | bash -s -- --catalog=stash-mysql ``` @@ -58,7 +58,7 @@ curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/d Run the following script to install `stash-mysql` addon as Kubernetes YAMLs. 
-```console +```bash curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/deploy/script.sh | bash -s -- --catalog=stash-mysql ``` @@ -71,7 +71,7 @@ curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/d After installation is completed, this addon will create `mysql-backup-*` and `mysql-restore-*` Functions and Tasks for all supported MySQL versions. To verify, run the following command: -```console +```bash $ kubectl get functions.stash.appscode.com NAME AGE mysql-backup-8.0.14 20s @@ -85,7 +85,7 @@ update-status 7h6m Also, verify that the `Task` have been created. -```console +```bash $ kubectl get tasks.stash.appscode.com NAME AGE mysql-backup-8.0.14 2m7s @@ -102,7 +102,7 @@ Now, Stash is ready to backup MySQL databases. In order to install `Function` and `Task` only for a specific MySQL version, use `--version` flag to specify the desired database version. -```console +```bash curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/deploy/helm3.sh | bash -s -- --catalog=stash-mysql --version=8.0.14 ``` diff --git a/docs/addons/mysql/setup/uninstall.md b/docs/addons/mysql/setup/uninstall.md index b16d4243..a50beab1 100644 --- a/docs/addons/mysql/setup/uninstall.md +++ b/docs/addons/mysql/setup/uninstall.md @@ -34,7 +34,7 @@ In order to uninstall MySQL addon, follow the instruction given below. Run the following script to uninstall `stash-mysql` addon that was installed as a Helm chart using Helm 3. -```console +```bash curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/deploy/helm3.sh | bash -s -- --uninstall --catalog=stash-mysql ``` @@ -45,7 +45,7 @@ curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/d Run the following script to uninstall `stash-mysql` addon that was installed as a Helm chart using Helm 2. 
-```console +```bash curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/deploy/helm2.sh | bash -s -- --uninstall --catalog=stash-mysql ``` @@ -56,7 +56,7 @@ curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/d Run the following script to uninstall `stash-mysql` addon that was installed as Kubernetes YAMLs. -```console +```bash curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/deploy/script.sh | bash -s -- --uninstall --catalog=stash-mysql ``` @@ -67,6 +67,6 @@ curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/d In order to uninstall MySQL addon only for a specific database version, use `--version` flag to specify the desired version. -```console +```bash curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/deploy/helm3.sh | bash -s -- --uninstall --catalog=stash-mysql --version=8.0.14 ``` diff --git a/docs/addons/percona-xtradb/setup/install.md b/docs/addons/percona-xtradb/setup/install.md index 2506062c..8fade6d3 100644 --- a/docs/addons/percona-xtradb/setup/install.md +++ b/docs/addons/percona-xtradb/setup/install.md @@ -36,7 +36,7 @@ You can install the addon either as a helm chart or you can create only the YAML Run the following script to install `stash-percona-xtradb` addon as a Helm chart using Helm 3. -```console +```bash curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/deploy/helm3.sh | bash -s -- --catalog=stash-percona-xtradb ``` @@ -47,7 +47,7 @@ curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/d Run the following script to install `stash-percona-xtradb` addon as a Helm chart using Helm 2. 
-```console +```bash curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/deploy/helm2.sh | bash -s -- --catalog=stash-percona-xtradb ``` @@ -58,7 +58,7 @@ curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/d Run the following script to install `stash-percona-xtradb` addon as Kubernetes YAMLs. -```console +```bash curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/deploy/script.sh | bash -s -- --catalog=stash-percona-xtradb ``` @@ -71,7 +71,7 @@ curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/d After installation is completed, this addon will create `percona-xtradb-backup-*` and `percona-xtradb-restore-*` Functions and Tasks for all supported Percona XtraDB versions. To verify, run the following command: -```console +```bash $ kubectl get functions.stash.appscode.com NAME AGE percona-xtradb-backup-5.7 20s @@ -83,7 +83,7 @@ update-status 7h6m Also, verify that the `Task` have been created. -```console +```bash $ kubectl get tasks.stash.appscode.com NAME AGE percona-xtradb-backup-5.7 2m7s @@ -98,7 +98,7 @@ Now, Stash is ready to backup Percona XtraDB databases. In order to install `Function` and `Task` only for a specific Percona XtraDB version, use `--version` flag to specify the desired database version. -```console +```bash curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/deploy/helm3.sh | bash -s -- --catalog=stash-percona-xtradb --version=5.7 ``` diff --git a/docs/addons/percona-xtradb/setup/uninstall.md b/docs/addons/percona-xtradb/setup/uninstall.md index f297298a..74ee4294 100644 --- a/docs/addons/percona-xtradb/setup/uninstall.md +++ b/docs/addons/percona-xtradb/setup/uninstall.md @@ -34,7 +34,7 @@ In order to uninstall Percona XtraDB addon, follow the instruction given below. Run the following script to uninstall `stash-percona-xtradb` addon that was installed as a Helm chart using Helm 3. 
-```console +```bash curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/deploy/helm3.sh | bash -s -- --uninstall --catalog=stash-percona-xtradb ``` @@ -45,7 +45,7 @@ curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/d Run the following script to uninstall `stash-percona-xtradb` addon that was installed as a Helm chart using Helm 2. -```console +```bash curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/deploy/helm2.sh | bash -s -- --uninstall --catalog=stash-percona-xtradb ``` @@ -56,7 +56,7 @@ curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/d Run the following script to uninstall `stash-percona-xtradb` addon that was installed as Kubernetes YAMLs. -```console +```bash curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/deploy/script.sh | bash -s -- --uninstall --catalog=stash-percona-xtradb ``` @@ -67,6 +67,6 @@ curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/d In order to uninstall Percona XtraDB addon only for a specific database version, use `--version` flag to specify the desired version. -```console +```bash curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/deploy/helm3.sh | bash -s -- --uninstall --catalog=stash-percona-xtradb --version=5.7 ``` diff --git a/docs/addons/postgres/setup/install.md b/docs/addons/postgres/setup/install.md index 220f0500..734bfc9e 100644 --- a/docs/addons/postgres/setup/install.md +++ b/docs/addons/postgres/setup/install.md @@ -36,7 +36,7 @@ You can install the addon either as a helm chart or you can create only the YAML Run the following script to install `stash-postgres` addon as a Helm chart using Helm 3. 
-```console +```bash curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/deploy/helm3.sh | bash -s -- --catalog=stash-postgres ``` @@ -47,7 +47,7 @@ curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/d Run the following script to install `stash-postgres` addon as a Helm chart using Helm 2. -```console +```bash curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/deploy/helm2.sh | bash -s -- --catalog=stash-postgres ``` @@ -58,7 +58,7 @@ curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/d Run the following script to install `stash-postgres` addon as Kubernetes YAMLs. -```console +```bash curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/deploy/script.sh | bash -s -- --catalog=stash-postgres ``` @@ -71,7 +71,7 @@ curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/d After installation is completed, this addon will create `postgres-backup-*` and `postgres-restore-*` Functions and Tasks for all supported PostgreSQL versions. To verify, run the following command: -```console +```bash $ kubectl get functions.stash.appscode.com NAME AGE postgres-backup-10.2 20s @@ -91,7 +91,7 @@ update-status 7h6m Also, verify that the `Task` have been created. -```console +```bash $ kubectl get tasks.stash.appscode.com NAME AGE postgres-backup-10.2 2m7s @@ -114,7 +114,7 @@ Now, Stash is ready to backup PostgreSQL databases. In order to install `Function` and `Task` only for a specific PostgreSQL version, use `--version` flag to specify the desired database version. 
-```console +```bash curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/deploy/helm3.sh | bash -s -- --catalog=stash-postgres --version=11.2 ``` diff --git a/docs/addons/postgres/setup/uninstall.md b/docs/addons/postgres/setup/uninstall.md index bba12c5d..7455127e 100644 --- a/docs/addons/postgres/setup/uninstall.md +++ b/docs/addons/postgres/setup/uninstall.md @@ -34,7 +34,7 @@ In order to uninstall PostgreSQL addon, follow the instruction given below. Run the following script to uninstall `stash-postgres` addon that was installed as a Helm chart using Helm 3. -```console +```bash curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/deploy/helm3.sh | bash -s -- --uninstall --catalog=stash-postgres ``` @@ -45,7 +45,7 @@ curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/d Run the following script to uninstall `stash-postgres` addon that was installed as a Helm chart using Helm 2. -```console +```bash curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/deploy/helm2.sh | bash -s -- --uninstall --catalog=stash-postgres ``` @@ -56,7 +56,7 @@ curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/d Run the following script to uninstall `stash-postgres` addon that was installed as Kubernetes YAMLs. -```console +```bash curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/deploy/script.sh | bash -s -- --uninstall --catalog=stash-postgres ``` @@ -67,6 +67,6 @@ curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/d In order to uninstall PostgreSQL addon only for a specific database version, use `--version` flag to specify the desired version. 
-```console +```bash curl -fsSL https://github.com/stashed/catalog/raw/{{< param "info.catalog" >}}/deploy/helm3.sh | bash -s -- --uninstall --catalog=stash-postgres --version=11.2 ``` diff --git a/docs/concepts/crds/repository.md b/docs/concepts/crds/repository.md index 5b89b296..90b48c2d 100644 --- a/docs/concepts/crds/repository.md +++ b/docs/concepts/crds/repository.md @@ -94,7 +94,7 @@ Stash allows users to delete **only `Repository` crd** or **`Repository` crd alo You can delete only `Repository` crd by, -```console +```bash $ kubectl delete repository # Example @@ -116,14 +116,14 @@ Here, is an example of deleting backed up data from GCS backend, - First, set `wipeOut: true` by patching `Repository` crd. - ```console + ```bash $ kubectl patch repository gcs-demo-repo --type="merge" --patch='{"spec": {"wipeOut": true}}' repository "gcs-demo-repo" patched ``` - Finally, delete `Repository` object. It will delete backed up data from the backend. - ```console + ```bash $ kubectl delete repository gcs-demo-repo repository "gcs-demo-repo" deleted ``` diff --git a/docs/concepts/crds/snapshot.md b/docs/concepts/crds/snapshot.md index 7e3b6c1a..072dce51 100644 --- a/docs/concepts/crds/snapshot.md +++ b/docs/concepts/crds/snapshot.md @@ -100,7 +100,7 @@ Here, we are going to describe the various sections of a `Snapshot` object. 
**Listing Snapshots:** -```console +```bash # List Snapshots of all Repositories in the current namespace $ kubectl get snapshot --request-timeout=300s @@ -125,7 +125,7 @@ $ kubectl get snapshot -l repository=local-repo,hostname=db --request-timeout=30 **Viewing information of a particular Snapshot:** -```console +```bash $ kubectl get snapshot [-n ] -o yaml # Example: diff --git a/docs/guides/latest/advanced-use-case/clone-pvc.md b/docs/guides/latest/advanced-use-case/clone-pvc.md index aa79ec8b..57e7a3e9 100644 --- a/docs/guides/latest/advanced-use-case/clone-pvc.md +++ b/docs/guides/latest/advanced-use-case/clone-pvc.md @@ -30,7 +30,7 @@ Using Stash you can clone data volumes of a workload into a different namespace To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. -```console +```bash $ kubectl create ns demo namespace/demo created ``` @@ -119,7 +119,7 @@ The above Deployment will automatically create `data.txt` and `config.cfg` file Let's create the Deployment and PVCs we have shown above. -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/advanced-use-case/clone-pvc/deployment/deployment.yaml persistentvolumeclaim/source-data created persistentvolumeclaim/source-config created @@ -128,7 +128,7 @@ deployment.apps/stash-demo created Now, wait for the pod of the Deployment to go into `Running` state. 
-```console +```bash $ kubectl get pod -n demo NAME READY STATUS RESTARTS AGE stash-demo-67ccdfbbc7-z97rd 1/1 Running 0 77s @@ -136,7 +136,7 @@ stash-demo-67ccdfbbc7-z97rd 1/1 Running 0 77s Verify that the sample data has been created in `/source/data` and `/source/config` directory using the following commands, -```console +```bash $ kubectl exec -n demo stash-demo-67ccdfbbc7-z97rd -- cat /source/data/data.txt sample_data $ kubectl exec -n demo stash-demo-67ccdfbbc7-z97rd -- cat /source/config/config.cfg @@ -152,7 +152,7 @@ We are going to store our backed up data into a GCS bucket. We have to create a Let's create a secret called `gcs-secret` with access credentials of our desired GCS backend, -```console +```bash $ echo -n 'changeit' > RESTIC_PASSWORD $ echo -n '' > GOOGLE_PROJECT_ID $ cat /path/to/downloaded/sa_key_file.json > GOOGLE_SERVICE_ACCOUNT_JSON_KEY @@ -181,7 +181,7 @@ spec: Let's create the `Repository` object that we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/advanced-use-case/clone-pvc/repository.yaml repository.stash.appscode.com/gcs-repo created ``` @@ -225,7 +225,7 @@ spec: Let's create the `BackupConfiguration` object that we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/advanced-use-case/clone-pvc/deployment/dep-backupconfiguration.yaml backupconfiguration.stash.appscode.com/deployment-backup created ``` @@ -236,7 +236,7 @@ If everything goes well, Stash will create a `CronJob` to trigger backup periodi Verify that Stash has created a `CronJob` to trigger a periodic backup of volumes of the Deployment by the following command, -```console +```bash $ kubectl get backupconfiguration -n demo NAME TASK SCHEDULE PAUSED AGE deployment-backup * * * * * 36s @@ -246,7 +246,7 @@ deployment-backup * * * * * 36s Now, wait for the next backup 
schedule. You can watch for `BackupSession` crd using the following command, -```console +```bash $ watch -n 3 kubectl get backupconfiguration -n demo Every 3.0s: kubectl get backupconfiguration -n demo suaas-appscode: Mon Jul 8 18:20:47 2019 @@ -264,14 +264,14 @@ Now, we are going to clone the volumes that we have backed up in the previous se At first, let's pause the scheduled backup of the old Deployment so that no backup is taken during the restore process. To pause the `deployment-backup` BackupConfiguration, run: -```console +```bash $ kubectl patch backupconfiguration -n demo deployment-backup --type="merge" --patch='{"spec": {"paused": true}}' backupconfiguration.stash.appscode.com/deployment-backup patched ``` Now, wait for a moment. Stash will pause the BackupConfiguration. Verify that the BackupConfiguration has been paused, -```console +```bash $ kubectl get backupconfiguration -n demo NAME TASK SCHEDULE PAUSED AGE deployment-backup */1 * * * * true 26m @@ -331,7 +331,7 @@ Here, Let's create the `RestoreSession` object that we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/advanced-use-case/clone-pvc/deployment/restoresession.yaml restoresession.stash.appscode.com/restore-deployment created ``` @@ -342,7 +342,7 @@ Once, you have created the `RestoreSession` crd, Stash will create a job to rest Run the following command to watch RestoreSession phase, -```console +```bash $ watch -n 3 kubectl get restoresession -n demo Every 3.0s: kubectl get restoresession -n demo suaas-appscode: Mon Jul 8 18:39:58 2019 @@ -358,7 +358,7 @@ Once the restore process is complete, we are going to see that new PVCs with the Verify that the PVCs have been created by the following command, -```console +```bash $ kubectl get pvc -n demo NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE restore-config Bound pvc-6aab94dc-10b2-4c36-8768-89b20a7a24ed 2Gi RWO standard 32s 
@@ -414,14 +414,14 @@ spec: Create the deployment we have shown above. -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/advanced-use-case/clone-pvc/deployment/restore-deployment.yaml deployment.apps/restore-demo created ``` Now, wait for the pod of the Deployment to go into the `Running` state. -```console +```bash $ kubectl get pod -n demo NAME READY STATUS RESTARTS AGE restore-demo-85fbcb5dcf-vpbt8 1/1 Running 0 2m50s @@ -429,7 +429,7 @@ restore-demo-85fbcb5dcf-vpbt8 1/1 Running 0 2m50s Verify that the backed up data has been restored in `/source/data` and `/source/config` directory using the following command, -```console +```bash $ kubectl exec -n demo restore-demo-85fbcb5dcf-vpbt8 -- cat /restore/data/data.txt sample_data $ kubectl exec -n demo restore-demo-85fbcb5dcf-vpbt8 -- cat /restore/config/config.cfg @@ -523,7 +523,7 @@ The above StatefulSet will automatically create `data.txt` and `config.cfg` file Let's create the Statefulset we have shown above. -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/advanced-use-case/clone-pvc/statefulset/statefulset.yaml service/headless configured statefulset.apps/stash-demo created @@ -531,7 +531,7 @@ statefulset.apps/stash-demo created Now, wait for the pod of the Statefulset to go into the `Running` state. -```console +```bash $ kubectl get pod -n demo NAME READY STATUS RESTARTS AGE stash-demo-0 1/1 Running 0 47s @@ -541,7 +541,7 @@ stash-demo-2 1/1 Running 0 33s Verify that the sample data has been created in `/source/data` and `/source/config` directory using the following command, -```console +```bash $ kubectl exec -n demo stash-demo-0 -- cat /source/data/data.txt stash-demo-0 $ kubectl exec -n demo stash-demo-0 -- cat /source/config/config.cfg @@ -560,7 +560,7 @@ stash-demo-2 We are going to store our backed up data into a GCS bucket. 
Let’s create a secret called `gcs-secret` with access credentials of our desired GCS backend, -```console +```bash $ echo -n 'changeit' > RESTIC_PASSWORD $ echo -n '' > GOOGLE_PROJECT_ID $ cat /path/to/downloaded/sa_key_file.json > GOOGLE_SERVICE_ACCOUNT_JSON_KEY @@ -589,7 +589,7 @@ spec: Let’s create the Repository object that we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/advanced-use-case/clone-pvc/repository.yaml repository.stash.appscode.com/gcs-repo created ``` @@ -633,7 +633,7 @@ spec: Let’s create the `BackupConfiguration` object that we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/advanced-use-case/clone-pvc/statefulset/ss-backupconfiguration.yaml backupconfiguration.stash.appscode.com/ss-backup created ``` @@ -644,7 +644,7 @@ If everything goes well, Stash will create a `CronJob` to trigger backup periodi Verify that Stash has created a `CronJob` to trigger a periodic backup of the volumes of the Statefulset by the following command, -```console +```bash $ kubectl get backupconfiguration -n demo NAME TASK SCHEDULE PAUSED AGE ss-backup * * * * * 2m @@ -654,7 +654,7 @@ ss-backup * * * * * 2m Now, wait for the next backup schedule. You can watch for `BackupSession` crd using the following command, -```console +```bash $ watch -n 3 kubectl get backupsession -n demo Every 3.0s: kubectl get backupsession -n demo suaas-appscode: Tue Jul 9 17:09:43 2019 @@ -672,14 +672,14 @@ Now, we are going to restore the volumes that we have backed up in the previous At first, let's pause the scheduled backup of the old StatefulSet so that no backup is taken during the restore process. 
To pause the `ss-backup` BackupConfiguration, run: -```console +```bash $ kubectl patch backupconfiguration -n demo ss-backup --type="merge" --patch='{"spec": {"paused": true}}' backupconfiguration.stash.appscode.com/ss-backup patched ``` Now, wait for a moment. Stash will pause the BackupConfiguration. Verify that the BackupConfiguration has been paused, -```console +```bash $ kubectl get backupconfiguration -n demo NAME TASK SCHEDULE PAUSED AGE ss-backup */1 * * * * true 26m @@ -737,7 +737,7 @@ spec: - `spec.target.volumeClaimTemplates:` a list of PVC templates that will be created by Stash to restore the respective backed up data. - `metadata.name` is a template for the name of the restored PVC that will be created by Stash. You have to provide this named template to match with your desired StatefulSet's PVC. For example, if you want to deploy a StatefulSet named `stash-demo` with `volumeClaimTemplate` name `my-volume`, your StatefulSet's PVC will be`my-volume-stash-demo-0`, `my-volume-stash-demo-1` and so on. 
In this case, you have to provide `volumeClaimTemplate` name in RestoreSession in the following format: - ```console + ```bash --${POD_ORDINAL} ``` @@ -746,7 +746,7 @@ spec: Let’s create the `RestoreSession` object that we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/advanced-use-case/clone-pvc/statefulset/restoresession.yaml restoresession.stash.appscode.com/restore-statefulset created ``` @@ -757,7 +757,7 @@ Once, you have created the `RestoreSession` crd, Stash will create a job to rest Run the following command to watch `RestoreSession` phase, -```console +```bash $ watch -n 3 kubectl get restoresession -n demo Every 3.0s: kubectl get restoresession -n demo suaas-appscode: Tue Jul 9 18:14:44 2019 @@ -771,7 +771,7 @@ So, we can see from the output of the above command that the restore process suc Once the restore process is complete, verify that new PVCs have been created successfully by the following command, -```console +```bash $ kubectl get pvc -n demo NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE restore-config-restore-demo-0 Bound pvc-c575f88a-79c9-4d25-9aab-5f9822ced239 2Gi RWO standard 19s @@ -857,7 +857,7 @@ spec: Create the StatefulSet we have shown above. -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/advanced-use-case/clone-pvc/statefulset/restore-statefulset.yaml service/re-headless created statefulset.apps/restore-demo created @@ -865,7 +865,7 @@ statefulset.apps/restore-demo created Now, wait for the pod of the StatefulSet to go into the `Running` state. 
-```console +```bash $ kubectl get pod -n demo NAME READY STATUS RESTARTS AGE restore-demo-0 1/1 Running 0 34s @@ -875,7 +875,7 @@ restore-demo-2 1/1 Running 0 26s Verify that the backed up data has been restored in `/restore/data` and `/restore/config` directory using the following command, -```console +```bash $ kubectl exec -n demo restore-demo-0 -- cat /restore/data/data.txt stash-demo-0 $ kubectl exec -n demo restore-demo-0 -- cat /restore/config/config.cfg diff --git a/docs/guides/latest/advanced-use-case/instant-backup.md b/docs/guides/latest/advanced-use-case/instant-backup.md index 6f319054..45481e96 100644 --- a/docs/guides/latest/advanced-use-case/instant-backup.md +++ b/docs/guides/latest/advanced-use-case/instant-backup.md @@ -29,7 +29,7 @@ This guide will show you how to take an instant backup in Stash. To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. -```console +```bash $ kubectl create ns demo namespace/demo created ``` @@ -96,7 +96,7 @@ The above Deployment will automatically create a `data.txt` file in `/source/dat Let’s create the Deployment and PVC we have shown above. -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/advanced-use-case/instant-backup/deployment.yaml persistentvolumeclaim/source-data created deployment.apps/stash-demo created @@ -104,7 +104,7 @@ deployment.apps/stash-demo created Now, wait for the pod of the Deployment to go into `Running` state. 
-```console +```bash $ kubectl get pod -n demo NAME READY STATUS RESTARTS AGE stash-demo-859d96f6bd-fxr7l 1/1 Running 0 81s @@ -112,7 +112,7 @@ stash-demo-859d96f6bd-fxr7l 1/1 Running 0 81s Verify that the sample data has been created in `/source/data` directory using the following command, -```console +```bash $ kubectl exec -n demo stash-demo-859d96f6bd-fxr7l -- cat /source/data/data.txt sample_data ``` @@ -125,7 +125,7 @@ We are going to store our backed up data into a GCS bucket. We have to create a Let’s create a secret called `gcs-secret` with access credentials of our desired GCS backend, -```console +```bash $ echo -n 'changeit' > RESTIC_PASSWORD $ echo -n '' > GOOGLE_PROJECT_ID $ cat /path/to/downloaded-sa-json.key > GOOGLE_SERVICE_ACCOUNT_JSON_KEY @@ -154,7 +154,7 @@ spec: Let’s create the `Repository` object that we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/advanced-use-case/instant-backup/repository.yaml repository.stash.appscode.com/gcs-repo created ``` @@ -195,7 +195,7 @@ spec: Let’s create the `BackupConfiguration` object that we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/advanced-use-case/instant-backup/backupconfiguration.yaml backupconfiguration.stash.appscode.com/deployment-backup created ``` @@ -204,7 +204,7 @@ backupconfiguration.stash.appscode.com/deployment-backup created If everything goes well, Stash will inject a sidecar container into the `stash-demo` Deployment to take backup of `/source/data` directory. 
Let’s check that the sidecar has been injected successfully, -```console +```bash $ kubectl get pod -n demo NAME READY STATUS RESTARTS AGE stash-demo-9bff9fd4f-xvt77 2/2 Running 0 57s @@ -218,7 +218,7 @@ It will also create a `CronJob` with the schedule specified in `spec.schedule` f Verify that the `CronJob` has been created using the following command, -```console +```bash $ kubectl get backupconfiguration -n demo NAME TASK SCHEDULE PAUSED AGE deployment-backup */40 * * * * 6m41s @@ -255,7 +255,7 @@ spec: Let's create the `BackupSession` object that we have have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/advanced-use-case/instant-backup/backupsession.yaml backupsession.stash.appscode.com/deployment-backupsession created ``` @@ -266,7 +266,7 @@ If everything goes well, the stash sidecar inside the Deployment will take a bac Run the following command to watch `BackupSession` phase, -```console +```bash $ watch -n 3 kubectl get backupsession -n demo Every 3.0s: kubectl get backupsession -n demo suaas-appscode: Wed Jul 10 17:18:52 2019 @@ -280,7 +280,7 @@ We can see from the above output that the instant backup session has succeeded. Once a backup is complete, Stash will update the respective `Repository` crd to reflect the backup. 
Check that the repository `gcs-repo` has been updated by the following command, -```console +```bash $ kubectl get repository -n demo gcs-repo NAME INTEGRITY SIZE SNAPSHOT-COUNT LAST-SUCCESSFUL-BACKUP AGE gcs-repo true 24 B 1 116s 10m diff --git a/docs/guides/latest/advanced-use-case/pause-backup.md b/docs/guides/latest/advanced-use-case/pause-backup.md index 8d7255b0..7e6955e6 100644 --- a/docs/guides/latest/advanced-use-case/pause-backup.md +++ b/docs/guides/latest/advanced-use-case/pause-backup.md @@ -29,7 +29,7 @@ Stash supports pausing backups without deleting respective `BackupConfiguration` To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. -```console +```bash $ kubectl create ns demo namespace/demo created ``` @@ -96,7 +96,7 @@ The above Deployment will automatically create a `data.txt` file in `/source/dat Let's create the Deployment and PVC we have shown above. -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/advanced-use-case/pause-backup/deployment.yaml persistentvolumeclaim/source-pvc created deployment.apps/stash-demo created @@ -104,7 +104,7 @@ deployment.apps/stash-demo created Now, wait for the pods of the Deployment to go into the `Running` state. 
-```console +```bash kubectl get pod -n demo NAME READY STATUS RESTARTS AGE stash-demo-69f9ffbbf7-bww24 1/1 Running 0 100s @@ -114,7 +114,7 @@ stash-demo-69f9ffbbf7-rsj55 1/1 Running 0 100s To verify that the sample data has been created in `/source/data` directory, use the following command: -```console +```bash $ kubectl exec -n demo stash-demo-69f9ffbbf7-bww24 -- cat /source/data/data.txt sample_data ``` @@ -127,7 +127,7 @@ We are going to store our backed up data into a [GCS bucket](https://cloud.googl Let’s create a secret called ` gcs-secret` with access credentials to our desired GCS bucket, -```console +```bash $ echo -n 'changeit' > RESTIC_PASSWORD $ echo -n '' > GOOGLE_PROJECT_ID $ cat /path/to/downloaded-sa-json.key > GOOGLE_SERVICE_ACCOUNT_JSON_KEY @@ -156,7 +156,7 @@ spec: Let's create the Repository we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/advanced-use-case/pause-backup/repository.yaml repository.stash.appscode.com/gcs-repo created ``` @@ -197,7 +197,7 @@ spec: Let's create the `BackupConfiguration` crd we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/advanced-use-case/pause-backup/backupconfiguration.yaml backupconfiguration.stash.appscode.com/pause-backup created ``` @@ -206,7 +206,7 @@ backupconfiguration.stash.appscode.com/pause-backup created If everything goes well, Stash will inject a sidecar container into the `stash-demo` Deployment to take backup of `/source/data` directory. 
Let’s check that the sidecar has been injected successfully, -```console +```bash $ kubectl get pod -n demo NAME READY STATUS RESTARTS AGE stash-demo-7489fcb7f5-jctj4 2/2 Running 0 117s @@ -222,7 +222,7 @@ It will also create a `CronJob` with the schedule specified in `spec.schedule` f Verify that the `CronJob` has been created using the following command, -```console +```bash $ watch -n 1 kubectl get backupconfiguration -n demo Every 3.0s: kubectl get backupconfiguration -n demo suaas-appscode: Thu Aug 1 17:08:08 2019 @@ -234,7 +234,7 @@ demo pause-backup */1 * * * * 27s Wait for the next schedule for backup. Run the following command to watch `BackupSession` crd, -```console +```bash $ watch -n 1 kubectl get backupssession -n demo Every 3.0s: kubectl get backupssession -n demo suaas-appscode: Thu Aug 1 17:43:57 2019 @@ -255,7 +255,7 @@ When we set `spec.paused: true`, the following things are going to happen: Let's patch the BackupConfiguration crd `pause-backup` and set `spec.paused: true`, -```console +```bash $ kubectl patch backupconfiguration -n demo pause-backup --type="merge" --patch='{"spec": {"paused": true}}' backupconfiguration.stash.appscode.com/pause-backup patched ``` @@ -331,14 +331,14 @@ spec: Let's create the `BackupSession` we have shown above. -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/advanced-use-case/pause-backup/backupsession.yaml backupsession.stash.appscode.com/instant-backupsession created ``` Run the following command to watch the BackupSession phase, -```console +```bash $ watch -n 1 kubectl get backupsession -n demo instant-backupsession Every 1.0s: kubectl get backupsession -n demo instant-backupsession suaas-appscode: Fri Aug 2 11:56:24 2019 @@ -383,7 +383,7 @@ Events: You can resume backup by setting `spec.paused: false` in BackupConfiguration crd. 
and applying the update or you can patch BackupConfiguration using, -```console +```bash $ kubectl patch backupconfiguration -n demo pause-backup --type="merge" --patch='{"spec": {"paused": false}}' backupconfiguration.stash.appscode.com/pause-backup patched ``` @@ -392,7 +392,7 @@ backupconfiguration.stash.appscode.com/pause-backup patched To clean up the Kubernetes resources created by this tutorial, run: -```console +```bash kubectl delete -n demo deployment stash-demo kubectl delete -n demo backupconfiguration pause-backup kubectl delete -n demo repository gce-repo diff --git a/docs/guides/latest/auto-backup/database.md b/docs/guides/latest/auto-backup/database.md index ea3c1367..a82c757e 100644 --- a/docs/guides/latest/auto-backup/database.md +++ b/docs/guides/latest/auto-backup/database.md @@ -37,7 +37,7 @@ You should be familiar with the following `Stash` concepts: To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. -```console +```bash $ kubectl create ns demo namespace/demo created ``` @@ -54,7 +54,7 @@ We are going to use [GCS Backend](/docs/guides/latest/backends/gcs.md) to store At first, let's create a Storage Secret for the GCS backend, -```console +```bash $ echo -n 'changeit' > RESTIC_PASSWORD $ echo -n '' > GOOGLE_PROJECT_ID $ mv downloaded-sa-json.key GOOGLE_SERVICE_ACCOUNT_JSON_KEY @@ -101,7 +101,7 @@ Note that we have used some variables (format: `${}`) in `spec.ba Let's create the `BackupBlueprint` that we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/auto-backup/database/backupblueprint.yaml backupblueprint.stash.appscode.com/postgres-backup-blueprint created ``` @@ -165,7 +165,7 @@ spec: Let's create the `Postgres` we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" 
>}}/docs/examples/guides/latest/auto-backup/database/sample-postgres-1.yaml postgres.kubedb.com/sample-postgres-1 created ``` @@ -174,7 +174,7 @@ KubeDB will deploy a PostgreSQL database according to the above specification an Verify that an `AppBinding` has been created for this PostgreSQL sample, -```console +```bash $ kubectl get appbinding -n demo NAME AGE sample-postgres-1 47s @@ -182,7 +182,7 @@ sample-postgres-1 47s If you view the YAML of this `AppBinding`, you will see it holds service and secret information. Stash uses this information to connect with the database. -```console +```bash $ kubectl get appbinding -n demo sample-postgres-1 -o yaml ``` @@ -241,14 +241,14 @@ spec: Let's create the `Postgres` we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/auto-backup/database/sample-postgres-2.yaml postgres.kubedb.com/sample-postgres-2 created ``` Verify that an `AppBinding` has been created for this PostgreSQL database, -```console +```bash $ kubectl get appbinding -n demo NAME AGE sample-postgres-1 2m49s @@ -269,14 +269,14 @@ Let's backup our first PostgreSQL sample using auto-backup. 
At first, add the auto-backup specific annotation to the AppBinding `sample-postgres-1`, -```console +```bash $ kubectl annotate appbinding sample-postgres-1 -n demo --overwrite \ stash.appscode.com/backup-blueprint=postgres-backup-blueprint ``` Verify that the annotation has been added successfully, -```console +```bash $ kubectl get appbinding -n demo sample-postgres-1 -o yaml ``` @@ -316,7 +316,7 @@ Now, Stash will create a `Repository` crd and a `BackupConfiguration` crd accord Verify that the `Repository` has been created successfully by the following command, -```console +```bash $ kubectl get repository -n demo NAME INTEGRITY SIZE SNAPSHOT-COUNT LAST-SUCCESSFUL-BACKUP AGE postgres-sample-postgres-1 2m23s @@ -324,7 +324,7 @@ postgres-sample-postgres-1 If we view the YAML of this `Repository`, we are going to see that the variables `${TARGET_NAMESPACE}`, `${TARGET_APP_RESOURCE}` and `${TARGET_NAME}` has been replaced by `demo`, `postgres` and `sample-postgres-1` respectively. -```console +```bash $ kubectl get repository -n demo postgres-sample-postgres-1 -o yaml ``` @@ -353,7 +353,7 @@ spec: Verify that the `BackupConfiguration` crd has been created by the following command, -```console +```bash $ kubectl get backupconfiguration -n demo NAME TASK SCHEDULE PAUSED AGE postgres-sample-postgres-1 postgres-backup-11.2 */5 * * * * 3m39s @@ -363,7 +363,7 @@ Notice the `TASK` field. It denoting that this backup will be performed using `p Let's check the YAML of this `BackupConfiguration`, -```console +```bash $ kubectl get backupconfiguration -n demo postgres-sample-postgres-1 -o yaml ``` @@ -411,7 +411,7 @@ Notice that the `spec.target.ref` is pointing to the AppBinding `sample-postgres Now, wait for the next backup schedule. 
Run the following command to watch `BackupSession` crd: -```console +```bash $ watch -n 1 kubectl get backupsession -n demo -l=stash.appscode.com/backup-configuration=postgres-sample-postgres-1 Every 1.0s: kubectl get backupsession -n demo -l=stash.appscode.com/backup-configuration=postgres-sample-postgres-1 workstation: Thu Aug 1 20:35:43 2019 @@ -428,7 +428,7 @@ When backup session is completed, Stash will update the respective `Repository` Run the following command to check if a snapshot has been sent to the backend, -```console +```bash $ kubectl get repository -n demo postgres-sample-postgres-1 NAME INTEGRITY SIZE SNAPSHOT-COUNT LAST-SUCCESSFUL-BACKUP AGE postgres-sample-postgres-1 true 1.324 KiB 1 73s 6m7s @@ -449,7 +449,7 @@ Now, lets backup our second PostgreSQL sample using the same `BackupBlueprint` w Add the auto backup specific annotation to AppBinding `sample-postgres-2`, -```console +```bash $ kubectl annotate appbinding sample-postgres-2 -n demo --overwrite \ stash.appscode.com/backup-blueprint=postgres-backup-blueprint ``` @@ -458,7 +458,7 @@ $ kubectl annotate appbinding sample-postgres-2 -n demo --overwrite \ Verify that the `Repository` has been created successfully by the following command, -```console +```bash $ kubectl get repository -n demo NAME INTEGRITY SIZE SNAPSHOT-COUNT LAST-SUCCESSFUL-BACKUP AGE postgres-sample-postgres-1 true 1.324 KiB 1 2m3s 6m57s @@ -469,7 +469,7 @@ Here, Repository `postgres-sample-postgres-2` has been created for the second Po If we view the YAML of this `Repository`, we are going to see that the variables `${TARGET_NAMESPACE}`, `${TARGET_APP_RESOURCE}` and `${TARGET_NAME}` has been replaced by `demo`, `postgres` and `sample-postgres-2` respectively. 
-```console +```bash $ kubectl get repository -n demo postgres-sample-postgres-2 -o yaml ``` @@ -498,7 +498,7 @@ spec: Verify that the `BackupConfiguration` crd has been created by the following command, -```console +```bash $ kubectl get backupconfiguration -n demo NAME TASK SCHEDULE PAUSED AGE postgres-sample-postgres-1 postgres-backup-11.2 */5 * * * * 7m52s @@ -511,7 +511,7 @@ Again, notice the `TASK` field. This time, `${TARGET_APP_VERSION}` has been repl Now, wait for the next backup schedule. Run the following command to watch `BackupSession` crd: -```console +```bash $ watch -n 1 kubectl get backupsession -n demo -l=stash.appscode.com/backup-configuration=postgres-sample-postgres-2 Every 1.0s: kubectl get backupsession -n demo -l=stash.appscode.com/backup-configuration=postgres-sample-postgres-2 workstation: Thu Aug 1 20:55:40 2019 @@ -523,7 +523,7 @@ postgres-sample-postgres-2-1564671303 BackupConfiguration postgres-sample-po Run the following command to check if a snapshot has been sent to the backend, -```console +```bash $ kubectl get repository -n demo postgres-sample-postgres-2 NAME INTEGRITY SIZE SNAPSHOT-COUNT LAST-SUCCESSFUL-BACKUP AGE postgres-sample-postgres-2 true 1.324 KiB 1 52s 19m @@ -540,7 +540,7 @@ If we navigate to `stash-backup/demo/postgres/sample-postgres-2` directory of ou To cleanup the Kubernetes resources created by this tutorial, run: -```console +```bash kubectl delete -n demo pg/sample-postgres-1 kubectl delete -n demo pg/sample-postgres-2 diff --git a/docs/guides/latest/auto-backup/pvc.md b/docs/guides/latest/auto-backup/pvc.md index b7c8a4bf..0bc573fe 100644 --- a/docs/guides/latest/auto-backup/pvc.md +++ b/docs/guides/latest/auto-backup/pvc.md @@ -33,7 +33,7 @@ This tutorial will show you how to configure automatic backup for PersistentVolu To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. 
-```console +```bash $ kubectl create ns demo namespace/demo created ``` @@ -46,7 +46,7 @@ Stash uses a `Function-Task` model to automatically backup PVC. When you install Let's verify that Stash has created the necessary `Function` to backup/restore PVC by the following command, -```console +```bash $ kubectl get function NAME AGE pvc-backup 6h55m @@ -56,7 +56,7 @@ update-status 6h55m Also, verify that the necessary `Task` has been created, -```console +```bash $ kubectl get task NAME AGE pvc-backup 6h55m @@ -73,7 +73,7 @@ We are going to use [GCS Backend](/docs/guides/latest/backends/gcs.md) to store At first, let's create a Storage Secret for the GCS backend, -```console +```bash $ echo -n 'changeit' > RESTIC_PASSWORD $ echo -n '' > GOOGLE_PROJECT_ID $ mv downloaded-sa-json.key GOOGLE_SERVICE_ACCOUNT_JSON_KEY @@ -120,7 +120,7 @@ Note that we have used some variables (format: `${}`) in `backend Let's create the `BackupBlueprint` that we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/auto-backup/pvc/backupblueprint.yaml backupblueprint.stash.appscode.com/pvc-backup-blueprint created ``` @@ -174,7 +174,7 @@ Notice the `metadata.labels` section. Here, we have added `app: nfs-demo` label. Let's create the PV we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/auto-backup/pvc/nfs_pv.yaml persistentvolume/nfs-pv created ``` @@ -207,14 +207,14 @@ Also, notice the `spec.selector` section. 
We have specified `app: nfs-demo` labe Let's create the PVC we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/auto-backup/pvc/nfs_pvc.yaml persistentvolumeclaim/nfs-pvc created ``` Verify that the PVC has bounded with our desired PV, -```console +```bash $ kubectl get pvc -n demo nfs-pvc NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE nfs-pvc Bound nfs-pv 1Gi RWX 61s @@ -253,14 +253,14 @@ Here, we have mounted `pod-1/data` directory of the `nfs-pvc` into `/sample/data Let's deploy the pod we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/auto-backup/pvc/pod-1.yaml pod/demo-pod-1 created ``` Verify that the sample data has been generated into `/sample/data/` directory, -```console +```bash $ kubectl exec -n demo demo-pod-1 cat /sample/data/hello.txt hello from pod 1. ``` @@ -292,14 +292,14 @@ Now, we have mounted `pod-2/data` directory of the `nfs-pvc` into `/sample/data` Let's create the pod we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/auto-backup/pvc/pod-2.yaml pod/demo-pod-2 created ``` Verify that the sample data has been generated into `/sample/data/` directory, -```console +```bash $ kubectl exec -n demo demo-pod-2 cat /sample/data/hello.txt hello from pod 2. ``` @@ -312,7 +312,7 @@ Now, we are going to add auto backup specific annotation to the PVC. 
Stash watch Let's add the auto backup specific annotation to the PVC, -```console +```bash $ kubectl annotate pvc nfs-pvc -n demo --overwrite \ stash.appscode.com/backup-blueprint=pvc-backup-blueprint \ stash.appscode.com/schedule="*/15 * * * *" @@ -320,7 +320,7 @@ $ kubectl annotate pvc nfs-pvc -n demo --overwrite \ Verify that the annotations has been added successfully, -```console +```bash $ kubectl get pvc -n demo nfs-pvc -o yaml ``` @@ -369,7 +369,7 @@ Now, Stash will create a `Repository` crd and a `BackupConfiguration` crd accord Verify that the `Repository` has been created successfully by the following command, -```console +```bash $ kubectl get repository -n demo NAME INTEGRITY SIZE SNAPSHOT-COUNT LAST-SUCCESSFUL-BACKUP AGE persistentvolumeclaim-nfs-pvc @@ -377,7 +377,7 @@ persistentvolumeclaim-nfs-pvc If we view the YAML of this `Repository`, we are going to see that the variables `${TARGET_NAMESPACE}`, `${TARGET_KIND}` and `${TARGET_NAME}` has been replaced by `demo`, `presistentvolumeclaim` and `nfs-pvc` respectively. -```console +```bash $ kubectl get repository -n demo persistentvolumeclaim-nfs-pvc -o yaml ``` @@ -406,7 +406,7 @@ spec: Verify that the `BackupConfiguration` crd has been created by the following command, -```console +```bash $ kubectl get backupconfiguration -n demo NAME TASK SCHEDULE PAUSED AGE persistentvolumeclaim-nfs-pvc pvc-backup */15 * * * * 119s @@ -414,7 +414,7 @@ persistentvolumeclaim-nfs-pvc pvc-backup */15 * * * * 119s Let's check the YAML of this `BackupConfiguration`, -```console +```bash $ kubectl get backupconfiguration -n demo persistentvolumeclaim-nfs-pvc -o yaml ``` @@ -462,7 +462,7 @@ Notice that the `spec.target.ref` is pointing to the `nfs-pvc` PVC. Now, wait for the next backup schedule. 
Run the following command to watch `BackupSession` crd: -```console +```bash $ watch -n 1 kubectl get backupsession -n demo -l=stash.appscode.com/backup-configuration=persistentvolumeclaim-nfs-pvc Every 1.0s: kubectl get backupsession -n demo ... workstation: Thu Jul 18 15:18:42 2019 @@ -479,7 +479,7 @@ When backup session is completed, Stash will update the respective `Repository` Run the following command to check if a snapshot has been sent to the backend, -```console +```bash $ kubectl get repository -n demo persistentvolumeclaim-nfs-pvc NAME INTEGRITY SIZE SNAPSHOT-COUNT LAST-SUCCESSFUL-BACKUP AGE persistentvolumeclaim-nfs-pvc true 41 B 1 3m37s 5m11s @@ -498,7 +498,7 @@ If we navigate to `stash-backup/demo/persistentvolumeclaim/nfs-pvc` directory of To cleanup the Kubernetes resources created by this tutorial, run: -```console +```bash kubectl delete -n demo backupBlueprint/pvc-backup-blueprint kubectl delete -n demo repository/persistentvolumeclaim-nfs-pvc kubectl delete -n demo backupconfiguration/persistentvolumeclaim-nfs-pvc diff --git a/docs/guides/latest/auto-backup/workload.md b/docs/guides/latest/auto-backup/workload.md index 3b05cf9d..96e55dd5 100644 --- a/docs/guides/latest/auto-backup/workload.md +++ b/docs/guides/latest/auto-backup/workload.md @@ -32,7 +32,7 @@ This tutorial will show you how to configure automatic backup for Kubernetes wor To keep things isolated, we are going to use a separate namespace called `demo` throughout this tutorial. 
-```console +```bash $ kubectl create namespace demo namespace/demo created ``` @@ -47,7 +47,7 @@ We are going to use [GCS Backend](/docs/guides/latest/backends/gcs.md) to store At first, let's create a Storage Secret for the GCS backend, -```console +```bash $ echo -n 'changeit' > RESTIC_PASSWORD $ echo -n '' > GOOGLE_PROJECT_ID $ mv downloaded-sa-json.key GOOGLE_SERVICE_ACCOUNT_JSON_KEY @@ -88,7 +88,7 @@ Note that we have used some variables (format: `${}`) in `backend Let's create the `BackupBlueprint` that we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/auto-backup/workload/backupblueprint.yaml backupblueprint.stash.appscode.com/workload-backup-blueprint created ``` @@ -197,7 +197,7 @@ Notice the `metadata.annotations` field. We have specified the automatic backup Let's create the above Deployment, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/auto-backup/workload/deployment.yaml configmap/stash-sample-data-1 created configmap/stash-sample-data-2 created @@ -210,7 +210,7 @@ If everything goes well, Stash will create a `Repository` and a `BackupConfigura Verify that the Repository has been created successfully by the following command, -```console +```bash $ kubectl get repository -n demo NAME INTEGRITY SIZE SNAPSHOT-COUNT LAST-SUCCESSFUL-BACKUP AGE deployment-stash-demo 9s @@ -218,7 +218,7 @@ deployment-stash-demo If we view the YAML of this Repository, we are going to see that the variables `${TARGET_NAMESPACE}`, `${TARGET_KIND}` and `${TARGET_NAME}` has been replaced by `demo`, `deployment` and `stash-demo` respectively. 
-```console +```bash $ kubectl get repository -n demo deployment-stash-demo -o yaml ``` @@ -241,7 +241,7 @@ spec: Verify that the `BackupConfiguration` has been created by the following command, -```console +```bash $ kubectl get backupconfiguration -n demo NAME TASK SCHEDULE PAUSED AGE deployment-stash-demo */15 * * * * 19s @@ -249,7 +249,7 @@ deployment-stash-demo */15 * * * * 19s Let's check the YAML of this `BackupConfiguration`, -```console +```bash $ kubectl get backupconfiguration -n demo deployment-stash-demo -o yaml ``` @@ -292,7 +292,7 @@ Notice that the `spec.target.ref` is pointing to the `stash-demo` Deployment. Al Now, wait for the next backup schedule. Run the following command to watch `BackupSession` crd: -```console +```bash $ watch -n 1 kubectl get backupsession -n demo -l=stash.appscode.com/backup-configuration=deployment-stash-demo Every 1.0s: kubectl get backupsession -n demo workstation: Wed Jun 26 12:20:31 2019 @@ -308,7 +308,7 @@ When backup session is completed, Stash will update the respective `Repository` Run the following command to check if the snapshots are stored in the backend, -```console +```bash $ kubectl get repository -n demo deployment-stash-demo NAME INTEGRITY SIZE SNAPSHOT-COUNT LAST-SUCCESSFUL-BACKUP AGE deployment-stash-demo true 246 B 2 70s 5m @@ -406,7 +406,7 @@ Notice the `metadata.annotations` field. 
We have specified automatic backup spec Let's create the StatefulSet we have created above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/auto-backup/workload/statefulset.yaml service/headless created statefulset.apps/sts-demo created @@ -416,7 +416,7 @@ statefulset.apps/sts-demo created Verify that a Repository has been created for this StatefulSet using the following command, -```console +```bash $ kubectl get repository -n demo NAME INTEGRITY SIZE SNAPSHOT-COUNT LAST-SUCCESSFUL-BACKUP AGE deployment-stash-demo true 410 B 10 14s 39m @@ -427,7 +427,7 @@ Here, `statefulset-sts-demo` Repository has been created for our `sts-demo` Stat Let's view the YAML of the Repository, -```console +```bash $ kubectl get repository -n demo statefulset-sts-demo -o yaml ``` @@ -452,7 +452,7 @@ Notice that the variables of the `prefix` field of `BackupBlueprint` is now repl Verify that a `BackupConfiguration` has been created for this StatefulSet using the following command, -```console +```bash $ kubectl get backupconfiguration -n demo NAME TASK SCHEDULE PAUSED AGE deployment-stash-demo */5 * * * * 40m @@ -461,7 +461,7 @@ statefulset-sts-demo */5 * * * * 105s Here, `statefulset-sts-demo` has been created for the StatefulSet `sts-demo`. You can check the YAML of this `BackupConfiguration` to see that the target field is pointing to this StatefulSet. -```console +```bash $ kubectl get backupconfiguration -n demo statefulset-sts-demo -o yaml ``` @@ -502,7 +502,7 @@ spec: Now, wait for the next backup schedule. Watch the `BackupSession` of the BackupConfiguration `statefulset-sts-demo` using the following command, -```console +```bash $ watch -n 1 kubectl get backupsession -n demo -l=stash.appscode.com/backup-configuration=statefulset-sts-demo Every 1.0s: kubectl get backupsession -n demo -l=stash.appscode.com/backup-... 
workstation: Wed Jun 26 13:01:22 2019 @@ -514,7 +514,7 @@ statefulset-sts-demo-1561532403 BackupConfiguration statefulset-sts-demo S Once the backup session is completed, verify that the `Repository` has been updated to reflect the backup using the following command, -```console +```bash $ kubectl get repository -n demo statefulset-sts-demo NAME INTEGRITY SIZE SNAPSHOT-COUNT LAST-SUCCESSFUL-BACKUP AGE statefulset-sts-demo true 0 B 6 32s 7m29s @@ -589,7 +589,7 @@ Notice the `metadata.annotations` field. We have specified automatic backup spec Let's create the DaemonSet we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/auto-backup/workload/daemonset.yaml configmap/my-daemon-config created daemonset.apps/dmn-demo created @@ -599,7 +599,7 @@ daemonset.apps/dmn-demo created Verify that a `Repository` has been created for this DaemonSet using the following command, -```console +```bash $ kubectl get repository -n demo NAME INTEGRITY SIZE SNAPSHOT-COUNT LAST-SUCCESSFUL-BACKUP AGE daemonset-dmn-demo 28s @@ -611,7 +611,7 @@ Here, `daemonset-dmn-demo` Repository has been created for our `dmn-demo` Daemon Let's view the YAML of the Repository, -```console +```bash $ kubectl get repository -n demo daemonset-dmn-demo -o yaml ``` @@ -634,7 +634,7 @@ spec: Verify that a `BackupConfiguration` has been created for this DaemonSet using the following command, -```console +```bash $ kubectl get backupconfiguration -n demo NAME TASK SCHEDULE PAUSED AGE daemonset-dmn-demo */5 * * * * 90s @@ -644,7 +644,7 @@ statefulset-sts-demo */5 * * * * 32m Here, `daemonset-dmn-demo` has been created for the DaemonSet `dmn-demo`. You can check the YAML of this `BackupConfiguration` to see that the target field is pointing to this DaemonSet. -```console +```bash $ kubectl get backupconfiguration -n demo daemonset-dmn-demo -o yaml ``` @@ -682,7 +682,7 @@ spec: Now, wait for the next backup schedule. 
Watch the `BackupSession` of the BackupConfiguration `daemonset-dmn-demo` using the following command, -```console +```bash $ watch -n 1 kubectl get backupsession -n demo -l=stash.appscode.com/backup-configuration=daemonset-dmn-demo Every 1.0s: kubectl get backupsession -n demo -l=stash.appscode.com/backup-... workstation: Wed Jun 26 13:30:14 2019 @@ -695,7 +695,7 @@ daemonset-dmn-demo-1561534208 BackupConfiguration daemonset-dmn-demo Succe Once the backup session is completed, verify that the `Repository` has been updated to reflect the backup using the following command, -```console +```bash $ kubectl get repository -n demo daemonset-dmn-demo NAME INTEGRITY SIZE SNAPSHOT-COUNT LAST-SUCCESSFUL-BACKUP AGE daemonset-dmn-demo true 51 B 1 5s 4m27s @@ -714,7 +714,7 @@ If we navigate to `stash-backup/demo/daemonset/dmn-demo` directory of our GCS bu To cleanup the Kubernetes resources created by this tutorial, run: -```console +```bash kubectl delete -n demo deployment/stash-demo kubectl delete -n demo statefulset/sts-demo kubectl delete -n demo daemonset/dmn-demo diff --git a/docs/guides/latest/backends/azure.md b/docs/guides/latest/backends/azure.md index 25d864ec..63a69d91 100644 --- a/docs/guides/latest/backends/azure.md +++ b/docs/guides/latest/backends/azure.md @@ -30,7 +30,7 @@ To configure storage secret for this backend, following secret keys are needed: Create storage secret as below, -```console +```bash $ echo -n 'changeit' > RESTIC_PASSWORD $ echo -n '' > AZURE_ACCOUNT_NAME $ echo -n '' > AZURE_ACCOUNT_KEY @@ -71,7 +71,7 @@ spec: Create the `Repository` we have shown above using the following command, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/backends/azure.yaml repository/azure-repo created ``` diff --git a/docs/guides/latest/backends/b2.md b/docs/guides/latest/backends/b2.md index c4d98fc6..c5b23997 100644 --- a/docs/guides/latest/backends/b2.md +++ 
b/docs/guides/latest/backends/b2.md @@ -32,7 +32,7 @@ To configure storage secret for this backend, following secret keys are needed: Create storage secret as below, -```console +```bash $ echo -n 'changeit' > RESTIC_PASSWORD $ echo -n '' > B2_ACCOUNT_ID $ echo -n '' > B2_ACCOUNT_KEY @@ -73,7 +73,7 @@ spec: Create the `Repository` we have shown above using the following command, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/backends/b2.yaml repository/b2-repo created ``` diff --git a/docs/guides/latest/backends/gcs.md b/docs/guides/latest/backends/gcs.md index 1b0c4422..1f6d1492 100644 --- a/docs/guides/latest/backends/gcs.md +++ b/docs/guides/latest/backends/gcs.md @@ -32,7 +32,7 @@ To configure storage secret for this backend, following secret keys are needed: Create storage secret as below, -```console +```bash $ echo -n 'changeit' > RESTIC_PASSWORD $ echo -n '' > GOOGLE_PROJECT_ID $ mv downloaded-sa-json.key GOOGLE_SERVICE_ACCOUNT_JSON_KEY @@ -73,7 +73,7 @@ spec: Create the `Repository` we have shown above using the following command, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/backends/gcs.yaml repository/gcs-repo created ``` diff --git a/docs/guides/latest/backends/local.md b/docs/guides/latest/backends/local.md index 381a69c8..25180bba 100644 --- a/docs/guides/latest/backends/local.md +++ b/docs/guides/latest/backends/local.md @@ -30,7 +30,7 @@ To configure storage secret for local backend, following secret keys are needed: Create storage secret as below, -```console +```bash $ echo -n 'changeit' > RESTIC_PASSWORD $ kubectl create secret generic -n demo local-secret --from-file=./RESTIC_PASSWORD secret/local-secret created @@ -71,7 +71,7 @@ spec: Create the `Repository` we have shown above using the following command, -```console +```bash $ kubectl apply -f 
https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/backends/local_hostPath.yaml repository/local-repo-with-hostpath created ``` @@ -99,7 +99,7 @@ spec: Create the `Repository` we have shown above using the following command, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/backends/local_pvc.yaml repository/local-repo-with-pvc created ``` @@ -126,7 +126,7 @@ spec: Create the `Repository` we have shown above using the following command, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/backends/local_nfs.yaml repository/local-repo-with-nfs created ``` @@ -155,7 +155,7 @@ spec: Create the `Repository` we have shown above using the following command, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/backends/local_gcePersistentDisk.yaml repository/local-repo-with-gcepersistentdisk created ``` @@ -184,7 +184,7 @@ spec: Create the `Repository` we have shown above using the following command, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/backends/local_awsElasticBlockStore.yaml repository/local-repo-with-awsebs created ``` @@ -213,7 +213,7 @@ spec: Create the `Repository` we have shown above using the following command, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/backends/local_azureDisk.yaml repository/local-repo-with-azuredisk created ``` @@ -240,7 +240,7 @@ spec: Create the `Repository` we have shown above using the following command, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/backends/local_storageOS.yaml 
repository/local-repo-with-storageos created ``` @@ -265,7 +265,7 @@ spec: Create the `Repository` we have shown above using the following command, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/backends/emptyDir.yaml repository/local-repo-with-emptydir created ``` diff --git a/docs/guides/latest/backends/rest.md b/docs/guides/latest/backends/rest.md index 0cf354a5..0beecd7e 100644 --- a/docs/guides/latest/backends/rest.md +++ b/docs/guides/latest/backends/rest.md @@ -31,7 +31,7 @@ To configure storage secret for this backend, following secret keys are needed: Create storage secret as below, -```console +```bash $ echo -n 'changeit' > RESTIC_PASSWORD $ echo -n '' > REST_SERVER_USERNAME $ echo -n '' > REST_SERVER_PASSWORD @@ -69,7 +69,7 @@ spec: Create the `Repository` we have shown above using the following command, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/backends/rest.yaml repository/rest-repo created ``` diff --git a/docs/guides/latest/backends/s3.md b/docs/guides/latest/backends/s3.md index 33cc35f0..26dff670 100644 --- a/docs/guides/latest/backends/s3.md +++ b/docs/guides/latest/backends/s3.md @@ -33,7 +33,7 @@ To configure storage secret for this backend, following secret keys are needed: Create storage secret as below, -```console +```bash $ echo -n 'changeit' > RESTIC_PASSWORD $ echo -n '' > AWS_ACCESS_KEY_ID $ echo -n '' > AWS_SECRET_ACCESS_KEY @@ -46,7 +46,7 @@ secret/s3-secret created For TLS secured Minio Server, create secret as below, -```console +```bash $ echo -n 'changeit' > RESTIC_PASSWORD $ echo -n '' > AWS_ACCESS_KEY_ID $ echo -n '' > AWS_SECRET_ACCESS_KEY @@ -92,7 +92,7 @@ spec: Create the `Repository` we have shown above using the following command, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" 
>}}/docs/examples/guides/latest/backends/s3.yaml repository/s3-repo created ``` diff --git a/docs/guides/latest/backends/swift.md b/docs/guides/latest/backends/swift.md index 15b26eaa..08e6bffc 100644 --- a/docs/guides/latest/backends/swift.md +++ b/docs/guides/latest/backends/swift.md @@ -96,7 +96,7 @@ For token-based authentication, following secret keys are needed: A sample storage secret creation for keystone v2 authentication is shown below, -```console +```bash $ echo -n 'changeit' > RESTIC_PASSWORD $ echo -n '' > OS_AUTH_URL $ echo -n '' > OS_TENANT_ID @@ -144,7 +144,7 @@ spec: Create the `Repository` we have shown above using the following command, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/backends/swift.yaml repository/swift-repo created ``` diff --git a/docs/guides/latest/batch-backup/batch-backup.md b/docs/guides/latest/batch-backup/batch-backup.md index c3e7154e..a6ac8140 100644 --- a/docs/guides/latest/batch-backup/batch-backup.md +++ b/docs/guides/latest/batch-backup/batch-backup.md @@ -36,7 +36,7 @@ This tutorial will demonstrate how to use Stash to take backup of an application To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. -```console +```bash $ kubectl create ns demo namespace/demo created ``` @@ -53,7 +53,7 @@ We are going to use MySQL as the database for our WordPress site. 
So, let's depl Let's create a secret for the MySQL database, -```console +```bash $ kubectl create secret -n demo generic mysql-pass \ --from-literal=username=root \ --from-literal=password=mysqlpass @@ -142,7 +142,7 @@ spec: Let's create the above MySQL Deployment, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/batch-backup/mysql.yaml service/wordpress-db created deployment.apps/wordpress-db created @@ -151,7 +151,7 @@ persistentvolumeclaim/mysql-pvc created Now, wait for the MySQL pod to go into running state, -```console +```bash $ kubectl get pod -n demo -l app=wordpress-db NAME READY STATUS RESTARTS AGE wordpress-db-58657b89b9-kgt76 1/1 Running 0 104s @@ -159,7 +159,7 @@ wordpress-db-58657b89b9-kgt76 1/1 Running 0 104s Let's check if the MySQL database is ready to accept connections, -```console +```bash $ kubectl logs -n demo -f wordpress-db-58657b89b9-kgt76 Initializing database .... @@ -252,7 +252,7 @@ spec: Let's create the above Deployment, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/batch-backup/wordpress.yaml service/wordpress-app created deployment.apps/wordpress-app created @@ -261,7 +261,7 @@ persistentvolumeclaim/wordpress-pvc created Now, wait for the wordpress pod to go into running state, -```console +```bash $ kubectl get pod -n demo -l app=wordpress-app NAME READY STATUS RESTARTS AGE wordpress-app-59b69858f9-48phf 1/1 Running 0 3m40s @@ -273,7 +273,7 @@ So, we can see that our WordPress site is running. Now, its time to insert some At first, lets port-forward the `wordpress-app` Service that we have created with the WordPress deployment. 
-```console +```bash $ kubectl port-forward -n demo service/wordpress-app 8080:80 Forwarding from 127.0.0.1:8080 -> 80 Forwarding from [::1]:8080 -> 80 @@ -295,7 +295,7 @@ Once we have completed the setup, let's create some sample blog posts. Here, I h When we save the post, WordPress will store it into the database. If we exec into the database pod, we will see the post has been stored there. -```console +```bash $ kubectl exec -it -n demo wordpress-db-58657b89b9-kgt76 -- mysql --user=root --password=mysqlpass ... mysql> show databases; @@ -356,7 +356,7 @@ So, we can see that our post has been stored with `stash-batch-backup-test` name Also, WordPress pod write some files in its `/var/www/html` directory. Let's see whats file has been written there: -```console +```bash $ kubectl exec -it -n demo wordpress-app-59b69858f9-48phf -- ls /var/www/html index.php wp-blog-header.php wp-cron.php wp-mail.php license.txt wp-comments-post.php wp-includes wp-settings.php @@ -406,7 +406,7 @@ Here, Let's create the above AppBinding, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/batch-backup/appbinding.yaml appbinding.appcatalog.appscode.com/wordpress-db ``` @@ -421,7 +421,7 @@ We are going to store our backed up data into a GCS bucket. 
We have to create a Let's create a Secret called `gcs-secret` with access credentials to our desired GCS bucket, -```console +```bash $ echo -n 'changeit' > RESTIC_PASSWORD $ echo -n '' > GOOGLE_PROJECT_ID $ cat /path/to/downloaded-sa-json.key > GOOGLE_SERVICE_ACCOUNT_JSON_KEY @@ -452,7 +452,7 @@ spec: Let's create the Repository we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/batch-backup/repository.yaml repository.stash.appscode.com/gcs-repo created ``` @@ -518,7 +518,7 @@ Here, Let's create the `BackupBatch` crd we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/batch-backup/backupbatch.yaml backupbatch.stash.appscode.com/wordpress-backup created ``` @@ -529,7 +529,7 @@ Stash will also create a `CronJob` with the schedule specified in `spec.schedule Verify that the CronJob has been created successfully, -```console +```bash $ kubectl get cronjob -n demo NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE stash-backup-wordpress-backup */5 * * * * False 0 32s @@ -539,7 +539,7 @@ stash-backup-wordpress-backup */5 * * * * False 0 The CronJob will trigger a backup on each scheduled slot by creating a `BackupSession` CR. 
Let's wait for a `BackupSession` to complete, -```console +```bash $ kubectl get backupsession -n demo -w NAME INVOKER-TYPE INVOKER-NAME PHASE AGE wordpress-backup-1597245602 BackupBatch wordpress-backup 0s @@ -555,7 +555,7 @@ When a backup session is completed, Stash will update the respective `Repository Run the following command to check if a backup snapshot has been stored in the backend, -```console +```bash $ kubectl get repository -n demo gcs-repo NAME INTEGRITY SIZE SNAPSHOT-COUNT LAST-SUCCESSFUL-BACKUP AGE gcs-repo true 183.5Mi 2 3s 38m @@ -586,14 +586,14 @@ Here, we are going to see two different restore scenarios: At first, let stop the backup so that no new backup happens during the restore process. Let's set `spec.paused` section of `BackupBatch` to `true` which will stop taking further scheduled backup. -```console +```bash $ kubectl patch backupbatch -n demo wordpress-backup --type="merge" --patch='{"spec": {"paused": true}}' backupbatch.stash.appscode.com/wordpress-backup patched ``` It should suspend the respective CronJob which is responsible for triggering backup at a scheduled slot. Let's verify that the CronJob has been suspended. -```console +```bash $ kubectl get cronjob -n demo NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE stash-backup-wordpress-backup */5 * * * * True 0 12h 13h @@ -607,7 +607,7 @@ In this section, we are going to simulate a disaster scenario where we will dama At first, let's corrupt the database. Here, we are going to delete the sample post we have created earlier. -```console +```bash $ kubectl exec -it -n demo wordpress-db-58657b89b9-kgt76 -- mysql --user=root --password=mysqlpass .... mysql> show tables from wordpress; @@ -696,13 +696,13 @@ So, we can see that the sample post is gone. Only, the `Hello World!` post is no Now, let's do some damage to our WordPress deployment too. Here, we are going to remove the `wp-content` directory from `/var/www/html` directory of our WordPress pod. 
-```console +```bash $ kubectl exec -n demo wordpress-app-5b778b446-gtd6d -c wordpress -- rm -r /var/www/html/wp-content ``` Verify that the `wp-content` directory has been removed. -```console +```bash $ kubectl exec -n demo wordpress-app-5b778b446-gtd6d -c wordpress -- ls /var/www/html index.php license.txt @@ -780,14 +780,14 @@ Here, Let's create the above `RestoreBatch` object, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/batch-backup/restorebatch.yaml restorebatch.stash.appscode.com/wordpress-restore created ``` Now, wait for the `RestoreBatch` phase to go into `Succeeded` state. -```console +```bash $ kubectl get restorebatch -n demo -w NAME REPOSITORY PHASE AGE wordpress-restore gcs-repo Running 7s @@ -800,7 +800,7 @@ We can see from above that Stash has successfully restored both components. Now, Let's verify that `sample-batch-backup-test` post that we had deleted from the database has been restored. -```console +```bash $ kubectl exec -n demo wordpress-db-58657b89b9-kgt76 -- mysql --user=root --password=mysqlpass -e "SELECT post_name FROM wordpress.wp_posts;" mysql: [Warning] Using a password on the command line interface can be insecure. post_name @@ -816,7 +816,7 @@ We can see that the `stash-batch-backup-test` post is now present in the databas Again, let verify whether the `wp-content` directory that we had removed from the WordPress deployment's pod has been restored or not. 
-```console +```bash $ kubectl exec -n demo wordpress-app-684b577c89-wpsqs -c wordpress -- ls /var/www/html index.php license.txt @@ -859,13 +859,13 @@ Here, we are going to delete the sample post again from the database and then re Let's delete the sample post from the database: -```console +```bash $ kubectl exec -n demo wordpress-db-58657b89b9-kgt76 -- mysql --user=root --password=mysqlpass -e "DELETE FROM wordpress.wp_posts WHERE post_name='stash-batch-backup-test';" ``` Verify that the sample post has been removed: -```console +```bash $ kubectl exec -n demo wordpress-db-58657b89b9-kgt76 -- mysql --user=root --password=mysqlpass -e "SELECT post_name FROM wordpress.wp_posts;" mysql: [Warning] Using a password on the command line interface can be insecure. post_name @@ -905,14 +905,14 @@ spec: Let's create the above `RestoreSession` object, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/batch-backup/restoresession.yaml restoresession.stash.appscode.com/wordpress-db-restore created ``` Now, wait for the `RestoreSession` phase to go into `Succeeded` state, -```console +```bash $ kubectl get restoresession -n demo -w NAME REPOSITORY PHASE AGE wordpress-db-restore gcs-repo Running 10s @@ -925,7 +925,7 @@ So, we can see that Stash has successfully restored the database. Let's verify whether the sample post has been restored or not, -```console +```bash $ kubectl exec -n demo wordpress-db-58657b89b9-kgt76 -- mysql --user=root --password=mysqlpass -e "SELECT post_name FROM wordpress.wp_posts;" mysql: [Warning] Using a password on the command line interface can be insecure. 
post_name @@ -943,7 +943,7 @@ We can see from the above output that the `stash-batch-backup-test` post has bee To clean up the Kubernetes resources created by this tutorial, run: -```console +```bash kubectl delete -n demo backupbatch wordpress-backup kubectl delete -n demo restorebatch wordpress-restore kubectl delete -n demo restoresession wordpress-db-restore diff --git a/docs/guides/latest/cli/cli.md b/docs/guides/latest/cli/cli.md index 68cb0ce1..15a1c5aa 100644 --- a/docs/guides/latest/cli/cli.md +++ b/docs/guides/latest/cli/cli.md @@ -54,13 +54,13 @@ To create a `Repository`, you need to provide a `Repository` name and backend in **Format:** -```console +```bash kubectl stash create [flags] ``` **Example:** -```console +```bash $ kubectl stash create repository gcs-repo --namespace=demo --secret=gcs-secret --bucket=appscode-qa --prefix=/source/data --provider=gcs ``` @@ -95,13 +95,13 @@ To create a `BackupConfiguration`, you need to provide `BackupConfiguration` nam **Format:** -```console +```bash kubectl stash create [flags] ``` **Example:** -```console +```bash $ kubectl stash create backupconfig ss-backup --namespace=demo --repository=gcs-repo --schedule="*/4 * * * *" --target-apiversion=apps/v1 --target-kind=StatefulSet --target-name=stash-demo --paths=/source/data --volume-mounts=source-data:/source/data --keep-last=5 --prune=true ``` @@ -133,13 +133,13 @@ To create a `RestoreSession`, you need to provide a `Repository` name, Target or **Format:** -```console +```bash kubectl stash create restoresession [flags] ``` **Example:** -```console +```bash $ kubectl stash create restoresession ss-restore --namespace=demo --repository=gcs-repo --target-apiversion=apps/v1 --target-kind=StatefulSet --target-name=stash-recovered --paths=/source/data --volume-mounts=source-data:/source/data ``` @@ -158,13 +158,13 @@ To copy a Secret, you need to provide Secret name and destination namespace. 
You **Format:** -```console +```bash kubectl stash cp secret [flags] ``` **Example:** -```console +```bash $ kubectl stash cp secret my-secret --namespace=demo --to-namespace=demo1 ``` @@ -184,13 +184,13 @@ You will provide the destination namespace by using flag. The available flags ar **Format:** -```console +```bash kubectl stash cp repository [flags] ``` **Example:** -```console +```bash $ kubectl stash cp repository my-repo --namespce=demo --to-namespace=demo1 ``` @@ -211,13 +211,13 @@ You will provide the destination namespace by using flags. The available flags a **Format:** -```console +```bash kubectl stash cp backupconfig [flags] ``` **Example:** -```console +```bash $ kubectl stash cp backupconfig my-backupconfig --namespace=demo --to-namespace=demo1 ``` @@ -232,7 +232,7 @@ To copy a VolumeSnapshot, you need to provide VolumeSnapshot name and destinatio **Example:** -```console +```bash $ kubectl stash cp volumesnapshot my-vol-snap --namespace=demo --to-namespace=demo1 ``` @@ -261,13 +261,13 @@ You will provide the backend credential by using flags. The available flags are: **Format:** -```console +```bash kubectl stash clone pvc [flags] ``` **Example:** -```console +```bash $ kubectl stash clone pvc my-pvc -n demo --to-namespace=demo-1 --secret= --bucket= --prefix= --provider= ``` @@ -278,13 +278,13 @@ To trigger an instant backup, you need to have a BackupConfiguration in your clu **Format:** -```console +```bash kubectl stash trigger [flags] ``` **Example:** -```console +```bash $ kubectl stash trigger my-config --namespace=demo ``` @@ -295,12 +295,12 @@ To unlock the Repository, you need to provide a Repository name. 
You can also pr **Format:** -```console +```bash kubectl stash unlock [flags] ``` **Example:** -```console +```bash $ kubectl stash unlock my-repo --namespace=demo ``` diff --git a/docs/guides/latest/hooks/backup-and-restore-hooks.md b/docs/guides/latest/hooks/backup-and-restore-hooks.md index 3f7a83f3..8577df9a 100644 --- a/docs/guides/latest/hooks/backup-and-restore-hooks.md +++ b/docs/guides/latest/hooks/backup-and-restore-hooks.md @@ -39,7 +39,7 @@ You should be familiar with the following `Stash` concepts: To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. -```console +```bash $ kubectl create ns demo namespace/demo created ``` @@ -73,7 +73,7 @@ spec: Let's create the above `MySQL` CR, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/hooks/sample-mysql.yaml mysql.kubedb.com/sample-mysql created ``` @@ -82,7 +82,7 @@ KubeDB will deploy a MySQL database according to the above specification. It wil Wait for the database to go into `Running` state, -```console +```bash $ kubectl get mysql -n demo -w NAME VERSION STATUS AGE sample-mysql 8.0.14 Creating 5s @@ -93,7 +93,7 @@ sample-mysql 8.0.14 Running 2m7s Verify that KubeDB has created a Secret for the database. -```console +```bash $ kubectl get secret -n demo -l=kubedb.com/name=sample-mysql NAME TYPE DATA AGE sample-mysql-auth Opaque 2 5m7s @@ -103,7 +103,7 @@ sample-mysql-auth Opaque 2 5m7s KubeDB creates an `AppBinding` CR that holds the necessary information to connect with the database. 
Verify that the `AppBinding` has been created for the above database: -```console +```bash $ kubectl get appbindings -n demo -l=kubedb.com/name=sample-mysql NAME TYPE VERSION AGE sample-mysql kubedb.com/mysql 8.0.14 66s @@ -111,7 +111,7 @@ sample-mysql kubedb.com/mysql 8.0.14 66s If you check the YAML of the `AppBinding`, you will see the connection information and respective Secret reference to access the database is presents in `spec` section. -```console +```bash $ kubectl get appbindings sample-mysql -n demo -o yaml ``` @@ -151,7 +151,7 @@ Now, let's insert some sample data into the above database. Here, we are going t At first, let's export the database credentials as environment variables in our current shell so that we can use those variables to access the database instead of typing username and password every time. -```console +```bash # export username from the database secret $ export MYSQL_USER=$(kubectl get secret -n demo sample-mysql-auth -o jsonpath='{.data.username}'| base64 -d) @@ -169,7 +169,7 @@ CWg2hru8b0Yu7dzS Now, let's identify the database pod, -```console +```bash $ kubectl get pods -n demo --selector="kubedb.com/name=sample-mysql" NAME READY STATUS RESTARTS AGE sample-mysql-0 1/1 Running 0 6m50s @@ -177,7 +177,7 @@ sample-mysql-0 1/1 Running 0 6m50s Let's `exec` into the database pod and insert sample data, -```console +```bash $ kubectl exec -it -n demo sample-mysql-0 -- mysql --user=$MYSQL_USER --password=$MYSQL_PASSWORD mysql: [Warning] Using a password on the command line interface can be insecure. @@ -239,7 +239,7 @@ We are going to store our backed up data into a GCS bucket. 
At first, we need to Let's create a secret called `gcs-secret` with access credentials to our desired GCS bucket, -```console +```bash $ echo -n 'changeit' > RESTIC_PASSWORD $ echo -n '' > GOOGLE_PROJECT_ID $ cat /path/to/downloaded-sa-json.key > GOOGLE_SERVICE_ACCOUNT_JSON_KEY @@ -270,7 +270,7 @@ spec: Let's create the `Repository` we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/hooks/repository.yaml repository.stash.appscode.com/gcs-repo created ``` @@ -322,7 +322,7 @@ spec: Let's create the above `BackupConfiguration`, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/hooks/pre_backup_hook_demo.yaml backupconfiguration.stash.appscode.com/backup-hook-demo created ``` @@ -331,7 +331,7 @@ backupconfiguration.stash.appscode.com/backup-hook-demo created If everything goes well, Stash will create a CronJob with the schedule specified in `spec.schedule` field of the `BackupConfiguration` CR. -```console +```bash $ kubectl get cronjob -n demo NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE stash-backup-backup-hook-demo */5 * * * * False 0 74s @@ -343,7 +343,7 @@ The `stash-backup-backup-hook-demo` CronJob will trigger a backup on each schedu Wait for a schedule to appear. Run the following command to watch `BackupSession` CR, -```console +```bash $ kubectl get backupsession -n demo -w NAME INVOKER-TYPE INVOKER-NAME PHASE AGE @@ -358,7 +358,7 @@ Here, the phase `Succeeded` means that the backup process has been completed suc Once a backup is completed, Stash will update the respective `Repository` CR to reflect the backup completion. 
Check that the repository `gcs-repo` has been updated by the following command, -```console +```bash $ kubectl get repository -n demo gcs-repo NAME INTEGRITY SIZE SNAPSHOT-COUNT LAST-SUCCESSFUL-BACKUP AGE gcs-repo true 1 75s 55m @@ -372,7 +372,7 @@ If the `preBackup` hook executes successfully, the database will be marked as re Let's verify that the database is read-only by trying to execute a write operation, -```console +```bash $ kubectl exec -it -n demo sample-mysql-0 -- mysql --user=$MYSQL_USER --password=$MYSQL_PASSWORD -e "CREATE DATABASE read-OnlyTest;" mysql: [Warning] Using a password on the command line interface can be insecure. ERROR 1290 (HY000) at line 1: The MySQL server is running with the --super-read-only option so it cannot execute this statement @@ -381,7 +381,7 @@ command terminated with exit code 1 Here, the error message clearly states the database is now read-only. Let's try to execute a read operation. -```console +```bash $ kubectl exec -it -n demo sample-mysql-0 -- mysql --user=$MYSQL_USER --password=$MYSQL_PASSWORD -e "SELECT * FROM companyRecord.employee;" mysql: [Warning] Using a password on the command line interface can be insecure. +----+----------+--------+ @@ -441,7 +441,7 @@ spec: Let's apply the update, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/hooks/post_backup_hook_demo.yaml backupconfiguration.stash.appscode.com/backup-hook-demo configured ``` @@ -450,7 +450,7 @@ backupconfiguration.stash.appscode.com/backup-hook-demo configured Now, wait for the next backup slot, -```console +```bash $ kubectl get backupsession -n demo -w NAME INVOKER-TYPE INVOKER-NAME PHASE AGE @@ -464,14 +464,14 @@ backup-hook-demo-1579179905 BackupConfiguration backup-hook-demo Succeeded If the `postBackup` hook has been executed successfully, the database should be writable again. 
Let's try to execute a write operation to verify that the database writable, -```console +```bash $ kubectl exec -it -n demo sample-mysql-0 -- mysql --user=$MYSQL_USER --password=$MYSQL_PASSWORD -e "CREATE DATABASE postBackupHookTest;" mysql: [Warning] Using a password on the command line interface can be insecure. ``` Verify the test database has been created successfully, -```console +```bash $ kubectl exec -it -n demo sample-mysql-0 -- mysql --user=$MYSQL_USER --password=CWg2hru8b0Yu7dzS -e "SHOW DATABASES;" mysql: [Warning] Using a password on the command line interface can be insecure. @@ -497,14 +497,14 @@ In this section, we are going to demonstrate `preRestore` and `postRestore` hook At first, let stop the backup so that no new backup happens during the restore process. Let's set `spec.paused` section of `BackupConfiguration` to `true` which will stop taking further scheduled backup. -```console +```bash $ kubectl patch backupconfiguration -n demo backup-hook-demo --type="merge" --patch='{"spec": {"paused": true}}' backupconfiguration.stash.appscode.com/backup-hook-demo patched ``` It should suspend the respective CronJob which is responsible for triggering backup at a scheduled slot. Let's verify that the CronJob has been suspended. -```console +```bash $ kubectl get cronjob -n demo NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE stash-backup-backup-hook-demo */5 * * * * True 0 5m13s 29m @@ -514,14 +514,14 @@ stash-backup-backup-hook-demo */5 * * * * True 0 5m13s Now, let's simulate a disaster scenario. Here, we are going to delete the `companyRecord` database before restoring so that we can verify that the data has been restored from backup. -```console +```bash $ kubectl exec -it -n demo sample-mysql-0 -- mysql --user=$MYSQL_USER --password=$MYSQL_PASSWORD -e "DROP DATABASE companyRecord;" mysql: [Warning] Using a password on the command line interface can be insecure. 
``` Verify that the database has been deleted, -```console +```bash $ kubectl exec -it -n demo sample-mysql-0 -- mysql --user=$MYSQL_USER --password=$MYSQL_PASSWORD -e "SHOW DATABASES;" mysql: [Warning] Using a password on the command line interface can be insecure. +--------------------+ @@ -575,7 +575,7 @@ spec: Let's create the above `RestoreSession`, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/hooks/pre_restore_hook_demo.yaml restoresession.stash.appscode.com/pre-restore-hook-demo created ``` @@ -584,7 +584,7 @@ restoresession.stash.appscode.com/pre-restore-hook-demo created Now, wait for the restore process to complete, -```console +```bash $ kubectl get restoresession -n demo -w NAME REPOSITORY PHASE AGE pre-restore-hook-demo gcs-repo Running 10s @@ -598,7 +598,7 @@ Here, `RestoreSession` phase `Succeeded` means the restore process has been comp Verify that the data has been restored successfully, -```console +```bash $ kubectl exec -it -n demo sample-mysql-0 -- mysql --user=$MYSQL_USER --password=$MYSQL_PASSWORD -e "SELECT * FROM companyRecord.employee;" mysql: [Warning] Using a password on the command line interface can be insecure. +----+----------+--------+ @@ -618,14 +618,14 @@ Now, let's consider that you want to perform some migration on the database duri Let's delete the old database `companyRecord` before restoring so that we can verify that the data has been restored from backup. -```console +```bash $ kubectl exec -it -n demo sample-mysql-0 -- mysql --user=$MYSQL_USER --password=$MYSQL_PASSWORD -e "DROP DATABASE companyRecord;" mysql: [Warning] Using a password on the command line interface can be insecure. 
``` Verify that the database has been deleted, -```console +```bash $ kubectl exec -it -n demo sample-mysql-0 -- mysql --user=$MYSQL_USER --password=$MYSQL_PASSWORD -e "SHOW DATABASES;" mysql: [Warning] Using a password on the command line interface can be insecure. +--------------------+ @@ -673,7 +673,7 @@ spec: Let's create the above `RestoreSession`, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/hooks/post_restore_hook_demo.yaml restoresession.stash.appscode.com/post-restore-hook-demo created ``` @@ -682,7 +682,7 @@ restoresession.stash.appscode.com/post-restore-hook-demo created Now, wait for the restore process to complete, -```console +```bash $ kubectl get restoresession -n demo post-restore-hook-demo -w NAME REPOSITORY PHASE AGE post-restore-hook-demo gcs-repo Running 12s @@ -694,7 +694,7 @@ post-restore-hook-demo gcs-repo Succeeded 29s Verify that the `companyRecord` database has been restored and the `employee` table has been renamed to `salaryRecord`. -```console +```bash $ kubectl exec -it -n demo sample-mysql-0 -- mysql --user=$MYSQL_USER --password=$MYSQL_PASSWORD -e "SHOW TABLES IN companyRecord;" mysql: [Warning] Using a password on the command line interface can be insecure. +-------------------------+ @@ -706,7 +706,7 @@ mysql: [Warning] Using a password on the command line interface can be insecure. Let's check `salaryRecord` table contains the original data of the `employee` table, -```console +```bash $ kubectl exec -it -n demo sample-mysql-0 -- mysql --user=$MYSQL_USER --password=$MYSQL_PASSWORD -e "SELECT * FROM companyRecord.salaryRecord;" mysql: [Warning] Using a password on the command line interface can be insecure. 
+----+----------+--------+ @@ -722,7 +722,7 @@ So, we can see that the `postRestore` hook successfully performed migration on t To cleanup the Kubernetes resources created by this tutorial, run: -```console +```bash kubectl delete -n demo restoresession pre-restore-hook-demo post-restore-hook-demo kubectl delete -n demo backupconfiguration backup-hook-demo kubectl delete -n demo repository gcs-repo diff --git a/docs/guides/latest/hooks/batch-backup-hooks.md b/docs/guides/latest/hooks/batch-backup-hooks.md index 7519dea8..ad865d52 100644 --- a/docs/guides/latest/hooks/batch-backup-hooks.md +++ b/docs/guides/latest/hooks/batch-backup-hooks.md @@ -41,7 +41,7 @@ You should be familiar with the following `Stash` concepts: To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. -```console +```bash $ kubectl create ns demo namespace/demo created ``` @@ -85,7 +85,7 @@ spec: Let's create the above database, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/hooks/batch-backup/wordpress-mysql.yaml mysql.kubedb.com/wordpress-mysql created ``` @@ -94,7 +94,7 @@ KubeDB will deploy a MySQL database according to the above specification. It wil Wait for the database to go into `Running` state, -```console +```bash $ kubectl get mysql -n demo -w NAME VERSION STATUS AGE wordpress-mysql 8.0.14 Creating 17s @@ -105,7 +105,7 @@ wordpress-mysql 8.0.14 Running 3m10s Verify that KubeDB has created a Secret for the database. -```console +```bash $ kubectl get secret -n demo -l=kubedb.com/name=wordpress-mysql NAME TYPE DATA AGE wordpress-mysql-auth Opaque 2 4m1s @@ -115,7 +115,7 @@ wordpress-mysql-auth Opaque 2 4m1s KubeDB creates an `AppBinding` CR that holds the necessary information to connect with the database. 
Verify that the `AppBinding` has been created for the above database: -```console +```bash $ kubectl get appbindings -n demo -l=kubedb.com/name=wordpress-mysql NAME TYPE VERSION AGE wordpress-mysql kubedb.com/mysql 8.0.14 2m10s @@ -190,7 +190,7 @@ spec: Let's create the above resources, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/hooks/batch-backup/wordpress-deployment.yaml persistentvolumeclaim/wordpress-pvc created @@ -199,7 +199,7 @@ deployment.apps/wordpress-deployment created Verify that WordPress pod ready -```console +```bash $ kubectl get pod -n demo -l=app=wordpress,tier=frontend NAME READY STATUS RESTARTS AGE wordpress-deployment-586f94487c-nm8p5 1/1 Running 0 2m26s @@ -217,7 +217,7 @@ We are going to store our backed up data into a GCS bucket. At first, we need to Let's create a secret called `gcs-secret` with access credentials to our desired GCS bucket, -```console +```bash $ echo -n 'changeit' > RESTIC_PASSWORD $ echo -n '' > GOOGLE_PROJECT_ID $ cat /path/to/downloaded-sa-json.key > GOOGLE_SERVICE_ACCOUNT_JSON_KEY @@ -248,7 +248,7 @@ spec: Let's create the `Repository` we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/hooks/batch-backup/repository.yaml repository.stash.appscode.com/gcs-repo created ``` @@ -397,7 +397,7 @@ You can customize the `body` section of `httpPost` hook to change the visual rep Let's create the above `BackupBatch` object, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/hooks/batch-backup/wordpress-backup.yaml backupbatch.stash.appscode.com/wordpress-backup created ``` @@ -406,7 +406,7 @@ backupbatch.stash.appscode.com/wordpress-backup created If everything goes well, Stash will create a CronJob with the schedule specified in `spec.schedule` field of the 
`BackupBatch` CR. -```console +```bash $ kubectl get cronjob -n demo NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE stash-backup-wordperss-backup */3 * * * * False 0 27s @@ -418,7 +418,7 @@ The `stash-backup-wordpress-backup` CronJob will trigger a backup on each schedu Wait for a schedule to appear. Run the following command to watch `BackupSession` CR, -```console +```bash $ kubectl get backupsession -n demo -w NAME INVOKER-TYPE INVOKER-NAME PHASE AGE wordpress-backup-1579526461 BackupBatch wordpress-backup Running 0s @@ -433,7 +433,7 @@ Here, the phase `Succeeded` means that the backup process has been completed suc Once a backup is completed, Stash will update the respective `Repository` CR to reflect the backup completion. Check that the repository `gcs-repo` has been updated by the following command, -```console +```bash $ kubectl get repository -n demo gcs-repo NAME INTEGRITY SIZE SNAPSHOT-COUNT LAST-SUCCESSFUL-BACKUP AGE gcs-repo true 2 72s 18m @@ -453,7 +453,7 @@ Now, go to your slack channel. You should see that Stash has sent notification b To cleanup the Kubernetes resources created by this tutorial, run: -```console +```bash kubectl -n demo delete backupbatch wordpress-backup kubectl -n demo delete repository gcs-repo kubectl -n demo delete secret gcs-secret diff --git a/docs/guides/latest/monitoring/builtin.md b/docs/guides/latest/monitoring/builtin.md index 28e24d57..9603c5d4 100644 --- a/docs/guides/latest/monitoring/builtin.md +++ b/docs/guides/latest/monitoring/builtin.md @@ -22,7 +22,7 @@ At first, you need to have a Kubernetes cluster, and the kubectl command-line to To keep Prometheus resources isolated, we are going to use a separate namespace to deploy Prometheus server. -```console +```bash $ kubectl create ns monitoring namespace/monitoring created ``` @@ -31,7 +31,7 @@ namespace/monitoring created Enable Prometheus monitoring using `prometheus.io/builtin` agent while installing Stash. 
To know details about how to enable monitoring see [here](/docs/guides/v1alpha1/monitoring/overview.md#how-to-enable-monitoring). Here, we are going to enable monitoring for `backup`, `restore` and `operator` metrics using Helm 3. -```console +```bash $ helm install stash-operator appscode/stash --version {{< param "info.version" >}} \ --namespace kube-system \ --set monitoring.agent=prometheus.io/builtin \ @@ -110,7 +110,7 @@ We have deployed Stash in `kube-system` namespace. Stash exports operator metric Let's check `stash-apiserver-cert` certificate has been created in `monitoring` namespace. -```console +```bash $ kubectl get secret -n monitoring -l=app=stash NAME TYPE DATA AGE stash-apiserver-cert kubernetes.io/tls 2 2m21s @@ -120,7 +120,7 @@ stash-apiserver-cert kubernetes.io/tls 2 2m21s If you are using a RBAC enabled cluster, you have to give necessary RBAC permissions for Prometheus. Let's create necessary RBAC stuffs for Prometheus, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/monitoring/builtin/prom-rbac.yaml clusterrole.rbac.authorization.k8s.io/stash-prometheus-server created serviceaccount/stash-prometheus-server created @@ -245,7 +245,7 @@ Also note that, we have provided a bearer-token file through `bearer_token_file` Let's create the ConfigMap we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/monitoring/builtin/prom-config.yaml configmap/stash-prometheus-server-conf created ``` @@ -306,7 +306,7 @@ Notice that, we have mounted `stash-apiserver-cert` secret as a volume at `/etc/ Now, let's create the deployment, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/monitoring/builtin/prom-deployment.yaml deployment.apps/stash-prometheus-server created ``` @@ -315,7 +315,7 @@ 
deployment.apps/stash-prometheus-server created Prometheus server is running on port `9090`. We are going to use [port forwarding](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/) to access Prometheus dashboard. Run following command on a separate terminal, -```console +```bash $ kubectl port-forward -n monitoring stash-prometheus-server-9ddbf79b6-8l6hk 9090 Forwarding from 127.0.0.1:9090 -> 9090 Forwarding from [::1]:9090 -> 9090 @@ -343,7 +343,7 @@ A screenshot that shows Prometheus metrics send by Stash backup and restore proc To cleanup the Kubernetes resources created by this tutorial, run: -```console +```bash kubectl delete clusterrole stash-prometheus-server kubectl delete clusterrolebinding stash-prometheus-server diff --git a/docs/guides/latest/monitoring/coreos.md b/docs/guides/latest/monitoring/coreos.md index 8239325f..aa7f2e9f 100644 --- a/docs/guides/latest/monitoring/coreos.md +++ b/docs/guides/latest/monitoring/coreos.md @@ -22,7 +22,7 @@ CoreOS [prometheus-operator](https://github.com/coreos/prometheus-operator) prov - To keep Prometheus resources isolated, we are going to use a separate namespace to deploy Prometheus operator and respective resources. - ```console + ```bash $ kubectl create ns monitoring namespace/monitoring created ``` @@ -35,7 +35,7 @@ Enable Prometheus monitoring using `prometheus.io/coreos-operator` agent while i Here, we are going to enable monitoring for both `backup`, `restore` and `operator` metrics using Helm 3. -```console +```bash $ helm install stash-operator appscode/stash --version {{< param "info.version" >}} \ --namespace kube-system \ --set monitoring.agent=prometheus.io/coreos-operator \ @@ -90,7 +90,7 @@ Stash exports operator metrics via TLS secured `api` endpoint. So, Prometheus se Let's check secret `stash-apiserver-cert` has been created in monitoring namespace. 
-```console +```bash $ kubectl get secret -n monitoring -l=app=stash NAME TYPE DATA AGE stash-apiserver-cert kubernetes.io/tls 2 31m @@ -135,7 +135,7 @@ Here, `spec.serviceMonitorSelector` is used to select the `ServiceMonitor` crd t Let's create the `Prometheus` object we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/monitoring/coreos/prometheus.yaml prometheus.monitoring.coreos.com/prometheus created ``` @@ -144,7 +144,7 @@ Prometheus operator watches for `Prometheus` crd. Once a `Prometheus` crd is cre Let's check StatefulSet has been created, -```console +```bash $ kubectl get statefulset -n monitoring NAME DESIRED CURRENT AGE prometheus-prometheus 1 1 4m @@ -152,7 +152,7 @@ prometheus-prometheus 1 1 4m Check StatefulSet's pod is running, -```console +```bash $ kubectl get pod prometheus-prometheus-0 -n monitoring NAME READY STATUS RESTARTS AGE prometheus-prometheus-0 2/2 Running 0 6m @@ -164,7 +164,7 @@ Now, we are ready to access Prometheus dashboard. Prometheus server is running on port `9090`. We are going to use [port forwarding](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/) to access Prometheus dashboard. Run following command on a separate terminal, -```console +```bash $ kubectl port-forward -n monitoring prometheus-prometheus-0 9090 Forwarding from 127.0.0.1:9090 -> 9090 Forwarding from [::1]:9090 -> 9090 @@ -181,7 +181,7 @@ Now, we can access the dashboard at `localhost:9090`. 
Open [http://localhost:909 To cleanup the Kubernetes resources created by this tutorial, run: -```console +```bash # cleanup Prometheus resources kubectl delete -n monitoring prometheus prometheus kubectl delete -n monitoring secret stash-apiserver-cert diff --git a/docs/guides/latest/monitoring/overview.md b/docs/guides/latest/monitoring/overview.md index 7ccdac34..e8b87412 100644 --- a/docs/guides/latest/monitoring/overview.md +++ b/docs/guides/latest/monitoring/overview.md @@ -148,7 +148,7 @@ You have to provides these flags while installing or upgrading or updating Stash **Helm 3:** -```console +```bash $ helm install stash-operator appscode/stash --version {{< param "info.version" >}} \ --namespace kube-system \ --set monitoring.agent=prometheus.io/coreos-operator \ @@ -160,7 +160,7 @@ $ helm install stash-operator appscode/stash --version {{< param "info.version" **Helm 2:** -```console +```bash $ helm install appscode/stash --name stash-operator --version {{< param "info.version" >}} \ --namespace kube-system \ --set monitoring.agent=prometheus.io/coreos-operator \ @@ -172,7 +172,7 @@ $ helm install appscode/stash --name stash-operator --version {{< param "info.ve **YAML (with Helm 3):** -```console +```bash $ helm template stash-operator appscode/stash --version {{< param "info.version" >}} \ --namespace kube-system \ --no-hooks \ diff --git a/docs/guides/latest/platforms/aks.md b/docs/guides/latest/platforms/aks.md index 1dfdec6e..49bb193b 100644 --- a/docs/guides/latest/platforms/aks.md +++ b/docs/guides/latest/platforms/aks.md @@ -31,7 +31,7 @@ This guide will show you how to use Stash to backup and restore volumes of a Kub To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. -```console +```bash $ kubectl create ns demo namespace/demo created ``` @@ -40,7 +40,7 @@ namespace/demo created Stash works with any `StorageClass`. 
Check available `StorageClass` in your cluster using the following command: -```console +```bash $ kubectl get storageclass -n demo NAME PROVISIONER AGE standard kubernetes.io/azure-disk 3m @@ -79,7 +79,7 @@ spec: Let's create the PVC we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/platforms/aks/pvc.yaml persistentvolumeclaim/stash-sample-data created ``` @@ -127,14 +127,14 @@ spec: Let's create the Deployment we have shown above. -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/platforms/aks/deployment.yaml deployment.apps/stash-demo created ``` Now, wait for the pods of the Deployment to go into the `Running` state. -```console +```bash $ kubectl get pod -n demo NAME READY STATUS RESTARTS AGE stash-demo-8685fb5478-4psw8 1/1 Running 0 4m47s @@ -144,7 +144,7 @@ stash-demo-8685fb5478-fjggh 1/1 Running 0 4m47s To verify that the sample data has been created in `/source/data` directory, use the following command: -```console +```bash $ kubectl exec -n demo stash-demo-8685fb5478-4psw8 -- cat /source/data/data.txt sample_data ``` @@ -157,7 +157,7 @@ We are going to store our backed up data into an [Azure Blob Container](https:// Let's create a secret called `azure-secret` with access credentials to our desired [Azure Blob Container](https://azure.microsoft.com/en-us/services/storage/blobs/), -```console +```bash $ echo -n 'changeit' >RESTIC_PASSWORD $ echo -n '' > AZURE_ACCOUNT_NAME $ echo -n '' > AZURE_ACCOUNT_KEY @@ -170,7 +170,7 @@ secret/azure-secret created Verify that the secret has been created successfully, -```console +```bash $ kubectl get secret -n demo azure-secret -o yaml ``` @@ -211,7 +211,7 @@ spec: Let's create the Repository we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" 
>}}/docs/examples/guides/latest/platforms/aks/repository.yaml repository.stash.appscode.com/azure-repo created ``` @@ -261,7 +261,7 @@ Here, Let's create the `BackupConfiguration` crd we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/platforms/aks/backupconfiguration.yaml backupconfiguration.stash.appscode.com/deployment-backup created ``` @@ -270,7 +270,7 @@ backupconfiguration.stash.appscode.com/deployment-backup created If everything goes well, Stash will inject a sidecar container into the `stash-demo` Deployment to take backup of `/source/data` directory. Let’s check that the sidecar has been injected successfully, -```console +```bash $ kubectl get pod -n demo NAME READY STATUS RESTARTS AGE stash-demo-5bdc545845-45smg 2/2 Running 0 45s @@ -373,7 +373,7 @@ It will also create a `CronJob` with the schedule specified in `spec.schedule` f Verify that the `CronJob` has been created using the following command, -```console +```bash $ kubectl get cronjob -n demo NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE deployment-backup */1 * * * * False 0 35s 64s @@ -385,7 +385,7 @@ The `deployment-backup` CronJob will trigger a backup on each schedule by creati Wait for the next schedule for backup. Run the following command to watch `BackupSession` crd, -```console +```bash $ watch -n 2 kubectl get backupsession -n demo Every 1.0s: kubectl get backupsession -n demo suaas-appscode: Mon Jun 24 10:23:08 2019 @@ -399,7 +399,7 @@ We can see from the above output that the backup session has succeeded. Now, we Once a backup is complete, Stash will update the respective `Repository` crd to reflect the backup. 
Check that the repository `azure-repo` has been updated by the following command, -```console +```bash $ kubectl get repository -n demo NAME INTEGRITY SIZE SNAPSHOT-COUNT LAST-SUCCESSFUL-BACKUP AGE azure-repo true 8 B 1 2s 1m10s @@ -424,14 +424,14 @@ At first, let's stop taking any further backup of the old Deployment so that no Let's pause the `deployment-backup` BackupConfiguration, -```console +```bash $ kubectl patch backupconfiguration -n demo deployment-backup --type="merge" --patch='{"spec": {"paused": true}}' backupconfiguration.stash.appscode.com/deployment-backup patched ``` Now, wait for a moment. Stash will pause the BackupConfiguration. Verify that the BackupConfiguration has been paused, -```console +```bash $ kubectl get backupconfiguration -n demo NAME TASK SCHEDULE PAUSED AGE deployment-backup */1 * * * * true 26m @@ -496,7 +496,7 @@ spec: Let's create the Deployment and PVC we have shown above. -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/platforms/aks/recovered_deployment.yaml persistentvolumeclaim/restore-pvc created deployment.apps/stash-recovered created @@ -540,7 +540,7 @@ Here, Let's create the `RestoreSession` crd we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/platforms/aks/restoresession.yaml restoresession.stash.appscode.com/deployment-restore created ``` @@ -626,7 +626,7 @@ Notice the `Init-Containers` section. We can see that the init-container `stash- Now, wait for the restore process to complete. 
You can watch the `RestoreSession` phase using the following command, -```console +```bash $ watch -n 2 kubectl get restoresession -n demo Every 3.0s: kubectl get restore -n demo suaas-appscode: Thu Jul 18 12:02:10 2019 @@ -642,7 +642,7 @@ So, we can see from the output of the above command that the restore process has In this section, we are going to verify that the desired data has been restored successfully. At first, check if the `stash-recovered` pod of the Deployment has gone into `Running` state by the following command, -```console +```bash $ kubectl get pod -n demo NAME READY STATUS RESTARTS AGE stash-recovered-6669c8bcfd-7pz9m 1/1 Running 0 76s @@ -652,7 +652,7 @@ stash-recovered-6669c8bcfd-qkllx 1/1 Running 0 51s Verify that the sample data has been restored in `/restore/data` directory of the `stash-recovered` pod of the Deployment using the following command, -```console +```bash $ kubectl exec -n demo stash-recovered-6669c8bcfd-7pz9m -- cat /restore/data/data.txt sample_data ``` @@ -661,7 +661,7 @@ sample_data To clean up the Kubernetes resources created by this tutorial, run: -```console +```bash kubectl delete -n demo deployment stash-demo kubectl delete -n demo deployment stash-recovered kubectl delete -n demo backupconfiguration deployment-backup diff --git a/docs/guides/latest/platforms/eks.md b/docs/guides/latest/platforms/eks.md index 2330fc13..b878e134 100644 --- a/docs/guides/latest/platforms/eks.md +++ b/docs/guides/latest/platforms/eks.md @@ -31,7 +31,7 @@ This guide will show you how to use Stash to backup and restore volumes of a Kub To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. -```console +```bash $ kubectl create ns demo namespace/demo created ``` @@ -40,7 +40,7 @@ namespace/demo created Stash works with any `StorageClass`. 
Check available `StorageClass` in your cluster using the following command: -```console +```bash $ kubectl get storageclass -n demo NAME PROVISIONER AGE standard kubernetes.io/aws-ebs 10m @@ -79,7 +79,7 @@ spec: Let's create the PVC we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/platforms/eks/pvc.yaml persistentvolumeclaim/source-pvc created ``` @@ -127,14 +127,14 @@ spec: Let's create the Deployment we have shown above. -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/platforms/eks/deployment.yaml deployment.apps/stash-demo created ``` Now, wait for the pods of the Deployment to go into the `Running` state. -```console +```bash $ kubectl get pod -n demo NAME READY STATUS RESTARTS AGE stash-demo-85b76c4849-6rmx8 1/1 Running 0 31s @@ -144,7 +144,7 @@ stash-demo-85b76c4849-wq8fs 1/1 Running 0 31s To verify that the sample data has been created in `/source/data` directory, use the following command: -```console +```bash $ kubectl exec -n demo stash-demo-85b76c4849-6rmx8 -- cat /source/data/data.txt sample_data ``` @@ -157,7 +157,7 @@ We are going to store our backed up data into an [AWS S3 Bucket](https://aws.ama Let's create a secret called `s3-secret` with access credentials to our desired [AWS S3 Bucket](https://aws.amazon.com/s3/), -```console +```bash $ echo -n 'changeit' > RESTIC_PASSWORD $ echo -n '' > AWS_ACCESS_KEY_ID $ echo -n '' > AWS_SECRET_ACCESS_KEY @@ -170,7 +170,7 @@ secret/s3-secret created Verify that the secret has been created successfully, -```console +```bash $ kubectl get secret -n demo s3-secret -o yaml ``` @@ -212,7 +212,7 @@ spec: Let's create the `Repository` we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/platforms/eks/repository.yaml 
repository.stash.appscode.com/s3-repo created ``` @@ -262,7 +262,7 @@ Here, Let's create the `BackupConfiguration` crd we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/platforms/eks/backupconfiguration.yaml backupconfiguration.stash.appscode.com/deployment-backup created ``` @@ -271,7 +271,7 @@ backupconfiguration.stash.appscode.com/deployment-backup created If everything goes well, Stash will inject a sidecar container into the `stash-demo` Deployment to take backup of `/source/data` directory. Let’s check that the sidecar has been injected successfully, -```console +```bash $ kubectl get pod -n demo NAME READY STATUS RESTARTS AGE stash-demo-55d4fd968c-b2rrc 2/2 Running 0 60s @@ -388,7 +388,7 @@ It will also create a `CronJob` with the schedule specified in `spec.schedule` f Verify that the `CronJob` has been created using the following command, -```console +```bash $ kubectl get cronjob -n demo NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE deployment-backup */1 * * * * False 0 24s 2m14s @@ -400,7 +400,7 @@ The `deployment-backup` CronJob will trigger a backup on each schedule by creati Wait for the next schedule for backup. Run the following command to watch `BackupSession` crd, -```console +```bash $ watch -n 3 kubectl get backupsession -n demo Every 3.0s: kubectl get backupsession -n demo suaas-appscode: Thu Jul 18 18:26:16 2019 @@ -414,7 +414,7 @@ We can see from the above output that the backup session has succeeded. Now, we Once a backup is complete, Stash will update the respective `Repository` crd to reflect the backup. 
Check that the repository `s3-repo` has been updated by the following command, -```console +```bash $ kubectl get repository -n demo NAME INTEGRITY SIZE SNAPSHOT-COUNT LAST-SUCCESSFUL-BACKUP AGE s3-repo true 7 B 1 3s 1m12s @@ -439,14 +439,14 @@ At first, let's stop taking any further backup of the old Deployment so that no Let's pause the `deployment-backup` BackupConfiguration, -```console +```bash $ kubectl patch backupconfiguration -n demo deployment-backup --type="merge" --patch='{"spec": {"paused": true}}' backupconfiguration.stash.appscode.com/deployment-backup patched ``` Now, wait for a moment. Stash will pause the BackupConfiguration. Verify that the BackupConfiguration has been paused, -```console +```bash $ kubectl get backupconfiguration -n demo NAME TASK SCHEDULE PAUSED AGE deployment-backup */1 * * * * true 26m @@ -511,7 +511,7 @@ spec: Let's create the Deployment and PVC we have shown above. -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/platforms/eks/recovered_deployment.yaml persistentvolumeclaim/restore-pvc created deployment.apps/stash-recovered created @@ -554,7 +554,7 @@ Here, Let's create the `RestoreSession` crd we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/platforms/eks/restoresession.yaml restoresession.stash.appscode.com/deployment-restore created ``` @@ -643,7 +643,7 @@ Notice the `Init-Containers` section. We can see that the init-container `stash- Now, wait for the restore process to complete. 
You can watch the `RestoreSession` phase using the following command, -```console +```bash $ watch -n 2 kubectl get restoresession -n demo Every 3.0s: kubectl get restoresession --all-namespaces suaas-appscode: Thu Jul 18 18:45:55 2019 @@ -659,7 +659,7 @@ So, we can see from the output of the above command that the restore process has In this section, we are going to verify that the desired data has been restored successfully. At first, check if the `stash-recovered` pod of the Deployment has gone into `Running` state by the following command, -```console +```bash $ kubectl get pod -n demo NAME READY STATUS RESTARTS AGE stash-recovered-698b4bb5cb-l2ngj 1/1 Running 0 2m25s @@ -669,7 +669,7 @@ stash-recovered-698b4bb5cb-nhlrn 1/1 Running 0 2m45s Verify that the sample data has been restored in `/restore/data` directory of the `stash-recovered` pod of the Deployment using the following command, -```console +```bash $ kubectl exec -n demo stash-recovered-698b4bb5cb-l2ngj -- cat /restore/data/data.txt sample_data ``` @@ -678,7 +678,7 @@ sample_data To clean up the Kubernetes resources created by this tutorial, run: -```console +```bash kubectl delete -n demo deployment stash-demo kubectl delete -n demo deployment stash-recovered kubectl delete -n demo backupconfiguration deployment-backup diff --git a/docs/guides/latest/platforms/gke.md b/docs/guides/latest/platforms/gke.md index d1acd6f2..dd902cc1 100644 --- a/docs/guides/latest/platforms/gke.md +++ b/docs/guides/latest/platforms/gke.md @@ -31,7 +31,7 @@ This guide will show you how to use Stash to backup and restore volumes of a Kub To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. -```console +```bash $ kubectl create ns demo namespace/demo created ``` @@ -40,7 +40,7 @@ namespace/demo created Stash works with any `StorageClass`. 
Check available `StorageClass` in your cluster using the following command: -```console +```bash $ kubectl get storageclass -n demo NAME PROVISIONER AGE standard kubernetes.io/gce-pd 3m @@ -79,7 +79,7 @@ spec: Let's create the PVC we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/platforms/gke/pvc.yaml persistentvolumeclaim/source-pvc created ``` @@ -127,14 +127,14 @@ spec: Let's create the Deployment we have shown above. -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/platforms/gke/deployment.yaml deployment.apps/stash-demo created ``` Now, wait for the pods of the Deployment to go into the `Running` state. -```console +```bash $ kubectl get pod -n demo NAME READY STATUS RESTARTS AGE stash-demo-798987998b-qz6bt 1/1 Running 0 45s @@ -142,7 +142,7 @@ stash-demo-798987998b-qz6bt 1/1 Running 0 45s To verify that the sample data has been created in `/source/data` directory, use the following command: -```console +```bash $ kubectl exec -n demo stash-demo-798987998b-qz6bt -- cat /source/data/data.txt sample_data ``` @@ -157,7 +157,7 @@ We are going to store our backed up data into an [GCS bucket](https://cloud.goog Let's create a secret called `gcs-secret` with access credentials to our desired GCS bucket, -```console +```bash $ echo -n 'changeit' > RESTIC_PASSWORD $ echo -n '' > GOOGLE_PROJECT_ID $ cat /path/to/downloaded-sa-json.key > GOOGLE_SERVICE_ACCOUNT_JSON_KEY @@ -170,7 +170,7 @@ secret "gcs-secret" created Verify that the secret has been created successfully, -```console +```bash $ kubectl get secret -n demo gcs-secret -o yaml ``` @@ -211,7 +211,7 @@ spec: Let's create the `Repository` we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/platforms/gke/repository.yaml 
repository.stash.appscode.com/gcs-repo created ``` @@ -261,7 +261,7 @@ Here, Let's create the `BackupConfiguration` crd we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/platforms/gke/backupconfiguration.yaml backupconfiguration.stash.appscode.com/deployment-backup created ``` @@ -270,7 +270,7 @@ backupconfiguration.stash.appscode.com/deployment-backup created If everything goes well, Stash will inject a sidecar container into the `stash-demo` Deployment to take backup of `/source/data` directory. Let’s check that the sidecar has been injected successfully, -```console +```bash $ kubectl get pod -n demo NAME READY STATUS RESTARTS AGE stash-demo-998db88b7-gqnzq 2/2 Running 0 48s @@ -393,7 +393,7 @@ It will also create a `CronJob` with the schedule specified in `spec.schedule` f Verify that the `CronJob` has been created using the following command, -```console +```bash $ kubectl get cronjob -n demo NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE deployment-backup */1 * * * * False 0 13s 2m42s @@ -405,7 +405,7 @@ The `deployment-backup` CronJob will trigger a backup on each schedule by creati Wait for the next schedule for backup. Run the following command to watch `BackupSession` crd, -```console +```bash $ watch -n 3 kubectl get backupsession -n demo Every 3.0s: kubectl get backupsession -n-demo suaas-appscode: Mon Jul 22 15:01:21 2019 @@ -419,7 +419,7 @@ We can see from the above output that the backup session has succeeded. Now, we Once a backup is complete, Stash will update the respective `Repository` crd to reflect the backup. 
Check that the repository `gcs-repo` has been updated by the following command, -```console +```bash $ kubectl get repository -n demo NAME INTEGRITY SIZE SNAPSHOT-COUNT LAST-SUCCESSFUL-BACKUP AGE gcs-repo true 8 B 1 3s 1m15s @@ -444,13 +444,13 @@ At first, let's stop taking any further backup of the old Deployment so that no Let's pause the `deployment-backup` BackupConfiguration, -```console +```bash $ kubectl patch backupconfiguration -n demo deployment-backup --type="merge" --patch='{"spec": {"paused": true}}' backupconfiguration.stash.appscode.com/deployment-backup patched ``` Now, wait for a moment. Stash will pause the BackupConfiguration. Verify that the BackupConfiguration has been paused, -```console +```bash $ kubectl get backupconfiguration -n demo NAME TASK SCHEDULE PAUSED AGE deployment-backup */1 * * * * true 26m @@ -515,7 +515,7 @@ spec: Let's create the Deployment and PVC we have shown above. -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/platforms/gke/recovered_deployment.yaml persistentvolumeclaim/restore-pvc created deployment.apps/stash-recovered created @@ -559,7 +559,7 @@ Here, Let's create the `RestoreSession` crd we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/platforms/gke/restoresession.yaml restoresession.stash.appscode.com/deployment-restore created ``` @@ -645,7 +645,7 @@ Notice the `Init-Containers` section. We can see that the init-container `stash- Now, wait for the restore process to complete. 
You can watch the `RestoreSession` phase using the following command, -```console +```bash $ watch -n 2 kubectl get restoresession -n demo Every 3.0s: kubectl get restoresession --all-namespaces suaas-appscode: Mon Jul 22 18:17:26 2019 @@ -661,7 +661,7 @@ So, we can see from the output of the above command that the restore process has In this section, we are going to verify that the desired data has been restored successfully. At first, check if the `stash-recovered` pod of the Deployment has gone into `Running` state by the following command, -```console +```bash $ kubectl get pod -n demo NAME READY STATUS RESTARTS AGE stash-recovered-7876d7bbbb-w6t8f 1/1 Running 0 4m58s @@ -669,7 +669,7 @@ stash-recovered-7876d7bbbb-w6t8f 1/1 Running 0 4m58s Verify that the sample data has been restored in `/restore/data` directory of the `stash-recovered` pod of the Deployment using the following command, -```console +```bash $ kubectl exec -n demo stash-recovered-7876d7bbbb-w6t8f -- cat /restore/data/data.txt sample_data ``` @@ -678,7 +678,7 @@ sample_data To clean up the Kubernetes resources created by this tutorial, run: -```console +```bash kubectl delete -n demo deployment stash-demo kubectl delete -n demo deployment stash-recovered kubectl delete -n demo backupconfiguration deployment-backup diff --git a/docs/guides/latest/platforms/minio.md b/docs/guides/latest/platforms/minio.md index 22ad93d3..662c4553 100644 --- a/docs/guides/latest/platforms/minio.md +++ b/docs/guides/latest/platforms/minio.md @@ -31,7 +31,7 @@ Minio is an open-source object storage server compatible with [AWS S3](https://a To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. -```console +```bash $ kubectl create ns demo namespace/demo created ``` @@ -40,7 +40,7 @@ namespace/demo created Stash works with any `StorageClass`. 
Check available `StorageClass` in your cluster using the following command: -```console +```bash $ kubectl get storageclass -n demo NAME PROVISIONER AGE standard (default) k8s.io/minikube-hostpath 130m @@ -79,7 +79,7 @@ spec: Let's create the PVC we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/platforms/minio/pvc.yaml persistentvolumeclaim/source-pvc created ``` @@ -127,14 +127,14 @@ spec: Let's create the Deployment we have shown above. -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/platforms/minio/deployment.yaml deployment.apps/stash-demo created ``` Now, wait for the pods of the Deployment to go into the `Running` state. -```console +```bash $ kubectl get pod -n demo NAME READY STATUS RESTARTS AGE stash-demo-69f9ffbbf7-6wwtr 1/1 Running 0 60s @@ -144,7 +144,7 @@ stash-demo-69f9ffbbf7-q8qld 1/1 Running 0 60s To verify that the sample data has been created in `/source/data` directory, use the following command: -```console +```bash $ kubectl exec -n demo stash-demo-69f9ffbbf7-6wwtr -- cat /source/data/data.txt sample_data ``` @@ -157,7 +157,7 @@ We are going to store our backed up data into an [Minio Bucket](https://min.io/) Let's create a secret called `minio-secret` with access credentials to our desired [Minio bucket](https://min.io/), -```console +```bash $ echo -n 'changeit' > RESTIC_PASSWORD $ echo -n '' > AWS_ACCESS_KEY_ID $ echo -n '' > AWS_SECRET_ACCESS_KEY @@ -172,7 +172,7 @@ secret/minio-secret created Verify that the secret has been created successfully, -```console +```bash $ kubectl get secret -n demo minio-secret -o yaml ``` @@ -215,7 +215,7 @@ spec: Let's create the `Repository` we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" 
>}}/docs/examples/guides/latest/platforms/minio/repository.yaml repository.stash.appscode.com/minio-repo created ``` @@ -265,7 +265,7 @@ Here, Let's create the `BackupConfiguration` crd we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/platforms/minio/backupconfiguration.yaml backupconfiguration.stash.appscode.com/deployment-backup created ``` @@ -274,7 +274,7 @@ backupconfiguration.stash.appscode.com/deployment-backup created If everything goes well, Stash will inject a sidecar container into the `stash-demo` Deployment to take backup of `/source/data` directory. Let’s check that the sidecar has been injected successfully, -```console +```bash $ kubectl get pod -n demo NAME READY STATUS RESTARTS AGE stash-demo-6548cf5cc-7qx9d 2/2 Running 0 42s @@ -404,7 +404,7 @@ It will also create a `CronJob` with the schedule specified in `spec.schedule` f Verify that the `CronJob` has been created using the following command, -```console +```bash $ kubectl get cronjob -n demo NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE deployment-backup */1 * * * * False 0 13s 1m50s @@ -416,7 +416,7 @@ The `deployment-backup` CronJob will trigger a backup on each schedule by creati Wait for the next schedule for backup. Run the following command to watch `BackupSession` crd, -```console +```bash $ watch -n 3 kubectl get backupsession -n demo Every 3.0s: kubectl get backupsession -n demo suaas-appscode: Mon Jul 22 15:01:21 2019 @@ -430,7 +430,7 @@ We can see from the above output that the backup session has succeeded. Now, we Once a backup is complete, Stash will update the respective `Repository` crd to reflect the backup. 
Check that the repository `minio-repo` has been updated by the following command, -```console +```bash $ kubectl get repository -n demo NAME INTEGRITY SIZE SNAPSHOT-COUNT LAST-SUCCESSFUL-BACKUP AGE minio-repo true 32 B 2 3m40s 5m18s @@ -455,14 +455,14 @@ At first, let's stop taking any further backup of the old Deployment so that no Let's pause the `deployment-backup` BackupConfiguration, -```console +```bash $ kubectl patch backupconfiguration -n demo deployment-backup --type="merge" --patch='{"spec": {"paused": true}}' backupconfiguration.stash.appscode.com/deployment-backup patched ``` Now, wait for a moment. Stash will pause the BackupConfiguration. Verify that the BackupConfiguration has been paused, -```console +```bash $ kubectl get backupconfiguration -n demo NAME TASK SCHEDULE PAUSED AGE deployment-backup */1 * * * * true 26m @@ -527,7 +527,7 @@ spec: Let's create the Deployment and PVC we have shown above. -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/platforms/minio/recovered_deployment.yaml persistentvolumeclaim/restore-pvc created deployment.apps/stash-recovered created @@ -570,7 +570,7 @@ Here, Let's create the `RestoreSession` crd we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/platforms/minio/restoresession.yaml restoresession.stash.appscode.com/deployment-restore created ``` @@ -658,7 +658,7 @@ Notice the `Init-Containers` section. We can see that the init-container `stash- Now, wait for the restore process to complete. 
You can watch the `RestoreSession` phase using the following command, -```console +```bash $ watch -n 2 kubectl get restoresession -n demo Every 3.0s: kubectl get restoresession --all-namespaces suaas-appscode: Mon Jul 22 18:17:26 2019 @@ -674,7 +674,7 @@ So, we can see from the output of the above command that the restore process has In this section, we are going to verify that the desired data has been restored successfully. At first, check if the `stash-recovered` pod of the Deployment has gone into `Running` state by the following command, -```console +```bash $ kubectl get pod -n demo NAME READY STATUS RESTARTS AGE stash-recovered-6f5c46fdbf-lfthv 1/1 Running 0 2m39s @@ -684,7 +684,7 @@ stash-recovered-6f5c46fdbf-vn2z8 1/1 Running 0 2m35s Verify that the sample data has been restored in `/restore/data` directory of the `stash-recovered` pod of the Deployment using the following command, -```console +```bash $ kubectl exec -n demo stash-recovered-6f5c46fdbf-s7rrq -- cat /restore/data/data.txt sample_data ``` @@ -693,7 +693,7 @@ sample_data To clean up the Kubernetes resources created by this tutorial, run: -```console +```bash kubectl delete -n demo deployment stash-demo kubectl delete -n demo deployment stash-recovered kubectl delete -n demo backupconfiguration deployment-backup diff --git a/docs/guides/latest/platforms/rook.md b/docs/guides/latest/platforms/rook.md index a77fbf0d..ea4dad1f 100644 --- a/docs/guides/latest/platforms/rook.md +++ b/docs/guides/latest/platforms/rook.md @@ -31,7 +31,7 @@ This guide will show you how to use Stash to backup and restore volumes of a Kub To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. -```console +```bash $ kubectl create ns demo namespace/demo created ``` @@ -42,7 +42,7 @@ namespace/demo created [Ceph Block Storage](https://rook.io/docs/rook/v1.0/ceph-block.html) allows mounting Rook storage into pod using a PersistentVolumeClaim. 
In order to do that, we have to create a PersistentVolumeClaim with `rook-ceph-block`[StorageClass](https://kubernetes.io/docs/concepts/storage/storage-classes/). Verify the StorageClass exist by the following command: -```console +```bash $ kubectl get storageclass NAME PROVISIONER AGE rook-ceph-block ceph.rook.io/block 89m @@ -78,7 +78,7 @@ spec: Let's create the PVC we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/platforms/rook/pvc.yaml persistentvolumeclaim/source-pvc created ``` @@ -130,14 +130,14 @@ spec: Let's create the Deployment we have shown above. -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/platforms/rook/deployment.yaml deployment.apps/stash-demo created ``` Now, wait for the pods of the Deployment to go into the `Running` state. -```console +```bash $ kubectl get pod -n demo NAME READY STATUS RESTARTS AGE stash-demo-69f9ffbbf7-98lth 1/1 Running 0 13s @@ -145,7 +145,7 @@ stash-demo-69f9ffbbf7-98lth 1/1 Running 0 13s To verify that the sample data has been created in `/source/data` directory, use the following command: -```console +```bash $ kubectl exec -n demo stash-demo-69f9ffbbf7-98lth -- cat /source/data/data.txt sample_data ``` @@ -158,7 +158,7 @@ We are going to store our backed up data into an [Ceph Storage Bucket](https://r Let's create a secret called `rook-secret` with access credentials to our desired [Ceph Storage Bucket](https://rook.io/docs/rook/v1.0/ceph-storage.html), -```console +```bash $ echo -n 'changeit' > RESTIC_PASSWORD $ echo -n '' > AWS_ACCESS_KEY_ID $ echo -n '' > AWS_SECRET_ACCESS_KEY @@ -171,7 +171,7 @@ secret/rook-secret created Verify that the secret has been created successfully, -```console +```bash $ kubectl get secret -n demo rook-secret -o yaml ``` @@ -213,7 +213,7 @@ spec: Let's create the `Repository` we have shown above, 
-```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/platforms/rook/repository.yaml repository.stash.appscode.com/rook-repo created ``` @@ -263,7 +263,7 @@ Here, Let's create the `BackupConfiguration` crd we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/platforms/rook/backupconfiguration.yaml backupconfiguration.stash.appscode.com/deployment-backup created ``` @@ -272,7 +272,7 @@ backupconfiguration.stash.appscode.com/deployment-backup created If everything goes well, Stash will inject a sidecar container into the `stash-demo` Deployment to take backup of `/source/data` directory. Let’s check that the sidecar has been injected successfully, -```console +```bash $ kubectl get pod -n demo NAME READY STATUS RESTARTS AGE stash-demo-76d78d8966-nbkrl 2/2 Running 0 39s @@ -406,7 +406,7 @@ It will also create a `CronJob` with the schedule specified in `spec.schedule` f Verify that the `CronJob` has been created using the following command, -```console +```bash $ kubectl get cronjob -n demo NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE deployment-backup */1 * * * * False 0 13s 1m50s @@ -418,7 +418,7 @@ The `deployment-backup` CronJob will trigger a backup on each schedule by creati Wait for the next schedule for backup. Run the following command to watch `BackupSession` crd, -```console +```bash $ watch -n 3 kubectl get backupsession -n demo Every 3.0s: kubectl get backupsession -n demo suaas-appscode: Mon Jul 22 15:01:21 2019 @@ -432,7 +432,7 @@ We can see from the above output that the backup session has succeeded. Now, we Once a backup is complete, Stash will update the respective `Repository` crd to reflect the backup. 
Check that the repository `rook-repo` has been updated by the following command, -```console +```bash $ kubectl get repository -n demo NAME INTEGRITY SIZE SNAPSHOT-COUNT LAST-SUCCESSFUL-BACKUP AGE rook-repo true 30 B 2 3m10s 5m20s @@ -450,14 +450,14 @@ At first, let's stop taking any further backup of the old Deployment so that no Let's pause the `deployment-backup` BackupConfiguration, -```console +```bash $ kubectl patch backupconfiguration -n demo deployment-backup --type="merge" --patch='{"spec": {"paused": true}}' backupconfiguration.stash.appscode.com/deployment-backup patched ``` Now, wait for a moment. Stash will pause the BackupConfiguration. Verify that the BackupConfiguration has been paused, -```console +```bash $ kubectl get backupconfiguration -n demo NAME TASK SCHEDULE PAUSED AGE deployment-backup */1 * * * * true 26m @@ -526,7 +526,7 @@ spec: Let's create the Deployment and PVC we have shown above. -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/platforms/rook/recovered_deployment.yaml persistentvolumeclaim/restore-pvc created deployment.apps/stash-recovered created @@ -569,7 +569,7 @@ Here, Let's create the `RestoreSession` crd we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/platforms/rook/restoresession.yaml restoresession.stash.appscode.com/deployment-restore created ``` @@ -659,7 +659,7 @@ Notice the `Init-Containers` section. We can see that the init-container `stash- Now, wait for the restore process to complete. 
You can watch the `RestoreSession` phase using the following command, -```console +```bash $ watch -n 2 kubectl get restoresession -n demo Every 3.0s: kubectl get restoresession --all-namespaces suaas-appscode: Mon Jul 28 18:17:22 2019 @@ -675,7 +675,7 @@ So, we can see from the output of the above command that the restore process has In this section, we are going to verify that the desired data has been restored successfully. At first, check if the `stash-recovered` pod of the Deployment has gone into `Running` state by the following command, -```console +```bash $ kubectl get pod -n demo NAME READY STATUS RESTARTS AGE stash-recovered-5c59587895-76tsx 1/1 Running 0 73s @@ -683,7 +683,7 @@ stash-recovered-5c59587895-76tsx 1/1 Running 0 73s Verify that the sample data has been restored in `/restore/data` directory of the `stash-recovered` pod of the Deployment using the following command, -```console +```bash $ kubectl exec -n demo stash-recovered-5c59587895-76tsx -- cat /restore/data/data.txt sample_data ``` @@ -692,7 +692,7 @@ sample_data To clean up the Kubernetes resources created by this tutorial, run: -```console +```bash kubectl delete -n demo deployment stash-demo kubectl delete -n demo deployment stash-recovered kubectl delete -n demo backupconfiguration deployment-backup diff --git a/docs/guides/latest/volumes/pvc.md b/docs/guides/latest/volumes/pvc.md index b2ad7add..e237f1cd 100644 --- a/docs/guides/latest/volumes/pvc.md +++ b/docs/guides/latest/volumes/pvc.md @@ -34,7 +34,7 @@ This guide will show you how to backup a stand-alone PersistentVolumeClaim (PVC) To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. -```console +```bash $ kubectl create ns demo namespace/demo created ``` @@ -47,7 +47,7 @@ Stash uses a `Function-Task` model to backup stand-alone volume. 
When you instal Let's verify that Stash has created the necessary `Function` to backup/restore PVC by the following command, -```console +```bash $ kubectl get function NAME AGE pvc-backup 117m @@ -57,7 +57,7 @@ update-status 117m Also, verify that the necessary `Task` has been created, -```console +```bash $ kubectl get task NAME AGE pvc-backup 118m @@ -95,7 +95,7 @@ Notice the `metadata.labels` section. Here, we have added `app: nfs-demo` label. Let's create the PV we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/volumes/pv.yaml persistentvolume/nfs-pv created ``` @@ -128,14 +128,14 @@ Also, notice the `spec.selector` section. We have specified `app: nfs-demo` labe Let's create the PVC we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/volumes/pvc.yaml persistentvolumeclaim/nfs-pvc created ``` Verify that the PVC has bounded with our desired PV, -```console +```bash $ kubectl get pvc -n demo nfs-pvc NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE nfs-pvc Bound nfs-pv 1Gi RWX 32s @@ -174,14 +174,14 @@ Here, we have mounted `pod-1/data` directory of the `nfs-pvc` into `/sample/data Let's deploy the pod we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/volumes/pod-1.yaml pod/demo-pod-1 created ``` Verify that the sample data has been generated into `/sample/data/` directory, -```console +```bash $ kubectl exec -n demo demo-pod-1 cat /sample/data/hello.txt hello from pod 1. 
``` @@ -213,14 +213,14 @@ Now, we have mounted `pod-2/data` directory of the `nfs-pvc` into `/sample/data` Let's create the pod we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/volumes/pod-2.yaml pod/demo-pod-2 created ``` Verify that the sample data has been generated into `/sample/data/` directory, -```console +```bash $ kubectl exec -n demo demo-pod-2 cat /sample/data/hello.txt hello from pod 2. ``` @@ -235,7 +235,7 @@ Now, we are going to backup the PVC `nfs-pvc` in a GCS bucket using Stash. We ha Let's create a Secret named `gcs-secret` with access credentials of our desired GCS backend, -```console +```bash $ echo -n 'changeit' > RESTIC_PASSWORD $ echo -n '' > GOOGLE_PROJECT_ID $ cat /path/to/downloaded/sa_key_file.json > GOOGLE_SERVICE_ACCOUNT_JSON_KEY @@ -268,7 +268,7 @@ spec: Let's create the `Repository` object that we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/volumes/repository.yaml repository.stash.appscode.com/gcs-repo created ``` @@ -309,7 +309,7 @@ Here, Let's create the `BackupConfiguration` object that we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/volumes/backupconfiguration.yaml backupconfiguration.stash.appscode.com/nfs-pvc-backup created ``` @@ -320,7 +320,7 @@ If everything goes well, Stash will create a CronJob to trigger backup periodica Verify that Stash has created a CronJob to trigger a periodic backup of the targeted PVC by the following command, -```console +```bash $ kubectl get cronjob -n demo NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE nfs-pvc-backup */5 * * * * False 0 28s @@ -330,7 +330,7 @@ nfs-pvc-backup */5 * * * * False 0 28s Now, wait for the next backup schedule. 
You can watch for `BackupSession` crd using the following command, -```console +```bash $ watch -n 1 kubectl get backupsession -n demo -l=stash.appscode.com/backup-configuration=nfs-pvc-backup Every 1.0s: kubectl get backupsession -n demo -l=stash.appscode.com/backup-... workstation: Wed Jul 3 19:53:13 2019 @@ -347,7 +347,7 @@ When backup session is completed, Stash will update the respective `Repository` Run the following command to check if a backup snapshot has been stored in the backend, -```console +```bash $ kubectl get repository -n demo gcs-repo NAME INTEGRITY SIZE SNAPSHOT-COUNT LAST-SUCCESSFUL-BACKUP AGE gcs-repo true 80 B 1 25s 49m @@ -374,14 +374,14 @@ At first, let's stop taking any further backup of the PVC so that no backup is t Let's pause the `nfs-pvc-backup` BackupConfiguration, -```console +```bash $ kubectl patch backupconfiguration -n demo nfs-pvc-backup --type="merge" --patch='{"spec": {"paused": true}}' backupconfiguration.stash.appscode.com/nfs-pvc-backup patched ``` Now, wait for a moment. Stash will pause the BackupConfiguration. Verify that the BackupConfiguration has been paused, -```console +```bash $ kubectl get backupconfiguration -n demo nfs-pvc-backup NAME TASK SCHEDULE PAUSED AGE nfs-pvc-backup */1 * * * * true 20m @@ -395,7 +395,7 @@ At first, let's simulate a disaster scenario. 
Let's delete all the files from th Delete the data of pod `demo-pod-1`: -```console +```bash # delete data $ kubectl exec -n demo demo-pod-1 -- sh -c "rm /sample/data/*" @@ -406,7 +406,7 @@ $ kubectl exec -n demo demo-pod-1 ls /sample/data/ Delete the data of pod `demo-pod-2`: -```console +```bash # delete data $ kubectl exec -n demo demo-pod-2 -- sh -c "rm /sample/data/*" @@ -445,7 +445,7 @@ spec: Let's create the `RestoreSession` object that we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/volumes/restoresession.yaml restoresession.stash.appscode.com/nfs-pvc-restore created ``` @@ -454,7 +454,7 @@ restoresession.stash.appscode.com/nfs-pvc-restore created Now, wait for the restore process to complete. You can watch the `RestoreSession` phase using the following command, -```console +```bash $ watch -n 1 kubectl get restoresession -n demo nfs-pvc-restore Every 1.0s: kubectl get restoresession -n demo nfs-pvc-restore workstation: Wed Jul 3 20:10:52 2019 @@ -470,14 +470,14 @@ Let's verify if the deleted files have been restored successfully into the PVC. Verify that the data of `demo-pod-1` has been restored: -```console +```bash $ kubectl exec -n demo demo-pod-1 cat /sample/data/hello.txt hello from pod 1. ``` Verify that the data of `demo-pod-2` has been restored: -```console +```bash $ kubectl exec -n demo demo-pod-2 cat /sample/data/hello.txt hello from pod 2. 
``` @@ -488,7 +488,7 @@ So, we can see from the above output that the files we had deleted in **Simulate To cleanup the Kubernetes resources created by this tutorial, run: -```console +```bash kubectl delete backupconfiguration -n demo nfs-pvc-backup kubectl delete restoresession -n demo nfs-pvc-restore diff --git a/docs/guides/latest/volumesnapshot/deployment.md b/docs/guides/latest/volumesnapshot/deployment.md index c4907ff1..a9d31b50 100644 --- a/docs/guides/latest/volumesnapshot/deployment.md +++ b/docs/guides/latest/volumesnapshot/deployment.md @@ -46,7 +46,7 @@ The [volumeBindingMode](https://kubernetes.io/docs/concepts/storage/storage-clas Let's create the `StorageClass` we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/volumesnapshot/storageclass.yaml storageclass.storage.k8s.io/standard created ``` @@ -70,14 +70,14 @@ Here, Let's create the `volumeSnapshotClass` crd we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/volumesnapshot/default-volumesnapshotclass.yaml volumesnapshotclass.snapshot.storage.k8s.io/default-snapshot-class created ``` To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. -```console +```bash $ kubectl create ns demo namespace/demo created ``` @@ -124,7 +124,7 @@ spec: Create the PVCs we have shown above. -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/volumesnapshot/deployment/pvcs.yaml persistentvolumeclaim/source-data created persistentvolumeclaim/source-config created @@ -178,14 +178,14 @@ spec: Let's create the deployment we have shown above. 
-```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/volumesnapshot/deployment/deployment.yaml deployment.apps/stash-demo created ``` Now, wait for the pod of the Deployment to go into the `Running` state. -```console +```bash $ kubectl get pod -n demo NAME READY STATUS RESTARTS AGE stash-demo-7fd48dd5b4-xqv5n 1/1 Running 0 2m10s @@ -193,7 +193,7 @@ stash-demo-7fd48dd5b4-xqv5n 1/1 Running 0 2m10s Verify that the sample data has been created in `/source/data` and `/source/config` directory using the following command, -```console +```bash $ kubectl exec -n demo stash-demo-7fd48dd5b4-xqv5n -- cat /source/data/data.txt sample_data $ kubectl exec -n demo stash-demo-7fd48dd5b4-xqv5n -- cat /source/config/config.cfg @@ -239,7 +239,7 @@ Here, Let's create the `BackupConfiguration` crd we have shown above. -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/volumesnapshot/deployment/backupconfiguration.yaml backupconfiguration.stash.appscode.com/deployments-volume-snapshot created ``` @@ -250,7 +250,7 @@ If everything goes well, Stash will create a `CronJob` to take periodic snapshot Check that the `CronJob` has been created using the following command, -```console +```bash $ kubectl get cronjob -n demo NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE deployments-volume-snapshot */1 * * * * False 0 39s 2m41s @@ -262,7 +262,7 @@ The `deployments-volume-snapshot` CronJob will trigger a backup on each schedule Wait for the next schedule for backup. Run the following command to watch `BackupSession` crd, -```console +```bash $ watch -n 1 kubectl get backupsession -n demo Every 1.0s: kubectl get backupsession -n demo suaas-appscode: Tue Jun 18 18:35:41 2019 @@ -276,13 +276,13 @@ We can see above that the backup session has succeeded. 
Now, we are going to ver Once a `BackupSession` crd is created, it creates volume snapshotter `Job`. Then the `Job` creates `VolumeSnapshot` crd for the targeted PVCs.The `VolumeSnapshot` name follows the following pattern: -```console +```bash - ``` Check that the `VolumeSnapshot` has been created Successfully. -```console +```bash $ kubectl get volumesnapshot -n demo NAME AGE source-config-1563171247 1m46s @@ -291,7 +291,7 @@ source-data-1563171247 1m46s Let's find out the actual snapshot name that will be saved in the Google Cloud by the following command, -```console +```bash kubectl get volumesnapshot source-data-1563171247 -n demo -o yaml ``` @@ -338,14 +338,14 @@ At first, let's stop taking any further backup of the old Deployment so that no Let's pause the `deployments-volume-snapshot` BackupConfiguration, -```console +```bash $ kubectl patch backupconfiguration -n demo deployments-volume-snapshot --type="merge" --patch='{"spec": {"paused": true}}' backupconfiguration.stash.appscode.com/deployments-volume-snapshot patched ``` Now, wait for a moment. Stash will pause the BackupConfiguration. Verify that the BackupConfiguration has been paused, -```console +```bash $ kubectl get backupconfiguration -n demo NAME TASK SCHEDULE PAUSED AGE deployments-volume-snapshot */1 * * * * true 18m @@ -406,7 +406,7 @@ Here, Let's create the `RestoreSession` crd we have shown above. 
-```console +```bash $ kubectl create -f ./docs/examples/guides/latest/volumesnapshot/deployment/restoresession.yaml restoresession.stash.appscode.com/restore-pvc created ``` @@ -415,7 +415,7 @@ Once, you have created the `RestoreSession` crd, Stash will create a job to rest Run the following command to watch RestoreSession phase, -```console +```bash $ watch -n 1 kubectl get restore -n demo Every 1.0s: kubectl get restore -n demo suaas-appscode: Tue Jun 18 18:35:41 2019 @@ -432,7 +432,7 @@ Once the restore process is complete, we are going to see that new PVCs with the Verify that the PVCs have been created by the following command, -```console +```bash $ kubectl get pvc -n demo NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE restore-config Bound pvc-26758eda-a6ca-11e9-9f3a-42010a800050 1Gi RWO standard 30s @@ -494,14 +494,14 @@ spec: Let's create the deployment we have shown above. -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/volumesnapshot/deployment/restored-deployment.yaml deployment.apps/restore-demo created ``` Now, wait for the pod of the Deployment to go into the `Running` state. 
-```console +```bash $ kubectl get pod -n demo NAME READY STATUS RESTARTS AGE restore-demo-544db78b8b-tnzb2 1/1 Running 0 34s @@ -509,7 +509,7 @@ restore-demo-544db78b8b-tnzb2 1/1 Running 0 34s Verify that the backed up data has been restored in `/restore/data` and `/restore/config` directory using the following command, -```console +```bash $ kubectl exec -n demo restore-demo-544db78b8b-tnzb2 ls /restore/config/config.cfg config_data $ kubectl exec -n demo restore-demo-544db78b8b-tnzb2 ls /restore/data/data.txt @@ -520,7 +520,7 @@ sample_data To clean up the Kubernetes resources created by this tutorial, run: -```console +```bash kubectl delete -n demo deployment stash-demo kubectl delete -n demo deployment restore-demo kubectl delete -n demo backupconfiguration deployments-volume-snapshot diff --git a/docs/guides/latest/volumesnapshot/overview.md b/docs/guides/latest/volumesnapshot/overview.md index 37ed56fa..3d2b16b1 100644 --- a/docs/guides/latest/volumesnapshot/overview.md +++ b/docs/guides/latest/volumesnapshot/overview.md @@ -56,7 +56,7 @@ The `VolumeSnapshot` process consists of the following steps: 6. When it finds a `BackupSession` crd, it creates a volume snapshotter `Job` to take snapshot of the targeted volumes. 7. The volume snapshotter `Job` creates `VolumeSnapshot` crd for each PVC of the target and waits for the CSI driver to complete snapshotting. 
These `VolumeSnasphot` crd names follow the following format: -```console +```bash - ``` diff --git a/docs/guides/latest/volumesnapshot/pvc.md b/docs/guides/latest/volumesnapshot/pvc.md index 28efb62b..0609a071 100644 --- a/docs/guides/latest/volumesnapshot/pvc.md +++ b/docs/guides/latest/volumesnapshot/pvc.md @@ -44,7 +44,7 @@ volumeBindingMode: Immediate Let's create the `StorageClass` we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/volumesnapshot/storageclass.yaml storageclass.storage.k8s.io/standard created ``` @@ -68,14 +68,14 @@ Here, Let's create the `volumeSnapshotClass` crd we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/volumesnapshot/default-volumesnapshotclass.yaml volumesnapshotclass.snapshot.storage.k8s.io/default-snapshot-class created ``` To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. -```console +```bash $ kubectl create ns demo namespace/demo created ``` @@ -109,7 +109,7 @@ spec: Let's create the PVC we have shown above. -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/volumesnapshot/standalone-pvc/source-pvc.yaml persistentvolumeclaim/source-data created ``` @@ -144,14 +144,14 @@ spec: Let's create the Pod we have shown above. -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/volumesnapshot/standalone-pvc/source-pod.yaml pod/source-pod created ``` Now, wait for the Pod to go into the `Running` state. 
-```console +```bash $ kubectl get pod -n demo NAME READY STATUS RESTARTS AGE source-pod 1/1 Running 0 25s @@ -160,7 +160,7 @@ source-pod 1/1 Running 0 25s Verify that the sample data has been created in `/source/data` directory for `source-pod` pod using the following command, -```console +```bash $ kubectl exec -n demo source-pod -- cat /source/data/data.txt sample_data ``` @@ -204,7 +204,7 @@ Here, Let's create the `BackupConfiguration` crd we have shown above. -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/volumesnapshot/standalone-pvc/backupconfiguration.yaml backupconfiguration.stash.appscode.com/pvc-volume-snapshot created ``` @@ -215,7 +215,7 @@ If everything goes well, Stash will create a `CronJob` to take periodic snapshot Check that the `CronJob` has been created using the following command, - ```console + ```bash $ kubectl get cronjob -n demo NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE pvc-volume-snapshot */1 * * * * False 0 39s 2m41s @@ -227,7 +227,7 @@ The `pvc-volume-snapshot` CronJob will trigger a backup on each scheduled time s Wait for the next schedule for backup. Run the following command to watch `BackupSession` crd, -```console +```bash $ watch -n 1 kubectl get backupsession -n demo Every 1.0s: kubectl get backupsession -n demo suaas-appscode: Tue Jun 18 18:35:41 2019 @@ -241,13 +241,13 @@ We can see above that the backup session has succeeded. Now, we are going to ver Once a `BackupSession` crd is created, it creates volume snapshotter `Job`. Then the `Job` creates `VolumeSnapshot` crd for the targeted PVC. The `VolumeSnapshot` name follows the following pattern: -```console +```bash - ``` Check that the `VolumeSnapshot` has been created Successfully. 
-```console +```bash $ kubectl get volumesnapshot -n demo NAME AGE source-data-1563186667 1m30s @@ -255,7 +255,7 @@ source-data-1563186667 1m30s Let's find out the actual snapshot name that will be saved in the Google Cloud by the following command, -```console +```bash kubectl get volumesnapshot source-data-1563186667 -n demo -o yaml ``` @@ -302,14 +302,14 @@ At first, let's stop taking any further backup of the old PVC so that no backup Let's pause the `pvc-volume-snapshot` BackupConfiguration, -```console +```bash $ kubectl patch backupconfiguration -n demo pvc-volume-snapshot --type="merge" --patch='{"spec": {"paused": true}}' backupconfiguration.stash.appscode.com/pvc-volume-snapshot patched ``` Now, wait for a moment. Stash will pause the BackupConfiguration. Verify that the BackupConfiguration has been paused, -```console +```bash $ kubectl get backupconfiguration -n demo NAME TASK SCHEDULE PAUSED AGE pvc-volume-snapshot */1 * * * * true 22m @@ -358,7 +358,7 @@ Here, Let's create the `RestoreSession` crd we have shown above. -```console +```bash $ kubectl create -f ./docs/examples/guides/latest/volumesnapshot/standalone-pvc/restoresession.yaml restoresession.stash.appscode.com/restore-pvc created ``` @@ -367,7 +367,7 @@ Once, you have created the `RestoreSession` crd, Stash will create a job to rest Run the following command to watch RestoreSession phase, -```console +```bash $ watch -n 1 kubectl get restore -n demo Every 1.0s: kubectl get restore -n demo suaas-appscode: Tue Jun 18 19:32:35 2019 @@ -382,7 +382,7 @@ Once the restore process is complete, we are going to see that new PVC with the To verify that the PVC has been created, run by the following command, -```console +```bash $ kubectl get pvc -n demo NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE restore-data Bound pvc-c5f0e7f5-a6ec-11e9-9f3a-42010a800050 1Gi RWO standard 52s @@ -425,14 +425,14 @@ spec: Let's create the Pod we have shown above. 
-```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/volumesnapshot/standalone-pvc/restored-pod.yaml pod/restored-pod created ``` Now, wait for the Pod to go into the `Running` state. -```console +```bash $ kubectl get pod -n demo NAME READY STATUS RESTARTS AGE restored-pod 1/1 Running 0 34s @@ -440,7 +440,7 @@ restored-pod 1/1 Running 0 34s Verify that the backed up data has been restored in `/restore/data` directory for `restored-pod` pod using the following command, -```console +```bash $ kubectl exec -n demo restored-pod -- cat /restore/data/data.txt sample_data ``` @@ -449,7 +449,7 @@ sample_data To clean up the Kubernetes resources created by this tutorial, run: -```console +```bash kubectl delete -n demo pod source-pod kubectl delete -n demo pod restored-pod kubectl delete -n demo backupconfiguration pvc-volume-snapshot diff --git a/docs/guides/latest/volumesnapshot/statefulset.md b/docs/guides/latest/volumesnapshot/statefulset.md index 85f7646f..d2b87508 100644 --- a/docs/guides/latest/volumesnapshot/statefulset.md +++ b/docs/guides/latest/volumesnapshot/statefulset.md @@ -44,7 +44,7 @@ volumeBindingMode: Immediate Let's create the `StorageClass` we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/volumesnapshot/storageclass.yaml storageclass.storage.k8s.io/standard created ``` @@ -68,14 +68,14 @@ Here, Let's create the `volumeSnapshotClass` crd we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/volumesnapshot/default-volumesnapshotclass.yaml volumesnapshotclass.snapshot.storage.k8s.io/default-snapshot-class created ``` To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. 
-```console +```bash $ kubectl create ns demo namespace/demo created ``` @@ -155,7 +155,7 @@ spec: Let's create the Statefulset we have shown above. -```console +```bash $ kubectl create -f ./docs/examples/guides/latest/volumesnapshot/statefulset/statefulset.yaml service/svc created statefulset.apps/stash-demo created @@ -163,7 +163,7 @@ statefulset.apps/stash-demo created Now, wait for the pod of Statefulset to go into the `Running` state. -```console +```bash $ kubectl get pod -n demo NAME READY STATUS RESTARTS AGE stash-demo-0 1/1 Running 0 97s @@ -173,7 +173,7 @@ stash-demo-2 1/1 Running 0 39s Let's find out the PVCs created for these replicas, -```console +```bash kubectl get pvc -n demo NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE source-data-stash-demo-0 Bound pvc-760c1734-a6cc-11e9-9f3a-42010a800050 1Gi RWO standard 70s @@ -183,7 +183,7 @@ source-data-stash-demo-2 Bound pvc-9c9f542f-a6cc-11e9-9f3a-42010a800050 1 Verify that the sample data has been created in `/source/data` directory using the following command, -```console +```bash $ kubectl exec -n demo stash-demo-0 -- cat /source/data/data.txt stash-demo-0 $ kubectl exec -n demo stash-demo-1 -- cat /source/data/data.txt @@ -232,7 +232,7 @@ Here, Let's create the `BackupConfiguration` crd we have shown above. 
-```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/volumesnapshot/statefulset/backupconfiguration.yaml backupconfiguration.stash.appscode.com/statefulset-volume-snapshot created ``` @@ -243,7 +243,7 @@ If everything goes well, Stash will create a `CronJob` to take periodic snapshot Check that the `CronJob` has been created using the following command, -```console +```bash $ kubectl get cronjob -n demo NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE statefulset-volume-snapshot */1 * * * * False 0 18s @@ -255,7 +255,7 @@ The `statefulset-volume-snapshot` CronJob will trigger a backup on each schedule Wait for the next schedule for backup. Run the following command to watch `BackupSession` crd, -```console +```bash $ watch -n 1 kubectl get backupsession -n demo Every 1.0s: kubectl get backupsession -n demo suaas-appscode: Tue Jun 18 18:35:41 2019 @@ -269,13 +269,13 @@ We can see above that the backup session has succeeded. Now, we are going to ver Once a `BackupSession` crd is created, it creates volume snapshotter `Job`. Then the `Job` creates `VolumeSnapshot` crd for the targeted PVC.The `VolumeSnapshot` name follows the following pattern: -```console +```bash --- ``` Check that the `VolumeSnapshot` has been created Successfully. 
-```console +```bash $ kubectl get volumesnapshot -n demo NAME AGE source-data-stash-demo-0-1563177551 115s @@ -285,7 +285,7 @@ source-data-stash-demo-2-1563177551 115s Let's find out the actual snapshot name that will be saved in the Google Cloud by the following command, -```console +```bash kubectl get volumesnapshot source-data-stash-demo-0-1563177551 -n demo -o yaml ``` @@ -332,14 +332,14 @@ At first, let's stop taking any further backup of the old StatefulSet so that no Let's pause the `statefulset-volume-snapshot` BackupConfiguration, -```console +```bash $ kubectl patch backupconfiguration -n demo statefulset-volume-snapshot --type="merge" --patch='{"spec": {"paused": true}}' backupconfiguration.stash.appscode.com/statefulset-volume-snapshot patched ``` Now, wait for a moment. Stash will pause the BackupConfiguration. Verify that the BackupConfiguration has been paused, -```console +```bash $ kubectl get backupconfiguration -n demo NAME TASK SCHEDULE PAUSED AGE statefulset-volume-snapshot */1 * * * * true 20m @@ -384,7 +384,7 @@ Here, - `spec.target.volumeClaimTemplates`: - `metadata.name` is a template for the name of the restored PVC that will be created by Stash. You have to provide this named template to match with the desired PVC of a StatefulSet. For example, if you want to deploy a StatefulSet named `stash-demo` with `volumeClaimTemplate` name `my-volume`, the PVCs of your StatefulSet will be `my-volume-stash-demo-0`, `my-volume-stash-demo-1` and so on. In this case, you have to provide `volumeClaimTemplate` name in `RestoreSession` in the following format: - ```console + ```bash --${POD_ORDINAL} ``` @@ -394,7 +394,7 @@ Here, - `kind` is resource of the kind being referenced. Now, Kubernetes supports only `VolumeSnapshot`. - `name` is the `VolumeSnapshot` resource name. 
In `RestoreSession` crd, You must provide the name in the following format: - ```console + ```bash -${POD_ORDINAL}- ``` @@ -402,7 +402,7 @@ Here, Let's create the `RestoreSession` crd we have shown above. -```console +```bash $ kubectl create -f ./docs/examples/guides/latest/volumesnapshot/statefulset/restoresession.yaml restoresession.stash.appscode.com/restore-pvc created ``` @@ -411,7 +411,7 @@ Once, you have created the `RestoreSession` crd, Stash will create a job to rest Run the following command to watch RestoreSession phase, -```console +```bash $ watch -n 1 kubectl get restore -n demo Every 1.0s: kubectl get restore -n demo suaas-appscode: Tue Jun 18 18:35:41 2019 @@ -428,7 +428,7 @@ Once the restore process is complete, we are going to see that new PVCs with the Verify that the PVCs has been created by the following command, -```console +```bash $ kubectl get pvc -n demo NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE restore-data-restore-demo-0 Bound pvc-ed35c54d-a6dc-11e9-9f3a-42010a800050 1Gi RWO standard 13s @@ -507,7 +507,7 @@ spec: Let's create the Statefulset we have shown above. -```console +```bash $ kubectl create -f ./docs/examples/guides/latest/volumesnapshot/statefulset/restored-statefulset.yaml service/svc created statefulset.apps/restore-demo created @@ -515,7 +515,7 @@ statefulset.apps/restore-demo created Now, wait for the pod of the Statefulset to go into the `Running` state. -```console +```bash $ kubectl get pod -n demo NAME READY STATUS RESTARTS AGE restore-demo-0 1/1 Running 0 65s @@ -525,7 +525,7 @@ restore-demo-2 1/1 Running 0 26s Verify that the backed up data has been restored in `/restore/data` directory using the following command, -```console +```bash $ kubectl exec -n demo restore-demo-0 -- cat /restore/data/data.txt stash-demo-0 $ kubectl exec -n demo restore-demo-1 -- cat /restore/data/data.txt @@ -576,7 +576,7 @@ Here, Let's create the `BackupConfiguration` crd we have shown above. 
-```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/volumesnapshot/statefulset/backupconfiguration.yaml backupconfiguration.stash.appscode.com/statefulset-volume-snapshot created ``` @@ -587,7 +587,7 @@ If everything goes well, Stash will create a `CronJob` to take periodic snapshot Check that the `CronJob` has been created using the following command, -```console +```bash $ kubectl get cronjob -n demo NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE statefulset-volume-snapshot */1 * * * * False 0 18s @@ -599,7 +599,7 @@ The `statefulset-volume-snapshot` CronJob will trigger a backup on each schedule Wait for the next schedule for backup. Run the following command to watch `BackupSession` crd, -```console +```bash $ watch -n 1 kubectl get backupsession -n demo Every 1.0s: kubectl get backupsession -n demo suaas-appscode: Tue Jun 18 18:35:41 2019 @@ -613,13 +613,13 @@ We can see above that the backup session has succeeded. Now, we are going to ver Once a `BackupSession` crd is created, Stash creates a volume snapshotter `Job`. Then the `Job` creates `VolumeSnapshot` crd for the targeted PVC. The `VolumeSnapshot` name follows the following pattern: -```console +```bash - ``` Check that the `VolumeSnapshot` has been created Successfully. 
-```console +```bash $ kubectl get volumesnapshot -n demo NAME AGE source-data-stash-demo-0-1563181264 67s @@ -627,7 +627,7 @@ source-data-stash-demo-0-1563181264 67s Let's find out the actual snapshot name that will be saved in the GCP by the following command, -```console +```bash kubectl get volumesnapshot source-data-stash-demo-0-1563181264 -n demo -o yaml ``` @@ -674,14 +674,14 @@ At first, let's stop taking any further backup of the old StatefulSet so that no Let's pause the `statefulset-volume-snapshot` BackupConfiguration, -```console +```bash $ kubectl patch backupconfiguration -n demo statefulset-volume-snapshot --type="merge" --patch='{"spec": {"paused": true}}' backupconfiguration.stash.appscode.com/statefulset-volume-snapshot patched ``` Now, wait for a moment. Stash will pause the BackupConfiguration. Verify that the BackupConfiguration has been paused, -```console +```bash $ kubectl get backupconfiguration -n demo NAME TASK SCHEDULE PAUSED AGE statefulset-volume-snapshot */1 * * * * true 20m @@ -727,7 +727,7 @@ Here, Let's create the `BackupConfiguration` crd we have shown above. 
-```console +```bash $ kubectl create -f ./docs/examples/guides/latest/volumesnapshot/statefulset/restoresession.yaml restoresession.stash.appscode.com/restore-pvc created ``` @@ -736,7 +736,7 @@ Once, you have created the `RestoreSession` crd, Stash will create a job to rest Run the following command to watch RestoreSession phase, -```console +```bash $ watch -n 1 kubectl get restore -n demo Every 1.0s: kubectl get restore -n demo suaas-appscode: Tue Jun 18 18:35:41 2019 @@ -753,7 +753,7 @@ Once the restore process is complete, we are going to see that new PVCs with the check that the status of the PVCs are bound, -```console +```bash $ kubectl get pvc -n demo NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE restore-data-restore-demo-0 Bound pvc-745e0f51-a6e0-11e9-9f3a-42010a800050 1Gi RWO standard 5m23s @@ -826,7 +826,7 @@ spec: Let's create the Statefulset we have shown above. -```console +```bash $ kubectl create -f ./docs/examples/guides/latest/volumesnapshot/statefulset/restored-statefulset.yaml service/svc created statefulset.apps/restore-demo created @@ -834,7 +834,7 @@ statefulset.apps/restore-demo created Now, wait for the pod of Statefulset to go into the `Running` state. 
-```console +```bash $ kubectl get pod -n demo NAME READY STATUS RESTARTS AGE restore-demo-0 1/1 Running 0 3m9s @@ -844,7 +844,7 @@ restore-demo-2 1/1 Running 0 2m30s Verify that the backed up data has been restored in `/restore/data` directory using the following command, -```console +```bash $ kubectl exec -n demo restore-demo-0 -- cat /restore/data/data.txt stash-demo-0 $ kubectl exec -n demo restore-demo-1 -- cat /restore/data/data.txt @@ -857,7 +857,7 @@ stash-demo-0 To clean up the Kubernetes resources created by this tutorial, run: -```console +```bash kubectl delete -n demo statefulset stash-demo kubectl delete -n demo statefulset restore-demo kubectl delete -n demo backupconfiguration statefulset-volume-snapshot diff --git a/docs/guides/latest/workloads/daemonset.md b/docs/guides/latest/workloads/daemonset.md index 6ad477e8..48413720 100644 --- a/docs/guides/latest/workloads/daemonset.md +++ b/docs/guides/latest/workloads/daemonset.md @@ -30,7 +30,7 @@ This guide will show you how to use Stash to backup and restore volumes of a Dae To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. -```console +```bash $ kubectl create ns demo namespace/demo created ``` @@ -83,14 +83,14 @@ spec: Let's create the DaemonSet we have shown above. -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/workloads/daemonset/daemon.yaml daemonset.apps/stash-demo created ``` Now, wait for the pod of the DaemonSet to go into the `Running` state. 
-```console +```bash $ kubectl get pod -n demo NAME READY STATUS RESTARTS AGE stash-demo-c4nqw 1/1 Running 0 39s @@ -98,7 +98,7 @@ stash-demo-c4nqw 1/1 Running 0 39s Verify that the sample data has been created in `/source/data` directory using the following command, -```console +```bash $ kubectl exec -n demo stash-demo-c4nqw -- cat /source/data/data.txt sample_data ``` @@ -113,7 +113,7 @@ We are going to store our backed up data into a GCS bucket. We have to create a Let's create a secret called `gcs-secret` with access credentials to our desired GCS bucket, -```console +```bash $ echo -n 'changeit' > RESTIC_PASSWORD $ echo -n '' > GOOGLE_PROJECT_ID $ cat /path/to/downloaded-sa-json.key > GOOGLE_SERVICE_ACCOUNT_JSON_KEY @@ -144,7 +144,7 @@ spec: Let's create the Repository we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/workloads/daemonset/repository.yaml repository.stash.appscode.com/gcs-repo created ``` @@ -195,7 +195,7 @@ Here, Let's create the `BackupConfiguration` crd we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/workloads/daemonset/backupconfiguration.yaml backupconfiguration.stash.appscode.com/dmn-backup created ``` @@ -204,7 +204,7 @@ backupconfiguration.stash.appscode.com/dmn-backup created If everything goes well, Stash will inject a sidecar container into the `stash-demo` DaemonSet to take backup of `/source/data` directory. 
Let’s check that the sidecar has been injected successfully, -```console +```bash $ kubectl get pod -n demo NAME READY STATUS RESTARTS AGE stash-demo-6lnbp 2/2 Running 0 10s @@ -310,7 +310,7 @@ It will also create a `CronJob` with the schedule specified in `spec.schedule` f Verify that the `CronJob` has been created using the following command, -```console +```bash $ kubectl get backupconfiguration -n demo NAME TASK SCHEDULE PAUSED AGE dmn-backup */1 * * * * 3m @@ -322,7 +322,7 @@ The `dmn-backup` CronJob will trigger a backup on each schedule by creating a `B Wait for the next schedule for backup. Run the following command to watch `BackupSession` crd, -```console +```bash $ watch -n 3 kubectl get backupsession -n demo Every 3.0s: kubectl get backupsession -n demo suaas-appscode: Wed Jun 26 16:05:26 2019 @@ -336,7 +336,7 @@ We can see from the above output that the backup session has succeeded. Now, we Once a backup is complete, Stash will update the respective `Repository` crd to reflect the backup. Check that the repository `gcs-repo` has been updated by the following command, -```console +```bash $ kubectl get repository -n demo NAME INTEGRITY SIZE SNAPSHOT-COUNT LAST-SUCCESSFUL-BACKUP AGE gcs-repo true 0 B 3 47s 4m @@ -361,14 +361,14 @@ At first, let's stop taking any further backup of the old DaemonSet so that no b Let's pause the `dmn-backup` BackupConfiguration, -```console +```bash $ kubectl patch backupconfiguration -n demo dmn-backup --type="merge" --patch='{"spec": {"paused": true}}' backupconfiguration.stash.appscode.com/dmn-backup patched ``` Now, wait for a moment. Stash will pause the BackupConfiguration. Verify that the BackupConfiguration has been paused, -```console +```bash $ kubectl get backupconfiguration -n demo NAME TASK SCHEDULE PAUSED AGE dmn-backup */1 * * * * true 26m @@ -419,7 +419,7 @@ spec: Let's create the DaemonSet we have shown above. 
-```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/workloads/daemonset/recovered_daemon.yaml daemonset.apps/stash-recovered configured ``` @@ -461,7 +461,7 @@ Here, Let's create the `RestoreSession` crd we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/workloads/daemonset/restoresession.yaml restoresession.stash.appscode.com/dmn-restore created ``` @@ -547,7 +547,7 @@ Notice the `Init-Containers` section. We can see that the init-container `stash- Run the following command to watch RestoreSession phase, -```console +```bash $ watch -n 3 kubectl get restoresession -n demo Every 3.0s: kubectl get restoresession -n demo suaas-appscode: Wed Jun 26 14:28:29 2019 @@ -565,7 +565,7 @@ In this section, we are going to verify that the desired data has been restored At first, check if the `stash-recovered` pods of a DaemonSet has gone into `running` state by the following command, -```console +```bash $ kubectl get pod -n demo NAME READY STATUS RESTARTS AGE stash-recovered-dqlrb 1/1 Running 0 4m4s @@ -573,7 +573,7 @@ stash-recovered-dqlrb 1/1 Running 0 4m4s Verify that the backed up data has been restored in `/source/data` directory of the `stash-recovered` pods of a DaemonSet using the following command, -```console +```bash $ kubectl exec -n demo stash-recovered-dqlrb -- cat /source/data/data.txt sample_data ``` @@ -582,7 +582,7 @@ sample_data To clean up the Kubernetes resources created by this tutorial, run: -```console +```bash kubectl delete -n demo daemonset stash-demo kubectl delete -n demo daemonset stash-recovered kubectl delete -n demo backupconfiguration dmn-backup diff --git a/docs/guides/latest/workloads/deployment.md b/docs/guides/latest/workloads/deployment.md index 2a2ee910..cd00d607 100644 --- a/docs/guides/latest/workloads/deployment.md +++ b/docs/guides/latest/workloads/deployment.md @@ -30,7 +30,7 @@ 
This guide will show you how to use Stash to backup and restore volumes of a Dep To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. -```console +```bash $ kubectl create ns demo namespace/demo created ``` @@ -65,7 +65,7 @@ spec: Let's create the PVC we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/workloads/deployment/pvc.yaml persistentvolumeclaim/stash-sample-data created ``` @@ -113,14 +113,14 @@ spec: Let's create the Deployment we have shown above. -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/workloads/deployment/deployment.yaml deployment.apps/stash-demo created ``` Now, wait for the pods of the Deployment to go into the `Running` state. -```console +```bash $ kubectl get pod -n demo NAME READY STATUS RESTARTS AGE stash-demo-8cfcbcc89-2z6mq 1/1 Running 0 30s @@ -130,7 +130,7 @@ stash-demo-8cfcbcc89-q8xfd 1/1 Running 0 30s Verify that the sample data has been created in `/source/data` directory using the following command, -```console +```bash $ kubectl exec -n demo stash-demo-8cfcbcc89-2z6mq -- cat /source/data/data.txt sample_data ``` @@ -145,7 +145,7 @@ We are going to store our backed up data into a GCS bucket. 
We have to create a Let's create a secret called `gcs-secret` with access credentials to our desired GCS bucket, -```console +```bash $ echo -n 'changeit' > RESTIC_PASSWORD $ echo -n '' > GOOGLE_PROJECT_ID $ cat /path/to/downloaded-sa-json.key > GOOGLE_SERVICE_ACCOUNT_JSON_KEY @@ -176,7 +176,7 @@ spec: Let's create the Repository we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/workloads/deployment/repository.yaml repository.stash.appscode.com/gcs-repo created ``` @@ -227,7 +227,7 @@ Here, Let's create the `BackupConfiguration` crd we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/workloads/deployment/backupconfiguration.yaml backupconfiguration.stash.appscode.com/deployment-backup created ``` @@ -236,7 +236,7 @@ backupconfiguration.stash.appscode.com/deployment-backup created If everything goes well, Stash will inject a sidecar container into the `stash-demo` Deployment to take backup of `/source/data` directory. Let’s check that the sidecar has been injected successfully, -```console +```bash $ kubectl get pod -n demo NAME READY STATUS RESTARTS AGE stash-demo-856896bd95-4gfbh 2/2 Running 0 12s @@ -343,7 +343,7 @@ It will also create a `CronJob` with the schedule specified in `spec.schedule` f Verify that the `CronJob` has been created using the following command, -```console +```bash $ kubectl get cronjob -n demo NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE deployment-backup */1 * * * * False 0 35s 64s @@ -355,7 +355,7 @@ The `deployment-backup` CronJob will trigger a backup on each scheduled slot by Wait for the next schedule for backup. 
Run the following command to watch `BackupSession` crd, -```console +```bash $ watch -n 2 kubectl get backupsession -n demo Every 1.0s: kubectl get backupsession -n demo suaas-appscode: Mon Jun 24 10:23:08 2019 @@ -369,7 +369,7 @@ We can see from the above output that the backup session has succeeded. Now, we Once a backup is complete, Stash will update the respective `Repository` crd to reflect the backup. Check that the repository `gcs-repo` has been updated by the following command, -```console +```bash $ kubectl get repository -n demo gcs-repo NAME INTEGRITY SIZE SNAPSHOT-COUNT LAST-SUCCESSFUL-BACKUP AGE gcs-repo true 0 B 5 58s 18m @@ -394,14 +394,14 @@ At first, let's stop taking any further backup of the old Deployment so that no Let's pause the `deployment-backup` BackupConfiguration, -```console +```bash $ kubectl patch backupconfiguration -n demo deployment-backup --type="merge" --patch='{"spec": {"paused": true}}' backupconfiguration.stash.appscode.com/deployment-backup patched ``` Now, wait for a moment. Stash will pause the BackupConfiguration. Verify that the BackupConfiguration has been paused, -```console +```bash $ kubectl get backupconfiguration -n demo NAME TASK SCHEDULE PAUSED AGE deployment-backup */1 * * * * true 26m @@ -466,7 +466,7 @@ spec: Let's create the Deployment and PVC we have shown above. -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/workloads/deployment/recovered_deployment.yaml persistentvolumeclaim/demo-pvc created deployment.apps/stash-recovered created @@ -509,7 +509,7 @@ Here, Let's create the `RestoreSession` crd we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/workloads/deployment/restoresession.yaml restoresession.stash.appscode.com/deployment-restore created ``` @@ -602,7 +602,7 @@ Notice the `Init-Containers` section. 
We can see that the init-container `stash- Run the following command to watch RestoreSession phase, -```console +```bash $ watch -n 2 kubectl get restoresession -n demo Every 5.0s: kubectl get restoresession -n demo suaas-appscode: Mon Jun 24 10:33:57 2019 @@ -620,7 +620,7 @@ In this section, we are going to verify that the desired data has been restored At first, check if the `stash-recovered` pods of the Deployment has gone into `Running` state by the following command, -```console +```bash $ kubectl get pod -n demo NAME READY STATUS RESTARTS AGE stash-recovered-867688ddd5-67xr8 1/1 Running 0 21m @@ -630,7 +630,7 @@ stash-recovered-867688ddd5-zswhs 1/1 Running 0 22m Verify that the sample data has been restored in `/source/data` directory of the `stash-recovered` pods of the Deployment using the following command, -```console +```bash $ kubectl exec -n demo stash-recovered-867688ddd5-67xr8 -- cat /source/data/data.txt sample_data ``` @@ -639,7 +639,7 @@ sample_data To clean up the Kubernetes resources created by this tutorial, run: -```console +```bash kubectl delete -n demo deployment stash-demo kubectl delete -n demo deployment stash-recovered kubectl delete -n demo backupconfiguration deployment-backup diff --git a/docs/guides/latest/workloads/statefulset.md b/docs/guides/latest/workloads/statefulset.md index 80d18c84..9c61d455 100644 --- a/docs/guides/latest/workloads/statefulset.md +++ b/docs/guides/latest/workloads/statefulset.md @@ -30,7 +30,7 @@ This guide will show you how to use Stash to backup and restore volumes of a Sta To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. -```console +```bash $ kubectl create ns demo namespace/demo created ``` @@ -106,7 +106,7 @@ spec: Let's create the StatefulSet we have shown above. 
-```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/workloads/statefulset/statefulset.yaml service/headless created statefulset.apps/stash-demo created @@ -114,7 +114,7 @@ statefulset.apps/stash-demo created Now, wait for the pods of the StatefulSet to go into the `Running` state. -```console +```bash $ kubectl get pod -n demo NAME READY STATUS RESTARTS AGE stash-demo-0 1/1 Running 0 42s @@ -124,7 +124,7 @@ stash-demo-2 1/1 Running 0 36s Verify that the sample data has been generated in `/source/data` directory for `stash-demo-0` , `stash-demo-1` and `stash-demo-2` pod respectively using the following commands, -```console +```bash $ kubectl exec -n demo stash-demo-0 -- cat /source/data/data.txt stash-demo-0 $ kubectl exec -n demo stash-demo-1 -- cat /source/data/data.txt @@ -143,7 +143,7 @@ We are going to store our backed up data into a GCS bucket. We have to create a Let's create a secret called `gcs-secret` with access credentials to our desired GCS bucket, -```console +```bash $ echo -n 'changeit' > RESTIC_PASSWORD $ echo -n '' > GOOGLE_PROJECT_ID $ cat /path/to/downloaded-sa-json.key > GOOGLE_SERVICE_ACCOUNT_JSON_KEY @@ -176,7 +176,7 @@ spec: Let's create the Repository we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/workloads/statefulset/repository.yaml repository.stash.appscode.com/gcs-repo created ``` @@ -227,7 +227,7 @@ Here, Let's create the `BackupConfiguration` crd we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/workloads/statefulset/backupconfiguration.yaml backupconfiguration.stash.appscode.com/ss-backup created ``` @@ -236,7 +236,7 @@ backupconfiguration.stash.appscode.com/ss-backup created If everything goes well, Stash will inject a sidecar container into the 
`stash-demo` StatefulSet to take backup of `/source/data` directory. Let’s check that the sidecar has been injected successfully, -```console +```bash $ kubectl get pod -n demo NAME READY STATUS RESTARTS AGE stash-demo-0 2/2 Running 0 5s @@ -355,7 +355,7 @@ It will also create a `CronJob` with the schedule specified in `spec.schedule` f Verify that the `CronJob` has been created using the following command, -```console +```bash $ kubectl get backupconfiguration -n demo NAME TASK SCHEDULE PAUSED AGE ss-backup */1 * * * * 3m41s @@ -367,7 +367,7 @@ The `ss-backup` CronJob will trigger a backup on each scheduled slot by creating Wait for the next schedule for backup. Run the following command to watch `BackupSession` crd, -```console +```bash $ watch -n 2 kubectl get backupsession -n demo Every 5.0s: kubectl get bs -n demo suaas-appscode: Tue Jun 25 17:54:41 2019 @@ -381,7 +381,7 @@ We can see from the above output that the backup session has succeeded. Now, we Once a backup is complete, Stash will update the respective `Repository` crd to reflect the backup. Check that the repository `gcs-repo` has been updated by the following command, -```console +```bash $ kubectl get repository -n demo NAME INTEGRITY SIZE SNAPSHOT-COUNT LAST-SUCCESSFUL-BACKUP AGE gcs-repo true 0 B 3 103s 5m @@ -406,14 +406,14 @@ At first, let's stop taking any further backup of the old StatefulSet so that no Let's pause the `ss-backup` BackupConfiguration, -```console +```bash $ kubectl patch backupconfiguration -n demo ss-backup --type="merge" --patch='{"spec": {"paused": true}}' backupconfiguration.stash.appscode.com/ss-backup patched ``` Now, wait for a moment. Stash will pause the BackupConfiguration. Verify that the BackupConfiguration has been paused, -```console +```bash $ kubectl get backupconfiguration -n demo NAME TASK SCHEDULE PAUSED AGE ss-backup */1 * * * * true 26m @@ -483,7 +483,7 @@ spec: Let's create the StatefulSet we have shown above. 
-```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/workloads/statefulset/recovered_statefulset.yaml service/re-headless created statefulset.apps/stash-recovered created @@ -526,7 +526,7 @@ Here, Let's create the `RestoreSession` crd we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/workloads/statefulset/restoresession.yaml restoresession.stash.appscode.com/ss-restore created ``` @@ -616,7 +616,7 @@ Notice the `Init-Containers` section. We can see that the init-container `stash- Run the following command to watch RestoreSession phase, -```console +```bash $ watch -n 3 kubectl get restoresession -n demo Every 5.0s: kubectl get restoresession -n demo suaas-appscode: Tue Jun 25 18:27:30 2019 @@ -634,7 +634,7 @@ In this section, we are going to verify that the desired data has been restored At first, check if the `stash-recovered` pods of a StatefulSet has gone into `Running` state by the following commands, -```console +```bash $ kubectl get pod -n demo NAME READY STATUS RESTARTS AGE stash-recovered-0 1/1 Running 0 10m @@ -644,7 +644,7 @@ stash-recovered-2 1/1 Running 0 12m Verify that the backed up data has been restored in `/source/data` directory of the `stash-recovered` pods of a StatefulSet using the following commands, -```console +```bash $ kubectl exec -n demo stash-recovered-0 -- cat /source/data/data.txt stash-demo-0 $ kubectl exec -n demo stash-recovered-1 -- cat /source/data/data.txt @@ -663,14 +663,14 @@ At first, let's stop taking any further backup of the old StatefulSet so that no Let's pause the `deployment-backup` BackupConfiguration, -```console +```bash $ kubectl patch backupconfiguration -n demo ss-backup --type="merge" --patch='{"spec": {"paused": true}}' backupconfiguration.stash.appscode.com/ss-backup patched ``` Now, wait for a moment. 
Stash will pause the BackupConfiguration. Verify that the BackupConfiguration has been paused, -```console +```bash $ kubectl get backupconfiguration -n demo NAME TASK SCHEDULE PAUSED AGE ss-backup */1 * * * * true 26m @@ -740,7 +740,7 @@ spec: Let's create the StatefulSet we have shown above. -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/workloads/statefulset/adv_statefulset.yaml service/adv-headless created statefulset.apps/stash-recovered-adv created @@ -789,7 +789,7 @@ Here, Let's create the `RestoreSession` crd we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/guides/latest/workloads/statefulset/adv_restoresession.yaml restoresession.stash.appscode.com/ss-restore created ``` @@ -879,7 +879,7 @@ Notice the `Init-Containers` section. We can see that the init-container `stash- Run the following command to watch RestoreSession phase, -```console +```bash $ watch -n 3 kubectl get restoresession -n demo Every 5.0s: kubectl get restoresession -n demo suaas-appscode: Tue Jun 25 18:27:30 2019 @@ -895,7 +895,7 @@ In this section, we are going to verify that the desired data has been restored At first, check if the `stash-recovered` pods of the StatefulSet has gone into `Running` state by the following commands, -```console +```bash $ kubectl get pod -n demo NAME READY STATUS RESTARTS AGE stash-recovered-adv-0 1/1 Running 0 3m30s @@ -907,7 +907,7 @@ stash-recovered-adv-4 1/1 Running 0 8m1s Verify that the sample data has been restored in `/source/data` directory of the `stash-recovered` pods of the StatefulSet using the following commands, -```console +```bash $ kubectl exec -n demo stash-recovered-adv-0 -- cat /source/data/data.txt stash-demo-0 $ kubectl exec -n demo stash-recovered-adv-1 -- cat /source/data/data.txt @@ -926,7 +926,7 @@ We can see from the above output that backup data of 
`host-1` has been restored To clean up the Kubernetes resources created by this tutorial, run: -```console +```bash kubectl delete -n demo statefulset stash-demo kubectl delete -n demo statefulset stash-recovered kubectl delete -n demo backupconfiguration ss-backup diff --git a/docs/guides/v1alpha1/backends/azure.md b/docs/guides/v1alpha1/backends/azure.md index fdc4ffd4..36704e05 100644 --- a/docs/guides/v1alpha1/backends/azure.md +++ b/docs/guides/v1alpha1/backends/azure.md @@ -28,7 +28,7 @@ To configure storage secret for this backend, following secret keys are needed: Create storage secret as below, -```console +```bash $ echo -n 'changeit' > RESTIC_PASSWORD $ echo -n '' > AZURE_ACCOUNT_NAME $ echo -n '' > AZURE_ACCOUNT_KEY @@ -103,7 +103,7 @@ spec: Now, create the Restic we have configured above for `azure` backend, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/backends/azure/azure-restic.yaml restic "azure-restic" created ``` diff --git a/docs/guides/v1alpha1/backends/b2.md b/docs/guides/v1alpha1/backends/b2.md index c72de50c..71d39ee9 100644 --- a/docs/guides/v1alpha1/backends/b2.md +++ b/docs/guides/v1alpha1/backends/b2.md @@ -28,7 +28,7 @@ To configure storage secret for this backend, following secret keys are needed: Create storage secret as below, -```console +```bash $ echo -n 'changeit' > RESTIC_PASSWORD $ echo -n '' > B2_ACCOUNT_ID $ echo -n '' > B2_ACCOUNT_KEY @@ -103,7 +103,7 @@ spec: Now, create the Restic we have configured above for `b2` backend, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/backends/b2/b2-restic.yaml restic "b2-restic" created ``` diff --git a/docs/guides/v1alpha1/backends/gcs.md b/docs/guides/v1alpha1/backends/gcs.md index fc6d3103..ad0acb35 100644 --- a/docs/guides/v1alpha1/backends/gcs.md +++ b/docs/guides/v1alpha1/backends/gcs.md @@ -28,7 +28,7 @@ To configure storage secret for 
this backend, following secret keys are needed: Create storage secret as below, -```console +```bash $ echo -n 'changeit' > RESTIC_PASSWORD $ echo -n '' > GOOGLE_PROJECT_ID $ mv downloaded-sa-json.key GOOGLE_SERVICE_ACCOUNT_JSON_KEY @@ -103,7 +103,7 @@ spec: Now, create the Restic we have configured above for `gcs` backend, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/backends/gcs/gcs-restic.yaml restic "gcs-restic" created ``` diff --git a/docs/guides/v1alpha1/backends/local.md b/docs/guides/v1alpha1/backends/local.md index 6467b288..dd833982 100644 --- a/docs/guides/v1alpha1/backends/local.md +++ b/docs/guides/v1alpha1/backends/local.md @@ -26,7 +26,7 @@ To configure storage secret for this backend, following secret keys are needed: Create storage secret as below, -```console +```bash $ echo -n 'changeit' > RESTIC_PASSWORD $ kubectl create secret generic local-secret --from-file=./RESTIC_PASSWORD secret "local-secret" created @@ -97,7 +97,7 @@ spec: Now, create the `Restic` we have configured above for `local` backend, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/backends/local/local-restic-hostPath.yaml restic "local-restic" created ``` @@ -138,7 +138,7 @@ spec: Now, create the `Restic` we have configured above for `local` backend, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/backends/local/local-restic-nfs.yaml restic "local-restic" created ``` diff --git a/docs/guides/v1alpha1/backends/s3.md b/docs/guides/v1alpha1/backends/s3.md index 7f152152..356060ef 100644 --- a/docs/guides/v1alpha1/backends/s3.md +++ b/docs/guides/v1alpha1/backends/s3.md @@ -29,7 +29,7 @@ To configure storage secret for this backend, following secret keys are needed: Create storage secret as below, -```console +```bash $ echo -n 'changeit' > RESTIC_PASSWORD $ echo -n '' 
> AWS_ACCESS_KEY_ID $ echo -n '' > AWS_SECRET_ACCESS_KEY @@ -106,7 +106,7 @@ spec: Now, create the Restic we have configured above for `s3` backend, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/backends/s3/s3-restic.yaml restic "s3-restic" created ``` diff --git a/docs/guides/v1alpha1/backends/swift.md b/docs/guides/v1alpha1/backends/swift.md index 6716dd24..fb2cd863 100644 --- a/docs/guides/v1alpha1/backends/swift.md +++ b/docs/guides/v1alpha1/backends/swift.md @@ -44,7 +44,7 @@ To configure storage secret this backend, following secret keys are needed: Create storage secret as below, -```console +```bash $ echo -n 'changeit' > RESTIC_PASSWORD $ echo -n '' > OS_AUTH_URL $ echo -n '' > OS_TENANT_ID @@ -131,7 +131,7 @@ spec: Now, create the Restic we have configured above for `swift` backend, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/backends/swift/swift-restic.yaml restic "swift-restic" created ``` diff --git a/docs/guides/v1alpha1/backup.md b/docs/guides/v1alpha1/backup.md index b1cd1613..6cf919ba 100644 --- a/docs/guides/v1alpha1/backup.md +++ b/docs/guides/v1alpha1/backup.md @@ -34,7 +34,7 @@ At first, you need to have a Kubernetes cluster, and the `kubectl` command-line To keep things isolated, we are going to use a separate namespace called `demo` throughout this tutorial. -```console +```bash $ kubectl create ns demo namespace/demo created ``` @@ -63,7 +63,7 @@ In order to take back up, we need some sample data. 
Stash has some sample data i Let's create a ConfigMap from these sample data, -```console +```bash $ kubectl create configmap -n demo stash-sample-data \ --from-literal=LICENSE="$(curl -fsSL https://github.com/stashed/stash-data/raw/master/LICENSE)" \ --from-literal=README.md="$(curl -fsSL https://github.com/stashed/stash-data/raw/master/README.md)" @@ -114,14 +114,14 @@ spec: Let's create the deployment we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/backup/deployment.yaml deployment.apps/stash-demo created ``` Now, wait for deployment's pod to go into `Running` state. -```console +```bash $ kubectl get pod -n demo -l app=stash-demo NAME READY STATUS RESTARTS AGE stash-demo-7ccd56bf5d-4x27d 1/1 Running 0 21s @@ -129,7 +129,7 @@ stash-demo-7ccd56bf5d-4x27d 1/1 Running 0 21s You can check that the `/source/data/` directory of this pod is populated with data from the `stash-sample-data` ConfigMap using this command, -```console +```bash $ kubectl exec -n demo stash-demo-7ccd56bf5d-4x27d -- ls -R /source/data /source/data: LICENSE @@ -148,7 +148,7 @@ At first, we need to create a storage secret. To configure this backend, the fol Create the secret as below, -```console +```bash $ echo -n 'changeit' > RESTIC_PASSWORD $ kubectl create secret generic -n demo local-secret \ --from-file=./RESTIC_PASSWORD @@ -157,7 +157,7 @@ secret/local-secret created Verify that the secret has been created successfully, -```console +```bash $ kubectl get secret -n demo local-secret -o yaml ``` @@ -225,14 +225,14 @@ Here, Let's create the `Restic` we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/backup/restic.yaml restic.stash.appscode.com/local-restic created ``` If everything goes well, Stash will inject a sidecar container into the `stash-demo` deployment to take periodic backup. 
Let's check that sidecar has been injected successfully, -```console +```bash $ kubectl get pod -n demo -l app=stash-demo NAME READY STATUS RESTARTS AGE stash-demo-7ffdb5d7fd-5x8l6 2/2 Running 0 37s @@ -240,7 +240,7 @@ stash-demo-7ffdb5d7fd-5x8l6 2/2 Running 0 37s Look at the pod. It now has 2 containers. If you view the resource definition of this pod, you will see that there is a container named `stash` which running `backup` command. -```console +```bash $ kubectl get pod -n demo stash-demo-7ffdb5d7fd-5x8l6 -o yaml ``` @@ -427,7 +427,7 @@ status: Stash will create a `Repository` crd with name `deployment.stash-demo` for the respective repository in local backend at first backup schedule. To verify, run the following command, -```console +```bash $ kubectl get repository deployment.stash-demo -n demo NAME BACKUPCOUNT LASTSUCCESSFULBACKUP AGE deployment.stash-demo 4 23s 4m @@ -437,7 +437,7 @@ Here, `BACKUPCOUNT` field indicates the number of backup snapshots has taken in `Restic` will take backup of the volume periodically with a 1-minute interval. 
You can verify that backup snapshots have been created successfully by running the following command: -```console +```bash $ kubectl get snapshots -n demo -l repository=deployment.stash-demo NAME AGE deployment.stash-demo-9a6e6b78 3m18s @@ -516,7 +516,7 @@ restic.stash.appscode.com/local-restic patched To cleanup the Kubernetes resources created by this tutorial, run: -```console +```bash $ kubectl delete -n demo deployment stash-demo $ kubectl delete -n demo secret local-secret $ kubectl delete -n demo restic local-restic diff --git a/docs/guides/v1alpha1/monitoring/builtin.md b/docs/guides/v1alpha1/monitoring/builtin.md index b270c795..797f23b7 100644 --- a/docs/guides/v1alpha1/monitoring/builtin.md +++ b/docs/guides/v1alpha1/monitoring/builtin.md @@ -22,7 +22,7 @@ At first, you need to have a Kubernetes cluster, and the kubectl command-line to To keep Prometheus resources isolated, we are going to use a separate namespace to deploy Prometheus server. -```console +```bash $ kubectl create ns monitoring namespace/monitoring created ``` @@ -31,7 +31,7 @@ namespace/monitoring created Enable Prometheus monitoring using `prometheus.io/builtin` agent while installing Stash. To know details about how to enable monitoring see [here](/docs/guides/v1alpha1/monitoring/overview.md#how-to-enable-monitoring). Here, we are going to enable monitoring for both `backup & recovery` and `operator` metrics using Helm 3. -```console +```bash $ helm install stash-operator appscode/stash --version {{< param "info.version" >}} \ --namespace kube-system \ --set monitoring.agent=prometheus.io/builtin \ @@ -110,7 +110,7 @@ We have deployed Stash in `kube-system` namespace. Stash exports operator metric Let's check `stash-apiserver-cert` certificate has been created in `monitoring` namespace. 
-```console +```bash $ kubectl get secret -n monitoring -l=app=stash NAME TYPE DATA AGE stash-apiserver-cert kubernetes.io/tls 2 2m21s @@ -120,7 +120,7 @@ stash-apiserver-cert kubernetes.io/tls 2 2m21s If you are using a RBAC enabled cluster, you have to give necessary RBAC permissions for Prometheus. Let's create necessary RBAC stuffs for Prometheus, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/monitoring/builtin/prom-rbac.yaml clusterrole.rbac.authorization.k8s.io/stash-prometheus-server created serviceaccount/stash-prometheus-server created @@ -245,7 +245,7 @@ Also note that, we have provided a bearer-token file through `bearer_token_file` Let's create the ConfigMap we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/monitoring/builtin/prom-config.yaml configmap/stash-prometheus-server-conf created ``` @@ -306,7 +306,7 @@ Notice that, we have mounted `stash-apiserver-cert` secret as a volume at `/etc/ Now, let's create the deployment, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/monitoring/builtin/prom-deployment.yaml deployment.apps/stash-prometheus-server created ``` @@ -315,7 +315,7 @@ deployment.apps/stash-prometheus-server created Prometheus server is running on port `9090`. We are going to use [port forwarding](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/) to access Prometheus dashboard. Run following command on a separate terminal, -```console +```bash $ kubectl port-forward -n monitoring stash-prometheus-server-9ddbf79b6-8l6hk 9090 Forwarding from 127.0.0.1:9090 -> 9090 Forwarding from [::1]:9090 -> 9090 @@ -331,7 +331,7 @@ Now, we can access the dashboard at `localhost:9090`. 
Open [http://localhost:909 To cleanup the Kubernetes resources created by this tutorial, run: -```console +```bash kubectl delete clusterrole stash-prometheus-server kubectl delete clusterrolebinding stash-prometheus-server diff --git a/docs/guides/v1alpha1/monitoring/coreos.md b/docs/guides/v1alpha1/monitoring/coreos.md index 0fe07515..a003dede 100644 --- a/docs/guides/v1alpha1/monitoring/coreos.md +++ b/docs/guides/v1alpha1/monitoring/coreos.md @@ -22,7 +22,7 @@ CoreOS [prometheus-operator](https://github.com/coreos/prometheus-operator) prov - To keep Prometheus resources isolated, we are going to use a separate namespace to deploy Prometheus operator and respective resources. - ```console + ```bash $ kubectl create ns monitoring namespace/monitoring created ``` @@ -35,7 +35,7 @@ Enable Prometheus monitoring using `prometheus.io/coreos-operator` agent while i Here, we are going to enable monitoring for both `backup & recovery` and `operator` metrics using Helm 3. -```console +```bash $ helm install stash-operator appscode/stash --version {{< param "info.version" >}} \ --namespace kube-system \ --set monitoring.agent=prometheus.io/coreos-operator \ @@ -90,7 +90,7 @@ Stash exports operator metrics via TLS secured `api` endpoint. So, Prometheus se Let's check secret `stash-apiserver-cert` has been created in monitoring namespace. -```console +```bash $ kubectl get secret -n monitoring -l=app=stash NAME TYPE DATA AGE stash-apiserver-cert kubernetes.io/tls 2 31m @@ -135,7 +135,7 @@ Here, `spec.serviceMonitorSelector` is used to select the `ServiceMonitor` crd t Let's create the `Prometheus` object we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/monitoring/coreos/prometheus.yaml prometheus.monitoring.coreos.com/prometheus created ``` @@ -144,7 +144,7 @@ Prometheus operator watches for `Prometheus` crd. 
Once a `Prometheus` crd is cre Let's check StatefulSet has been created, -```console +```bash $ kubectl get statefulset -n monitoring NAME DESIRED CURRENT AGE prometheus-prometheus 1 1 4m @@ -152,7 +152,7 @@ prometheus-prometheus 1 1 4m Check if the pod of the StatefulSet is running, -```console +```bash $ kubectl get pod prometheus-prometheus-0 -n monitoring NAME READY STATUS RESTARTS AGE prometheus-prometheus-0 2/2 Running 0 6m @@ -164,7 +164,7 @@ Now, we are ready to access Prometheus dashboard. Prometheus server is running on port `9090`. We are going to use [port forwarding](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/) to access Prometheus dashboard. Run following command on a separate terminal, -```console +```bash $ kubectl port-forward -n monitoring prometheus-prometheus-0 9090 Forwarding from 127.0.0.1:9090 -> 9090 Forwarding from [::1]:9090 -> 9090 @@ -180,7 +180,7 @@ Now, we can access the dashboard at `localhost:9090`. Open [http://localhost:909 To cleanup the Kubernetes resources created by this tutorial, run: -```console +```bash # cleanup Prometheus resources kubectl delete -n monitoring prometheus prometheus kubectl delete -n monitoring secret stash-apiserver-cert diff --git a/docs/guides/v1alpha1/monitoring/grafana.md b/docs/guides/v1alpha1/monitoring/grafana.md index ce870170..67fe1435 100644 --- a/docs/guides/v1alpha1/monitoring/grafana.md +++ b/docs/guides/v1alpha1/monitoring/grafana.md @@ -28,7 +28,7 @@ Grafana provides an elegant graphical user interface to visualize data. You can We have to add our Prometheus server `prometheus-prometheus-0` as data source of grafana. We are going to use a `ClusterIP` service to connect Prometheus server with grafana. 
Let's create a service to select Prometheus server `prometheus-prometheus-0`, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/monitoring/coreos/prometheus-service.yaml service/prometheus created ``` @@ -112,7 +112,7 @@ Once you have imported the dashboard successfully, you will be greeted with Stas To cleanup the Kubernetes resources created by this tutorial, run: -```console +```bash kubectl delete -n monitoring service prometheus ``` diff --git a/docs/guides/v1alpha1/monitoring/overview.md b/docs/guides/v1alpha1/monitoring/overview.md index 5fea8556..f3847ab8 100644 --- a/docs/guides/v1alpha1/monitoring/overview.md +++ b/docs/guides/v1alpha1/monitoring/overview.md @@ -121,7 +121,7 @@ You have to provides these flags while installing or upgrading or updating Stash **Helm 3:** -```console +```bash $ helm install stash-operator appscode/stash --version {{< param "info.version" >}} \ --namespace kube-system \ --set monitoring.agent=prometheus.io/coreos-operator \ @@ -133,7 +133,7 @@ $ helm install stash-operator appscode/stash --version {{< param "info.version" **Helm 2:** -```console +```bash $ helm install appscode/stash --name stash-operator --version {{< param "info.version" >}} \ --namespace kube-system \ --set monitoring.agent=prometheus.io/coreos-operator \ @@ -145,7 +145,7 @@ $ helm install appscode/stash --name stash-operator --version {{< param "info.ve **YAML (with Helm 3):** -```console +```bash $ helm template stash-operator appscode/stash --version {{< param "info.version" >}} \ --namespace kube-system \ --no-hooks \ diff --git a/docs/guides/v1alpha1/offline_backup.md b/docs/guides/v1alpha1/offline_backup.md index edcfd0ad..db3ed144 100644 --- a/docs/guides/v1alpha1/offline_backup.md +++ b/docs/guides/v1alpha1/offline_backup.md @@ -33,7 +33,7 @@ At first, you need to have a Kubernetes cluster, and the `kubectl` command-line To keep things isolated, we are going to use a separate 
namespace called `demo` throughout this tutorial. -```console +```bash $ kubectl create ns demo namespace/demo created ``` @@ -69,7 +69,7 @@ In order to take backup, we need some sample data. Stash has some sample data in Let's create a ConfigMap from these sample data, -```console +```bash $ kubectl create configmap -n demo stash-sample-data \ --from-literal=LICENSE="$(curl -fsSL https://github.com/stashed/stash-data/raw/master/LICENSE)" \ --from-literal=README.md="$(curl -fsSL https://github.com/stashed/stash-data/raw/master/README.md)" @@ -122,14 +122,14 @@ spec: Let's create the deployment we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/backup/deployment.yaml deployment.apps/stash-demo created ``` Now, wait for deployment's pod to go into `Running` state. -```console +```bash $ kubectl get pod -n demo -l app=stash-demo NAME READY STATUS RESTARTS AGE stash-demo-7ccd56bf5d-p9p2p 1/1 Running 0 2m29s @@ -137,7 +137,7 @@ stash-demo-7ccd56bf5d-p9p2p 1/1 Running 0 2m29s You can check that the `/source/data/` directory of this pod is populated with data from the `stash-sample-data` ConfigMap using this command, -```console +```bash $ kubectl exec -n demo stash-demo-7ccd56bf5d-p9p2p -- ls -R /source/data /source/data: LICENSE @@ -156,7 +156,7 @@ At first, we need to create a storage secret. To configure this backend, the fol Create the secret as below, -```console +```bash $ echo -n 'changeit' > RESTIC_PASSWORD $ kubectl create secret generic -n demo local-secret \ --from-file=./RESTIC_PASSWORD @@ -165,7 +165,7 @@ secret/local-secret created Verify that the secret has been created successfully. -```console +```bash $ kubectl get secret -n demo local-secret -o yaml ``` @@ -225,7 +225,7 @@ Here, we have set `spec.type: offline`. 
This tell Stash to take backup in offlin Let's create the `Restic` we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/backup/restic_offline.yaml restic.stash.appscode.com/offline-restic created ``` @@ -234,7 +234,7 @@ If everything goes well, Stash will inject an [init-container](https://kubernete Let's check that `init-container` has been injected successfully, -```console +```bash $ kubectl get deployment -n demo stash-demo -o yaml ``` @@ -390,7 +390,7 @@ Notice that `stash-demo` deployment has an `init-container` named `stash` which Stash operator also has created a [CronJob](https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/) with name format `stash-scaledown-cron-{restic-name}`. Verify that the `CronJob` has been created successfully, -```console +```bash $ kubectl get cronjob -n demo NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE stash-scaledown-cron-offline-restic @every 5m False 0 2m34s @@ -400,7 +400,7 @@ stash-scaledown-cron-offline-restic @every 5m False 0 Stash will create a `Repository` crd with name `deployment.stash-demo` for the respective repository during the first backup run. 
To verify, run the following command, -```console +```bash $ kubectl get repository deployment.stash-demo -n demo NAME BACKUP-COUNT LAST-SUCCESSFUL-BACKUP AGE deployment.stash-demo 1 2m 2m @@ -412,7 +412,7 @@ Here, `BACKUP-COUNT` field indicates number of backup snapshot has taken in this To cleanup the Kubernetes resources created by this tutorial, run: -```console +```bash $ kubectl delete -n demo deployment stash-demo $ kubectl delete -n demo secret local-secret $ kubectl delete -n demo restic offline-restic diff --git a/docs/guides/v1alpha1/platforms/aks.md b/docs/guides/v1alpha1/platforms/aks.md index 2eb68c9d..060d1a07 100644 --- a/docs/guides/v1alpha1/platforms/aks.md +++ b/docs/guides/v1alpha1/platforms/aks.md @@ -34,7 +34,7 @@ At first, you need to have a AKS cluster. If you don't already have a cluster, c To keep things isolated, we are going to use a separate namespace called `demo` throughout this tutorial. -```console +```bash $ kubectl create ns demo namespace/demo created ``` @@ -47,7 +47,7 @@ In order to take backup, we need some sample data. Stash has some sample data in Let's create a ConfigMap from these sample data, -```console +```bash $ kubectl create configmap -n demo stash-sample-data \ --from-literal=LICENSE="$(curl -fsSL https://github.com/stashed/stash-data/raw/master/LICENSE)" \ --from-literal=README.md="$(curl -fsSL https://github.com/stashed/stash-data/raw/master/README.md)" @@ -98,14 +98,14 @@ spec: Let's create the deployment we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/platforms/aks/deployment.yaml deployment.apps/stash-demo created ``` Now, wait for deployment's pod to go in `Running` state. 
-```console +```bash $ kubectl get pod -n demo -l app=stash-demo NAME READY STATUS RESTARTS AGE stash-demo-7584fd7748-nl5n8 1/1 Running 0 3m @@ -113,7 +113,7 @@ stash-demo-7584fd7748-nl5n8 1/1 Running 0 3m You can check that the `/source/data/` directory of this pod is populated with data from the `stash-sample-data` ConfigMap using this command, -```console +```bash $ kubectl exec -n demo stash-demo-7584fd7748-nl5n8 -- ls -R /source/data /source/data: LICENSE @@ -134,7 +134,7 @@ At first, we need to create a storage secret that hold the credentials for the b Create the storage secret as below, -```console +```bash $ echo -n 'changeit' >RESTIC_PASSWORD $ echo -n '' > AZURE_ACCOUNT_NAME $ echo -n '' > AZURE_ACCOUNT_KEY @@ -147,7 +147,7 @@ secret/azure-secret created Verify that the secret has been created successfully, -```console +```bash $ kubectl get secret -n demo azure-secret -o yaml ``` @@ -173,7 +173,7 @@ type: Opaque Now, we are going to create `Restic` crd to take backup `/source/data` directory of `stash-demo` deployment. This will create a repository in the Azure blob container specified in `azure.container` field and start taking periodic backup of `/source/data` directory. -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/platforms/aks/restic.yaml restic.stash.appscode.com/azure-restic created ``` @@ -210,7 +210,7 @@ spec: If everything goes well, Stash will inject a sidecar container into the `stash-demo` deployment to take periodic backup. Let's check sidecar has been injected successfully, -```console +```bash $ kubectl get pod -n demo -l app=stash-demo NAME READY STATUS RESTARTS AGE stash-demo-6b8c94cdd7-8jhtn 2/2 Running 1 1h @@ -222,7 +222,7 @@ Look at the pod. It now has 2 containers. If you view the resource definition of Stash will create a `Repository` crd with name `deployment.stash-demo` for the respective repository in Azure backend at first backup schedule. 
To verify, run the following command, -```console +```bash $ kubectl get repository deployment.stash-demo -n demo NAME BACKUPCOUNT LASTSUCCESSFULBACKUP AGE deployment.stash-demo 8 13s 8m @@ -232,7 +232,7 @@ Here, `BACKUPCOUNT` field indicates number of backup snapshot has taken in this `Restic` will take backup of the volume periodically with a 1-minute interval. You can verify that backup snapshots are created successfully by, -```console +```bash $ kubectl get snapshots -n demo -l repository=deployment.stash-demo NAME AGE deployment.stash-demo-52ee5eaa 4m36s @@ -263,7 +263,7 @@ Now, consider that we have lost our workload as well as data volume. We want to At first, let's delete `Restic` crd, `stash-demo` deployment and `stash-sample-data` ConfigMap. -```console +```bash $ kubectl delete deployment -n demo stash-demo deployment.extensions "stash-demo" deleted @@ -282,7 +282,7 @@ In order to perform recovery, we need `Repository` crd `deployment.stah-demo` an Let's create a `PersistentVolumeClaim` where our recovered data will be stored. -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/platforms/aks/pvc.yaml persistentvolumeclaim/stash-recovered created ``` @@ -308,7 +308,7 @@ spec: Check that if cluster has provisioned the requested claim, -```console +```bash $ kubectl get pvc -n demo -l app=stash-demo NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE stash-recovered Bound pvc-f6bddbf6-e66a-11e8-b68c-a62bf720de95 1Gi RWO default 1m @@ -320,7 +320,7 @@ Look at the `STATUS` filed. `stash-recovered` PVC is bounded to volume `pvc-f6bd Now, we have to create a `Recovery` crd to recover backed up data into this PVC. -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/platforms/aks/recovery.yaml recovery.stash.appscode.com/azure-recovery created ``` @@ -347,7 +347,7 @@ spec: Wait until `Recovery` job completes its task. 
To verify that recovery has completed successfully run, -```console +```bash $ kubectl get recovery -n demo azure-recovery NAME REPOSITORYNAMESPACE REPOSITORYNAME SNAPSHOT PHASE AGE azure-recovery demo deployment.stash-demo Succeeded 3m @@ -357,7 +357,7 @@ Here, `PHASE` `Succeeded` indicate that our recovery has been completed successf If you are using Kubernetes version older than v1.11.0 then run following command and check `status.phase` field to see whether the recovery succeeded or failed. -```console +```bash $ kubectl get recovery -n demo azure-recovery -o yaml ``` @@ -405,7 +405,7 @@ spec: Let's create the deployment, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/platforms/aks/recovered-deployment.yaml deployment.apps/stash-demo created ``` @@ -416,7 +416,7 @@ We have re-deployed `stash-demo` deployment with recovered volume. Now, it is ti Get the pod of new deployment, -```console +```bash $ kubectl get pod -n demo -l app=stash-demo NAME READY STATUS RESTARTS AGE stash-demo-69994758c9-v7ntg 1/1 Running 0 3m @@ -424,7 +424,7 @@ stash-demo-69994758c9-v7ntg 1/1 Running 0 3m Run following command to view data of `/source/data` directory of this pod, -```console +```bash $ kubectl exec -n demo stash-demo-69994758c9-v7ntg -- ls -R /source/data /source/data: LICENSE @@ -440,7 +440,7 @@ So, we can see that the data we had backed up from original deployment are now p To cleanup the resources created by this tutorial, run following commands: -```console +```bash $ kubectl delete recovery -n demo azure-recovery $ kubectl delete secret -n demo azure-secret $ kubectl delete deployment -n demo stash-demo diff --git a/docs/guides/v1alpha1/platforms/eks.md b/docs/guides/v1alpha1/platforms/eks.md index 3190dae6..17ffd34e 100644 --- a/docs/guides/v1alpha1/platforms/eks.md +++ b/docs/guides/v1alpha1/platforms/eks.md @@ -34,7 +34,7 @@ At first, you need to have a EKS cluster. 
If you don't already have a cluster, c To keep things isolated, we are going to use a separate namespace called `demo` throughout this tutorial. -```console +```bash $ kubectl create ns demo namespace/demo created ``` @@ -47,7 +47,7 @@ In order to take backup, we need some sample data. Stash has some sample data in Let's create a ConfigMap from these sample data, -```console +```bash $ kubectl create configmap -n demo stash-sample-data \ --from-literal=LICENSE="$(curl -fsSL https://github.com/stashed/stash-data/raw/master/LICENSE)" \ --from-literal=README.md="$(curl -fsSL https://github.com/stashed/stash-data/raw/master/README.md)" @@ -98,14 +98,14 @@ spec: Let's create the deployment we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/platforms/eks/deployment.yaml deployment.apps/stash-demo created ``` Now, wait for deployment's pod to go in `Running` state. -```console +```bash $ kubectl get pod -n demo -l app=stash-demo NAME READY STATUS RESTARTS AGE stash-demo-756bf59b5-7tk6q 1/1 Running 0 1m @@ -113,7 +113,7 @@ stash-demo-756bf59b5-7tk6q 1/1 Running 0 1m You can check that the `/source/data/` directory of this pod is populated with data from the `stash-sample-data` ConfigMap using this command, -```console +```bash $ kubectl exec -n demo stash-demo-756bf59b5-7tk6q -- ls -R /source/data /source/data: LICENSE @@ -134,7 +134,7 @@ At first, we need to create a storage secret that hold the credentials for the b Create a the storage secret as below, -```console +```bash $ echo -n 'changeit' > RESTIC_PASSWORD $ echo -n '' > AWS_ACCESS_KEY_ID $ echo -n '' > AWS_SECRET_ACCESS_KEY @@ -147,7 +147,7 @@ secret/s3-secret created Verify that the secret has been created successfully, -```console +```bash $ kubectl get secret -n demo s3-secret -o yaml ``` @@ -172,7 +172,7 @@ type: Opaque Now, we are going to create `Restic` crd to take backup `/source/data` directory of `stash-demo` 
deployment. This will create a repository in the S3 bucket specified in `s3.bucket` field and start taking periodic backup of `/source/data` directory. -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/platforms/eks/restic.yaml restic.stash.appscode.com/s3-restic created ``` @@ -210,7 +210,7 @@ spec: If everything goes well, Stash will inject a sidecar container into the `stash-demo` deployment to take periodic backup. Let's check sidecar has been injected successfully, -```console +```bash $ kubectl get pod -n demo -l app=stash-demo NAME READY STATUS RESTARTS AGE stash-demo-646c854778-t4d72 2/2 Running 0 1m @@ -222,7 +222,7 @@ Look at the pod. It now has 2 containers. If you view the resource definition of Stash will create a `Repository` crd with name `deployment.stash-demo` for the respective repository in S3 backend at first backup schedule. To verify, run the following command, -```console +```bash $ kubectl get repository deployment.stash-demo -n demo NAME CREATED AT deployment.stash-demo 5m @@ -265,7 +265,7 @@ status: `Restic` will take backup of the volume periodically with a 1-minute interval. You can verify that backup snapshots are created successfully by, -```console +```bash $ kubectl get snapshots -n demo -l repository=deployment.stash-demo NAME AGE deployment.stash-demo-2e9cc755 4m53s @@ -296,7 +296,7 @@ Now, consider that we have lost our workload as well as data volume. We want to At first, let's delete `Restic` crd, `stash-demo` deployment and `stash-sample-data` ConfigMap. -```console +```bash $ kubectl delete deployment -n demo stash-demo deployment.extensions "stash-demo" deleted @@ -323,7 +323,7 @@ gp2 (default) kubernetes.io/aws-ebs 6h Now, let's create a `PersistentVolumeClaim` where our recovered data will be stored. 
-```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/platforms/eks/pvc.yaml persistentvolumeclaim/stash-recovered created ``` @@ -349,7 +349,7 @@ spec: Check that if cluster has provisioned the requested claim, -```console +```bash $ kubectl get pvc -n demo -l app=stash-demo NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE stash-recovered Bound pvc-d86e3909-e80e-11e8-a7b0-029842a88ece 1Gi RWO gp2 18s @@ -361,7 +361,7 @@ Look at the `STATUS` filed. `stash-recovered` PVC is bounded to volume `pvc-d86e Now, we have to create a `Recovery` crd to recover backed up data into this PVC. -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/platforms/eks/recovery.yaml recovery.stash.appscode.com/s3-recovery created ``` @@ -465,7 +465,7 @@ spec: Let's create the deployment, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/platforms/eks/recovered-deployment.yaml deployment.apps/stash-demo created ``` @@ -476,7 +476,7 @@ We have re-deployed `stash-demo` deployment with recovered volume. 
Now, it is ti Get the pod of new deployment, -```console +```bash $ kubectl get pod -n demo -l app=stash-demo NAME READY STATUS RESTARTS AGE stash-demo-6796866bb8-zkhv5 1/1 Running 0 55s @@ -484,7 +484,7 @@ stash-demo-6796866bb8-zkhv5 1/1 Running 0 55s Run following command to view data of `/source/data` directory of this pod, -```console +```bash $ kubectl exec -n demo stash-demo-6796866bb8-zkhv5 -- ls -R /source/data /source/data: LICENSE @@ -500,7 +500,7 @@ So, we can see that the data we had backed up from original deployment are now p To cleanup the resources created by this tutorial, run following commands: -```console +```bash $ kubectl delete recovery -n demo s3-recovery $ kubectl delete secret -n demo s3-secret $ kubectl delete deployment -n demo stash-demo diff --git a/docs/guides/v1alpha1/platforms/gke.md b/docs/guides/v1alpha1/platforms/gke.md index c09c5a3a..6122d635 100644 --- a/docs/guides/v1alpha1/platforms/gke.md +++ b/docs/guides/v1alpha1/platforms/gke.md @@ -34,7 +34,7 @@ At first, you need to have a Kubernetes cluster in Google Cloud Platform. If you To keep things isolated, we are going to use a separate namespace called `demo` throughout this tutorial. -```console +```bash $ kubectl create ns demo namespace/demo created ``` @@ -47,7 +47,7 @@ In order to take backup, we need some sample data. 
Stash has some sample data in Let's create a ConfigMap from these sample data, -```console +```bash $ kubectl create configmap -n demo stash-sample-data \ --from-literal=LICENSE="$(curl -fsSL https://github.com/stashed/stash-data/raw/master/LICENSE)" \ --from-literal=README.md="$(curl -fsSL https://github.com/stashed/stash-data/raw/master/README.md)" @@ -98,14 +98,14 @@ spec: Let's create the deployment we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/platforms/gke/deployment.yaml deployment.apps/stash-demo created ``` Now, wait for deployment's pod to go in `Running` state. -```console +```bash $ kubectl get pods -n demo -l app=stash-demo NAME READY STATUS RESTARTS AGE stash-demo-b66b9cdfd-8s98d 1/1 Running 0 6m @@ -113,7 +113,7 @@ stash-demo-b66b9cdfd-8s98d 1/1 Running 0 6m You can check that the `/source/data/` directory of pod is populated with data from the volume source using this command, -```console +```bash $ kubectl exec -n demo stash-demo-b66b9cdfd-8s98d -- ls -R /source/data/ /source/data: LICENSE @@ -134,7 +134,7 @@ At first, we need to create a storage secret that hold the credentials for the b Create storage secret as below, -```console +```bash $ echo -n 'changeit' > RESTIC_PASSWORD $ echo -n '' > GOOGLE_PROJECT_ID $ cat /path/to/downloaded-sa-json.key > GOOGLE_SERVICE_ACCOUNT_JSON_KEY @@ -147,7 +147,7 @@ secret "gcs-secret" created Verify that the secret has been created successfully, -```console +```bash $ kubectl get secret -n demo gcs-secret -o yaml ``` @@ -172,7 +172,7 @@ type: Opaque Now, we can create `Restic` crd. This will create a repository in the GCS bucket specified in `gcs.bucket` field and start taking periodic backup of `/source/data` directory. 
-```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/platforms/gke/restic.yaml restic.stash.appscode.com/gcs-restic created ``` @@ -209,7 +209,7 @@ spec: If everything goes well, Stash will inject a sidecar container into the `stash-demo` deployment to take periodic backup. Let's check sidecar has been injected successfully, -```console +```bash $ kubectl get pod -n demo -l app=stash-demo NAME READY STATUS RESTARTS AGE stash-demo-6b8c94cdd7-8jhtn 2/2 Running 1 1h @@ -221,7 +221,7 @@ Look at the pod. It now has 2 containers. If you view the resource definition of Stash will create a `Repository` crd with name `deployment.stash-demo` for the respective repository in GCS backend. To verify, run the following command, -```console +```bash $ kubectl get repository deployment.stash-demo -n demo NAME BACKUPCOUNT LASTSUCCESSFULBACKUP AGE deployment.stash-demo 1 13s 1m @@ -231,7 +231,7 @@ Here, `BACKUPCOUNT` field indicates number of backup snapshot has taken in this `Restic` will take backup of the volume periodically with a 1-minute interval. You can verify that backup is taking successfully by, -```console +```bash $ kubectl get snapshots -n demo -l repository=deployment.stash-demo NAME AGE deployment.stash-demo-c1014ca6 10s @@ -259,7 +259,7 @@ Now, consider that we have lost our workload as well as data volume. We want to At first, let's delete `Restic` crd, `stash-demo` deployment and `stash-sample-data` ConfigMap. -```console +```bash $ kubectl delete deployment -n demo stash-demo deployment.extensions "stash-demo" deleted @@ -278,7 +278,7 @@ In order to perform recovery, we need `Repository` crd `deployment.stah-demo` an Now, we are going to recover the backed up data into GCE Persistent Disk. At first, create a GCE disk named `stash-recovered` from [Google cloud console](https://console.cloud.google.com/compute/disks). 
Then create `Recovery` crd, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/platforms/gke/recovery-gcePD.yaml recovery.stash.appscode.com/gcs-recovery created ``` @@ -306,7 +306,7 @@ spec: Wait until `Recovery` job completes its task. To verify that recovery has completed successfully run, -```console +```bash $ kubectl get recovery -n demo gcs-recovery NAME REPOSITORYNAMESPACE REPOSITORYNAME SNAPSHOT PHASE AGE gcs-recovery demo deployment.stash-demo Succeeded 3m @@ -316,7 +316,7 @@ Here, `PHASE` `Succeeded` indicate that our recovery has been completed successf If you are using Kubernetes version older than v1.11.0 then run following command and check `status.phase` field to see whether the recovery succeeded or failed. -```console +```bash $ kubectl get recovery -n demo gcs-recovery -o yaml ``` @@ -365,7 +365,7 @@ spec: Let's create the deployment, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/platforms/gke/restored-deployment-gcePD.yaml deployment.apps/stash-demo created ``` @@ -376,7 +376,7 @@ We have re-deployed `stash-demo` deployment with recovered volume. Now, it is ti Get the pod of new deployment, -```console +```bash $ kubectl get pods -n demo -l app=stash-demo NAME READY STATUS RESTARTS AGE stash-demo-857995799-gpml9 1/1 Running 0 34s @@ -384,7 +384,7 @@ stash-demo-857995799-gpml9 1/1 Running 0 34s Run following command to view data of `/source/data` directory of this pod, -```console +```bash $ kubectl exec -n demo stash-demo-857995799-gpml9 -- ls -R /source/data /source/data: LICENSE @@ -400,14 +400,14 @@ So, we can see that the data we had backed up from original deployment are now p Here, we are going to show how to recover the backed up data into a PVC. 
If you have re-deployed `stash-demo` deployment by following previous tutorial on `gcePersistentDisk`, delete the deployment first, -```console +```bash $ kubectl delete deployment -n demo stash-demo deployment.apps/stash-demo deleted ``` Now, create a `PersistentVolumeClaim` where our recovered data will be stored. -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/platforms/gke/pvc.yaml persistentvolumeclaim/stash-recovered created ``` @@ -433,7 +433,7 @@ spec: Check that if cluster has provisioned the requested claim, -```console +```bash $ kubectl get pvc -n demo -l app=stash-demo NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE stash-recovered Bound pvc-57bec6e5-3e11-11e8-951b-42010a80002e 2Gi RWO standard 1m @@ -445,7 +445,7 @@ Look at the `STATUS` filed. `stash-recovered` PVC is bounded to volume `pvc-57be Now, we have to create a `Recovery` crd to recover backed up data into this PVC. -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/platforms/gke/recovery-pvc.yaml recovery.stash.appscode.com/gcs-recovery created ``` @@ -472,7 +472,7 @@ spec: Wait until `Recovery` job completes its task. To verify that recovery has completed successfully run, -```console +```bash $ kubectl get recovery -n demo gcs-recovery NAME REPOSITORYNAMESPACE REPOSITORYNAME SNAPSHOT PHASE AGE gcs-recovery demo deployment.stash-demo Succeeded 3m @@ -524,7 +524,7 @@ spec: Let's create the deployment, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/platforms/gke/restored-deployment-pvc.yaml deployment.apps/stash-demo created ``` @@ -535,7 +535,7 @@ We have re-deployed `stash-demo` deployment with recovered volume. 
Now, it is ti Get the pod of new deployment, -```console +```bash $ kubectl get pods -n demo -l app=stash-demo NAME READY STATUS RESTARTS AGE stash-demo-559845c5db-8cd4w 1/1 Running 0 33s @@ -543,7 +543,7 @@ stash-demo-559845c5db-8cd4w 1/1 Running 0 33s Run following command to view data of `/source/data` directory of this pod, -```console +```bash $ kubectl exec -n demo stash-demo-559845c5db-8cd4w -- ls -R /source/data /source/data: LICENSE @@ -559,7 +559,7 @@ So, we can see that the data we had backed up from original deployment are now p To cleanup the resources created by this tutorial, run following commands: -```console +```bash $ kubectl delete recovery -n demo gcs-recovery $ kubectl delete secret -n demo gcs-secret $ kubectl delete deployment -n demo stash-demo diff --git a/docs/guides/v1alpha1/platforms/minio.md b/docs/guides/v1alpha1/platforms/minio.md index c73b31c8..6686d5a1 100644 --- a/docs/guides/v1alpha1/platforms/minio.md +++ b/docs/guides/v1alpha1/platforms/minio.md @@ -34,7 +34,7 @@ At first, you need to have a Kubernetes cluster, and the `kubectl` command-line To keep things isolated, we are going to use a separate namespace called `demo` throughout this tutorial. -```console +```bash $ kubectl create ns demo namespace/demo created ``` @@ -47,7 +47,7 @@ In order to take backup, we need some sample data. 
Stash has some sample data in Let's create a ConfigMap from these sample data, -```console +```bash $ kubectl create configmap -n demo stash-sample-data \ --from-literal=LICENSE="$(curl -fsSL https://github.com/stashed/stash-data/raw/master/LICENSE)" \ --from-literal=README.md="$(curl -fsSL https://github.com/stashed/stash-data/raw/master/README.md)" @@ -98,14 +98,14 @@ spec: Let's create the deployment we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/platforms/minio/deployment.yaml deployment.apps/stash-demo created ``` Now, wait for deployment's pod to go in `Running` state. -```console +```bash $ kubectl get pod -n demo -l app=stash-demo NAME READY STATUS RESTARTS AGE stash-demo-7ccd56bf5d-n24vl 1/1 Running 0 16s @@ -113,7 +113,7 @@ stash-demo-7ccd56bf5d-n24vl 1/1 Running 0 16s You can check that the `/source/data/` directory of this pod is populated with data from the `stash-sample-data` ConfigMap using this command, -```console +```bash $ kubectl exec -n demo stash-demo-7ccd56bf5d-n24vl -- ls -R /source/data /source/data: LICENSE @@ -135,7 +135,7 @@ At first, we need to create a secret for `Restic` crd. To configure this backend Create the secret as below, -```console +```bash $ echo -n 'changeit' > RESTIC_PASSWORD $ echo -n '' > AWS_ACCESS_KEY_ID $ echo -n '' > AWS_SECRET_ACCESS_KEY @@ -150,7 +150,7 @@ secret/minio-secret created Verify that the secret has been created successfully, -```console +```bash $ kubectl get secret -n demo minio-secret -o yaml ``` @@ -176,7 +176,7 @@ type: Opaque Now, we are going to create `Restic` crd to take backup `/source/data` directory of `stash-demo` deployment. This will create a repository in the Minio bucket specified by `s3.bucket` field and start taking periodic backup of `/source/data` directory. 
-```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/platforms/minio/restic.yaml restic.stash.appscode.com/minio-restic created ``` @@ -214,7 +214,7 @@ spec: If everything goes well, Stash will inject a sidecar container into the `stash-demo` deployment to take periodic backup. Let's check that sidecar has been injected successfully, -```console +```bash $ kubectl get pod -n demo -l app=stash-demo NAME READY STATUS RESTARTS AGE stash-demo-57656f6d74-hmc9z 2/2 Running 0 46s @@ -226,7 +226,7 @@ Look at the pod. It now has 2 containers. If you view the resource definition of Stash will create a `Repository` crd with name `deployment.stash-demo` for the respective repository in Minio backend at first backup schedule. To verify, run the following command, -```console +```bash $ kubectl get repository deployment.stash-demo -n demo NAME BACKUPCOUNT LASTSUCCESSFULBACKUP AGE deployment.stash-demo 1 14s 1m @@ -236,7 +236,7 @@ Here, `BACKUPCOUNT` field indicates number of backup snapshot has taken in this `Restic` will take backup of the volume periodically with a 1-minute interval. You can verify that backup snapshots has been created successfully by, -```console +```bash $ kubectl get snapshots -n demo -l repository=deployment.stash-demo NAME AGE deployment.stash-demo-c588c67c 4m3s @@ -268,7 +268,7 @@ Now, consider that we have lost our workload as well as data volume. We want to At first, let's delete `Restic` crd, `stash-demo` deployment and `stash-sample-data` ConfigMap. -```console +```bash $ kubectl delete deployment -n demo stash-demo deployment.extensions "stash-demo" deleted @@ -287,7 +287,7 @@ In order to perform recovery, we need `Repository` crd `deployment.stah-demo` an We are going to recover backed up data into a PVC. At first, we need to know available [StorageClass](https://kubernetes.io/docs/concepts/storage/storage-classes/) in our cluster. 
-```console +```bash $ kubectl get storageclass NAME PROVISIONER AGE standard (default) k8s.io/minikube-hostpath 8h @@ -295,7 +295,7 @@ standard (default) k8s.io/minikube-hostpath 8h Now, let's create a `PersistentVolumeClaim` where our recovered data will be stored. -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/platforms/minio/pvc.yaml persistentvolumeclaim/stash-recovered created ``` @@ -321,7 +321,7 @@ spec: Check that if cluster has provisioned the requested claim, -```console +```bash $ kubectl get pvc -n demo -l app=stash-demo NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE stash-recovered Bound pvc-3d3b6a58-f886-11e8-9a81-0800272171a4 50Mi RWO standard 13s @@ -333,7 +333,7 @@ Look at the `STATUS` filed. `stash-recovered` PVC is bounded to volume `pvc-3d3b Now, we have to create a `Recovery` crd to recover backed up data into this PVC. -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/platforms/minio/recovery.yaml recovery.stash.appscode.com/minio-recovery created ``` @@ -360,7 +360,7 @@ spec: Wait until `Recovery` job completes its task. To verify that recovery has completed successfully run, -```console +```bash $ kubectl get recovery -n demo minio-recovery NAME REPOSITORYNAMESPACE REPOSITORYNAME SNAPSHOT PHASE AGE minio-recovery demo deployment.stash-demo Succeeded 26s @@ -370,7 +370,7 @@ Here, `PHASE` `Succeeded` indicates that our recovery has been completed success If you are using Kubernetes version older than v1.11.0 then run following command and check `status.phase` field to see whether the recovery succeeded or failed. 
-```console +```bash $ kubectl get recovery -n demo minio-recovery -o yaml ``` @@ -418,7 +418,7 @@ spec: Let's create the deployment, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/platforms/minio/recovered-deployment.yaml deployment.apps/stash-demo created ``` @@ -429,7 +429,7 @@ We have re-deployed `stash-demo` deployment with recovered volume. Now, it is ti Get the pod of new deployment, -```console +```bash $ kubectl get pod -n demo -l app=stash-demo NAME READY STATUS RESTARTS AGE stash-demo-69694789df-wq9vc 1/1 Running 0 14s @@ -437,7 +437,7 @@ stash-demo-69694789df-wq9vc 1/1 Running 0 14s Run following command to view data of `/source/data` directory of this pod, -```console +```bash $ kubectl exec -n demo stash-demo-69694789df-wq9vc -- ls -R /source/data /source/data: LICENSE @@ -450,7 +450,7 @@ So, we can see that the data we had backed up from original deployment are now p To cleanup the resources created by this tutorial, run following commands: -```console +```bash $ kubectl delete recovery -n demo minio-recovery $ kubectl delete secret -n demo minio-secret $ kubectl delete deployment -n demo stash-demo diff --git a/docs/guides/v1alpha1/platforms/rook.md b/docs/guides/v1alpha1/platforms/rook.md index baa5c192..1d7f9bf7 100644 --- a/docs/guides/v1alpha1/platforms/rook.md +++ b/docs/guides/v1alpha1/platforms/rook.md @@ -34,7 +34,7 @@ At first, you need to have a Kubernetes cluster, and the kubectl command-line to To keep things isolated, we are going to use a separate namespace called `demo` throughout this tutorial. -```console +```bash $ kubectl create ns demo namespace/demo created ``` @@ -47,7 +47,7 @@ In order to take backup, we need some sample data. 
Stash has some sample data in Let's create a ConfigMap from these sample data, -```console +```bash $ kubectl create configmap -n demo stash-sample-data \ --from-literal=LICENSE="$(curl -fsSL https://github.com/stashed/stash-data/raw/master/LICENSE)" \ --from-literal=README.md="$(curl -fsSL https://github.com/stashed/stash-data/raw/master/README.md)" @@ -98,14 +98,14 @@ spec: Let's create the deployment we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/platforms/rook/deployment.yaml deployment.apps/stash-demo created ``` Now, wait for deployment's pod to go in `Running` state. -```console +```bash $ kubectl get pod -n demo -l app=stash-demo NAME READY STATUS RESTARTS AGE stash-demo-7ccd56bf5d-fm74f 1/1 Running 0 18s @@ -113,7 +113,7 @@ stash-demo-7ccd56bf5d-fm74f 1/1 Running 0 18s You can check that the `/source/data/` directory of this pod is populated with data from the `stash-sample-data` ConfigMap using this command, -```console +```bash $ kubectl exec -n demo stash-demo-7ccd56bf5d-fm74f -- ls -R /source/data /source/data: LICENSE @@ -134,7 +134,7 @@ At first, we need to create a secret for `Restic` crd. To configure this backend Create the secret as below, -```console +```bash $ echo -n 'changeit' > RESTIC_PASSWORD $ echo -n '' > AWS_ACCESS_KEY_ID $ echo -n '' > AWS_SECRET_ACCESS_KEY @@ -147,7 +147,7 @@ secret/rook-secret created Verify that the secret has been created successfully, -```console +```bash $ kubectl get secret -n demo rook-secret -o yaml ``` @@ -172,7 +172,7 @@ type: Opaque Now, we are going to create `Restic` crd to take backup `/source/data` directory of `stash-demo` deployment. This will create a repository in the Rook bucket specified by `s3.bucket` field and start taking periodic backup of `/source/data` directory. 
-```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/platforms/rook/restic.yaml restic.stash.appscode.com/rook-restic created ``` @@ -210,7 +210,7 @@ spec: If everything goes well, `Stash` will inject a sidecar container into the `stash-demo` deployment to take periodic backup. Let's check that sidecar has been injected successfully, -```console +```bash $ kubectl get pod -n demo -l app=stash-demo NAME READY STATUS RESTARTS AGE stash-demo-6c9cd4cf4c-bn5wm 2/2 Running 0 53s @@ -222,7 +222,7 @@ Look at the pod. It now has 2 containers. If you view the resource definition of Stash will create a `Repository` crd with name `deployment.stash-demo` for the respective repository in Rook backend at first backup schedule. To verify, run the following command, -```console +```bash $ kubectl get repository deployment.stash-demo -n demo NAME BACKUPCOUNT LASTSUCCESSFULBACKUP AGE deployment.stash-demo 1 41s 1m @@ -232,7 +232,7 @@ Here, `BACKUPCOUNT` field indicates number of backup snapshots taken in this `Restic` will take backup of the volume periodically with a 1-minute interval. You can verify that backup snapshots have been created successfully by, -```console +```bash $ kubectl get snapshots -n demo -l repository=deployment.stash-demo NAME AGE NAME AGE @@ -251,7 +251,7 @@ Now, consider that we have lost our workload as well as data volume. We want to At first, let's delete `Restic` crd, `stash-demo` deployment and `stash-sample-data` ConfigMap. -```console +```bash $ kubectl delete deployment -n demo stash-demo deployment.extensions "stash-demo" deleted @@ -270,7 +270,7 @@ In order to perform recovery, we need `Repository` crd `deployment.stash-demo` an We are going to recover our backed up data into a PVC. [Rook Block Storage](https://rook.io/docs/rook/v0.9/ceph-block.html) allows mounting Rook storage into pod using a `PersistentVolumeClaim`.
At first, we need to know respective [StorageClass](https://kubernetes.io/docs/concepts/storage/storage-classes/) for Rook Block Storage. -```console +```bash $ kubectl get storageclass NAME PROVISIONER AGE rook-ceph-block ceph.rook.io/block 96m @@ -281,7 +281,7 @@ Here, `rook-ceph-block` storage class is responsible for provisioning the PVC fr Let's create a `PersistentVolumeClaim` with `rook-ceph-block` storage class where our recovered data will be stored. -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/platforms/rook/rook-pvc.yaml persistentvolumeclaim/stash-recovered created ``` @@ -307,7 +307,7 @@ spec: Check that if cluster has provisioned the requested claim, -```console +```bash $ kubectl get pvc -n demo -l app=stash-demo kubectl get pvc -n demo -l app=stash-demo NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE stash-recovered Bound pvc-dd07c3d4-f904-11e8-9dd4-080027a2915d 50Mi RWO rook-ceph-block 57s @@ -320,7 +320,7 @@ Look at the `STATUS` field. `stash-recovered` PVC is bound to volume `pvc-dd07 Now, we have to create a `Recovery` crd to recover backed up data into this PVC. -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/platforms/rook/recovery.yaml recovery.stash.appscode.com/rook-recovery created ``` @@ -347,7 +347,7 @@ spec: Wait until `Recovery` job completes its task. To verify that recovery has completed successfully run, -```console +```bash $ kubectl get recovery -n demo rook-recovery NAME REPOSITORYNAMESPACE REPOSITORYNAME SNAPSHOT PHASE AGE rook-recovery demo deployment.stash-demo Succeeded 26s @@ -357,7 +357,7 @@ Here, `PHASE` `Succeeded` indicates that our recovery has been completed success If you are using Kubernetes version older than v1.11.0 then run following command and check `status.phase` field to see whether the recovery succeeded or failed.
-```console +```bash $ kubectl get recovery -n demo rook-recovery -o yaml ``` @@ -405,7 +405,7 @@ spec: Let's create the deployment, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/platforms/rook/recovered-deployment.yaml deployment.apps/stash-demo created ``` @@ -416,7 +416,7 @@ We have re-deployed `stash-demo` deployment with recovered volume. Now, it is ti Get the pod of new deployment, -```console +```bash $ kubectl get pod -n demo -l app=stash-demo NAME READY STATUS RESTARTS AGE stash-demo-69694789df-rsrz6 1/1 Running 0 15s @@ -424,7 +424,7 @@ stash-demo-69694789df-rsrz6 1/1 Running 0 15s Run following command to view data of `/source/data` directory of this pod, -```console +```bash $ kubectl exec -n demo stash-demo-69694789df-rsrz6 -- ls -R /source/data source/data: LICENSE @@ -440,7 +440,7 @@ So, we can see that the data we had backed up from original deployment are now p To cleanup the resources created by this tutorial, run following commands: -```console +```bash $ kubectl delete recovery -n demo rook-recovery $ kubectl delete secret -n demo rook-secret $ kubectl delete deployment -n demo stash-demo diff --git a/docs/guides/v1alpha1/restore.md b/docs/guides/v1alpha1/restore.md index c2fc49a9..e61d9bf5 100644 --- a/docs/guides/v1alpha1/restore.md +++ b/docs/guides/v1alpha1/restore.md @@ -35,7 +35,7 @@ To proceed with this tutorial, you have to meet following requirements: To keep things isolated, we are going to use a separate namespace called `demo` throughout this tutorial. Create the namespace if you haven't created yet. -```console +```bash $ kubectl create ns demo namespace/demo created ``` @@ -65,7 +65,7 @@ Now, we are going to recover backed up data from `deployment.stash-demo` Reposit At first, let's delete `Restic` crd so that it does not lock the repository while we are recovering from it.
Also, delete `stash-demo` deployment and `stash-sample-data` ConfigMap if you followed our backup guide. -```console +```bash $ kubectl delete deployment -n demo stash-demo deployment.extensions "stash-demo" deleted @@ -82,7 +82,7 @@ configmap "stash-sample-data" deleted We are going to recover backed up data into a PVC. At first, we need to know available [StorageClass](https://kubernetes.io/docs/concepts/storage/storage-classes/) in our cluster. -```console +```bash $ kubectl get storageclass NAME PROVISIONER AGE standard (default) k8s.io/minikube-hostpath 8h @@ -90,7 +90,7 @@ standard (default) k8s.io/minikube-hostpath 8h Now, let's create a `PersistentVolumeClaim` where our recovered data will be stored. -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/recovery/pvc.yaml persistentvolumeclaim/stash-recovered created ``` @@ -116,7 +116,7 @@ spec: Check whether cluster has provisioned the requested claim. -```console +```bash $ kubectl get pvc -n demo -l app=stash-demo NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE stash-recovered Bound pvc-e6ffface-fa01-11e8-8905-0800277ca39d 50Mi RWO standard 13s @@ -157,14 +157,14 @@ Here, Let's create the Recovery crd we have shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/recovery/recovery.yaml recovery.stash.appscode.com/local-recovery created ``` Wait until `Recovery` job completes its task. 
To verify that recovery has completed successfully run, -```console +```bash $ kubectl get recovery -n demo local-recovery NAME REPOSITORY-NAMESPACE REPOSITORY-NAME SNAPSHOT PHASE AGE local-recovery demo deployment.stash-demo Succeeded 54s @@ -174,7 +174,7 @@ Here, `PHASE` `Succeeded` indicates that our recovery has been completed success If you are using Kubernetes version older than v1.11.0 then run following command and check `status.phase` field to see whether the recovery succeeded or failed. -```console +```bash $ kubectl get recovery -n demo local-recovery -o yaml ``` @@ -222,7 +222,7 @@ spec: Let's create the deployment, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/recovery/recovered-deployment.yaml deployment.apps/stash-demo created ``` @@ -233,7 +233,7 @@ We have re-deployed `stash-demo` deployment with recovered volume. Now, it is ti Get the pod of new deployment, -```console +```bash $ kubectl get pod -n demo -l app=stash-demo NAME READY STATUS RESTARTS AGE stash-demo-69694789df-kvcp5 1/1 Running 0 20s @@ -241,7 +241,7 @@ stash-demo-69694789df-kvcp5 1/1 Running 0 20s Run following command to view data of `/source/data` directory of this pod, -```console +```bash $ kubectl exec -n demo stash-demo-69694789df-kvcp5 -- ls -R /source/data /source/data: LICENSE @@ -256,7 +256,7 @@ With the help of [Snapshot](/docs/concepts/crds/snapshot.md) object, Stash allow First, list the available snapshots, -```console +```bash $ kubectl get snapshots -n demo -l repository=deployment.stash-demo NAME AGE deployment.stash-demo-bd8db133 4m50s @@ -291,7 +291,7 @@ spec: Now, create a `Recovery` crd shown above, -```console +```bash $ kubectl apply -f https://github.com/stashed/docs/raw/{{< param "info.version" >}}/docs/examples/recovery/recovery-specific-snapshot.yaml recovery.stash.appscode.com/local-recovery-specific-snapshot created ``` @@ -300,7 +300,7 @@ 
recovery.stash.appscode.com/local-recovery-specific-snapshot created To cleanup the resources created by this tutorial, run following commands: -```console +```bash $ kubectl delete recovery -n demo local-recovery $ kubectl delete recovery -n demo local-recovery-specific-snapshot $ kubectl delete secret -n demo local-secret diff --git a/docs/setup/developer-guide/overview.md b/docs/setup/developer-guide/overview.md index 8f9d4223..ef4ac723 100644 --- a/docs/setup/developer-guide/overview.md +++ b/docs/setup/developer-guide/overview.md @@ -31,14 +31,14 @@ development environment, please follow [these instructions](https://golang.org/d #### Download Source -```console +```bash $ go get stash.appscode.dev/stash $ cd $(go env GOPATH)/src/stash.appscode.dev/stash ``` #### Install Dev tools To install various dev tools for Stash, run the following command: -```console +```bash $ ./hack/builddeps.sh ``` @@ -49,7 +49,7 @@ $ stash version ``` #### Run Binary Locally -```console +```bash $ stash run \ --secure-port=8443 \ --kubeconfig="$HOME/.kube/config" \ @@ -61,14 +61,14 @@ $ stash run \ #### Dependency management Stash uses [Glide](https://github.com/Masterminds/glide) to manage dependencies. Dependencies are already checked in the `vendor` folder. If you want to update/add dependencies, run: -```console +```bash $ glide slow ``` #### Build Docker images To build and push your custom Docker image, follow the steps below. To release a new version of Stash, please follow the [release guide](/docs/setup/developer-guide/release.md). -```console +```bash # Build Docker image $ ./hack/docker/setup.sh; ./hack/docker/setup.sh push @@ -80,19 +80,19 @@ $ docker push : ``` #### Generate CLI Reference Docs -```console +```bash $ ./hack/gendocs/make.sh ``` ### Testing Stash #### Unit tests -```console +```bash $ ./hack/make.py test unit ``` #### Run e2e tests Stash uses [Ginkgo](http://onsi.github.io/ginkgo/) to run e2e tests. 
-```console +```bash $ ./hack/make.py test e2e ``` diff --git a/docs/setup/developer-guide/release.md b/docs/setup/developer-guide/release.md index 0761ba96..fe54eb3a 100644 --- a/docs/setup/developer-guide/release.md +++ b/docs/setup/developer-guide/release.md @@ -19,7 +19,7 @@ The following steps must be done from a Linux x64 bit machine. - Push changes to the `release-x` branch and apply new tag. - Push all the changes to remote repo. - Build and push stash docker image: -```console +```bash $ cd ~/go/src/stash.appscode.dev/stash ./hack/docker/setup.sh; env APPSCODE_ENV=prod ./hack/docker/setup.sh release ``` diff --git a/docs/setup/install.md b/docs/setup/install.md index 8df41c12..68812f8c 100644 --- a/docs/setup/install.md +++ b/docs/setup/install.md @@ -34,7 +34,7 @@ Stash operator can be installed via a script or as a Helm chart. Stash can be installed via [Helm](https://helm.sh/) using the [chart](https://github.com/stashed/installer/tree/{{< param "info.version" >}}/charts/stash) from [AppsCode Charts Repository](https://github.com/appscode/charts). To install the chart with the release name `stash-operator`: -```console +```bash $ helm repo add appscode https://charts.appscode.com/stable/ $ helm repo update $ helm search repo appscode/stash --version {{< param "info.version" >}} @@ -55,7 +55,7 @@ To see the detailed configuration options, visit [here](https://github.com/stash Stash can be installed via [Helm](https://helm.sh/) using the [chart](https://github.com/stashed/installer/tree/{{< param "info.version" >}}/charts/stash) from [AppsCode Charts Repository](https://github.com/appscode/charts). 
To install the chart with the release name `stash-operator`: -```console +```bash $ helm repo add appscode https://charts.appscode.com/stable/ $ helm repo update $ helm search appscode/stash --version {{< param "info.version" >}} @@ -76,7 +76,7 @@ To see the detailed configuration options, visit [here](https://github.com/stash If you prefer to not use Helm, you can generate YAMLs from Stash chart and deploy using `kubectl`. Here we are going to show the procedure using Helm 3. -```console +```bash $ helm repo add appscode https://charts.appscode.com/stable/ $ helm repo update $ helm search repo appscode/stash --version {{< param "info.version" >}} @@ -98,7 +98,7 @@ To see the detailed configuration options, visit [here](https://github.com/stash If you are installing Stash on a GKE cluster, you will need cluster admin permissions to install Stash operator. Run the following command to grant admin permission to the cluster. -```console +```bash $ kubectl create clusterrolebinding "cluster-admin-$(whoami)" \ --clusterrole=cluster-admin \ --user="$(gcloud config get-value core/account)" @@ -112,7 +112,7 @@ To use network volumes (i.e.
NFS) as a backend, Stash needs an additional deploy You can configure the network volume accessor deployment's cpu, memory, user id, and privileged mode by providing the `netVolAccessor` parameters as below: -```console +```bash helm install stash-operator appscode/stash \ --version {{< param "info.version" >}} \ --namespace kube-system \ @@ -126,7 +126,7 @@ helm install stash-operator appscode/stash \ To check if Stash operator pods have started, run the following command: -```console +```bash $ kubectl get pods --all-namespaces -l app=stash --watch NAMESPACE NAME READY STATUS RESTARTS AGE @@ -137,7 +137,7 @@ Once the operator pods are running, you can cancel the above command by typing ` Now, to confirm CRD groups have been registered by the operator, run the following command: -```console +```bash $ kubectl get crd -l app=stash NAME AGE @@ -163,7 +163,7 @@ These user facing roles support [ClusterRole Aggregation](https://kubernetes.io Stash provides a CLI using kubectl plugin to work with the stash Objects quickly. Download pre-built binaries from [stashed/cli GitHub release]() and put the binary to some directory in your `PATH`. To install linux 64-bit you can run the following commands: -```console +```bash # Linux amd 64-bit wget -O kubectl-stash https://github.com/stashed/cli/releases/download/{{< param "info.cli" >}}/kubectl-stash-linux-amd64 \ && chmod +x kubectl-stash \ @@ -172,7 +172,7 @@ wget -O kubectl-stash https://github.com/stashed/cli/releases/download/{{< param If you prefer to install kubectl Stash cli from source code, you will need to set up a GO development environment following [these instructions](https://golang.org/doc/code.html). Then, install the CLI using `go get` from source code. -```console +```bash go get github.com/stashed/cli/... ``` @@ -182,7 +182,7 @@ go get github.com/stashed/cli/... To detect Stash version, exec into the operator pod and run `stash version` command.
-```console +```bash $ POD_NAMESPACE=kube-system $ POD_NAME=$(kubectl get pods -n $POD_NAMESPACE -l app=stash -o jsonpath={.items[0].metadata.name}) $ kubectl exec -it $POD_NAME -c operator -n $POD_NAMESPACE /stash version diff --git a/docs/setup/uninstall.md b/docs/setup/uninstall.md index 6b0e2901..f2eb8c54 100644 --- a/docs/setup/uninstall.md +++ b/docs/setup/uninstall.md @@ -34,7 +34,7 @@ To uninstall Stash operator, run the following command: In Helm 3, release names are [scoped to a namespace](https://v3.helm.sh/docs/faq/#release-names-are-now-scoped-to-the-namespace). So, provide the namespace you used to install the operator when installing. -```console +```bash $ helm uninstall stash-operator --namespace kube-system ``` @@ -43,7 +43,7 @@ $ helm uninstall stash-operator --namespace kube-system ## Using Helm 2 -```console +```bash $ helm delete stash-operator ``` @@ -54,7 +54,7 @@ $ helm delete stash-operator If you prefer to not use Helm, you can generate YAMLs from Stash chart and uninstall using `kubectl`. -```console +```bash $ helm template stash-operator appscode/stash --namespace kube-system | kubectl delete -f - ``` diff --git a/docs/setup/upgrade.md b/docs/setup/upgrade.md index 7c49c876..313c9d28 100644 --- a/docs/setup/upgrade.md +++ b/docs/setup/upgrade.md @@ -106,7 +106,7 @@ retentionPolicies: Say, you are running `Stash` operator `0.5.1`. -```console +```bash $ kubectl get pods --all-namespaces -l app=stash NAMESPACE NAME READY STATUS RESTARTS AGE kube-system stash-operator-7cdc467c5b-drj2r 2/2 Running 0 2s @@ -369,6 +369,6 @@ backend: You can follow the same steps as the above s3 example. To move old repository to new location using [gsutil](https://cloud.google.com/storage/docs/gsutil/commands/mv#renaming-bucket-subdirectories), run: -```console +```bash $ gsutil mv gs://stash-qa/demo gs://stash-qa/demo/deployment/stash-demo ```