From 6281c9c35fcc55669c3c4b8d90bede86b3396269 Mon Sep 17 00:00:00 2001 From: Kiran Mova Date: Tue, 4 May 2021 17:15:00 +0530 Subject: [PATCH] chore(build): add spellchecker github action (#928) Add a github action that can detect spell errors and report on the PR. For terms that are not part of the standard dictionary, the new words can be added to one of the following files: - hack/cspell-contributors.txt - company and user names - hack/cspell-ignore.txt - random text that is part of code output - hack/cspell-words.txt - technology words Signed-off-by: kmova --- .github/workflows/markdown.yml | 29 ++ NOTICE.md | 4 +- cSpell.json | 45 ++++ docs/architecture.md | 4 +- docs/casengines.md | 4 +- docs/cassandra.md | 2 +- docs/cstor.md | 6 +- docs/faq.md | 14 +- docs/features.md | 10 +- docs/installation.md | 10 +- docs/jivaguide.md | 12 +- docs/kb.md | 12 +- docs/mayactl.md | 6 +- docs/mayastor-concept.md | 10 +- docs/mayastor.md | 2 +- docs/minio.md | 6 +- docs/overview.md | 2 +- docs/postgres.md | 12 +- docs/prerequisites.md | 2 +- docs/releases-0x.md | 2 +- docs/releases-1x.md | 6 +- docs/releases.md | 18 +- docs/t-cstor.md | 4 +- docs/t-install.md | 18 +- docs/t-jiva.md | 2 +- docs/t-localpv.md | 4 +- docs/t-mayastor.md | 12 +- docs/t-ndm.md | 2 +- docs/t-uninstall.md | 2 +- docs/t-volume-provisioning.md | 19 +- docs/troubleshooting.md | 2 +- docs/ugcstor.md | 66 ++--- docs/uglocalpv-device.md | 20 +- docs/uglocalpv-hostpath.md | 14 +- docs/ugndm.md | 2 +- docs/uninstall.md | 2 +- hack/cspell-contributors.txt | 14 + hack/cspell-ignore.txt | 50 ++++ hack/cspell-words.txt | 475 +++++++++++++++++++++++++++++++++ 39 files changed, 769 insertions(+), 157 deletions(-) create mode 100644 .github/workflows/markdown.yml create mode 100644 cSpell.json create mode 100644 hack/cspell-contributors.txt create mode 100644 hack/cspell-ignore.txt create mode 100644 hack/cspell-words.txt diff --git a/.github/workflows/markdown.yml b/.github/workflows/markdown.yml new file mode 100644 index 000000000..e49e1fe25 --- /dev/null +++ b/.github/workflows/markdown.yml @@ -0,0 +1,29 @@ +name: Markdown Linter +on: [push, pull_request] +defaults: + run: + shell: bash + +jobs: +#TODO: Need to push a baseline commit to fix existing issues +# linting: +# name: "Markdown linting" +# runs-on: ubuntu-latest +# steps: +# - uses: actions/checkout@v2 +# name: Check out the code +# - name: Lint Code Base +# uses: docker://avtodev/markdown-lint:v1 +# with: +# args: "**/*.md" + spellchecking: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + name: Check out the code + - uses: actions/setup-node@v1 + name: Run spell check + with: + node-version: "12" + - run: npm install -g cspell + - run: cspell --config ./cSpell.json "**/*.md" diff --git a/NOTICE.md b/NOTICE.md index 45c601d64..9f9decd93 100644 --- a/NOTICE.md +++ b/NOTICE.md @@ -1,5 +1,5 @@ The source code developed for the OpenEBS Project is licensed under Apache 2.0. -However, the OpenEBS project contains unmodified/modified subcomponents from other Open Source Projects with separate copyright notices and license terms. +However, the OpenEBS project contains unmodified/modified sub components from other Open Source Projects with separate copyright notices and license terms. -Your use of the source code for these subcomponents is subject to the terms and conditions as defined by those source projects. +Your use of the source code for these sub components is subject to the terms and conditions as defined by those source projects. 
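The workflow above installs cspell from npm and runs it against the repository's markdown using the committed cSpell.json. A contributor can reproduce the same check locally before pushing; the following is a minimal sketch that reuses the workflow's own commands, and the appended word is a hypothetical placeholder for extending the dictionaries listed in the commit message.

```
# Install the same spell checker the CI job uses and run it with the repo config.
npm install -g cspell
cspell --config ./cSpell.json "**/*.md"

# If a legitimate technology term is flagged, append it to the matching
# dictionary file (the word below is a hypothetical placeholder).
echo "sometechterm" >> hack/cspell-words.txt
```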
diff --git a/cSpell.json b/cSpell.json new file mode 100644 index 000000000..b7abc6e76 --- /dev/null +++ b/cSpell.json @@ -0,0 +1,45 @@ +{ + "version": "0.2", + "language": "en-US", + "flagWords": [ + "hte" + ], + "ignorePaths": [ + "*.lock", + "*.json", + "*.toml", + "*.conf", + "*.py", + "*.txt", + "*.sh", + "website", + "hack", + "Dockerfile" + ], + "ignoreWords": [], + "dictionaryDefinitions": [ + { + "name": "cspell-words", + "path": "./hack/cspell-words.txt" + }, + { + "name": "cspell-contributors", + "path": "./hack/cspell-contributors.txt" + }, + { + "name": "cspell-ignore", + "path": "./hack/cspell-ignore.txt" + } + ], + "dictionaries": [ + "cspell-words", + "cspell-contributors", + "cspell-ignore" + ], + "languageSettings": [ + { + "languageId": "*", + "dictionaries": [] + } + ] +} diff --git a/docs/architecture.md b/docs/architecture.md index 59a01d816..ed3502cb6 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -98,7 +98,7 @@ The OpenEBS data plane is responsible for the actual volume IO path. A storage e ### Jiva -Jiva storage engine is developed with Rancher's LongHorn and gotgt as the base. Jiva engine is written in GO language and runs in the user space. LongHorn controller synchronously replicates the incoming IO to the LongHorn replicas. The replica considers a Linux sparse file as the foundation for building the storage features such as thin provisioning, snapshotting, rebuilding etc. More details on Jiva architecture are written [here](/docs/next/jiva.html). +Jiva storage engine is developed with Rancher's LongHorn and gotgt as the base. Jiva engine is written in GO language and runs in the user space. LongHorn controller synchronously replicates the incoming IO to the LongHorn replicas. The replica considers a Linux sparse file as the foundation for building the storage features such as thin provisioning, snapshots, rebuilding etc. More details on Jiva architecture are written [here](/docs/next/jiva.html). ### cStor @@ -134,7 +134,7 @@ Prometheus is installed as a micro service by the OpenEBS operator during the in ### WeaveScope -WeaveScope is a well-regarded cloud-native visualisation solution in Kubernetes to view metrics, tags and metadata within the context of a process, container, service or host. Node Disk Manager components, volume pods, and other persistent storage structures of Kubernetes have been enabled for WeaveScope integration. With these enhancements, exploration and traversal of these components have become significantly easier. +WeaveScope is a well-regarded cloud-native visualization solution in Kubernetes to view metrics, tags and metadata within the context of a process, container, service or host. Node Disk Manager components, volume pods, and other persistent storage structures of Kubernetes have been enabled for WeaveScope integration. With these enhancements, exploration and traversal of these components have become significantly easier. diff --git a/docs/casengines.md b/docs/casengines.md index d80860b38..51c3b2d2d 100644 --- a/docs/casengines.md +++ b/docs/casengines.md @@ -19,7 +19,7 @@ Operators or administrators typically choose a storage engine with a specific so OpenEBS provides three types of storage engines. -1. **Jiva** - Jiva is the first storage engine that was released in 0.1 version of OpenEBS and is the most simple to use. It is built in GoLang and uses LongHorn and gotgt stacks inside. Jiva runs entirely in user space and provides standard block storage capabilities such as synchronous replication. 
Jiva is suitable for smaller capacity workloads in general and not suitable when extensive snapshotting and cloning features are a major need. Read more details of Jiva [here](/docs/next/jiva.html) +1. **Jiva** - Jiva is the first storage engine that was released in 0.1 version of OpenEBS and is the most simple to use. It is built in GoLang and uses LongHorn and gotgt stacks inside. Jiva runs entirely in user space and provides standard block storage capabilities such as synchronous replication. Jiva is suitable for smaller capacity workloads in general and not suitable when extensive snapshots and cloning features are a major need. Read more details of Jiva [here](/docs/next/jiva.html) 2. **cStor** - cStor is the most recently released storage engine, which became available from 0.7 version of OpenEBS. cStor is very robust, provides data consistency and supports enterprise storage features like snapshots and clones very well. It also comes with a robust storage pool feature for comprehensive storage management both in terms of capacity and performance. Together with NDM (Node Disk Manager), cStor provides complete set of persistent storage features for stateful applications on Kubernetes. Read more details of cStor [here](/docs/next/cstor.html) @@ -226,7 +226,7 @@ A short summary is provided below. ### [Jiva User Guide](/docs/next/jivaguide.html) -### [Local PV Hospath User Guide](/docs/next/uglocalpv-hostpath.html) +### [Local PV Hostpath User Guide](/docs/next/uglocalpv-hostpath.html) ### [Local PV Device User Guide](/docs/next/uglocalpv-device.html) diff --git a/docs/cassandra.md b/docs/cassandra.md index 5a36c93c3..05ba667d8 100644 --- a/docs/cassandra.md +++ b/docs/cassandra.md @@ -8,7 +8,7 @@ sidebar_label: Cassandra OpenEBS and Cassandra -This tutorial provides detailed instructions to run a Kudo operator based Cassandra StatefulsSets with OpenEBS storage and perform some simple database operations to verify the successful deployment and it's performance benchmark. +This tutorial provides detailed instructions to run a Kudo operator based Cassandra StatefulSets with OpenEBS storage and perform some simple database operations to verify the successful deployment and it's performance benchmark. ## Introduction diff --git a/docs/cstor.md b/docs/cstor.md index cf27e1e15..7f3c6fcf0 100644 --- a/docs/cstor.md +++ b/docs/cstor.md @@ -104,7 +104,7 @@ cStor Pool is an important component in the storage management. It is fundamenta **Add a new pool instance** : A new pool instance may need to be added for many different reasons. The steps for expanding a cStor pool to a new node can be found [here](/docs/next/ugcstor.html#expanding-cStor-pool-to-a-new-node). Few example scenarios where need of cStor pool expansion to new nodes are: -- New node is being added to the Kubernetes cluster and the blockedvices in new node needs to be considered for persistent volume storage. +- New node is being added to the Kubernetes cluster and the blockdevices in new node needs to be considered for persistent volume storage. - An existing pool instance is full in capacity and it cannot be expanded as either local disks or network disks are not available. Hence, a new pool instance may be needed for hosting the new volume replicas. - An existing pool instance is fully utilized in performance and it cannot be expanded either because CPU is saturated or more local disks are not available or more network disks or not available. 
A new pool instance may be added and move some of the existing volumes to the new pool instance to free up some disk IOs on this instance. @@ -304,7 +304,7 @@ Following are most commonly observed areas of troubleshooting The cause of high memory consumption of Kubelet is seen on Fedora 29 mainly due to the following. - There are 3 modules are involved - `cstor-isgt`, `kubelet` and `iscsiInitiator(iscsiadm)`. + There are 3 modules are involved - `cstor-istgt`, `kubelet` and `iscsiInitiator(iscsiadm)`. kubelet runs iscsiadm command to do discovery on cstor-istgt. If there is any delay in receiving response of discovery opcode (either due to network or delay in processing on target side), iscsiadm retries few times and gets into infinite loop dumping error messages as below: iscsiadm: Connection to Discovery Address 127.0.0.1 failed @@ -339,7 +339,7 @@ This issue is fixed in 0.8.1 version. | cStor volume features | | | Expanding the size of a cStor volume using CSI provisioner (Alpha) | 1.2.0 | | CSI driver support(Alpha) | 1.1.0 | -| Snapshot and Clone of cStor volume provisoned via CSI provisioner(Alpha) | 1.4.0 | +| Snapshot and Clone of cStor volume provisioned via CSI provisioner(Alpha) | 1.4.0 | | Scaling up of cStor Volume Replica | 1.3.0 | | Scaling down of cStor Volume Replica | 1.4.0 | diff --git a/docs/faq.md b/docs/faq.md index bf6dbf511..c0c0e87b6 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -24,7 +24,7 @@ sidebar_label: FAQs [What is the default OpenEBS Reclaim policy?](#default-reclaim-policy) -[Why NDM daemon set required privileged mode?](#why-ndm-priviledged) +[Why NDM daemon set required privileged mode?](#why-ndm-privileged) [Is OpenShift supported?](#openebs-in-openshift) @@ -36,7 +36,7 @@ sidebar_label: FAQs [How backup and restore is working with OpenEBS volumes?](#backup-restore-openebs-volumes) -[Why customized parameters set on default OpenEBS StorageClasses are not getting persisted?](#customized-values-not-peristed-after-reboot) +[Why customized parameters set on default OpenEBS StorageClasses are not getting persisted?](#customized-values-not-persisted-after-reboot) [Why NDM listens on host network?](#why-ndm-listens-on-host-network) @@ -76,7 +76,7 @@ sidebar_label: FAQs [How OpenEBS detects disks for creating cStor Pool?](#how-openebs-detects-disks-for-creating-cstor-pool) -[Can I provision OpenEBS volume if the request in PVC is more than the available physical capacity of the pools in the Storage Nodes?](#provision-pvc-higher-than-physical-sapce) +[Can I provision OpenEBS volume if the request in PVC is more than the available physical capacity of the pools in the Storage Nodes?](#provision-pvc-higher-than-physical-space) [What is the difference between cStor Pool creation using manual method and auto method?](#what-is-the-difference-between-cstor-pool-creation-using-manual-method-and-auto-method) @@ -135,7 +135,7 @@ To determine exactly where your data is physically stored, you can run the follo The output displays the following pods. ``` - IO Controller: pvc-ee171da3-07d5-11e8-a5be-42010a8001be-ctrl-6798475d8c-7dcqd + IO Controller: pvc-ee171da3-07d5-11e8-a5be-42010a8001be-ctrl-6798475d8c-7node Replica 1: pvc-ee171da3-07d5-11e8-a5be-42010a8001be-rep-86f8b8c758-hls6s Replica 2: pvc-ee171da3-07d5-11e8-a5be-42010a8001be-rep-86f8b8c758-tr28f ``` @@ -191,7 +191,7 @@ For jiva, from 0.8.0 version, the data is deleted via scrub jobs. The completed -

Why NDM Daemon set required privileged mode?
+Why NDM Daemon set required privileged mode?
Currently, NDM Daemon set runs in the privileged mode. NDM requires privileged mode because it requires access to `/dev` and `/sys` directories for monitoring the devices attached and also to fetch the details of the attached device using various probes. @@ -314,7 +314,7 @@ OpenEBS cStor volume is working based on cStor/ZFS snapshot using Velero. For Op -

Why customized parameters set on default OpenEBS StorageClasses are not getting persisted?
+Why customized parameters set on default OpenEBS StorageClasses are not getting persisted?
The customized parameters set on default OpenEBS StorageClasses will not persist after restarting `maya-apiserver` pod or restarting the node where `maya-apiserver` pod is running. StorageClasses created by maya-apiserver are owned by it and it tries to overwrite them upon its creation. @@ -552,7 +552,7 @@ It is also possible to customize by adding more disk types associated with your -

Can I provision OpenEBS volume if the request in PVC is more than the available physical capacity of the pools in the Storage Nodes?
+Can I provision OpenEBS volume if the request in PVC is more than the available physical capacity of the pools in the Storage Nodes?
As of 0.8.0, the user is allowed to create PVCs that cross the available capacity of the pools in the Nodes. In the future release, it will validate with an option `overProvisioning=false`, the PVC request should be denied if there is not enough available capacity to provision the volume. diff --git a/docs/features.md b/docs/features.md index 897429445..c61ffddd6 100644 --- a/docs/features.md +++ b/docs/features.md @@ -11,7 +11,7 @@ sidebar_label: Features and Benefits | [Containerized storage for containers](#containerized-storage-for-containers) | [Granular policies per stateful workload](#granular-policies-per-stateful-workload) | | [Synchronous replication](#synchronous-replication) | [Avoid Cloud Lock-in](#avoid-cloud-lock-in) | | [Snapshots and clones](#snapshots-and-clones) | [Reduced storage TCO up to 50%](#reduced-storage-tco-up-to-50) | -| [Backup and restore](#backup-and-restore) | [Native HCI on Kubernetes](#natively-hyperconvergenced-on-kubernetes) | +| [Backup and restore](#backup-and-restore) | [Native HCI on Kubernetes](#natively-hyperconverged-on-kubernetes) | | [Prometheus metrics and Grafana dashboard](#prometheus-metrics-for-workload-tuning) | [High availability - No Blast Radius](#high-availability) | @@ -59,7 +59,7 @@ Copy-on-write snapshots are another optional and popular feature of OpenEBS. Whe Backup and Restore Icon -The backup and restore of OpenEBS volumes works with Kubernetes backup and restore solutions such as Velero (formerly Heptio Ark) via open source OpenEBS Velero-plugins. Data backup to object storage targets such as AWS S3, GCP Object Storage or MinIO are frequently deployed using the OpenEBS incremental snapshot capability. This storage level snapshotting and backup saves a significant amount of bandwidth and storage space as only incremental data is used for backup. +The backup and restore of OpenEBS volumes works with Kubernetes backup and restore solutions such as Velero (formerly Heptio Ark) via open source OpenEBS Velero-plugins. Data backup to object storage targets such as AWS S3, GCP Object Storage or MinIO are frequently deployed using the OpenEBS incremental snapshot capability. This storage level snapshot and backup saves a significant amount of bandwidth and storage space as only incremental data is used for backup.
@@ -98,9 +98,9 @@ OpenEBS is cloud native storage for stateful applications on Kubernetes where "c ### Avoid Cloud Lock-in -Avoid Cloud Lockin Icon +Avoid Cloud Lock-in Icon -Even though Kubernetes provides an increasingly ubiquitous control plane, concerns about data gravity resulting in lock-in and otherwise inhibiting the benefits of Kubernetes remain. With OpenEBS, the data can be written to the OpenEBS layer - if cStor, Jiva or Mayastor are used - and if so OpenEBS acts as a data abstraction layer. Using this data abstraction layer, data can be much more easily moved amongst Kubernetes enviroments, whether they are on premise and attached to traditional storage systems or in the cloud and attached to local storage or managed storage services. +Even though Kubernetes provides an increasingly ubiquitous control plane, concerns about data gravity resulting in lock-in and otherwise inhibiting the benefits of Kubernetes remain. With OpenEBS, the data can be written to the OpenEBS layer - if cStor, Jiva or Mayastor are used - and if so OpenEBS acts as a data abstraction layer. Using this data abstraction layer, data can be much more easily moved amongst Kubernetes environments, whether they are on premise and attached to traditional storage systems or in the cloud and attached to local storage or managed storage services.
@@ -131,7 +131,7 @@ On most clouds, block storage is charged based on how much is purchased and not
-### Natively Hyperconvergenced on Kubernetes +### Natively Hyperconverged on Kubernetes Natively HCI on K8s Icon diff --git a/docs/installation.md b/docs/installation.md index f3b337bb4..017fc1878 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -89,7 +89,7 @@ Installed helm version can be obtained by using the following command: ``` helm version ``` -Example ouptut: +Example output:
Client: &version.Version{SemVer:"v2.16.8", GitCommit:"145206680c1d5c28e3fcf30d6f596f0ba84fcb47", GitTreeState:"clean"} @@ -112,13 +112,13 @@ See [helm docs](https://helm.sh/docs/intro/install/#from-script) for setting up ``` helm version ``` -Example ouptut: +Example output:
version.BuildInfo{Version:"v3.0.2", GitCommit:"19e47ee3283ae98139d98460de796c1be1e3975f", GitTreeState:"clean", GoVersion:"go1.13.5"}
-OpenEBS instalaltion with helm v3 can be done by 2 ways: +OpenEBS installation with helm v3 can be done by 2 ways: **Option 1:** Helm v3 takes the current namespace from the local kube config and use that namespace the next time the user executes helm commands. If it is not present, the default namespace is used. Assign the `openebs` namespace to the current context and run the following commands to install openebs in `openebs` namespace. @@ -162,7 +162,7 @@ To view the chart ``` helm ls -n openebs ``` -The above commans will install OpenEBS in `openebs` namespace and chart name as `openebs` +The above commands will install OpenEBS in `openebs` namespace and chart name as `openebs` **Note:** @@ -676,7 +676,7 @@ metadata: namespace: openebs data: # udev-probe is default or primary probe which should be enabled to run ndm - # filterconfigs contails configs of filters - in their form fo include + # filterconfigs contains configs of filters - in their form fo include # and exclude comma separated strings node-disk-manager.config: | probeconfigs: diff --git a/docs/jivaguide.md b/docs/jivaguide.md index c0a111198..9b5e46c3a 100644 --- a/docs/jivaguide.md +++ b/docs/jivaguide.md @@ -259,7 +259,7 @@ The application pods should be running as displayed below ``` NAME READY STATUS RESTARTS AGE -busybox-66db7d9b88-kkktl 1/1 Running 0 2m16s +busybox-66db7d9b88-pod01 1/1 Running 0 2m16s ``` @@ -297,7 +297,7 @@ Grafana charts can be built for the above Prometheus metrics. OpenEBS volume can be backed up and restore along with application using velero plugin. It helps the user for taking backup of OpenEBS volumes to a third party storage location and then restoration of the data whenever it needed. The steps for taking backup and restore are following. -

Prerequisites
+Prerequisites
- Mount propagation feature has to be enabled on Kubernetes, otherwise the data written from the pods will not visible in the restic daemonset pod on the same node. It is enabled by default on Kubernetes version 1.12. More details can be get from [here](https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/). - Latest tested Velero version is 1.4.0. @@ -336,11 +336,11 @@ The following is an example output in a 3 Node cluster. NAME READY STATUS RESTARTS AGE restic-8hxx8 1/1 Running 0 9s restic-nd9d9 1/1 Running 0 9s -restic-zfggm 1/1 Running 0 9s +restic-nd8d8 1/1 Running 0 9s velero-db6459bb-n2rff 1/1 Running 0 9s ``` -

Annotate Application Pod
+Annotate Application Pod
Run the following to annotate each application pod that contains a volume to back up. @@ -475,7 +475,7 @@ The process of creating a Jiva pool include the following steps.

Prepare disks and mount them

-If it is a cloud disk provision and mount on the node. If three replicas of Jiva volume are needed, provision three cloud disks and mount them on each node. The mount path needs to be same on all three nodes. The following is the steps for creating a GPD disk on Google cloud and mounthing to the node. +If it is a cloud disk provision and mount on the node. If three replicas of Jiva volume are needed, provision three cloud disks and mount them on each node. The mount path needs to be same on all three nodes. The following is the steps for creating a GPD disk on Google cloud and mounting to the node. - Create a GPD @@ -752,7 +752,7 @@ provisioner: openebs.io/provisioner-iscsi

Target NodeSelector Policy

-You can specify the *TargetNodeSelector* where Target pod has to be scheduled using the *value* for *TargetNodeSelector*. In following example, `node: apnode`is the node label. +You can specify the *TargetNodeSelector* where Target pod has to be scheduled using the *value* for *TargetNodeSelector*. In following example, `node: appnode` is the node label. ``` apiVersion: storage.k8s.io/v1 diff --git a/docs/kb.md b/docs/kb.md index 06061985d..7742de119 100644 --- a/docs/kb.md +++ b/docs/kb.md @@ -8,7 +8,7 @@ sidebar_label: Knowledge Base Summary -[How do I reuse an existing PV - after re-creating Kubernetes StatefulSet and its PVC](#resuse-pv-after-recreating-sts) +[How do I reuse an existing PV - after re-creating Kubernetes StatefulSet and its PVC](#reuse-pv-after-recreating-sts) [How to scale up Jiva replica?](#how-to-scale-up-jiva-replica) @@ -34,7 +34,7 @@ sidebar_label: Knowledge Base
-

How do I reuse an existing PV - after re-creating Kubernetes StatefulSet and its PVC
+How do I reuse an existing PV - after re-creating Kubernetes StatefulSet and its PVC
There are some cases where it had to delete the StatefulSet and re-install a new StatefulSet. In the process you may have to delete the PVCs used by the StatefulSet and retain PV policy by ensuring the Retain as the "Reclaim Policy". In this case, following are the procedures for re-using an existing PV in your StatefulSet application. @@ -358,7 +358,7 @@ oc adm policy add-scc-to-user privileged -z default -n myproject **Note:** OpenShift automatically creates a project for every namespace, and a `default` ServiceAccount for every project. -Once these permissions have been granted, you can provision persistant volumes using OpenEBS. See [CAS Engines](casengines.md) for more details. +Once these permissions have been granted, you can provision persistent volumes using OpenEBS. See [CAS Engines](casengines.md) for more details. Go to top @@ -1368,7 +1368,7 @@ Status of each cStor pool can be found under `STATUS` column. The following are **Error:** This means cstor-pool container in cStor pool pod is not in running state. -**DetetionFailed:** There could be an internal error occurred when CSP is deleted. +**DeletionFailed:** There could be an internal error occurred when CSP is deleted. **Note:** Status of CSPs are updated only if its corresponding cStor pool pod is Running. If the cStor pool pod of corresponding cStor pool is not running, then the status of cStor pool shown in the above output may be stale. @@ -1412,7 +1412,7 @@ The following are the different type of STATUS information of cStor volumes and For getting the number of replicas connected to the target pod of the cStor volume, use following command: ``` -kubectl get cstorvolume -n -oyaml. +kubectl get cstorvolume -n -o yaml. ``` Example output: @@ -1852,7 +1852,7 @@ mount /dev/sdc /var/lib/kubelet/plugins/kubernetes.io/iscsi/iface-default/10.20. mount /dev/sdc /var/lib/kubelet/pods/25abb7fa-eb2d-11e9-b8d1-42010a800093/volumes/kubernetes.io~iscsi/pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093 ``` -**Step 16:** Execute the below command in Kuberenetes master node. Restart the application pod using the following command: +**Step 16:** Execute the below command in Kubernetes master node. Restart the application pod using the following command: ``` kubectl delete pod diff --git a/docs/mayactl.md b/docs/mayactl.md index 8c2037e55..66600ceda 100644 --- a/docs/mayactl.md +++ b/docs/mayactl.md @@ -48,7 +48,7 @@ For getting access to `mayactl` command line tool, you have to login or execute Following is an example output. -
maya-apiserver-7f5689b96b-tfssh 1/1 Running 0 10d
+maya-apiserver-7f5689b96b-p1p2p 1/1 Running 0 10d
2. It is possible that there are multiple instances of maya-apiserver pods for scaling purposes. You can run mayactl in any one of them. Shell into one of the pods using ` kubectl exec` command . You can do as following way. @@ -110,7 +110,7 @@ Usage: Available Commands: describe Displays Openebs Volume information list Displays status information about Volume(s) - stats Displays the runtime statisics of Volume + stats Displays the runtime statistics of Volume
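The mayactl.md hunks above describe shelling into a maya-apiserver pod with `kubectl exec` to run the volume subcommands (describe, list, stats). A minimal sketch follows, assuming the `openebs` namespace, a `name=maya-apiserver` label on the deployment, and the example pod name shown earlier; substitute your own pod and volume names.

```
# Find a running maya-apiserver pod (label assumed from the default operator YAML).
kubectl get pods -n openebs -l name=maya-apiserver

# Run mayactl inside it; the pod name is the example one from the output above.
kubectl exec -it maya-apiserver-7f5689b96b-p1p2p -n openebs -- mayactl volume list
kubectl exec -it maya-apiserver-7f5689b96b-p1p2p -n openebs -- mayactl volume describe --volname <pv-name>
```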
@@ -150,7 +150,7 @@ Replica Details : ----------------- NAME STATUS POOL NAME NODE ---- ------ --------- ----- -pvc-dc3cb979-51ec-11e9-803f-42010a800179-cstor-sparse-pool-ejs2 Running cstor-sparse-pool-ejs2 gke-ranjith-082-default-pool-2cd2b6cb-dphl +pvc-dc3cb979-51ec-11e9-803f-42010a800179-cstor-sparse-pool-ejs2 Running cstor-sparse-pool-ejs2 gke-ranjith-082-default-pool-2cd2b6cb-d456 pvc-dc3cb979-51ec-11e9-803f-42010a800179-cstor-sparse-pool-gf1d Running cstor-sparse-pool-gf1d gke-ranjith-082-default-pool-2cd2b6cb-l4ck pvc-dc3cb979-51ec-11e9-803f-42010a800179-cstor-sparse-pool-m8cy Running cstor-sparse-pool-m8cy gke-ranjith-082-default-pool-2cd2b6cb-x571 diff --git a/docs/mayastor-concept.md b/docs/mayastor-concept.md index 4edb5e85c..8eebbcc74 100644 --- a/docs/mayastor-concept.md +++ b/docs/mayastor-concept.md @@ -86,7 +86,7 @@ The scheduling of Mayastor pods is determined declaratively by using a DaemonSet #### Mayastor-CSI -The mayastor-csi pods within a cluster implement the node plugin component of Mayastor's CSI driver. As such, their function is to orchestrate the mounting of Maystor provisioned volumes on worker nodes on which application pods consuming those volumes are scheduled. By default a mayastor-csi pod is scheduled on every node in the target cluster, as determined by a DaemonSet resource of the same name. These pods each encapsulate two containers, `mayastor-csi` and `csi-driver-registrar` +The mayastor-csi pods within a cluster implement the node plugin component of Mayastor's CSI driver. As such, their function is to orchestrate the mounting of Mayastor provisioned volumes on worker nodes on which application pods consuming those volumes are scheduled. By default a mayastor-csi pod is scheduled on every node in the target cluster, as determined by a DaemonSet resource of the same name. These pods each encapsulate two containers, `mayastor-csi` and `csi-driver-registrar` It is not necessary for the node plugin to run on every worker node within a cluster and this behaviour can be modified if so desired through the application of appropriate node labeling and the addition of a corresponding `nodeSelector` entry within the pod spec of the mayastor-csi DaemonSet. It should be noted that if a node does not host a plugin pod, then it will not be possible to schedule pod on it which is configured to mount Mayastor volumes. @@ -117,7 +117,7 @@ Mayastor is currently considered to be beta software. > "(it) will generally have many more bugs in it than completed software and speed or performance issues, and may still cause crashes or data loss." -The project's maintainers operate a live issue tracking dashboard for defects which they have under active triage and investigation. It can be accessed [here](https://mayadata.atlassian.net/secure/Dashboard.jspa?selectPageId=10015). You are strongly encouraged to familiarise yourself with the issues identified there before using Mayastor and when raising issue reports in order to limit to the extent possible redundant issue reporting. +The project's maintainers operate a live issue tracking dashboard for defects which they have under active triage and investigation. It can be accessed [here](https://mayadata.atlassian.net/secure/Dashboard.jspa?selectPageId=10015). You are strongly encouraged to familiarize yourself with the issues identified there before using Mayastor and when raising issue reports in order to limit to the extent possible redundant issue reporting. ### How is Mayastor Tested? 
@@ -134,7 +134,7 @@ The Mayastor process has been sent the SIGILL signal as the result of attempting #### Deploying Mayastor on RKE & Fedora CoreOS -In addition to ensuring that the general prerequisites for installation are met, it is necessary to add the following directory mapping to the `services_kublet->extra_binds` section of the ckuster's`cluster.yml file.` +In addition to ensuring that the general prerequisites for installation are met, it is necessary to add the following directory mapping to the `services_kubelet->extra_binds` section of the cluster's`cluster.yml file.` ```text /opt/rke/var/lib/kubelet/plugins:/var/lib/kubelet/plugins @@ -163,8 +163,8 @@ Mayastor has no snapshot or cloning capabilities. ### Volumes are "Highly Durable" but without multipathing are not "Highly Available" -Mayastor Volumes can be configured (or subsequently re-configured) to be composed of 2 or more "children" or "replicas"; causing synchronously mirrored copies of the volumes's data to be maintained on more than one worker node and Disk Pool. This contributes additional "durability" at the persistence layer, ensuring that viable copyies of a volume's data remain even if a Disk Pool device is lost. +Mayastor Volumes can be configured (or subsequently re-configured) to be composed of 2 or more "children" or "replicas"; causing synchronously mirrored copies of the volume's data to be maintained on more than one worker node and Disk Pool. This contributes additional "durability" at the persistence layer, ensuring that viable copies of a volume's data remain even if a Disk Pool device is lost. -However a Mayastor volume is currently accessible to an application only via a single target instance (NVMe-oF, or iSCSI) of a single Mayastor pod. If that pod terminates (through the loss of the worker node on which it's scheduled, excution failure, pod eviction etc.) then there will be no viable I/O path to any remaining healthy replicas and access to data on the volume cannot be maintained. +However a Mayastor volume is currently accessible to an application only via a single target instance (NVMe-oF, or iSCSI) of a single Mayastor pod. If that pod terminates (through the loss of the worker node on which it's scheduled, execution failure, pod eviction etc.) then there will be no viable I/O path to any remaining healthy replicas and access to data on the volume cannot be maintained. There has been initial discovery work completed in supporting and testing the use of multipath connectivity to Mayastor pods. The work of developing and supporting production usage of multipath connectivity is currently scheduled to complete after general availability. diff --git a/docs/mayastor.md b/docs/mayastor.md index 57dc58d49..39b2edbc2 100644 --- a/docs/mayastor.md +++ b/docs/mayastor.md @@ -225,7 +225,7 @@ mayastor 3 3 3 3 3 kubernetes.io/ar For each resulting Mayastor pod instance, a Mayastor Node \(MSN\) custom resource definition should be created. 
List these definitions and verify that the count meets the expected number and that all nodes are reporting their State as `online` -To obatin the list of MSN, execute: +To obtain the list of MSN, execute: ``` kubectl -n mayastor get msn ``` diff --git a/docs/minio.md b/docs/minio.md index 26d3b8f41..de6778501 100644 --- a/docs/minio.md +++ b/docs/minio.md @@ -146,7 +146,7 @@ $ kubectl get pod NAME READY STATUS RESTARTS AGE minio-operator-59b8965ff5-tzx8n 1/1 Running 0 6m46s -tenant1-console-6589f7574d-6kgnp 1/1 Running 0 19s +tenant1-console-6589f7574d-6node 1/1 Running 0 19s tenant1-console-6589f7574d-wt47v 1/1 Running 0 19s tenant1-zone-0-0 1/1 Running 0 51s tenant1-zone-0-1 1/1 Running 0 51s @@ -230,7 +230,7 @@ http://3.6.91.169:30383 You should enter the Access key and Secret key to login into the admin console. These credentials can be obtained from the secret. ``` -$ kubectl get secret tenant1-console-secret -oyaml +$ kubectl get secret tenant1-console-secret -o yaml ``` The following is a sample snippet of the output of the above command. It will show the Access key and Secret key in encoded form. The decoded value should be given in the web browser to login to the user console. ``` @@ -281,7 +281,7 @@ http://3.6.91.169:32095 You should enter the `Access key` and `Secret key` to login into the user console. These credentials can be obtained from the secret. ``` -$ kubectl get secret tenant1-creds-secret -oyaml +$ kubectl get secret tenant1-creds-secret -o yaml ``` The following is a sample snippet of the output of the above command. It will show the Access key and Secret key in encoded form. The decoded value should be given in the web browser to login to the user console. ``` diff --git a/docs/overview.md b/docs/overview.md index b9c8c3a3d..9e694c418 100644 --- a/docs/overview.md +++ b/docs/overview.md @@ -129,7 +129,7 @@ Installing OpenEBS in your cluster is as simple as a few `kubectl` or `helm` com
- OpenEBS Archipdate ntecture
+ OpenEBS Architecture
diff --git a/docs/postgres.md b/docs/postgres.md index 48a49dcb2..bd25a300c 100644 --- a/docs/postgres.md +++ b/docs/postgres.md @@ -20,7 +20,7 @@ This guide explains the basic installation for StackGres PostgreSQL on OpenEBS L ## Deployment model -OpenEBS and StakcGres PostgreSQL localpv device +OpenEBS and StackGres PostgreSQL localpv device We will use GKE, where we will install Stackgres PostgreSQL with OpenEBS storage engine. The Local PV volume will be provisioned on a node where Stackgres PostgreSQL pod is getting scheduled and uses one of the matching unclaimed block devices, which will then use the entire block device for storing data. No other application can use this device. If users have limited blockdevices attached to some nodes, they can use `nodeSelector` in the application YAML to provision applications on particular nodes where the available block device is present. The recommended configuration is to have at least three nodes and one unclaimed external disk to be attached per node. @@ -71,9 +71,9 @@ Once it’s ready, you will see that the two pods are `Running` and the other p $ kubectl get pod -n stackgres NAME READY STATUS RESTARTS AGE -stackgres-operator-78d57d4f55-vtlhj 1/1 Running 0 3m29s +stackgres-operator-78d57d4f55-node1 1/1 Running 0 3m29s stackgres-operator-bootstrap-9p9zs 0/1 Completed 0 3m58s -stackgres-operator-conversion-webhooks-jdhxx 0/1 Completed 0 4m +stackgres-operator-conversion-webhooks-node2 0/1 Completed 0 4m stackgres-operator-crd-upgrade-5fx7c 0/1 Completed 0 3m59s stackgres-operator-create-certificate-r75cn 0/1 Completed 0 3m58s stackgres-operator-upgrade-wn79r 0/1 Completed 0 3m57s @@ -188,9 +188,9 @@ Since we have mentioned 2 replicas and capacity with 90G in Postgres cluster spe $ kubectl get bd -n openebs NAME NODENAME SIZE CLAIMSTATE STATUS AGE -blockdevice-1fcc50ef4b3550ada3f82fe90102daca gke-ranjith-doc-default-pool-41db3a16-t4d0 107373116928 Claimed Active 17m -blockdevice-58c88ac19e09084c6f71178130c20ba8 gke-ranjith-doc-default-pool-41db3a16-rqbt 107373116928 Unclaimed Active 19m -blockdevice-8fd1127f57cf19b01e4da75110ae488a gke-ranjith-doc-default-pool-41db3a16-81tl 107373116928 Claimed Active 19m +blockdevice-1fcc50ef4b3550ada3f82fe90102daca gke-user-doc-default-pool-41db3a16-t4d0 107373116928 Claimed Active 17m +blockdevice-58c88ac19e09084c6f71178130c20ba8 gke-user-doc-default-pool-41db3a16-1122 107373116928 Unclaimed Active 19m +blockdevice-8fd1127f57cf19b01e4da75110ae488a gke-user-doc-default-pool-41db3a16-81tl 107373116928 Claimed Active 19m ``` Verify the master and slave configuration. diff --git a/docs/prerequisites.md b/docs/prerequisites.md index 2715e0b77..f53256131 100644 --- a/docs/prerequisites.md +++ b/docs/prerequisites.md @@ -330,7 +330,7 @@ sudo docker ps | grep "hyperkube kubelet" Following is the example output: ``` -3aab0f9a48e2 k8s-gcrio.azureedge.net/hyperkube-amd64:v1.8.7 "/hyperkube kubele..." 48 minutes ago Up 48 minutes eager_einstein +3aab0f9a48e2 k8s-gcrio.azureedge.net/hyperkube-amd64:v1.8.7 "/hyperkube kubelet..." 48 minutes ago Up 48 minutes eager_einstein ``` Once kubelet container ID is obtained, you need to get to the shell diff --git a/docs/releases-0x.md b/docs/releases-0x.md index b907b596d..65b5ce1aa 100644 --- a/docs/releases-0x.md +++ b/docs/releases-0x.md @@ -48,7 +48,7 @@ This release has been deprecated. Please upgrade to the latest release. See [upg **Change summary:** - Enhanced the metrics exported by cStor Pools to include details of the provisioning errors. 
- Fixed an issue causing cStor Volume Replica CRs to be stuck, when the OpenEBS namespace was being deleted. -- Fixed an issue where a newly added cStor Volume Replica may not be successfully registered with the cStor target, if the cStor tries to connect to Replica before the replica is completely initialised. +- Fixed an issue where a newly added cStor Volume Replica may not be successfully registered with the cStor target, if the cStor tries to connect to Replica before the replica is completely initialized. - Fixed an issue with Jiva Volumes where target can mark the Replica as Timed out on IO, even when the Replica might actually be processing the Sync IO. - Fixed an issue with Jiva Volumes that would not allow for Replicas to re-connect with the Target, if the initial Registration failed to successfully process the hand-shake request. - Fixed an issue with Jiva Volumes that would cause Target to restart when a send diagnostic command was received from the client diff --git a/docs/releases-1x.md b/docs/releases-1x.md index dc5b2d285..7a5177898 100644 --- a/docs/releases-1x.md +++ b/docs/releases-1x.md @@ -45,7 +45,7 @@ sidebar_label: Releases 1.x - Enhanced cStor volume replica status with additional 2 phases based on different scenarios. The additional phases are `NewReplicaDegraded ` and `ReconstructingNewReplica `. - Enhanced `maya-exporter` by adding pool last sync time metric as `openebs_zpool_last_sync_time ` . This also modifies value of `openebs.io:livenesstimestamp` in cStor pool YAML to set date in epoch timestamp. - Enhanced admission webhook server by adding missing labels in config,secret and service and will fatal out when a missing ENV's error and configs happen. -- Fixes a bug in Jiva where Jiva replica pods are stuck in `crashedloopbackoff` state after a restart. +- Fixes a bug in Jiva where Jiva replica pods are stuck in `crashloopbackoff` state after a restart. - Fixes a bug in cStor target while rebuilding process in a single replica quorum case. - Fixes a bug in NDM for device detection in KVM-based virtual machines.. A new disk model `QEMU_HARDDISK` is added to the list of disk models. - Fixes a bug in NDM, where the os-disk filter was not able to exclude the blockdevices if the OS was installed on an NVMe device. @@ -57,7 +57,7 @@ sidebar_label: Releases 1.x ## 1.3.0 - Oct 15 2019 **Change summary:** -- Add support to scaleup replicas, replica movement across pools and replica replacement scenarios. This feature is in alpha state. This feature will work for cStor volumes which are created with existing SPC configuration. +- Add support to scale up replicas, replica movement across pools and replica replacement scenarios. This feature is in alpha state. This feature will work for cStor volumes which are created with existing SPC configuration. - Availability of NDM on different platforms like amd64 and arm64. NDM can now also be compiled in ARM architecture using manual steps. - Added support for provisioning CSI based volume using lease leader election API. - Support of running OpenEBS in Kubernetes 1.16 version. The k8s v1.16 release will stop serving the deprecated API versions in favour of newer and more stable API versions. @@ -90,7 +90,7 @@ sidebar_label: Releases 1.x - Fixes a bug in NDM where all devices on a node were getting excluded when os-disk-exclude-filter is failed to find the device where OS is installed. - Fixes a bug in snapshot controller where snapshot operation is not throwing any error for invalid `cas-type`. 
This fix will add `cas-type` validation before triggering the snapshot operations. The valid `cas-type` are cStor and Jiva. - Fixes the bug where more than required BlockDevicesClaims are created for requested SPC in auto pool method. -- Fixes an issue in maya-api installer to skip re-apply of default SPC and SC resources if they were installed previously by older version(s) of maya or prior to mayaapi-server restart(s) +- Fixes an issue in maya-api installer to skip re-apply of default SPC and SC resources if they were installed previously by older version(s) of maya or prior to maya-api-server restart(s) - Fixes a bug in cStor pool when cStor Storage Pool management creates pool if pool import failed when a disk is not accessible momentarily just at the time of import. cStor storage pool will be in the pending state when this scenario occurs. This PR will fix cStor pool creation by looking on `Status.Phase` as `Init` or `PoolCreationFailed` to create the pool. If `Status.Phase` is any other string, cStor Storage Pool management will try to import the pool. This can cause impact to the current workflow of Ephemeral disks, which works as of now, as NDM can't detect it as different disk and recognizes as the previous disk. - Fixes a bug during a cleanup operation performed on BlockDevice and clean up job is not getting canceled when the state of BlockDevice is changed from `Active` to other states. - Fixes a bug in NDM where cleanup jobs remain in pending state in Openshift cluster. The fix will add service account to cleanup jobs, so that clean-up job pods acquire privileged access to perform the action. diff --git a/docs/releases.md b/docs/releases.md index a56aa4ced..6adcf0c39 100644 --- a/docs/releases.md +++ b/docs/releases.md @@ -41,7 +41,7 @@ Here are some of the key highlights in this release. - Fixed an issue with cStor that was causing a crash in arm64 due to invalid uzfs zc_nvlist_dst_size handling. - Fixed an issue where NDM would not automatically de-activate a removed disk when GPTBasedUUID was enabled. - Fixes an issue where Rawfile Local PV volume deletion would not delete the provisioned PV folders. -- Fixes an issue which caused resize on btrs filesystems on top of Rawfile Local PV was failing. +- Fixes an issue which caused resize on btrfs filesystems on top of Rawfile Local PV was failing. - Several fixes to docs were also included in this release. @@ -210,7 +210,7 @@ Here are some of the key highlights in this release. ### Key Improvements -- Added support for specifying a custom node affinity label for OpenEBS Local Hostpath volumes. By default, OpenEBS Local Hostpath volumes use `kubenetes.io/hostname` for setting the PV Node Affinity. Users can now specify a custom label to use for PV Node Affinity. Custom node affinity can be specified in the Local PV storage class as follows: +- Added support for specifying a custom node affinity label for OpenEBS Local Hostpath volumes. By default, OpenEBS Local Hostpath volumes use `kubernetes.io/hostname` for setting the PV Node Affinity. Users can now specify a custom label to use for PV Node Affinity. Custom node affinity can be specified in the Local PV storage class as follows: ``` kind: StorageClass metadata: @@ -227,7 +227,7 @@ Here are some of the key highlights in this release. 
reclaimPolicy: Delete ``` This will help with use cases like: - - Deployments where `kubenetes.io/hostname` is not unique across the cluster (Ref: https://github.com/openebs/openebs/issues/2875) + - Deployments where `kubernetes.io/hostname` is not unique across the cluster (Ref: https://github.com/openebs/openebs/issues/2875) - Deployments, where an existing Kubernetes node in the cluster running Local volumes is replaced with a new node, and storage attached to the old node, is moved to a new node. Without this feature, the Pods using the older node will remain in the pending state. - Added a configuration option to the Jiva volume provisioner to skip adding replica node affinity. This will help in deployments where replica nodes are frequently replaced with new nodes causing the replica to remain in the pending state. The replica node affinity should be used in cases where replica nodes are not replaced with new nodes or the new node comes back with the same node-affinity label. (Ref: https://github.com/openebs/openebs/issues/3226). The node affinity for jiva volumes can be skipped by specifying the following ENV variable in the OpenEBS Provisioner Deployment. ``` @@ -246,7 +246,7 @@ Here are some of the key highlights in this release. ... autoSetTargetIP: "true" ``` - (Huge thanks to @zlymeda for working on this feature which involved co-ordinating this fix across multiple repositories). + (Huge thanks to @zlymeda for working on this feature which involved coordinating this fix across multiple repositories). - Enhanced the OpenEBS Velero plugin used to automatically create the target namespace during restore, if the target namespace doesn't exist. (Ref: https://github.com/openebs/velero-plugin/issues/137). - Enhanced the OpenEBS helm chart to support Image pull secrets. https://github.com/openebs/charts/pull/174 - Enhance OpenEBS helm chart to allow specifying resource limits on OpenEBS control plane pods. https://github.com/openebs/charts/issues/151 @@ -339,7 +339,7 @@ Here are some of the key highlights in this release. ### Key Improvements -- Added support for specifying a custom node affinity label for OpenEBS Local Hostpath volumes. By default, OpenEBS Local Hostpath volumes use `kubenetes.io/hostname` for setting the PV Node Affinity. Users can now specify a custom label to use for PV Node Affinity. Custom node affinity can be specified in the Local PV storage class as follows: +- Added support for specifying a custom node affinity label for OpenEBS Local Hostpath volumes. By default, OpenEBS Local Hostpath volumes use `kubernetes.io/hostname` for setting the PV Node Affinity. Users can now specify a custom label to use for PV Node Affinity. Custom node affinity can be specified in the Local PV storage class as follows: ``` kind: StorageClass metadata: @@ -356,7 +356,7 @@ Here are some of the key highlights in this release. reclaimPolicy: Delete ``` This will help with use cases like: - - Deployments where `kubenetes.io/hostname` is not unique across the cluster (Ref: https://github.com/openebs/openebs/issues/2875) + - Deployments where `kubernetes.io/hostname` is not unique across the cluster (Ref: https://github.com/openebs/openebs/issues/2875) - Deployments, where an existing Kubernetes node in the cluster running Local volumes is replaced with a new node, and storage attached to the old node, is moved to a new node. Without this feature, the Pods using the older node will remain in the pending state. 
- Added a configuration option to the Jiva volume provisioner to skip adding replica node affinity. This will help in deployments where replica nodes are frequently replaced with new nodes causing the replica to remain in the pending state. The replica node affinity should be used in cases where replica nodes are not replaced with new nodes or the new node comes back with the same node-affinity label. (Ref: https://github.com/openebs/openebs/issues/3226). The node affinity for jiva volumes can be skipped by specifying the following ENV variable in the OpenEBS Provisioner Deployment. ``` @@ -375,7 +375,7 @@ Here are some of the key highlights in this release. ... autoSetTargetIP: "true" ``` - (Huge thanks to @zlymeda for working on this feature which involved co-ordinating this fix across multiple repositories). + (Huge thanks to @zlymeda for working on this feature which involved coordinating this fix across multiple repositories). - Enhanced the OpenEBS Velero plugin used to automatically create the target namespace during restore, if the target namespace doesn't exist. (Ref: https://github.com/openebs/velero-plugin/issues/137). - Enhanced the OpenEBS helm chart to support Image pull secrets. https://github.com/openebs/charts/pull/174 - Enhance OpenEBS helm chart to allow specifying resource limits on OpenEBS control plane pods. https://github.com/openebs/charts/issues/151 @@ -582,7 +582,7 @@ Here are some of the key highlights in this release. ## 1.10.0 - May 15 2020 **New capabilities** -- The first release of OpenEBS Mayastor developed using NVMe based architecture, targetted at addressing performance requirements of IO-intensive workloads is ready for alpha testing. For detailed instructions on how to get started with Mayastor please refer to this [Quickstart guide](https://github.com/openebs/Mayastor/blob/master/doc/quick.md). +- The first release of OpenEBS Mayastor developed using NVMe based architecture, targeting at addressing performance requirements of IO-intensive workloads is ready for alpha testing. For detailed instructions on how to get started with Mayastor please refer to this [Quickstart guide](https://github.com/openebs/Mayastor/blob/master/doc/quick.md). - Enhancements to OpenEBS ZFS Local PV that includes resolving issues found during scale testing, fully functional CSI driver, and sample Grafana Dashboard for monitoring metrics on ZFS Volumes and Pools. For detailed instructions on how to get started with ZFS Local PV please refer to the [Quick start guide](https://github.com/openebs/zfs-localpv). **Key Improvements** @@ -681,7 +681,7 @@ Here are some of the key highlights in this release. - Enhance the logging mechanism for cStor pool pods during pool import time. These changes of logs will help to identify the existence of bad disk on the node. - Support for enabling core dump by adding an ENV variable ENABLE_COREDUMP= “1” for cStor pool pod to control whether cores need to be dumped in case of process crashes. By default, dumping cores will be disabled. Make sure this environment variable is not enabled if mountPoint of `SparseDir` has been changed in CAS Templates. - Enhance upgrade logs by providing pool and volume status information during the upgrade and also helps in estimating the time taken for deployment pods to come up. -- Improves Jiva rebuilding process by checkpointing the io numbers. Now only those snapshots will be synced which has less no of io’s. +- Improves Jiva rebuilding process by check pointing the io numbers. 
Now only those snapshots will be synced which has less no of io’s. - Fixes an issue with Jiva controller by removing WO replica if new replica with greater revision count get added to controller. - Disable core dump in NDM daemon by default. This can be enabled by setting an ENV variable `ENABLE_COREDUMP` to `1`. Core files will be stored inside /var/openebs/ndm/core. - Fixes issues in default core dumping location for NDM. System core pattern which is common for all processes on the node will not be modified. NDM will dump the cores in a location under openebs base directory. NDM process will be launched from the openebs directory, so core files will get automatically written to the $PWD, without requiring to change the core pattern. diff --git a/docs/t-cstor.md b/docs/t-cstor.md index 03288f1ae..760dc85cb 100644 --- a/docs/t-cstor.md +++ b/docs/t-cstor.md @@ -8,7 +8,7 @@ sidebar_label: cStor General guidelines for troubleshooting - Contact OpenEBS Community for support. -- Search for similar issues added in this troubleshootiung section. +- Search for similar issues added in this troubleshooting section. - Search for any reported issues on StackOverflow under OpenEBS tag
@@ -77,7 +77,7 @@ From the above highlighted logs, we can confirm `cstor-pool-mgmt` in new pod is When a cstor pool pod is deleted there are high chances that two cstor pool pods of same pool can present i.e old pool pod will be in `Terminating` state(which means not all the containers completely terminated) and new pool pod will be in `Running` state(might be few containers are in running state but not all). In this scenario `cstor-pool-mgmt` container in new pool pod is communicating with `cstor-pool` in old pool pod. This can cause CVR resource to set to `Invalid`. -**Note:** This issue has observed in all OpenEBS versions upto 1.2. +**Note:** This issue has observed in all OpenEBS versions up to 1.2. **Resolution:** diff --git a/docs/t-install.md b/docs/t-install.md index df541ea26..850b61adf 100644 --- a/docs/t-install.md +++ b/docs/t-install.md @@ -8,7 +8,7 @@ sidebar_label: Install General guidelines for troubleshooting - Contact OpenEBS Community for support. -- Search for similar issues added in this troubleshootiung section. +- Search for similar issues added in this troubleshooting section. - Search for any reported issues on StackOverflow under OpenEBS tag
@@ -19,7 +19,7 @@ sidebar_label: Install [iSCSI client is not setup on Nodes. Application Pod is in ContainerCreating state.](#install-failed-iscsi-not-configured) -[Why does OpenEBS provisioner pod restart continuously?](#openebs-provsioner-restart-continuously) +[Why does OpenEBS provisioner pod restart continuously?](#openebs-provisioner-restart-continuously) [OpenEBS installation fails on Azure](#install-failed-azure-no-rbac-set). @@ -71,7 +71,7 @@ This logs points that iscsid.service may not be enabled and running on your Node -

Why does OpenEBS provisioner pod restart continuously?
+Why does OpenEBS provisioner pod restart continuously?
The following output displays the pod status of all namespaces in which the OpenEBS provisioner is restarting continuously. @@ -80,18 +80,18 @@ NAMESPACE NAME READY STATUS default percona 0/1 Pending 0 36m kube-system calico-etcd-tl4td 1/1 Running 0 1h 192.168.56.65 master kube-system calico-kube-controllers-84fd4db7cd-jz9wt 1/1 Running 0 1h 192.168.56.65 master -kube-system calico-node-5rqdl 2/2 Running 0 1h 192.168.56.65 master +kube-system calico-node-node1 2/2 Running 0 1h 192.168.56.65 master kube-system calico-node-zt95x 2/2 Running 0 1h 192.168.56.66 node -kube-system coredns-78fcdf6894-2plxb 1/1 Running 0 1h 192.168.219.65 master -kube-system coredns-78fcdf6894-gcjj7 1/1 Running 0 1h 192.168.219.66 master +kube-system coredns-78fcdf6894-2test 1/1 Running 0 1h 192.168.219.65 master +kube-system coredns-78fcdf6894-test7 1/1 Running 0 1h 192.168.219.66 master kube-system etcd-master 1/1 Running 0 1h 192.168.56.65 master kube-system kube-apiserver-master 1/1 Running 0 1h 192.168.56.65 master kube-system kube-controller-manager-master 1/1 Running 0 1h 192.168.56.65 master kube-system kube-proxy-9t98s 1/1 Running 0 1h 192.168.56.65 master kube-system kube-proxy-mwk9f 1/1 Running 0 1h 192.168.56.66 node kube-system kube-scheduler-master 1/1 Running 0 1h 192.168.56.65 master -openebs maya-apiserver-5598cf68ff-tndgm 1/1 Running 0 1h 192.168.167.131 node -openebs openebs-provisioner-776846bbff-rqfzr 0/1 CrashLoopBackOff 16 1h 192.168.167.129 node +openebs maya-apiserver-5598cf68ff-pod17 1/1 Running 0 1h 192.168.167.131 node +openebs openebs-provisioner-776846bbff-pod19 0/1 CrashLoopBackOff 16 1h 192.168.167.129 node openebs openebs-snapshot-operator-5b5f97dd7f-np79k 0/2 CrashLoopBackOff 32 1h 192.168.167.130 node ``` @@ -113,7 +113,7 @@ On AKS, while installing OpenEBS using Helm, you may see the following error. 
``` $ helm install openebs/openebs --name openebs --namespace openebs -Error: release openebsfailed: clusterroles.rbac.authorization.k8s.io "openebs" isforbidden: attempt to grant extra privileges:[PolicyRule{Resources:["nodes"], APIGroups:["*"],Verbs:["get"]} PolicyRule{Resources:["nodes"],APIGroups:["*"], Verbs:["list"]}PolicyRule{Resources:["nodes"], APIGroups:["*"],Verbs:["watch"]} PolicyRule{Resources:["nodes/proxy"],APIGroups:["*"], Verbs:["get"]}PolicyRule{Resources:["nodes/proxy"], APIGroups:["*"],Verbs:["list"]} PolicyRule{Resources:["nodes/proxy"],APIGroups:["*"], Verbs:["watch"]}PolicyRule{Resources:["namespaces"], APIGroups:["*"],Verbs:["*"]} PolicyRule{Resources:["services"],APIGroups:["*"], Verbs:["*"]} PolicyRule{Resources:["pods"],APIGroups:["*"], Verbs:["*"]}PolicyRule{Resources:["deployments"], APIGroups:["*"],Verbs:["*"]} PolicyRule{Resources:["events"],APIGroups:["*"], Verbs:["*"]}PolicyRule{Resources:["endpoints"], APIGroups:["*"],Verbs:["*"]} PolicyRule{Resources:["persistentvolumes"],APIGroups:["*"], Verbs:["*"]} PolicyRule{Resources:["persistentvolumeclaims"],APIGroups:["*"], Verbs:["*"]}PolicyRule{Resources:["storageclasses"],APIGroups:["storage.k8s.io"], Verbs:["*"]}PolicyRule{Resources:["storagepools"], APIGroups:["*"],Verbs:["get"]} PolicyRule{Resources:["storagepools"], APIGroups:["*"],Verbs:["list"]} PolicyRule{NonResourceURLs:["/metrics"],Verbs:["get"]}] user=&{system:serviceaccount:kube-system:tiller6f3172cc-4a08-11e8-9af5-0a58ac1f1729 [system:serviceaccounts system:serviceaccounts:kube-systemsystem:authenticated] map[]} ownerrules=[]ruleResolutionErrors=[clusterroles.rbac.authorization.k8s.io"cluster-admin" not found] +Error: release openebs failed: clusterroles.rbac.authorization.k8s.io "openebs" isforbidden: attempt to grant extra privileges:[PolicyRule{Resources:["nodes"], APIGroups:["*"],Verbs:["get"]} PolicyRule{Resources:["nodes"],APIGroups:["*"], Verbs:["list"]}PolicyRule{Resources:["nodes"], APIGroups:["*"],Verbs:["watch"]} PolicyRule{Resources:["nodes/proxy"],APIGroups:["*"], Verbs:["get"]}PolicyRule{Resources:["nodes/proxy"], APIGroups:["*"],Verbs:["list"]} PolicyRule{Resources:["nodes/proxy"],APIGroups:["*"], Verbs:["watch"]}PolicyRule{Resources:["namespaces"], APIGroups:["*"],Verbs:["*"]} PolicyRule{Resources:["services"],APIGroups:["*"], Verbs:["*"]} PolicyRule{Resources:["pods"],APIGroups:["*"], Verbs:["*"]}PolicyRule{Resources:["deployments"], APIGroups:["*"],Verbs:["*"]} PolicyRule{Resources:["events"],APIGroups:["*"], Verbs:["*"]}PolicyRule{Resources:["endpoints"], APIGroups:["*"],Verbs:["*"]} PolicyRule{Resources:["persistentvolumes"],APIGroups:["*"], Verbs:["*"]} PolicyRule{Resources:["persistentvolumeclaims"],APIGroups:["*"], Verbs:["*"]}PolicyRule{Resources:["storageclasses"],APIGroups:["storage.k8s.io"], Verbs:["*"]}PolicyRule{Resources:["storagepools"], APIGroups:["*"],Verbs:["get"]} PolicyRule{Resources:["storagepools"], APIGroups:["*"],Verbs:["list"]} PolicyRule{NonResourceURLs:["/metrics"],Verbs:["get"]}] user=&{system:serviceaccount:kube-system:tiller6f3172cc-4a08-11e8-9af5-0a58ac1f1729 [system:serviceaccounts system:serviceaccounts:kube-systemsystem:authenticated] map[]} ownerrules=[]ruleResolutionErrors=[clusterroles.rbac.authorization.k8s.io"cluster-admin" not found] ``` **Troubleshooting** diff --git a/docs/t-jiva.md b/docs/t-jiva.md index f14c020b6..3872f5eb4 100644 --- a/docs/t-jiva.md +++ b/docs/t-jiva.md @@ -8,7 +8,7 @@ sidebar_label: Jiva General guidelines for troubleshooting - Contact OpenEBS Community for support. 
-- Search for similar issues added in this troubleshootiung section. +- Search for similar issues added in this troubleshooting section. - Search for any reported issues on StackOverflow under OpenEBS tag
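Regarding the AKS Helm error captured above, which shows the `tiller` service account trying to grant privileges it does not itself hold, a commonly used remedy on Helm v2 is to bind `cluster-admin` to tiller before installing. This is an illustrative sketch rather than the guide's exact resolution; adapt it to your cluster's RBAC policy:

```
# Create a service account for tiller and grant it cluster-admin (Helm v2 / tiller-based installs only)
kubectl -n kube-system create serviceaccount tiller
kubectl create clusterrolebinding tiller-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:tiller

# Re-initialize tiller with that service account, then retry the install from the error above
helm init --service-account tiller --upgrade
helm install openebs/openebs --name openebs --namespace openebs
```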
diff --git a/docs/t-localpv.md b/docs/t-localpv.md index 5650602bd..0e4af1f0a 100644 --- a/docs/t-localpv.md +++ b/docs/t-localpv.md @@ -8,7 +8,7 @@ sidebar_label: LocalPV General guidelines for troubleshooting - Contact OpenEBS Community for support. -- Search for similar issues added in this troubleshootiung section. +- Search for similar issues added in this troubleshooting section. - Search for any reported issues on StackOverflow under OpenEBS tag
@@ -43,7 +43,7 @@ Warning FailedScheduling 7m24s (x2 over 7m24s) default-scheduler persistentv **Troubleshooting:** -Check if there is a blockdevice present on the node (to which the application pod was scheduled,) which mathces the capacity requirements of the PVC. +Check if there is a blockdevice present on the node (to which the application pod was scheduled,) which matches the capacity requirements of the PVC. ``` kubectl get bd -n openebs -o wide diff --git a/docs/t-mayastor.md b/docs/t-mayastor.md index 21b1a51cd..2c7545392 100644 --- a/docs/t-mayastor.md +++ b/docs/t-mayastor.md @@ -20,13 +20,13 @@ Sample output: ``` NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES mayastor-csi-7pg82 2/2 Running 0 15m 10.0.84.131 worker-2 -mayastor-csi-gmpq6 2/2 Running 0 15m 10.0.239.174 worker-1 +mayastor-csi-node5 2/2 Running 0 15m 10.0.239.174 worker-1 mayastor-csi-xrmxx 2/2 Running 0 15m 10.0.85.71 worker-0 -mayastor-qgpw6 1/1 Running 0 14m 10.0.85.71 worker-0 +mayastor-node6 1/1 Running 0 14m 10.0.85.71 worker-0 mayastor-qr84q 1/1 Running 0 14m 10.0.239.174 worker-1 -mayastor-xhmj5 1/1 Running 0 14m 10.0.84.131 worker-2 +mayastor-node1 1/1 Running 0 14m 10.0.84.131 worker-2 moac-b8f4446b5-r5gwk 3/3 Running 0 15m 10.244.2.2 worker-2 -nats-6fdd6dfb4f-swlm8 1/1 Running 0 16m 10.244.3.2 worker-0 +nats-6fdd6dfb4f-node2 1/1 Running 0 16m 10.244.3.2 worker-0 ``` @@ -77,7 +77,7 @@ a problem on the storage node. To obtain mayastor's log execute: ``` -kubectl -n mayastor logs mayastor-qgpw6 mayastor +kubectl -n mayastor logs mayastor-node6 mayastor ``` ### mayastor CSI agent's log file @@ -234,7 +234,7 @@ Type "apropos word" to search for commands related to "word"... [New LWP 14] [New LWP 16] [New LWP 18] -Core was generated by `/bin/mayastor -l0 -nnats'. +Core was generated by `/bin/mayastor -l0 -n nats'. Program terminated with signal SIGABRT, Aborted. #0 0x00007ffdad99fb37 in clock_gettime () [Current thread is 1 (LWP 13)] diff --git a/docs/t-ndm.md b/docs/t-ndm.md index 601f81a4a..62b6312c7 100644 --- a/docs/t-ndm.md +++ b/docs/t-ndm.md @@ -8,7 +8,7 @@ sidebar_label: NDM General guidelines for troubleshooting - Contact OpenEBS Community for support. -- Search for similar issues added in this troubleshootiung section. +- Search for similar issues added in this troubleshooting section. - Search for any reported issues on StackOverflow under OpenEBS tag
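Related to the gdb session on a mayastor core file shown above: on storage nodes that use systemd-coredump, the dump can usually be located and opened as in the sketch below (availability and paths depend on how core dumps are configured on that node):

```
# List recent core dumps produced by the mayastor binary
coredumpctl list /bin/mayastor

# Open the most recent mayastor core dump directly in gdb
coredumpctl gdb /bin/mayastor
```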
diff --git a/docs/t-uninstall.md b/docs/t-uninstall.md index e88f2f550..d89b65e59 100644 --- a/docs/t-uninstall.md +++ b/docs/t-uninstall.md @@ -8,7 +8,7 @@ sidebar_label: Uninstall General guidelines for troubleshooting - Contact OpenEBS Community for support. -- Search for similar issues added in this troubleshootiung section. +- Search for similar issues added in this troubleshooting section. - Search for any reported issues on StackOverflow under OpenEBS tag
diff --git a/docs/t-volume-provisioning.md b/docs/t-volume-provisioning.md index a2e04c77b..ab3aa01f6 100644 --- a/docs/t-volume-provisioning.md +++ b/docs/t-volume-provisioning.md @@ -8,7 +8,7 @@ sidebar_label: Volume Provisioning General guidelines for troubleshooting - Contact OpenEBS Community for support. -- Search for similar issues added in this troubleshootiung section. +- Search for similar issues added in this troubleshooting section. - Search for any reported issues on StackOverflow under OpenEBS tag
@@ -23,7 +23,7 @@ sidebar_label: Volume Provisioning [Application pod is stuck in ContainerCreating state after deployment](#application-pod-stuck-after-deployment) -[Creating cStor pool fails on CentOS when there are partitions on the disk](#cstor-pool-failed-centos-partion-disk) +[Creating cStor pool fails on CentOS when there are partitions on the disk](#cstor-pool-failed-centos-partition-disk) [Application pod enters CrashLoopBackOff state](#application-crashloopbackoff) @@ -59,7 +59,7 @@ Application sometimes complain about the underlying filesystem has become ReadOn This can happen for many reasons. - The cStor target pod is evicted because of resource constraints and is not scheduled within time -- Node is rebooted in adhoc manner (or unscheduled reboot) and Kubernetes is waiting for Kubelet to come backup to know that the node is rebooted and the pods on that node need to be rescheduled. Kubernetes can take upto 30 minutes as timeout before deciding the node does not comebackup and pods need to be rescheduled. During this time, the iSCSI initiator at the application pod has timeout and marked the underlying filesystem as ReadOnly +- Node is rebooted in an ad hoc manner (or unscheduled reboot) and Kubernetes is waiting for Kubelet to respond to know if the node is rebooted and the pods on that node need to be rescheduled. Kubernetes can take up to 30 minutes as timeout before deciding the node is going to stay offline and pods need to be rescheduled. During this time, the iSCSI initiator at the application pod has timed out and marked the underlying filesystem as ReadOnly - cStor target has lost quorum because of underlying node losses and target has marked the lun as ReadOnly Go through the Kubelet logs and application pod logs to know the reason for marking the ReadOnly and take appropriate action. [Maintaining volume quorum](/docs/next/k8supgrades.html) is necessary during Kubernetes node reboots. @@ -95,7 +95,7 @@ The setup environment where the issue occurs is rancher/rke with bare metal host ``` NAME READY STATUS RESTARTS AGE -nginx-deployment-57849d9f57-gvzkh 0/1 ContainerCreating 0 2m +nginx-deployment-57849d9f57-12345 0/1 ContainerCreating 0 2m pvc-adb79406-8e3e-11e8-a06a-001c42c2325f-ctrl-58dcdf997f-n4kd9 2/2 Running 0 8m pvc-adb79406-8e3e-11e8-a06a-001c42c2325f-rep-696b599894-gq4z6 1/1 Running 0 8m pvc-adb79406-8e3e-11e8-a06a-001c42c2325f-rep-696b599894-hwx52 1/1 Running 0 8m @@ -148,13 +148,13 @@ More details are mentioned [here](/docs/next/prerequisites.html#rancher). -

Creating cStor pool fails on CentOS when there are partitions on the disk.

+

Creating cStor pool fails on CentOS when there are partitions on the disk.

Creating cStor pool fails with the following error message: ``` -E0920 14:51:17.474702 8 pool.go:78] Unable to create pool: /dev/disk/by-id/ata-WDC_WD2500BPVT-00JJ +E0920 14:51:17.474702 8 pool.go:78] Unable to create pool: /dev/disk/by-id/ata-WDC_WD2500BOOM-00JJ ``` sdb and sdc are used for cStor pool creation. @@ -248,8 +248,7 @@ The procedure to ensure application recovery in the above cases is as follows: umount /var/lib/kubelet/plugins/kubernetes.io/iscsi/iface-default/10.39.241.26: 3260-iqn.2016-09.com.openebs.jiva:mongo-jiva-mongo-persistent-storage-mongo-0-3481266901-lun-0 - umount /var/lib/kubelet/pods/ae74da97-c852-11e8-a219-42010af000b6/volumes/kuber - netes.io~iscsi/mongo-jiva-mongo-persistent-storage-mongo-0-3481266901 + umount /var/lib/kubelet/pods/ae74da97-c852-11e8-a219-42010af000b6/volumes/kubernetes.io~iscsi/mongo-jiva-mongo-persistent-storage-mongo-0-3481266901 ``` 5. Identify whether the iSCSI session is re-established after failure. This can be verified using `iscsiadm -m session`, with the device mapping established using `iscsiadm -m session -P 3` and `fdisk -l`. **Note:** Sometimes, it is observed that there are stale device nodes (scsi device names) present on the Kubernetes node. Unless the logs confirm that a re-login has occurred after the system issues were resolved, it is recommended to perform the following step after doing a purge/logout of the existing session using `iscsiadm -m node -T -u`. @@ -440,7 +439,7 @@ Events: Warning FailedScheduling 58s (x2 over 59s) default-scheduler pod has unbound PersistentVolumeClaims (repeated 4 times) Normal Scheduled 58s default-scheduler Successfully assigned redis-master-0 to node0 Normal SuccessfulAttachVolume 58s attachdetach-controller AttachVolume.Attach succeeded for volume "pvc-a036d681-8fd4-11e8-ad96-de1a202c9007" - Normal SuccessfulMountVolume 55s kubelet, node0 MountVolume.SetUp succeeded for volume "default-token-ngjhh" + Normal SuccessfulMountVolume 55s kubelet, node0 MountVolume.SetUp succeeded for volume "default-token-12345" Warning FailedMount 24s (x4 over 43s) kubelet, node0 MountVolume.WaitForAttach failed for volume "pvc-a036d681-8fd4-11e8-ad96-de1a202c9007" : failed to get any path for iscsi disk, last err seen: iscsi: failed to sendtargets to portal 10.233.27.8:3260 output: iscsiadm: cannot make connection to 10.233.27.8: Connection refused iscsiadm: cannot make connection to 10.233.27.8: Connection refused @@ -510,7 +509,7 @@ By default admission webhook service has been configured to 443 port and the err

Unable to provision OpenEBS volume on DigitalOcean


-User is unable to provision cStor or jiva volume on DigitalcOcean, encountering error thrown from iSCSI PVs: +User is unable to provision a cStor or jiva volume on DigitalOcean, encountering errors thrown from iSCSI PVs:
``` diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index 7f73ad0d2..fd63e4d99 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -97,7 +97,7 @@ Feb 06 06:11:38 kubelet[1063]: iscsiadm: failed to send SendTargets P The cause of high memory consumption of kubelet is mainly due to the following. -There are 3 modules are involved - cstor-isgt, kubelet and iscsiInitiator(iscsiadm). kubelet runs iscsiadm command to do discovery on cstor-istgt. If there is any delay in receiving response of discovery opcode (either due to network or delay in processing on target side), iscsiadm retries few times, and, gets into infinite loop dumping error messages as below: +There are 3 modules involved - cstor-istgt, kubelet and iscsiInitiator (iscsiadm). kubelet runs the iscsiadm command to do discovery on cstor-istgt. If there is any delay in receiving the response of the discovery opcode (either due to network or delay in processing on the target side), iscsiadm retries a few times and gets into an infinite loop dumping error messages as below: iscsiadm: Connection to Discovery Address 127.0.0.1 failed iscsiadm: failed to send SendTargets PDU diff --git a/docs/ugcstor.md b/docs/ugcstor.md index e243a8a51..35bdb7740 100644 --- a/docs/ugcstor.md +++ b/docs/ugcstor.md @@ -263,7 +263,7 @@ This will not affect any `PersistentVolumeClaims` or `PersistentVolumes` that we OpenEBS volume can be backed up and restored along with the application using OpenEBS velero plugin. It helps the user for backing up the OpenEBS volumes to third party storage location and restore the data whenever it is required. The steps for taking backup and restore are as follows. -

Prerequisites

+

Prerequisites

- Latest tested Velero version is 1.4.0. - Create required storage provider configuration to store the backup. @@ -652,7 +652,7 @@ The application pods should be running as displayed below: ``` NAME READY STATUS RESTARTS AGE -busybox-66db7d9b88-kkktl 1/1 Running 0 2m16s +busybox-66db7d9b88-unique 1/1 Running 0 2m16s ```
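Assuming Velero and the OpenEBS velero plugin are already configured with a storage provider location as noted above, a minimal backup-and-restore flow could look like the following sketch (the backup name and namespace are illustrative):

```
# Back up the application namespace; volume data is handled by the configured plugin and snapshot location
velero backup create my-cstor-backup --include-namespaces=default

# Later, restore the application and its volumes from that backup
velero restore create --from-backup my-cstor-backup
```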
  • @@ -798,9 +798,9 @@ The output will be similar to the following.
    NAME NODENAME SIZE CLAIMSTATE STATUS AGE -blockdevice-1c10eb1bb14c94f02a00373f2fa09b93 gke-ranjith-14-default-pool-da9e1336-mbq9 42949672960 Unclaimed Active 2m39s -blockdevice-77f834edba45b03318d9de5b79af0734 gke-ranjith-14-default-pool-da9e1336-d9zq 42949672960 Unclaimed Active 2m47s -blockdevice-936911c5c9b0218ed59e64009cc83c8f gke-ranjith-14-default-pool-da9e1336-9j2w 42949672960 Unclaimed Active 2m55s
    +blockdevice-1c10eb1bb14c94f02a00373f2fa09b93 gke-user-14-default-pool-da9e1336-mbq9 42949672960 Unclaimed Active 2m39s +blockdevice-77f834edba45b03318d9de5b79af0734 gke-user-14-default-pool-da9e1336-d9zq 42949672960 Unclaimed Active 2m47s +blockdevice-936911c5c9b0218ed59e64009cc83c8f gke-user-14-default-pool-da9e1336-9j2w 42949672960 Unclaimed Active 2m55s
  • The details of blockdevice can be get using the following command. @@ -933,9 +933,9 @@ kubectl get pod -n openebs | grep cstor-disk-pool Example Output: ``` -cstor-disk-pool-2gcb-64876b956b-q8fgp 3/3 Running 0 2m30s -cstor-disk-pool-9q2f-b85ccf6f-6cpdm 3/3 Running 0 2m30s -cstor-disk-pool-ilz1-5587ff79bf-6djjf 3/3 Running 0 2m31s +cstor-disk-pool-2gcb-64876b956b-61001 3/3 Running 0 2m30s +cstor-disk-pool-9q2f-b85ccf6f-61002 3/3 Running 0 2m30s +cstor-disk-pool-ilz1-5587ff79bf-61003 3/3 Running 0 2m31s ``` If all pods are showing are running, then you can use these cStor pools for creating cStor volumes. @@ -978,7 +978,7 @@ spec:

    PoolResourceRequests Policy

-This feature allow you to specify pool resource requests that need to be available before scheduling the containers. If not specified, the default values are used. The following sample configuration will set memory as `2Gi` and ephemeral-stroage request value as `100Mi`. The memory will be shared for all the volume replicas that reside on a pool. The memory can be `2Gi` to `4Gi` per pool on a given node for better performance. These values can be changed as per the node configuration for better performance. The below configuration also set the `cstor-pool` container with `100Mi` as `ephemeral-storage` requests which will avoid erraneous eviction by K8s. +This feature allows you to specify pool resource requests that need to be available before scheduling the containers. If not specified, the default values are used. The following sample configuration will set memory as `2Gi` and ephemeral-storage request value as `100Mi`. The memory will be shared for all the volume replicas that reside on a pool. The memory can be `2Gi` to `4Gi` per pool on a given node for better performance. These values can be changed as per the node configuration for better performance. The below configuration also sets the `cstor-pool` container with `100Mi` as `ephemeral-storage` requests which will avoid erroneous eviction by K8s. ``` apiVersion: openebs.io/v1alpha1 @@ -1000,7 +1000,7 @@ spec:

    Tolerations

-cStor pool pods can be ensure that pods are not scheduled onto inappropriate nodes. This can be acheived using taint and tolerations method. If Nodes are tainted to schedule the pods which are tolerating the taint, then cStor pool pods also can be scheduled using this method. Tolerations are applied to cStor pool pods, and allow (but do not require) the pods to schedule onto nodes with matching taints. +You can ensure that cStor pool pods are not scheduled onto inappropriate nodes. This can be achieved using the taints and tolerations method. If Nodes are tainted so that only pods tolerating the taint are scheduled on them, then cStor pool pods can also be scheduled using this method. Tolerations are applied to cStor pool pods, and allow (but do not require) the pods to schedule onto nodes with matching taints. ``` apiVersion: openebs.io/v1alpha1 @@ -1052,7 +1052,7 @@ metadata:

    AuxResourceRequests Policy

-The below configuration will set the cstor-pool side-cars with memory as `0.5Gi`, cpu as `100m`. This also set the `cstor-pool` side-cars with ephemeral-storage request `50Mi` which will avoid erraneous eviction by K8s. +The below configuration will set the cstor-pool side-cars with memory as `0.5Gi`, cpu as `100m`. This also sets the `cstor-pool` side-cars with ephemeral-storage request `50Mi` which will avoid erroneous eviction by K8s. ``` apiVersion: openebs.io/v1alpha1 @@ -1324,7 +1324,7 @@ You can specify the *TargetResourceRequests* to specify resource requests that n apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: - name: opeenbs-sc-tgt-request + name: openebs-sc-tgt-request annotations: cas.openebs.io/config: | - name: TargetResourceRequests @@ -1406,7 +1406,7 @@ The configuration for implementing this policy is different for deployment and S
    For StatefulSet Applications
    -In the case of provisioning StatfulSet applications with replication factor greater than "1" and volume replication factor equal to "1", for a given OpenEBS volume, target and replica related to that volume should be scheduled on the same node where the application pod resides. This feature can be achieved by using either of the following approaches. +In the case of provisioning StatefulSet applications with replication factor greater than "1" and volume replication factor equal to "1", for a given OpenEBS volume, target and replica related to that volume should be scheduled on the same node where the application pod resides. This feature can be achieved by using either of the following approaches. **Approach 1:** @@ -1729,7 +1729,7 @@ cStorPools can be horizontally scaled when needed typically when a new Kubernete The steps for expanding the pool to new nodes is given below. -

    With specifiying blockDeviceList

    +

    With specifying blockDeviceList

    If you are following this approach, you should have created cStor Pool initially using the steps provided [here](/docs/next/ugcstor.html#creating-cStor-storage-pools). For expanding pool onto a new OpenEBS node, you have to edit corresponding pool configuration(SPC) YAML with the required block device names under the `blockDeviceList` . **Step 1:** Edit the existing pool configuration spec that you originally used and apply it (OR) directly edit the in-use spec file using `kubectl edit spc `. @@ -1855,7 +1855,7 @@ This section provide the steps for scaling up the replica of a cStor volume. demo-vol1-claim Bound pvc-3f86fcdf-02f6-11ea-b0f6-42010a8000f8 500Gi RWO openebs-sc-cstor 3h18m - From the above output, get `VOLUME` name and use in the following command to get the details of corresponding cStor volume. All commands are peformed by considering above PVC. + From the above output, get `VOLUME` name and use in the following command to get the details of corresponding cStor volume. All commands are performed by considering above PVC. Get the details of cStor volume details using the following command: @@ -1886,7 +1886,7 @@ This section provide the steps for scaling up the replica of a cStor volume. 3. Perform the following command to get complete details of the existing cStor volume replica: ``` - kubectl get cvr pvc-3f86fcdf-02f6-11ea-b0f6-42010a8000f8-cstor-disk-pool-hgt4 -n openebs -oyaml + kubectl get cvr pvc-3f86fcdf-02f6-11ea-b0f6-42010a8000f8-cstor-disk-pool-hgt4 -n openebs -o yaml ``` Example snippet of output: @@ -1896,7 +1896,7 @@ This section provide the steps for scaling up the replica of a cStor volume. kind: CStorVolumeReplica metadata: annotations: - cstorpool.openebs.io/hostname: gke-ranjith-scaleup-default-pool-48c9bf17-tb7w + cstorpool.openebs.io/hostname: gke-user-cluster-default-pool-48c9bf17-tb7w isRestoreVol: "false" openebs.io/storage-class-ref: | name: openebs-sc-cstor @@ -1980,7 +1980,7 @@ This section provide the steps for scaling up the replica of a cStor volume. 6. Perform the following command to get the details of the cStor Pool where new replica will be created: ``` - kubectl get csp -n openebs cstor-disk-pool-2phf -oyaml + kubectl get csp -n openebs cstor-disk-pool-2phf -o yaml ``` Example snippet of output: @@ -1990,11 +1990,11 @@ This section provide the steps for scaling up the replica of a cStor volume. kind: CStorPool metadata: annotations: - openebs.io/csp-lease: '{"holder":"openebs/cstor-disk-pool-2phf-5d68b6b7ff-nbslc","leaderTransition":1}' + openebs.io/csp-lease: '{"holder":"openebs/cstor-disk-pool-2phf-5d68b6b7ff-12345","leaderTransition":1}' creationTimestamp: "2019-11-09T13:28:17Z" generation: 2196 labels: - kubernetes.io/hostname: gke-ranjith-scaleup-default-pool-48c9bf17-tjvs + kubernetes.io/hostname: gke-user-cluster-default-pool-48c9bf17-1234 openebs.io/cas-template-name: cstor-pool-create-default-1.4.0 openebs.io/cas-type: cstor openebs.io/storage-pool-claim: cstor-disk-pool @@ -2025,7 +2025,7 @@ This section provide the steps for scaling up the replica of a cStor volume. kind: CStorVolumeReplica metadata: annotations: - cstorpool.openebs.io/hostname: + cstorpool.openebs.io/hostname: isRestoreVol: "false" openebs.io/storage-class-ref: | name: @@ -2054,7 +2054,7 @@ This section provide the steps for scaling up the replica of a cStor volume. current: ``` - - ****: Kubernetes node name where cStor pool exists and new CVR will be created on this Node. This can be obtained from step 6. 
+ - ****: Kubernetes node name where cStor pool exists and new CVR will be created on this Node. This can be obtained from step 6. - **:** Storageclass name used to create the cStor volume. It is also available in any existing CVR. This can be obtained from step 3. @@ -2068,11 +2068,11 @@ This section provide the steps for scaling up the replica of a cStor volume. - ****: Name of cStor volume. This can be get from step 3. - - **-**: This is the newe CVR name which is going to be created. This should be named as a combination of particular cStor volume name and identified cStor pool name. This can be get from step 3 and step 6. + - **-**: This is the new CVR name which is going to be created. This should be named as a combination of particular cStor volume name and identified cStor pool name. This can be get from step 3 and step 6. - ****: Capacity of the cStor volume. This can be get from step 3. - - ****: `Targetip` of corresponding cStor volume. This can be get from step 3. + - ****: Target IP of corresponding cStor volume. This can be got from step 3. - ****: It is the unique value referred to as `replicaid` in the whole cluster. This can be generated by running the following command: @@ -2100,7 +2100,7 @@ This section provide the steps for scaling up the replica of a cStor volume. kind: CStorVolumeReplica metadata: annotations: - cstorpool.openebs.io/hostname: gke-ranjith-scaleup-default-pool-48c9bf17-tjvs + cstorpool.openebs.io/hostname: gke-user-cluster-default-pool-48c9bf17-1234 isRestoreVol: "false" openebs.io/storage-class-ref: | name: openebs-sc-cstor @@ -2186,7 +2186,7 @@ This section provide the steps for scaling up the replica of a cStor volume. cstorvolume.openebs.io/pvc-3f86fcdf-02f6-11ea-b0f6-42010a8000f8 edited -11. Verify if the rebuilding has started on new replica of the cStor volume. Once rebuilding has completed, it will update its `STATUS` as `Healthy`. Get the latest status of the CVRs using the folloiwng command: +11. Verify if the rebuilding has started on new replica of the cStor volume. Once rebuilding has completed, it will update its `STATUS` as `Healthy`. Get the latest status of the CVRs using the following command: ``` kubectl get cvr -n openebs @@ -2255,7 +2255,7 @@ This section provide the steps for scaling down the replica of a cStor volume. - All the other cStor volume replicas(CVR) should be in `Healthy` state except the cStor volume replica that is going to deleted(i.e deleting CVR can be in any state). -- There shouldn't be any ongoing scaleup process. Verify that `replicationFactor` should be equal to the `desiredReplicationFactor` from corresponding cStor volume CR specification. +- There shouldn't be any ongoing scale up process. Verify that `replicationFactor` should be equal to the `desiredReplicationFactor` from corresponding cStor volume CR specification. **Notes to remember:** @@ -2276,7 +2276,7 @@ This section provide the steps for scaling down the replica of a cStor volume. kubectl get pvc ``` - From the output of above command, get `VOLUME` name and use in the following command to get the details of corresponding cStor volume. All commands are peformed by considering above PVC. + From the output of above command, get `VOLUME` name and use in the following command to get the details of corresponding cStor volume. All commands are performed by considering above PVC. 
``` kubectl get cstorvolume -n openebs -l openebs.io/persistent-volume=pvc-ed6e893a-051d-11ea-a786-42010a8001c9 @@ -2307,7 +2307,7 @@ This section provide the steps for scaling down the replica of a cStor volume. 2. Identify the cStor volume replica from above output which needs to be removed. Then, perform the following command to get the `replicaid` of the corresponding cStor volume replica. In this example, identified cStor volume replica is `pvc-ed6e893a-051d-11ea-a786-42010a8001c9-cstor-disk-pool-c0tw`. ``` - kubectl get cvr pvc-ed6e893a-051d-11ea-a786-42010a8001c9-cstor-disk-pool-c0tw -n openebs -oyaml | grep -i replicaid + kubectl get cvr pvc-ed6e893a-051d-11ea-a786-42010a8001c9-cstor-disk-pool-c0tw -n openebs -o yaml | grep -i replicaid ``` Example snippet: @@ -2369,15 +2369,15 @@ This section provide the steps for scaling down the replica of a cStor volume. Example snippet of output:
    - Normal Healthy 18m pvc-ed6e893a-051d-11ea-a786-42010a8001c9-target-58d76bdbd-95hdh, gke-ranjith-scaledown-default-pool-0dece219-jt3d Volume is in Healthy state - Warning FailUpdate 92s (x4 over 22m) pvc-ed6e893a-051d-11ea-a786-42010a8001c9-target-58d76bdbd-95hdh, gke-ranjith-scaledown-default-pool-0dece219-jt3d Ignoring changes on volume pvc-ed6e893a-051d-11ea-a786-42010a8001c9 - Normal Updated 92s pvc-ed6e893a-051d-11ea-a786-42010a8001c9-target-58d76bdbd-95hdh, gke-ranjith-scaledown-default-pool-0dece219-jt3d Successfully updated the desiredReplicationFactor to 2 + Normal Healthy 18m pvc-ed6e893a-051d-11ea-a786-42010a8001c9-target-58d76bdbd-95hdh, gke-user-cluster-default-pool-0dece219-jt3d Volume is in Healthy state + Warning FailUpdate 92s (x4 over 22m) pvc-ed6e893a-051d-11ea-a786-42010a8001c9-target-58d76bdbd-95hdh, gke-user-cluster-default-pool-0dece219-jt3d Ignoring changes on volume pvc-ed6e893a-051d-11ea-a786-42010a8001c9 + Normal Updated 92s pvc-ed6e893a-051d-11ea-a786-42010a8001c9-target-58d76bdbd-95hdh, gke-user-cluster-default-pool-0dece219-jt3d Successfully updated the desiredReplicationFactor to 2
    Verify the updated details of cStor volume using the following command: ``` - kubectl get cstorvolume pvc-ed6e893a-051d-11ea-a786-42010a8001c9 -n openebs -oyaml + kubectl get cstorvolume pvc-ed6e893a-051d-11ea-a786-42010a8001c9 -n openebs -o yaml ``` Example snippet of output: diff --git a/docs/uglocalpv-device.md b/docs/uglocalpv-device.md index 77a61881e..89002f154 100644 --- a/docs/uglocalpv-device.md +++ b/docs/uglocalpv-device.md @@ -15,7 +15,7 @@ This guide will help you to set up and use OpenEBS Local Persistent Volumes back *OpenEBS Dynamic Local PV provisioner* can create Kubernetes Local Persistent Volumes using block devices available on the node to persist data, hereafter referred to as *OpenEBS Local PV Device* volumes. -*OpenEBS Local PV Device* volumes have the following advantages compared to native Kubernetes Local Peristent Volumes. +*OpenEBS Local PV Device* volumes have the following advantages compared to native Kubernetes Local Persistent Volumes. - Dynamic Volume provisioner as opposed to a Static Provisioner. - Better management of the Block Devices used for creating Local PVs by OpenEBS NDM. NDM provides capabilities like discovering Block Device properties, setting up Device Filters, metrics collection and ability to detect if the Block Devices have moved across nodes. @@ -102,14 +102,14 @@ You can skip this section if you have already installed OpenEBS. Helm key: helper.image - - Specify the list of block devices for which BlockDevice CRs must be created. A comma seperated values of path regular expressions can be specified. + - Specify the list of block devices for which BlockDevice CRs must be created. A comma separated values of path regular expressions can be specified.
    Default value: all YAML specification: data."node-disk-manager.config".filterconfigs.key["path-filter"].include on ConfigMap(openebs-ndm-config) Helm key: ndm.filters.includePaths
- - Specify the list of block devices for which BlockDevice CRs must not be created. A comma seperated values of path regular expressions can be specified. + - Specify the list of block devices for which BlockDevice CRs must not be created. A comma separated list of path regular expressions can be specified.
    Default value: "loop,fd0,sr0,/dev/ram,/dev/dm-,/dev/md" YAML specification: data."node-disk-manager.config".filterconfigs.key["path-filter"].exclude on ConfigMap(openebs-ndm-config) @@ -126,7 +126,7 @@ You can skip this section if you have already installed OpenEBS. ``` :::note - If you would like to use only Local PV (hostpath and device), you can install a lite verison of OpenEBS using the following command. + If you would like to use only Local PV (hostpath and device), you can install a lite version of OpenEBS using the following command. kubectl apply -f https://openebs.github.io/charts/openebs-operator-lite.yaml kubectl apply -f https://openebs.github.io/charts/openebs-lite-sc.yaml @@ -316,13 +316,13 @@ By default, Local PV volume will be provisioned with volumeMode as filesystem. I kubectl describe pod hello-local-device-pod ``` - The output shows that the Pod is running on `Node: gke-kmova-helm-default-pool-3a63aff5-1tmf` and using the peristent volume provided by `local-describe-pvc`. + The output shows that the Pod is running on `Node: gke-user-helm-default-pool-3a63aff5-1tmf` and using the persistent volume provided by `local-describe-pvc`.
    Name: hello-local-device-pod Namespace: default Priority: 0 - Node: gke-kmova-helm-default-pool-92abeacf-89nd/10.128.0.16 + Node: gke-user-helm-default-pool-92abeacf-89nd/10.128.0.16 Start Time: Thu, 16 Apr 2020 17:56:04 +0000 ... Volumes: @@ -383,7 +383,7 @@ By default, Local PV volume will be provisioned with volumeMode as filesystem. I - key: kubernetes.io/hostname operator: In values: - - gke-kmova-helm-default-pool-92abeacf-89nd + - gke-user-helm-default-pool-92abeacf-89nd persistentVolumeReclaimPolicy: Delete storageClassName: local-device volumeMode: Filesystem @@ -414,7 +414,7 @@ A few important characteristics of a *OpenEBS Local PV* can be seen from the abo ``` kubectl get bd -n openebs blockdevice-d1ef1e1b9dccf224e000c6f2e908c5f2 -o yaml ``` - The output shows that the BD is on the node `spec.nodeAttributes.nodeName: gke-kmova-helm-default-pool-92abeacf-89nd`. + The output shows that the BD is on the node `spec.nodeAttributes.nodeName: gke-user-helm-default-pool-92abeacf-89nd`.
    apiVersion: openebs.io/v1alpha1 @@ -456,7 +456,7 @@ A few important characteristics of a *OpenEBS Local PV* can be seen from the abo fsType: ext4 mountPoint: /mnt/disks/ssd0 nodeAttributes: - nodeName: gke-kmova-helm-default-pool-92abeacf-89nd + nodeName: gke-user-helm-default-pool-92abeacf-89nd partitioned: "No" path: /dev/sdb status: @@ -500,7 +500,7 @@ The following steps assume that you already have Velero with Restic integration The following steps will help you to prepare and backup the data from the volume created for the example pod (`hello-local-device-pod`), with the volume mount (`local-storage`). -1. Prepare the application pod for backup. Velero uses Kubernetes labels to select the pods that need to be backed up. Velero uses annotation on the pods to determine which volumes need to be backed up. For the example pod launched in this guide, you can inform velero to backup by specifing the following label and annotation. +1. Prepare the application pod for backup. Velero uses Kubernetes labels to select the pods that need to be backed up. Velero uses annotation on the pods to determine which volumes need to be backed up. For the example pod launched in this guide, you can inform velero to backup by specifying the following label and annotation. ``` kubectl label pod hello-local-device-pod app=test-velero-backup diff --git a/docs/uglocalpv-hostpath.md b/docs/uglocalpv-hostpath.md index df2d36613..ddcd1b401 100644 --- a/docs/uglocalpv-hostpath.md +++ b/docs/uglocalpv-hostpath.md @@ -108,7 +108,7 @@ You can skip this section if you have already installed OpenEBS. ``` :::note - If you would like to use only Local PV (hostpath and device), you can install a lite verison of OpenEBS using the following command. + If you would like to use only Local PV (hostpath and device), you can install a lite version of OpenEBS using the following command. kubectl apply -f https://openebs.github.io/charts/openebs-operator-lite.yaml kubectl apply -f https://openebs.github.io/charts/openebs-lite-sc.yaml @@ -204,7 +204,7 @@ Once you have installed OpenEBS, verify that *OpenEBS Local PV provisioner* is r openebs-localpv-provisioner-5ff697f967-nb7f4 1/1 Running 0 2m49s
    -2. To verify *OpenEBS Local PV Hostpath* Storageclass is created, execute the following command. +2. To verify *OpenEBS Local PV Hostpath* StorageClass is created, execute the following command. ``` kubectl get sc @@ -257,7 +257,7 @@ The next step is to create a PersistentVolumeClaim. Pods will use PersistentVolu local-hostpath-pvc Pending openebs-hostpath 3m7s
    -## Create Pod to consume OpenEBS Local PV Hospath Storage +## Create Pod to consume OpenEBS Local PV Hostpath Storage 1. Here is the configuration file for the Pod that uses Local PV. Save the following Pod definition to `local-hostpath-pod.yaml`. @@ -309,13 +309,13 @@ The next step is to create a PersistentVolumeClaim. Pods will use PersistentVolu kubectl describe pod hello-local-hostpath-pod ``` - The output shows that the Pod is running on `Node: gke-kmova-helm-default-pool-3a63aff5-1tmf` and using the peristent volume provided by `local-hostpath-pvc`. + The output shows that the Pod is running on `Node: gke-user-helm-default-pool-3a63aff5-1tmf` and using the persistent volume provided by `local-hostpath-pvc`.
    Name: hello-local-hostpath-pod Namespace: default Priority: 0 - Node: gke-kmova-helm-default-pool-3a63aff5-1tmf/10.128.0.28 + Node: gke-user-helm-default-pool-3a63aff5-1tmf/10.128.0.28 Start Time: Thu, 16 Apr 2020 17:56:04 +0000 ... Volumes: @@ -376,7 +376,7 @@ The next step is to create a PersistentVolumeClaim. Pods will use PersistentVolu - key: kubernetes.io/hostname operator: In values: - - gke-kmova-helm-default-pool-3a63aff5-1tmf + - gke-user-helm-default-pool-3a63aff5-1tmf persistentVolumeReclaimPolicy: Delete storageClassName: openebs-hostpath volumeMode: Filesystem @@ -418,7 +418,7 @@ The following steps assume that you already have Velero with Restic integration The following steps will help you to prepare and backup the data from the volume created for the example pod (`hello-local-hostpath-pod`), with the volume mount (`local-storage`). -1. Prepare the application pod for backup. Velero uses Kubernetes labels to select the pods that need to be backed up. Velero uses annotation on the pods to determine which volumes need to be backed up. For the example pod launched in this guide, you can inform velero to backup by specifing the following label and annotation. +1. Prepare the application pod for backup. Velero uses Kubernetes labels to select the pods that need to be backed up. Velero uses annotation on the pods to determine which volumes need to be backed up. For the example pod launched in this guide, you can inform velero to backup by specifying the following label and annotation. ``` kubectl label pod hello-local-hostpath-pod app=test-velero-backup diff --git a/docs/ugndm.md b/docs/ugndm.md index 2dc973a43..fbc7b860d 100644 --- a/docs/ugndm.md +++ b/docs/ugndm.md @@ -100,7 +100,7 @@ filterconfigs: **Note:** -- Regex support is not available on the `filterconfigs` in NDM `Configmap` and the Configmap is applicable to cluster level. This means, if user provide `/dev/sdb` in configmap as an exlcuded filter, then all `/dev/sdb` blockdevices from all nodes in the cluster will be excluded by NDM. +- Regex support is not available on the `filterconfigs` in NDM `Configmap` and the Configmap is applicable to cluster level. This means, if user provide `/dev/sdb` in configmap as an excluded filter, then all `/dev/sdb` blockdevices from all nodes in the cluster will be excluded by NDM. - It is recommended to use OpenEBS provisioner alone in the cluster. If you are using other storage provider provisioner like `gce-pd` along with OpenEBS, use exclude filters to avoid those disks from being consumed by OpenEBS. For example, if you are using the `standard` storage class in GKE with storage provisioner as **kubernetes.io/gce-pd**, and when it creates a PVC, a GPD is attached to the node. This GPD will be detected by NDM and it may be used by OpenEBS for provisioning volume. To avoid this scenario, it is recommended to put the associated device path created on the node in the **exclude** field under **path-filter**. If GPD is attached as `/dev/sdc` , then add `/dev/sdc` in the above mentioned field. diff --git a/docs/uninstall.md b/docs/uninstall.md index d81f1325a..298942e4f 100644 --- a/docs/uninstall.md +++ b/docs/uninstall.md @@ -63,7 +63,7 @@ The recommended steps to uninstall the OpenEBS cluster gracefully is as follows. kubectl get bd -n ``` - If present, remove the finalizer entry from the corresponding BD and then delete it. To remove the finazlier, use the following command + If present, remove the finalizer entry from the corresponding BD and then delete it. 
To remove the finalizer, use the following command ``` kubectl patch -n openebs bd -p '{"metadata":{"finalizers":null}}' --type=merge ``` diff --git a/hack/cspell-contributors.txt b/hack/cspell-contributors.txt new file mode 100644 index 000000000..15a0e7b63 --- /dev/null +++ b/hack/cspell-contributors.txt @@ -0,0 +1,14 @@ +Hightower +Nehalem +Optoro +Thakur +akhilerm +allenhaozi +bajpai +datacore +doriftoshoes +prateekpandey +ranjith +shubham +sonasingh +zlymeda diff --git a/hack/cspell-ignore.txt b/hack/cspell-ignore.txt new file mode 100644 index 000000000..e68a9ba3d --- /dev/null +++ b/hack/cspell-ignore.txt @@ -0,0 +1,50 @@ +CSFFFSHBMM +HMAC +Mmxha +MYTV +PBKDF +PBKDF +Shmem +Surp +Rsvd +WSREP +ZHZX +Zuvp +divcol +divrow +empid +fwxr +gpdpool +hjid +hjsv +installnamespace +jbfm +jcgwc +jlgc +kkpk +lbtqt +lcfz +ljcc +ltdsf +mayalab +mxqcf +myproject +nuyzam +openebspsp +nhwb +privilegedpsp +ptxss +pvcspc +pxdbc +rclm +rhfbk +sbtest +sjvtq +testns +xvswn +znbr +zpvhh + + + + diff --git a/hack/cspell-words.txt b/hack/cspell-words.txt new file mode 100644 index 000000000..396675d67 --- /dev/null +++ b/hack/cspell-words.txt @@ -0,0 +1,475 @@ +Autobind +Basepath +BLOCKDEVICENAME +Bing +Bundler's +Codeblock +CLAIMSTATE +CLOUDBYT +CNCF +Coredumps +COREFILE +CSPC +Ctype +Datacenter +Dependabot +ENTRYPOINT +ERRORED +ESNEXT +Exps +FIXURL +Fluentd +GOOG +GOPATH +GOROOT +Galera +Gemfile +Gitaly +Glusterfs +Grafana +Gridworkz +Hacktoberfest +HARDDISK +Heptio +HOSTPATH +Hostpaths +Hotjar +HSLA +HSVA +HWaddress +Hyperconverged +IANA +Initiatorname +Interop +INPROGRESS +Jekylling +Jupyter +Keyspace +Konvoy +Kramdown +Kubera +Kubernetes +Kustomize +LOCALPV +Lunr +Luworkers +MaxXmitDataSegmentLength +Mayastor +Minio +MOAC +MOUNTPOINT +NATS +Netdev +NODENAME +NUOCA +NUOCMD +Nutanix +OnGres +OpenEBS +Percona +Postgres +Preformat +PYTHONPATH +RAIDZ +Rawfile +Restic +SIGABRT +SIGILL +StackGres +Staticman's +STORAGECLASS +Targ +TSDB +QUICKSTART +VARCHAR +VARDIR +Unavail +UNPARKER +Weavescope +WORKERNODE +XtraBackup +XtraDB +acanthopterygious +accesskey +activitybar +algolia +alphafeatures +alloc +agentpool +anonymize +apiextensions +apiserver +appid +appnode +arraylength +attachdetach +automount +autoscaler +autoupdate +azureedge +azureuser +backtraces +backupstoragelocation +behaviour +blockdevice +blockdeviceclaim +blockdeviceclaims +blockdevices +blockstore +btrfs +cachefile +casengines +castemplates +centos +ceph +cfml +cloudvm +clustercheck +clusterrole +clusterrolebinding +clusterroles +checkpointedIOSeq +colorscheme +configmap +configmaps +coredns +coredump +coredumpctl +coredumps +crashloopbackoff +creds +crew +csidriver +csidrivers +csinodeinfos +cspi +cstor +cStor +cStor's +cstorbackups +cstorbackupcompleted +cstorcompletedbackups +cstorpoolclusters +cstorpoolinstances +cstorpool +cstorpools +cstorrestores +cstorvolume +cstorvolumeattachments +cstorvolumeclaims +cstorvolumeconfigs +cstorvolumepolicies +cstorvolumereplica +cstorvolumereplicas +cstorvolumes +customresourcedefinitions +cqlsh +daemonset +darkblue +datadir +dcopy +deploymentconfig +devlinks +devname +devpath +devtool +devtools +dict +diskfilter +distros +disqus +dmsetup +docker +dockerd +doks +dontimport +downlevel +eCryptfs +elasticdb +enablement +endhighlight +envs +epoll +excluded +fallocate +fdisk +filesyscheck +filesystems +filterconfigs +finalizer +finalizers +fstype +fsuuid +gcloud +gcrio +gcpbucket +gettime +gemfiles +gimsuy +githubusercontent +gitlab +gotgt +gtag +healthz +hostdir +hostnames +hostpathbkp +htaccess +httpserver 
+hugepages +hyperconvergence +hyperkube +iface +importattr +inode +inodes +iscsi +iscsid +iscsiadm +isforbidden +istgt +jsonpath +jekyllrb +jemoji +jivaguide +journalctl +knownreplicas +konvoy +kotlinscript +krew +kube +kubeconfig +kubectl +kubelet +languageclient +leaderelection +lerna +libc +libcrypto +libdl +libgcc +libisns +libopeniscsiusr +libpthread +libspdk +libudev +libuzfs +lightblue +lightbulb +lightgrey +livenesstimestamp +loadgen +localprovisioner +localpv +lockin +logdir +logfile +logtostderr +logind +lowtco +luks +luworkers +mademistakes +malloc +masterless +maxpools +mayactl +mayadata +mayastor +mayastornodes +mayastorpools +mayastorvolumes +mayastor's +mayastornode +mayastorpool +mayastorvolume +microk8s +microservice +minification +minikube +minio +mkdir +mgmt +mmistakes +moac +moac's +mobx +mountpoints +msvalidate +multipath +multipathing +multipaths +multiroot +mutex +myvariable +nats +naver +netlink +newbackup +newbucket +nocrypto +nodeselector +nodetool +nodetype +nohoist +nospace +nuoadmin +nuodb +nuodocker +nuoinsights +nuopython +nuosm +nuote +nvlist +nvme +nvmf +oltp +onfinally +openapi +openebs +openshift +operatorhub +optin +ownerrules +paginator +parallelly +patroni +patronictl +percona +perconalab +permalinks +persistentvolume +persistentvolumeclaim +persistentvolumes +persistentvolumeclaims +pluginproxy +pmmdata +pmmserver +podname +podsecuritypolicies +podsecuritypolicy +poolname +poolpod +postgres +postgresql +probeconfigs +proxyadmin +privilegedpsp +psql +pthread +pynuoadmin +quotemark +ramdisks +raftlog +rebalancing +rebootmgr +rebootmgrd +relatime +replicacount +replicaid +resizer +restapi +restructuredtext +rmwc +roadmap +rootdir +runtasks +sbin +scrollbar +sched +schedulable +seachest +seccomp +secretkey +securitycontextconstraints +selfsubjectaccessreviews +selfsubjectrulesreviews +sendtargets +serviceaccount +serviceaccounts +sgcluster +shortname +shrinkwrap +signoff +snapshotter +sourcemap +squigglies +statefulset +statefulsets +staticman +staticmanapp +storageclass +storageclasses +streetsidesoftware +stylesheets +storagepool +storagepoolclaims +storagepools +subfolder +subpath +supgrades +swaggerapi +sysbench +sysroot +systemsystem +targetip +targetselector +textdocument +timeo +tolerations +toupper +tsih +tslib +tunables +typechecking +udev +udevadm +ugcstor +uglocalpv +ugndm +ugmayastor +unelevated +unmanaged +unmap +unmount +uroot +untracked +upgradetasks +uri's +usecases +userspace +uzfs +velero +virtio +visualstudio +vfat +volname +volumesnapshot +volumesnapshotclasses +volumesnapshotdata +volumesnapshotdatas +volumesnapshotcontents +volumesnapshotlocation +volumesnapshotlocations +volumesnapshots +vsce +vsmarketplacebadge +vtable +walkthrough +webhook +webscale +webview +wipefs +workernode +xargs +xfsprogs +xrmxx +xtrabackup +xvda +xvdf +xvdg +yandex +zpool +zrepl +zvol +zvols
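Finally, when a future docs change trips the spell check, a quick way to see whether the flagged term already appears in one of the dictionary files introduced in this patch is the sketch below (`myterm` is a placeholder):

```
# Case-insensitive, whole-line match of the flagged term across the three dictionaries
grep -i -x "myterm" hack/cspell-words.txt hack/cspell-ignore.txt hack/cspell-contributors.txt
```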