From f367daf7b6538b7f2d3a11dd7602fb1c422e6ad7 Mon Sep 17 00:00:00 2001 From: alexzhc Date: Mon, 8 Aug 2022 14:01:09 +0800 Subject: [PATCH] [doc] update "configure storage pool" Signed-off-by: alexzhc --- docs/docs/01.installation/02.post-deploy.md | 17 +- docs/docs/01.installation/03.config-pool.md | 98 +++ .../{03.upgrade.md => 04.upgrade.md} | 2 +- .../{04.uninstall.md => 05.uninstall.md} | 14 +- docs/docs/04.components/_category_.json | 4 - .../{04.components => 04.modules}/00.crd.md | 0 .../01.local-disk-manager.md | 0 .../02.local-storage.md | 0 .../{04.components => 04.modules}/03.DRBD.md | 0 .../04.scheduler.md | 0 docs/docs/04.modules/_category_.json | 4 + .../current.json | 6 +- .../current/01.installation/02.post-deploy.md | 2 +- .../current/01.installation/03.config-pool.md | 98 +++ .../{03.upgrade.md => 04.upgrade.md} | 2 +- .../{04.uninstall.md => 05.uninstall.md} | 2 +- .../current/04.components/_category_.json | 4 - .../{04.components => 04.modules}/00.crd.md | 0 .../01.local-disk-manager.md | 0 .../02.local-storage.md | 0 .../{04.components => 04.modules}/03.DRBD.md | 0 .../04.scheduler.md | 0 .../current/04.modules/_category_.json | 4 + .../templates/post-install-claim-disks.yaml | 2 +- helm/hwameistor/templates/storageclass.yaml | 16 +- helm/hwameistor/values.yaml | 2 +- test.yaml | 772 ++++++++++++++++++ 27 files changed, 1007 insertions(+), 42 deletions(-) create mode 100644 docs/docs/01.installation/03.config-pool.md rename docs/docs/01.installation/{03.upgrade.md => 04.upgrade.md} (94%) rename docs/docs/01.installation/{04.uninstall.md => 05.uninstall.md} (74%) delete mode 100644 docs/docs/04.components/_category_.json rename docs/docs/{04.components => 04.modules}/00.crd.md (100%) rename docs/docs/{04.components => 04.modules}/01.local-disk-manager.md (100%) rename docs/docs/{04.components => 04.modules}/02.local-storage.md (100%) rename docs/docs/{04.components => 04.modules}/03.DRBD.md (100%) rename docs/docs/{04.components => 04.modules}/04.scheduler.md (100%) create mode 100644 docs/docs/04.modules/_category_.json create mode 100644 docs/i18n/cn/docusaurus-plugin-content-docs/current/01.installation/03.config-pool.md rename docs/i18n/cn/docusaurus-plugin-content-docs/current/01.installation/{03.upgrade.md => 04.upgrade.md} (96%) rename docs/i18n/cn/docusaurus-plugin-content-docs/current/01.installation/{04.uninstall.md => 05.uninstall.md} (96%) delete mode 100644 docs/i18n/cn/docusaurus-plugin-content-docs/current/04.components/_category_.json rename docs/i18n/cn/docusaurus-plugin-content-docs/current/{04.components => 04.modules}/00.crd.md (100%) rename docs/i18n/cn/docusaurus-plugin-content-docs/current/{04.components => 04.modules}/01.local-disk-manager.md (100%) rename docs/i18n/cn/docusaurus-plugin-content-docs/current/{04.components => 04.modules}/02.local-storage.md (100%) rename docs/i18n/cn/docusaurus-plugin-content-docs/current/{04.components => 04.modules}/03.DRBD.md (100%) rename docs/i18n/cn/docusaurus-plugin-content-docs/current/{04.components => 04.modules}/04.scheduler.md (100%) create mode 100644 docs/i18n/cn/docusaurus-plugin-content-docs/current/04.modules/_category_.json create mode 100644 test.yaml diff --git a/docs/docs/01.installation/02.post-deploy.md b/docs/docs/01.installation/02.post-deploy.md index f36e6ba92..d58ad0f63 100644 --- a/docs/docs/01.installation/02.post-deploy.md +++ b/docs/docs/01.installation/02.post-deploy.md @@ -41,18 +41,7 @@ hwameistor-webhook-986479678-278cr 1/1 Running `local-disk-manager` and `local-storage` are 
`DaemonSets`. They should have one pod on each Kubernetes node. ::: - -## Step 2: Check the storageclass - -A `storageClass` should be created: - -```bash -$ kubectl get storageclass hwameistor-storage-disk-hdd -NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE -hwameistor-storage-disk-hdd disk.hwameistor.io Delete WaitForFirstConsumer true 4m29s -``` - -## Step 3: Check the APIs +## Step 2: Check the APIs HwameiStor CRDs create the following APIs. @@ -74,9 +63,9 @@ localvolumereplicas lvr hwameistor.io/v1alpha1 false Lo localvolumes lv hwameistor.io/v1alpha1 false LocalVolume ``` -For the details about CRDs, please also refer to the chapter [CRDs](../04.components/00.crd.md). +For the details about CRDs, please also refer to the chapter [CRDs](../04.modules/00.crd.md). -# Step 4: Check the localDiskNode and localDisks +# Step 3: Check the localDiskNode and localDisks HwameiStor autoscans each node and registers each disk as CRD `localDisk(ld)`. The unused disks are displayed with `PHASE: Unclaimed`. diff --git a/docs/docs/01.installation/03.config-pool.md b/docs/docs/01.installation/03.config-pool.md new file mode 100644 index 000000000..5f061773a --- /dev/null +++ b/docs/docs/01.installation/03.config-pool.md @@ -0,0 +1,98 @@ +--- +sidebar_position: 4 +sidebar_label: "Configure Storage Pool" +--- + +# Configure Storage Pool + +## Step 1: Create LocalDiskClaim objects + +HwameiStor sets up storage pools by creating `LocalDiskClaim` objects according to the storage media types. To create an HDD pool on all kubernetes worker nodes: + +```bash +$ helm template helm/hwameistor \ + -s templates/post-install-claim-disks.yaml \ + --set storageNodes='{k8s-worker-1,k8s-worker-2,k8s-worker-3}' \ + | kubectl apply -f - +``` + +## Step 2: Verify LocalDiskClaim objects + +```bash +$ kubectl get ldc +NAME NODEMATCH PHASE +k8s-worker-1 k8s-worker-1 Bound +k8s-worker-2 k8s-worker-2 Bound +k8s-worker-3 k8s-worker-3 Bound +``` + +## Step 3: Verify StorageClass + +```bash +$ kubectl get sc hwameistor-storage-lvm-hdd +NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE +hwameistor-storage-lvm-hdd lvm.hwameistor.io Delete WaitForFirstConsumer true 114s +``` + +## Step 4: Verify LocalDisk objects + +```bash +$ kubectl get ld +NAME NODEMATCH CLAIM PHASE +k8s-worker-1-sda k8s-worker-1 Inuse +k8s-worker-1-sdb k8s-worker-1 k8s-worker-1 Claimed +k8s-worker-1-sdc k8s-worker-1 k8s-worker-1 Claimed +k8s-worker-1-sdd k8s-worker-1 Inuse +k8s-worker-1-sde k8s-worker-1 Inuse +k8s-worker-2-sda k8s-worker-2 Inuse +k8s-worker-2-sdb k8s-worker-2 k8s-worker-2 Claimed +k8s-worker-2-sdc k8s-worker-2 k8s-worker-2 Claimed +k8s-worker-2-sdd k8s-worker-2 Inuse +k8s-worker-2-sde k8s-worker-2 Inuse +k8s-worker-3-sda k8s-worker-3 Inuse +k8s-worker-3-sdb k8s-worker-3 k8s-worker-3 Claimed +k8s-worker-3-sdc k8s-worker-3 k8s-worker-3 Claimed +k8s-worker-3-sdd k8s-worker-3 Inuse +k8s-worker-3-sde k8s-worker-3 Inuse +``` + +## Step 5 (Optional): Observe VG + +On a kubernetes worker node, observe a `VG` is created for an `LocalDiskClaim` object + +```bash +root@k8s-worker-1:~$ vgdisplay LocalStorage_PoolHDD + --- Volume group --- + VG Name LocalStorage_PoolHDD + System ID + Format lvm2 + Metadata Areas 2 + Metadata Sequence No 1 + VG Access read/write + VG Status resizable + MAX LV 0 + Cur LV 0 + Open LV 0 + Max PV 0 + Cur PV 2 + Act PV 2 + VG Size 199.99 GiB + PE Size 4.00 MiB + Total PE 51198 + Alloc PE / Size 0 / 0 + Free PE / Size 51198 / 199.99 GiB + VG UUID 
jJ3s7g-iyoJ-c4zr-3Avc-3K4K-BrJb-A5A5Oe +``` + +## Set up storage pool during deployment + +A storage pool can be configured during HwameiStor deployment by helm command: + +```bash +$ helm install \ + --namespace hwameistor \ + --create-namespace \ + hwameistor \ + helm/hwameistor \ + --set storageNodes='{k8s-worker-1,k8s-worker-2,k8s-worker-3}' +``` diff --git a/docs/docs/01.installation/03.upgrade.md b/docs/docs/01.installation/04.upgrade.md similarity index 94% rename from docs/docs/01.installation/03.upgrade.md rename to docs/docs/01.installation/04.upgrade.md index 90276bf1b..4e75004dc 100644 --- a/docs/docs/01.installation/03.upgrade.md +++ b/docs/docs/01.installation/04.upgrade.md @@ -1,5 +1,5 @@ --- -sidebar_position: 4 +sidebar_position: 5 sidebar_label: "Upgrade" --- diff --git a/docs/docs/01.installation/04.uninstall.md b/docs/docs/01.installation/05.uninstall.md similarity index 74% rename from docs/docs/01.installation/04.uninstall.md rename to docs/docs/01.installation/05.uninstall.md index 65cbaabdb..b944d5d27 100644 --- a/docs/docs/01.installation/04.uninstall.md +++ b/docs/docs/01.installation/05.uninstall.md @@ -1,5 +1,5 @@ --- -sidebar_position: 5 +sidebar_position: 6 sidebar_label: "Uninstall" --- @@ -33,10 +33,18 @@ $ kubectl get crd -o name \ | xargs -t kubectl delete ``` -### Remove clusterroles and rolebindings +### Remove clusterRoles and roleBindings ```bash $ kubectl get clusterrolebinding,clusterrole -o name \ | grep hwameistor \ | xargs -t kubectl delete -``` \ No newline at end of file +``` + +### Remove storageClass + +```bash +$ kubectl get sc -o name \ + | grep hwameistor-storage-lvm- \ + | xargs -t kubectl delete +``` diff --git a/docs/docs/04.components/_category_.json b/docs/docs/04.components/_category_.json deleted file mode 100644 index 43817222d..000000000 --- a/docs/docs/04.components/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "Components", - "position": 5 -} \ No newline at end of file diff --git a/docs/docs/04.components/00.crd.md b/docs/docs/04.modules/00.crd.md similarity index 100% rename from docs/docs/04.components/00.crd.md rename to docs/docs/04.modules/00.crd.md diff --git a/docs/docs/04.components/01.local-disk-manager.md b/docs/docs/04.modules/01.local-disk-manager.md similarity index 100% rename from docs/docs/04.components/01.local-disk-manager.md rename to docs/docs/04.modules/01.local-disk-manager.md diff --git a/docs/docs/04.components/02.local-storage.md b/docs/docs/04.modules/02.local-storage.md similarity index 100% rename from docs/docs/04.components/02.local-storage.md rename to docs/docs/04.modules/02.local-storage.md diff --git a/docs/docs/04.components/03.DRBD.md b/docs/docs/04.modules/03.DRBD.md similarity index 100% rename from docs/docs/04.components/03.DRBD.md rename to docs/docs/04.modules/03.DRBD.md diff --git a/docs/docs/04.components/04.scheduler.md b/docs/docs/04.modules/04.scheduler.md similarity index 100% rename from docs/docs/04.components/04.scheduler.md rename to docs/docs/04.modules/04.scheduler.md diff --git a/docs/docs/04.modules/_category_.json b/docs/docs/04.modules/_category_.json new file mode 100644 index 000000000..2b41381e9 --- /dev/null +++ b/docs/docs/04.modules/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Modules", + "position": 5 +} \ No newline at end of file diff --git a/docs/i18n/cn/docusaurus-plugin-content-docs/current.json b/docs/i18n/cn/docusaurus-plugin-content-docs/current.json index 112a2e8c3..ba77365ee 100755 --- 
a/docs/i18n/cn/docusaurus-plugin-content-docs/current.json +++ b/docs/i18n/cn/docusaurus-plugin-content-docs/current.json @@ -15,9 +15,9 @@ "message": "后端操作", "description": "The label for category Backend Operations in sidebar tutorialSidebar" }, - "sidebar.tutorialSidebar.category.Components": { - "message": "组件", - "description": "The label for category Components in sidebar tutorialSidebar" + "sidebar.tutorialSidebar.category.Modules": { + "message": "模块", + "description": "The label for category modules in sidebar tutorialSidebar" }, "sidebar.tutorialSidebar.category.Applications": { "message": "应用", diff --git a/docs/i18n/cn/docusaurus-plugin-content-docs/current/01.installation/02.post-deploy.md b/docs/i18n/cn/docusaurus-plugin-content-docs/current/01.installation/02.post-deploy.md index c597bda95..560b423fe 100644 --- a/docs/i18n/cn/docusaurus-plugin-content-docs/current/01.installation/02.post-deploy.md +++ b/docs/i18n/cn/docusaurus-plugin-content-docs/current/01.installation/02.post-deploy.md @@ -74,7 +74,7 @@ localvolumereplicas lvr hwameistor.io/v1alpha1 false Lo localvolumes lv hwameistor.io/v1alpha1 false LocalVolume ``` -For the details about CRDs, please also refer to the chapter [CRDs](../04.components/00.crd.md). +For the details about CRDs, please also refer to the chapter [CRDs](../04.modules/00.crd.md). # Step 4: Check the localDiskNode and localDisks diff --git a/docs/i18n/cn/docusaurus-plugin-content-docs/current/01.installation/03.config-pool.md b/docs/i18n/cn/docusaurus-plugin-content-docs/current/01.installation/03.config-pool.md new file mode 100644 index 000000000..f11bc55a0 --- /dev/null +++ b/docs/i18n/cn/docusaurus-plugin-content-docs/current/01.installation/03.config-pool.md @@ -0,0 +1,98 @@ +--- +sidebar_position: 4 +sidebar_label: "配置存储池" +--- + +# Configure Storage Pool + +## Step 1: Create LocalDiskClaim objects + +HwameiStor sets up storage pools by creating `LocalDiskClaim` objects according to the storage media types. 
To create an HDD pool on all kubernetes worker nodes: + +```bash +$ helm template helm/hwameistor \ + -s templates/post-install-claim-disks.yaml \ + --set storageNodes='{k8s-worker-1,k8s-worker-2,k8s-worker-3}' \ + | kubectl apply -f - +``` + +## Step 2: Verify LocalDiskClaim objects + +```bash +$ kubectl get ldc +NAME NODEMATCH PHASE +k8s-worker-1 k8s-worker-1 Bound +k8s-worker-2 k8s-worker-2 Bound +k8s-worker-3 k8s-worker-3 Bound +``` + +## Step 3: Verify StorageClass + +```bash +$ kubectl get sc hwameistor-storage-lvm-hdd +NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE +hwameistor-storage-lvm-hdd lvm.hwameistor.io Delete WaitForFirstConsumer true 114s +``` + +## Step 4: Verify LocalDisk objects + +```bash +$ kubectl get ld +NAME NODEMATCH CLAIM PHASE +k8s-worker-1-sda k8s-worker-1 Inuse +k8s-worker-1-sdb k8s-worker-1 k8s-worker-1 Claimed +k8s-worker-1-sdc k8s-worker-1 k8s-worker-1 Claimed +k8s-worker-1-sdd k8s-worker-1 Inuse +k8s-worker-1-sde k8s-worker-1 Inuse +k8s-worker-2-sda k8s-worker-2 Inuse +k8s-worker-2-sdb k8s-worker-2 k8s-worker-2 Claimed +k8s-worker-2-sdc k8s-worker-2 k8s-worker-2 Claimed +k8s-worker-2-sdd k8s-worker-2 Inuse +k8s-worker-2-sde k8s-worker-2 Inuse +k8s-worker-3-sda k8s-worker-3 Inuse +k8s-worker-3-sdb k8s-worker-3 k8s-worker-3 Claimed +k8s-worker-3-sdc k8s-worker-3 k8s-worker-3 Claimed +k8s-worker-3-sdd k8s-worker-3 Inuse +k8s-worker-3-sde k8s-worker-3 Inuse +``` + +## Step 5 (Optional): Observe VG + +On a kubernetes worker node, observe a `VG` is created for an `LocalDiskClaim` object + +```bash +root@k8s-worker-1:~$ vgdisplay LocalStorage_PoolHDD + --- Volume group --- + VG Name LocalStorage_PoolHDD + System ID + Format lvm2 + Metadata Areas 2 + Metadata Sequence No 1 + VG Access read/write + VG Status resizable + MAX LV 0 + Cur LV 0 + Open LV 0 + Max PV 0 + Cur PV 2 + Act PV 2 + VG Size 199.99 GiB + PE Size 4.00 MiB + Total PE 51198 + Alloc PE / Size 0 / 0 + Free PE / Size 51198 / 199.99 GiB + VG UUID jJ3s7g-iyoJ-c4zr-3Avc-3K4K-BrJb-A5A5Oe +``` + +## Configure storage pool during deployment + +A storage pool can be configured during HwameiStor deployment by helm command: + +```bash +$ helm install \ + --namespace hwameistor \ + --create-namespace \ + hwameistor \ + helm/hwameistor \ + --set storageNodes='{k8s-worker-1,k8s-worker-2,k8s-worker-3}' +``` diff --git a/docs/i18n/cn/docusaurus-plugin-content-docs/current/01.installation/03.upgrade.md b/docs/i18n/cn/docusaurus-plugin-content-docs/current/01.installation/04.upgrade.md similarity index 96% rename from docs/i18n/cn/docusaurus-plugin-content-docs/current/01.installation/03.upgrade.md rename to docs/i18n/cn/docusaurus-plugin-content-docs/current/01.installation/04.upgrade.md index 8016936db..abc2dbfcf 100644 --- a/docs/i18n/cn/docusaurus-plugin-content-docs/current/01.installation/03.upgrade.md +++ b/docs/i18n/cn/docusaurus-plugin-content-docs/current/01.installation/04.upgrade.md @@ -1,5 +1,5 @@ --- -sidebar_position: 4 +sidebar_position: 5 sidebar_label: "升级" --- diff --git a/docs/i18n/cn/docusaurus-plugin-content-docs/current/01.installation/04.uninstall.md b/docs/i18n/cn/docusaurus-plugin-content-docs/current/01.installation/05.uninstall.md similarity index 96% rename from docs/i18n/cn/docusaurus-plugin-content-docs/current/01.installation/04.uninstall.md rename to docs/i18n/cn/docusaurus-plugin-content-docs/current/01.installation/05.uninstall.md index 4f6f7add2..9c18ad55c 100644 --- a/docs/i18n/cn/docusaurus-plugin-content-docs/current/01.installation/04.uninstall.md +++ 
b/docs/i18n/cn/docusaurus-plugin-content-docs/current/01.installation/05.uninstall.md @@ -1,5 +1,5 @@ --- -sidebar_position: 5 +sidebar_position: 6 sidebar_label: "卸载" --- diff --git a/docs/i18n/cn/docusaurus-plugin-content-docs/current/04.components/_category_.json b/docs/i18n/cn/docusaurus-plugin-content-docs/current/04.components/_category_.json deleted file mode 100644 index 43817222d..000000000 --- a/docs/i18n/cn/docusaurus-plugin-content-docs/current/04.components/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "Components", - "position": 5 -} \ No newline at end of file diff --git a/docs/i18n/cn/docusaurus-plugin-content-docs/current/04.components/00.crd.md b/docs/i18n/cn/docusaurus-plugin-content-docs/current/04.modules/00.crd.md similarity index 100% rename from docs/i18n/cn/docusaurus-plugin-content-docs/current/04.components/00.crd.md rename to docs/i18n/cn/docusaurus-plugin-content-docs/current/04.modules/00.crd.md diff --git a/docs/i18n/cn/docusaurus-plugin-content-docs/current/04.components/01.local-disk-manager.md b/docs/i18n/cn/docusaurus-plugin-content-docs/current/04.modules/01.local-disk-manager.md similarity index 100% rename from docs/i18n/cn/docusaurus-plugin-content-docs/current/04.components/01.local-disk-manager.md rename to docs/i18n/cn/docusaurus-plugin-content-docs/current/04.modules/01.local-disk-manager.md diff --git a/docs/i18n/cn/docusaurus-plugin-content-docs/current/04.components/02.local-storage.md b/docs/i18n/cn/docusaurus-plugin-content-docs/current/04.modules/02.local-storage.md similarity index 100% rename from docs/i18n/cn/docusaurus-plugin-content-docs/current/04.components/02.local-storage.md rename to docs/i18n/cn/docusaurus-plugin-content-docs/current/04.modules/02.local-storage.md diff --git a/docs/i18n/cn/docusaurus-plugin-content-docs/current/04.components/03.DRBD.md b/docs/i18n/cn/docusaurus-plugin-content-docs/current/04.modules/03.DRBD.md similarity index 100% rename from docs/i18n/cn/docusaurus-plugin-content-docs/current/04.components/03.DRBD.md rename to docs/i18n/cn/docusaurus-plugin-content-docs/current/04.modules/03.DRBD.md diff --git a/docs/i18n/cn/docusaurus-plugin-content-docs/current/04.components/04.scheduler.md b/docs/i18n/cn/docusaurus-plugin-content-docs/current/04.modules/04.scheduler.md similarity index 100% rename from docs/i18n/cn/docusaurus-plugin-content-docs/current/04.components/04.scheduler.md rename to docs/i18n/cn/docusaurus-plugin-content-docs/current/04.modules/04.scheduler.md diff --git a/docs/i18n/cn/docusaurus-plugin-content-docs/current/04.modules/_category_.json b/docs/i18n/cn/docusaurus-plugin-content-docs/current/04.modules/_category_.json new file mode 100644 index 000000000..88139e050 --- /dev/null +++ b/docs/i18n/cn/docusaurus-plugin-content-docs/current/04.modules/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "模块", + "position": 5 +} \ No newline at end of file diff --git a/helm/hwameistor/templates/post-install-claim-disks.yaml b/helm/hwameistor/templates/post-install-claim-disks.yaml index 02ab5ff61..d7f3afb73 100644 --- a/helm/hwameistor/templates/post-install-claim-disks.yaml +++ b/helm/hwameistor/templates/post-install-claim-disks.yaml @@ -14,6 +14,6 @@ spec: # The field value should be consistent with the nodeName in localdisk nodeName: {{ . 
}}
   description:
-    diskType: {{ $.Values.storageclass.diskType }}
+    diskType: {{ $.Values.storageClass.diskType }}
 ---
 {{- end}}
diff --git a/helm/hwameistor/templates/storageclass.yaml b/helm/hwameistor/templates/storageclass.yaml
index 1da002e5e..316d5c50a 100644
--- a/helm/hwameistor/templates/storageclass.yaml
+++ b/helm/hwameistor/templates/storageclass.yaml
@@ -1,19 +1,19 @@
-{{- if .Values.storageclass.enabled -}}
+{{- if .Values.storageClass.enabled -}}
 apiVersion: storage.k8s.io/v1
 kind: StorageClass
 metadata:
   annotations:
-    storageclass.kubernetes.io/is-default-class: {{ .Values.storageclass.default | quote }}
-  name: hwameistor-storage-lvm-{{ .Values.storageclass.diskType | lower}}
+    storageclass.kubernetes.io/is-default-class: {{ .Values.storageClass.default | quote }}
+  name: hwameistor-storage-lvm-{{ .Values.storageClass.diskType | lower}}
 provisioner: lvm.hwameistor.io
 volumeBindingMode: WaitForFirstConsumer
-allowVolumeExpansion: {{ .Values.storageclass.allowVolumeExpansion }}
-reclaimPolicy: {{ .Values.storageclass.reclaimPolicy }}
+allowVolumeExpansion: {{ .Values.storageClass.allowVolumeExpansion }}
+reclaimPolicy: {{ .Values.storageClass.reclaimPolicy }}
 parameters:
-  replicaNumber: {{ .Values.storageclass.replicaNumber | quote}}
-  poolClass: {{ .Values.storageclass.diskType | quote}}
+  replicaNumber: {{ .Values.storageClass.replicaNumber | quote}}
+  poolClass: {{ .Values.storageClass.diskType | quote}}
   poolType: "REGULAR"
   volumeKind: "LVM"
   striped: "true"
-  csi.storage.k8s.io/fstype: {{ .Values.storageclass.fsType | quote}}
+  csi.storage.k8s.io/fstype: {{ .Values.storageClass.fsType | quote}}
 {{- end }}
\ No newline at end of file
diff --git a/helm/hwameistor/values.yaml b/helm/hwameistor/values.yaml
index a68ae5da4..a302be21e 100644
--- a/helm/hwameistor/values.yaml
+++ b/helm/hwameistor/values.yaml
@@ -7,7 +7,7 @@ hwameistorImageRegistry: ghcr.io
 kubeletRootDir: /var/lib/kubelet
 
 # StorageClass Settings
-storageclass:
+storageClass:
   # If enabled, a hwameistor storageclass will be created in your system
   enabled: true
   # If enabled, hwameistor storageclass will be a default storageclass in system
diff --git a/test.yaml b/test.yaml
new file mode 100644
index 000000000..78a3d97e2
--- /dev/null
+++ b/test.yaml
@@ -0,0 +1,772 @@
+# Source: hwameistor/templates/post-install-claim-disks.yaml
+# Claim nodes disks
+apiVersion: hwameistor.io/v1alpha1
+kind: LocalDiskClaim
+metadata:
+  annotations:
+    "helm.sh/hook": post-install
+    "helm.sh/hook-delete-policy": hook-failed
+  name: k8s-worker-3
+spec:
+  # nodeName is the node where disk is attached
+  # If nodeName contains ".",please replace it with "-".
+  # The field value should be consistent with the nodeName in localdisk
+  nodeName: k8s-worker-3
+  description:
+    diskType: HDD
+---
+# Source: hwameistor/templates/post-install-claim-disks.yaml
+apiVersion: hwameistor.io/v1alpha1
+kind: LocalDiskClaim
+metadata:
+  annotations:
+    "helm.sh/hook": post-install
+    "helm.sh/hook-delete-policy": hook-failed
+  name: k8s-worker-4
+spec:
+  # nodeName is the node where disk is attached
+  # If nodeName contains ".",please replace it with "-".
+ # The field value should be consistent with the nodeName in localdisk + nodeName: k8s-worker-4 + description: + diskType: HDD +--- +# Source: hwameistor/templates/post-install-claim-disks.yaml +apiVersion: hwameistor.io/v1alpha1 +kind: LocalDiskClaim +metadata: + annotations: + "helm.sh/hook": post-install + "helm.sh/hook-delete-policy": hook-failed + name: k8s-worker-5 +spec: + # nodeName is the node where disk is attached + # If nodeName contains ".",please replace it with "-". + # The field value should be consistent with the nodeName in localdisk + nodeName: k8s-worker-5 + description: + diskType: HDD +MANIFEST: +--- +# Source: hwameistor/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: hwameistor-admin + namespace: hwameistor +--- +# Source: hwameistor/templates/scheduler-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: hwameistor-scheduler-config + namespace: hwameistor +data: +apiVersion: v1 +kind: ConfigMap +metadata: + name: hwameistor-scheduler-config + namespace: hwameistor +data: + hwameistor-scheduler-config.yaml: | + apiVersion: kubescheduler.config.k8s.io/v1alpha1 + kind: KubeSchedulerConfiguration + schedulerName: hwameistor-scheduler + leaderElection: + leaderElect: true + lockObjectName: hwameistor-scheduler + resourceLock: leases + plugins: + filter: + enabled: + - name: hwameistor-scheduler-plugin + reserve: + enabled: + - name: hwameistor-scheduler-plugin + unreserve: + enabled: + - name: hwameistor-scheduler-plugin +--- +# Source: hwameistor/templates/storageclass.yaml +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + annotations: + storageclass.kubernetes.io/is-default-class: "false" + name: hwameistor-storage-lvm-hdd +provisioner: lvm.hwameistor.io +volumeBindingMode: WaitForFirstConsumer +allowVolumeExpansion: true +reclaimPolicy: Delete +parameters: + replicaNumber: "1" + poolClass: "HDD" + poolType: "REGULAR" + volumeKind: "LVM" + striped: "true" + csi.storage.k8s.io/fstype: "xfs" +--- +# Source: hwameistor/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: hwameistor-role +rules: +- apiGroups: ["*"] + resources: + - "*" + verbs: ["*"] +- nonResourceURLs: ["*"] + verbs: ["*"] +--- +# Source: hwameistor/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: hwameistor-admin-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: hwameistor-role +subjects: +- kind: ServiceAccount + name: hwameistor-admin + namespace: hwameistor +--- +# Source: hwameistor/templates/webhook.yaml +apiVersion: v1 +kind: Service +metadata: + name: hwameistor-webhook + namespace: hwameistor +spec: + selector: + app: hwameistor-webhook + ports: + - port: 443 + targetPort: webhook-api +--- +# Source: hwameistor/templates/local-disk-manager.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: hwameistor-local-disk-manager + namespace: hwameistor +spec: + selector: + matchLabels: + app: hwameistor-local-disk-manager + template: + metadata: + labels: + app: hwameistor-local-disk-manager + spec: + hostNetwork: true + hostPID: true + serviceAccountName: hwameistor-admin + containers: + - name: registrar + resources: + {} + image: quay.io/k8scsi/csi-node-driver-registrar:v1.1.0 + imagePullPolicy: IfNotPresent + args: + - --v=5 + - --csi-address=/csi/csi.sock + - --kubelet-registration-path=/var/snap/microk8s/common/var/lib/kubelet//plugins/disk.hwameistor.io/csi.sock + 
lifecycle: + preStop: + exec: + command: [ "/bin/sh", "-c", "rm -rf /registration/disk.hwameistor.io /registration/disk.hwameistor.io-reg.sock" ] + env: + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + - name: manager + resources: + {} + # Replace this with the built image name + image: ghcr.io/hwameistor/local-disk-manager:v0.3.3-rc.0 + command: + - /local-disk-manager + args: + - --endpoint=$(CSI_ENDPOINT) + - --nodeid=$(NODENAME) + - --csi-enable=true + imagePullPolicy: IfNotPresent + volumeMounts: + - name: udev + mountPath: /run/udev + - name: procmount + mountPath: /host/proc + readOnly: true + - name: devmount + mountPath: /dev + - name: registration-dir + mountPath: /var/snap/microk8s/common/var/lib/kubelet//plugins_registry + - name: plugin-dir + mountPath: /var/snap/microk8s/common/var/lib/kubelet//plugins + mountPropagation: "Bidirectional" + - name: pods-mount-dir + mountPath: /var/snap/microk8s/common/var/lib/kubelet//pods + mountPropagation: "Bidirectional" + env: + - name: CSI_ENDPOINT + value: unix://var/snap/microk8s/common/var/lib/kubelet//plugins/disk.hwameistor.io/csi.sock + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: WATCH_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NODENAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: OPERATOR_NAME + value: "local-disk-manager" + securityContext: + privileged: true + volumes: + - name: udev + hostPath: + path: /run/udev + type: Directory + - name: procmount + # mount /proc/1/mounts (mount file of process 1 of host) inside container + # to read which partition is mounted on / path + hostPath: + path: /proc + type: Directory + - name: devmount + # the /dev directory is mounted so that we have access to the devices that + # are connected at runtime of the pod. 
+ hostPath: + path: /dev + type: Directory + - name: socket-dir + hostPath: + path: /var/snap/microk8s/common/var/lib/kubelet//plugins/disk.hwameistor.io + type: DirectoryOrCreate + - name: registration-dir + hostPath: + path: /var/snap/microk8s/common/var/lib/kubelet//plugins_registry/ + type: Directory + - name: plugin-dir + hostPath: + path: /var/snap/microk8s/common/var/lib/kubelet//plugins + type: DirectoryOrCreate + - name: pods-mount-dir + hostPath: + path: /var/snap/microk8s/common/var/lib/kubelet//pods + type: DirectoryOrCreate + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/not-ready + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists +--- +# Source: hwameistor/templates/local-storage.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: hwameistor-local-storage + namespace: hwameistor +spec: + selector: + matchLabels: + app: hwameistor-local-storage + template: + metadata: + labels: + app: hwameistor-local-storage + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: lvm.hwameistor.io/enable + operator: NotIn + values: + - "false" + + containers: + - args: + - --v=5 + - --csi-address=/csi/csi.sock + - --kubelet-registration-path=/var/snap/microk8s/common/var/lib/kubelet//plugins/lvm.hwameistor.io/csi.sock + env: + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + image: quay.io/k8scsi/csi-node-driver-registrar:v1.1.0 + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - rm -rf /registration/lvm.hwameistor.io /registration/lvm.hwameistor.io-reg.sock + name: registrar + resources: + {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /registration + name: registration-dir + - args: + - --nodename=$(MY_NODENAME) + - --namespace=$(POD_NAMESPACE) + - --csi-address=$(CSI_ENDPOINT) + - --http-port=80 + - --debug=true + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: MY_NODENAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CSI_ENDPOINT + value: unix://var/snap/microk8s/common/var/lib/kubelet//plugins/lvm.hwameistor.io/csi.sock + - name: NODE_ANNOTATION_KEY_STORAGE_IPV4 + value: localstorage.hwameistor.io/storage-ipv4 + image: ghcr.io/hwameistor/local-storage:v0.3.3-rc.0 + imagePullPolicy: IfNotPresent + name: member + ports: + - containerPort: 80 + name: healthz + protocol: TCP + readinessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 2 + successThreshold: 1 + timeoutSeconds: 3 + resources: + {} + securityContext: + # allowPrivilegeEscalation: true + capabilities: + add: + - SYS_ADMIN + privileged: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /var/snap/microk8s/common/var/lib/kubelet//plugins + mountPropagation: Bidirectional + name: plugin-dir + - mountPath: /var/snap/microk8s/common/var/lib/kubelet//plugins_registry + name: registration-dir + - mountPath: 
/var/snap/microk8s/common/var/lib/kubelet//pods + mountPropagation: Bidirectional + name: pods-mount-dir + - mountPath: /dev + name: host-dev + - mountPath: /etc/drbd.d + mountPropagation: Bidirectional + name: host-etc-drbd + dnsPolicy: ClusterFirstWithHostNet + hostPID: true + priorityClassName: + restartPolicy: Always + schedulerName: default-scheduler + serviceAccountName: hwameistor-admin + serviceAccount: hwameistor-admin + terminationGracePeriodSeconds: 30 + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/not-ready + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + volumes: + - hostPath: + path: /var/snap/microk8s/common/var/lib/kubelet//plugins/lvm.hwameistor.io + type: DirectoryOrCreate + name: socket-dir + - hostPath: + path: /var/snap/microk8s/common/var/lib/kubelet//plugins + type: DirectoryOrCreate + name: plugin-dir + - hostPath: + path: /var/snap/microk8s/common/var/lib/kubelet//plugins_registry/ + type: Directory + name: registration-dir + - hostPath: + path: /dev + type: "" + name: host-dev + - hostPath: + path: /etc/drbd.d + type: DirectoryOrCreate + name: host-etc-drbd + - hostPath: + path: /var/snap/microk8s/common/var/lib/kubelet//pods + type: DirectoryOrCreate + name: pods-mount-dir + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate +--- +# Source: hwameistor/templates/local-disk-manager-csi-controller.yaml +kind: Deployment +apiVersion: apps/v1 +metadata: + name: hwameistor-local-disk-csi-controller + namespace: hwameistor +spec: + replicas: 1 + selector: + matchLabels: + app: hwameistor-local-disk-csi-controller + template: + metadata: + labels: + app: hwameistor-local-disk-csi-controller + spec: + priorityClassName: + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - hwameistor-local-disk-manager + topologyKey: topology.disk.hwameistor.io/node + serviceAccount: hwameistor-admin + containers: + - name: provisioner + resources: + {} + image: quay.io/k8scsi/csi-provisioner:v2.0.3 + imagePullPolicy: "IfNotPresent" + args: + - "--v=5" + - "--csi-address=$(CSI_ADDRESS)" + - "--leader-election=true" + - "--feature-gates=Topology=true" + - "--strict-topology" + - "--extra-create-metadata=true" + env: + - name: CSI_ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: attacher + resources: + {} + image: quay.io/k8scsi/csi-attacher:v3.0.1 + imagePullPolicy: IfNotPresent + args: + - "--v=5" + - "--csi-address=$(CSI_ADDRESS)" + - "--leader-election=true" + - "--timeout=120s" + env: + - name: CSI_ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + volumes: + - name: socket-dir + hostPath: + path: /var/snap/microk8s/common/var/lib/kubelet//plugins/disk.hwameistor.io + type: DirectoryOrCreate +--- +# Source: hwameistor/templates/local-storage-csi-controller.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: hwameistor-local-storage-csi-controller + namespace: hwameistor +spec: + replicas: 1 + selector: + matchLabels: + app: hwameistor-local-storage-csi-controller + strategy: + type: Recreate + template: + metadata: + creationTimestamp: null + labels: + app: hwameistor-local-storage-csi-controller + spec: + affinity: + podAffinity: + 
requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - hwameistor-local-storage + topologyKey: topology.lvm.hwameistor.io/node + containers: + - args: + - --v=5 + - --csi-address=$(CSI_ADDRESS) + - --leader-election=true + - --feature-gates=Topology=true + - --strict-topology + - --extra-create-metadata=true + env: + - name: CSI_ADDRESS + value: /csi/csi.sock + image: quay.io/k8scsi/csi-provisioner:v2.0.3 + imagePullPolicy: IfNotPresent + name: provisioner + resources: + {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /csi + name: socket-dir + - args: + - --v=5 + - --csi-address=$(CSI_ADDRESS) + - --leader-election=true + - --timeout=120s + env: + - name: CSI_ADDRESS + value: /csi/csi.sock + image: quay.io/k8scsi/csi-attacher:v3.0.1 + imagePullPolicy: IfNotPresent + name: attacher + resources: + {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /csi + name: socket-dir + - args: + - --v=5 + - --csi-address=$(CSI_ADDRESS) + - --leader-election=true + env: + - name: CSI_ADDRESS + value: /csi/csi.sock + image: quay.io/k8scsi/csi-resizer:v1.0.1 + imagePullPolicy: IfNotPresent + name: resizer + resources: + {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /csi + name: socket-dir + dnsPolicy: ClusterFirst + priorityClassName: + restartPolicy: Always + serviceAccountName: hwameistor-admin + serviceAccount: hwameistor-admin + securityContext: {} + terminationGracePeriodSeconds: 30 + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/not-ready + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + volumes: + - hostPath: + path: /var/snap/microk8s/common/var/lib/kubelet//plugins/lvm.hwameistor.io + type: DirectoryOrCreate + name: socket-dir +--- +# Source: hwameistor/templates/scheduler.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: hwameistor-scheduler + namespace: hwameistor +spec: + replicas: 1 + selector: + matchLabels: + app: hwameistor-scheduler + strategy: + type: Recreate + template: + metadata: + labels: + app: hwameistor-scheduler + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + containers: + - args: + - -v=2 + - --bind-address=0.0.0.0 + - --kubeconfig=/etc/kubernetes/scheduler.conf + - --leader-elect=false + - --leader-elect-resource-name=hwameistor-scheduler + - --leader-elect-resource-namespace=hwameistor + - --config=/etc/hwameistor/hwameistor-scheduler-config.yaml + image: ghcr.io/hwameistor/scheduler:v0.3.3-rc.0 + imagePullPolicy: IfNotPresent + name: hwameistor-kube-scheduler + resources: + {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /etc/kubernetes/scheduler.conf + name: kubeconfig + readOnly: true + - mountPath: /etc/hwameistor/ + name: hwameistor-scheduler-config + readOnly: true + volumes: + - hostPath: + path: /etc/kubernetes/admin.conf + type: FileOrCreate + name: kubeconfig + - configMap: + name: hwameistor-scheduler-config + items: + - key: hwameistor-scheduler-config.yaml + path: 
hwameistor-scheduler-config.yaml + name: hwameistor-scheduler-config + serviceAccountName: hwameistor-admin + serviceAccount: hwameistor-admin + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/not-ready + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists +--- +# Source: hwameistor/templates/webhook.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: hwameistor-webhook + namespace: hwameistor + labels: + app: hwameistor-webhook +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: hwameistor-webhook + template: + metadata: + labels: + app: hwameistor-webhook + spec: + serviceAccountName: hwameistor-admin + initContainers: + - image: jackmmzhou/self-signed:v1 + imagePullPolicy: Always + name: webhook-init + resources: + {} + env: + - name: WEBHOOK_NAMESPACE + value: hwameistor + - name: MUTATE_CONFIG + value: hwameistor-admission-mutate + - name: WEBHOOK_SERVICE + value: hwameistor-webhook + - name: MUTATE_PATH + value: /mutate + volumeMounts: + - mountPath: /etc/webhook/certs + name: webhook-tls-certs + containers: + - name: server + args: + - --cert-dir=/etc/webhook/certs + - --tls-private-key-file=tls.key + - --tls-cert-file=tls.crt + image: ghcr.io/hwameistor/admission:v0.3.3-rc.0 + imagePullPolicy: IfNotPresent + resources: + {} + ports: + - containerPort: 18443 + name: webhook-api + volumeMounts: + - name: webhook-tls-certs + mountPath: /etc/webhook/certs + readOnly: true + volumes: + - name: webhook-tls-certs + emptyDir: {} +
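
Once the HDD pool and the `hwameistor-storage-lvm-hdd` StorageClass documented above are in place, a workload requests storage from the pool through an ordinary PersistentVolumeClaim. Below is a minimal sketch of such a claim for reference; the PVC name, namespace, and requested size are illustrative and are not part of this patch:

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: demo-data          # illustrative name
  namespace: default       # illustrative namespace
spec:
  accessModes:
    - ReadWriteOnce
  # StorageClass created by helm/hwameistor/templates/storageclass.yaml
  storageClassName: hwameistor-storage-lvm-hdd
  resources:
    requests:
      storage: 1Gi         # illustrative size
```

Because the StorageClass uses `volumeBindingMode: WaitForFirstConsumer`, the PVC stays `Pending` until a Pod that mounts it is scheduled; the volume is then allocated from the `LocalStorage_PoolHDD` volume group on the selected node.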