From 632dc4bcb94b0822cc9cd1c77ec6212395edb8a7 Mon Sep 17 00:00:00 2001 From: frankleaf <62129564+frankleaf@users.noreply.github.com> Date: Mon, 25 Apr 2022 20:15:37 +0800 Subject: [PATCH 01/10] Add jindofsx engine (#1801) * add jindofsx engine Signed-off-by: frankleaf * add jindofsx engine Signed-off-by: frankleaf * fix jindofsx ut Signed-off-by: frankleaf * fix jindofsx ut Signed-off-by: frankleaf * fix jindofsx ut Signed-off-by: frankleaf * fix jindofsx ut Signed-off-by: frankleaf * fix jindofsx ut Signed-off-by: frankleaf * fix jindofsx ut Signed-off-by: frankleaf * fix jindofsx ut Signed-off-by: frankleaf * fix jindofsx ut Signed-off-by: frankleaf * fix back to normal 37 Signed-off-by: frankleaf * fix back to normal fsx Signed-off-by: frankleaf * fix back to normal fsx Signed-off-by: frankleaf --- charts/fluid-dataloader/jindofsx/CHANGELOG.md | 4 + charts/fluid-dataloader/jindofsx/Chart.yaml | 23 + .../jindofsx/templates/configmap.yaml | 139 +++ .../jindofsx/templates/dataloader.yaml | 157 ++++ charts/fluid-dataloader/jindofsx/values.yaml | 30 + charts/jindofsx/.helmignore | 21 + charts/jindofsx/CHANGELOG.md | 32 + charts/jindofsx/Chart.yaml | 19 + charts/jindofsx/README.md | 61 ++ charts/jindofsx/templates/_helpers.tpl | 32 + .../templates/config/jindofs-client-conf.yaml | 25 + .../templates/config/jindofs-conf.yaml | 97 ++ charts/jindofsx/templates/fuse/daemonset.yaml | 204 +++++ charts/jindofsx/templates/master/service.yaml | 41 + .../templates/master/statefulset.yaml | 202 +++++ .../templates/worker/statefulset.yaml | 189 ++++ charts/jindofsx/values.yaml | 69 ++ cmd/jindo/app/jindo.go | 2 + pkg/common/jindo.go | 2 - pkg/controllers/v1alpha1/jindo/implement.go | 12 +- .../v1alpha1/jindo/jindoruntime_controller.go | 2 +- pkg/ddc/factory.go | 10 +- pkg/ddc/jindo/const.go | 2 + pkg/ddc/jindo/load_data.go | 4 +- pkg/ddc/jindo/operations/base_test.go | 73 -- pkg/ddc/jindofsx/cache.go | 113 +++ pkg/ddc/jindofsx/cache_test.go | 230 +++++ pkg/ddc/jindofsx/const.go | 
58 ++ pkg/ddc/jindofsx/create_volume.go | 71 ++ pkg/ddc/jindofsx/create_volume_test.go | 186 ++++ pkg/ddc/jindofsx/dataset.go | 144 +++ pkg/ddc/jindofsx/dataset_test.go | 335 +++++++ pkg/ddc/jindofsx/delete_volume.go | 65 ++ pkg/ddc/jindofsx/delete_volume_test.go | 218 +++++ pkg/ddc/jindofsx/deprecated_label.go | 64 ++ pkg/ddc/jindofsx/deprecated_label_test.go | 148 +++ pkg/ddc/jindofsx/engine.go | 87 ++ pkg/ddc/jindofsx/engine_test.go | 92 ++ pkg/ddc/jindofsx/health_check.go | 153 ++++ pkg/ddc/jindofsx/health_check_test.go | 369 ++++++++ pkg/ddc/jindofsx/label.go | 27 + pkg/ddc/jindofsx/label_test.go | 53 ++ pkg/ddc/jindofsx/load_data.go | 190 ++++ pkg/ddc/jindofsx/load_data_test.go | 457 ++++++++++ pkg/ddc/jindofsx/master.go | 161 ++++ pkg/ddc/jindofsx/master_internal.go | 88 ++ pkg/ddc/jindofsx/master_internal_test.go | 264 ++++++ pkg/ddc/jindofsx/master_test.go | 270 ++++++ pkg/ddc/jindofsx/metadata.go | 159 ++++ pkg/ddc/jindofsx/metadata_test.go | 275 ++++++ pkg/ddc/jindofsx/node.go | 186 ++++ pkg/ddc/jindofsx/node_test.go | 425 +++++++++ pkg/ddc/jindofsx/operations/base.go | 202 +++++ pkg/ddc/jindofsx/operations/base_test.go | 218 +++++ pkg/ddc/jindofsx/operations/cached.go | 32 + pkg/ddc/jindofsx/operations/cached_test.go | 57 ++ pkg/ddc/jindofsx/port_parser.go | 89 ++ pkg/ddc/jindofsx/port_parser_test.go | 70 ++ pkg/ddc/jindofsx/replicas.go | 57 ++ pkg/ddc/jindofsx/replicas_test.go | 325 +++++++ pkg/ddc/jindofsx/runtime_info.go | 74 ++ pkg/ddc/jindofsx/runtime_info_test.go | 166 ++++ pkg/ddc/jindofsx/shutdown.go | 289 ++++++ pkg/ddc/jindofsx/shutdown_test.go | 339 +++++++ pkg/ddc/jindofsx/status.go | 136 +++ pkg/ddc/jindofsx/status_test.go | 209 +++++ pkg/ddc/jindofsx/transform.go | 661 ++++++++++++++ pkg/ddc/jindofsx/transform_fuse_test.go | 168 ++++ pkg/ddc/jindofsx/transform_hadoop_config.go | 67 ++ pkg/ddc/jindofsx/transform_master_test.go | 80 ++ pkg/ddc/jindofsx/transform_test.go | 319 +++++++ pkg/ddc/jindofsx/transform_worker_test.go | 183 ++++ 
pkg/ddc/jindofsx/types.go | 124 +++ pkg/ddc/jindofsx/ufs.go | 92 ++ pkg/ddc/jindofsx/ufs_internal.go | 96 ++ pkg/ddc/jindofsx/utils.go | 119 +++ pkg/ddc/jindofsx/utils_test.go | 123 +++ pkg/ddc/jindofsx/worker.go | 236 +++++ pkg/ddc/jindofsx/worker_test.go | 839 ++++++++++++++++++ 79 files changed, 11627 insertions(+), 83 deletions(-) create mode 100644 charts/fluid-dataloader/jindofsx/CHANGELOG.md create mode 100644 charts/fluid-dataloader/jindofsx/Chart.yaml create mode 100644 charts/fluid-dataloader/jindofsx/templates/configmap.yaml create mode 100644 charts/fluid-dataloader/jindofsx/templates/dataloader.yaml create mode 100644 charts/fluid-dataloader/jindofsx/values.yaml create mode 100755 charts/jindofsx/.helmignore create mode 100644 charts/jindofsx/CHANGELOG.md create mode 100755 charts/jindofsx/Chart.yaml create mode 100755 charts/jindofsx/README.md create mode 100755 charts/jindofsx/templates/_helpers.tpl create mode 100755 charts/jindofsx/templates/config/jindofs-client-conf.yaml create mode 100755 charts/jindofsx/templates/config/jindofs-conf.yaml create mode 100755 charts/jindofsx/templates/fuse/daemonset.yaml create mode 100755 charts/jindofsx/templates/master/service.yaml create mode 100755 charts/jindofsx/templates/master/statefulset.yaml create mode 100755 charts/jindofsx/templates/worker/statefulset.yaml create mode 100644 charts/jindofsx/values.yaml create mode 100644 pkg/ddc/jindofsx/cache.go create mode 100644 pkg/ddc/jindofsx/cache_test.go create mode 100644 pkg/ddc/jindofsx/const.go create mode 100644 pkg/ddc/jindofsx/create_volume.go create mode 100644 pkg/ddc/jindofsx/create_volume_test.go create mode 100644 pkg/ddc/jindofsx/dataset.go create mode 100644 pkg/ddc/jindofsx/dataset_test.go create mode 100644 pkg/ddc/jindofsx/delete_volume.go create mode 100644 pkg/ddc/jindofsx/delete_volume_test.go create mode 100644 pkg/ddc/jindofsx/deprecated_label.go create mode 100644 pkg/ddc/jindofsx/deprecated_label_test.go create mode 100644 
pkg/ddc/jindofsx/engine.go create mode 100644 pkg/ddc/jindofsx/engine_test.go create mode 100644 pkg/ddc/jindofsx/health_check.go create mode 100644 pkg/ddc/jindofsx/health_check_test.go create mode 100644 pkg/ddc/jindofsx/label.go create mode 100644 pkg/ddc/jindofsx/label_test.go create mode 100644 pkg/ddc/jindofsx/load_data.go create mode 100644 pkg/ddc/jindofsx/load_data_test.go create mode 100644 pkg/ddc/jindofsx/master.go create mode 100644 pkg/ddc/jindofsx/master_internal.go create mode 100644 pkg/ddc/jindofsx/master_internal_test.go create mode 100644 pkg/ddc/jindofsx/master_test.go create mode 100644 pkg/ddc/jindofsx/metadata.go create mode 100644 pkg/ddc/jindofsx/metadata_test.go create mode 100644 pkg/ddc/jindofsx/node.go create mode 100644 pkg/ddc/jindofsx/node_test.go create mode 100644 pkg/ddc/jindofsx/operations/base.go create mode 100644 pkg/ddc/jindofsx/operations/base_test.go create mode 100644 pkg/ddc/jindofsx/operations/cached.go create mode 100644 pkg/ddc/jindofsx/operations/cached_test.go create mode 100644 pkg/ddc/jindofsx/port_parser.go create mode 100644 pkg/ddc/jindofsx/port_parser_test.go create mode 100644 pkg/ddc/jindofsx/replicas.go create mode 100644 pkg/ddc/jindofsx/replicas_test.go create mode 100644 pkg/ddc/jindofsx/runtime_info.go create mode 100644 pkg/ddc/jindofsx/runtime_info_test.go create mode 100644 pkg/ddc/jindofsx/shutdown.go create mode 100644 pkg/ddc/jindofsx/shutdown_test.go create mode 100644 pkg/ddc/jindofsx/status.go create mode 100644 pkg/ddc/jindofsx/status_test.go create mode 100644 pkg/ddc/jindofsx/transform.go create mode 100644 pkg/ddc/jindofsx/transform_fuse_test.go create mode 100644 pkg/ddc/jindofsx/transform_hadoop_config.go create mode 100644 pkg/ddc/jindofsx/transform_master_test.go create mode 100644 pkg/ddc/jindofsx/transform_test.go create mode 100644 pkg/ddc/jindofsx/transform_worker_test.go create mode 100644 pkg/ddc/jindofsx/types.go create mode 100644 pkg/ddc/jindofsx/ufs.go create mode 100644 
pkg/ddc/jindofsx/ufs_internal.go create mode 100644 pkg/ddc/jindofsx/utils.go create mode 100644 pkg/ddc/jindofsx/utils_test.go create mode 100644 pkg/ddc/jindofsx/worker.go create mode 100644 pkg/ddc/jindofsx/worker_test.go diff --git a/charts/fluid-dataloader/jindofsx/CHANGELOG.md b/charts/fluid-dataloader/jindofsx/CHANGELOG.md new file mode 100644 index 00000000000..56b2247a4c1 --- /dev/null +++ b/charts/fluid-dataloader/jindofsx/CHANGELOG.md @@ -0,0 +1,4 @@ +### 0.1.0 + +- Support parallel prefetch job +- Support configurations by setting values diff --git a/charts/fluid-dataloader/jindofsx/Chart.yaml b/charts/fluid-dataloader/jindofsx/Chart.yaml new file mode 100644 index 00000000000..7a1564448f3 --- /dev/null +++ b/charts/fluid-dataloader/jindofsx/Chart.yaml @@ -0,0 +1,23 @@ +apiVersion: v2 +name: fluid-dataloader +description: A Helm chart for Fluid to prefetch data + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. 
+appVersion: 0.1.0 diff --git a/charts/fluid-dataloader/jindofsx/templates/configmap.yaml b/charts/fluid-dataloader/jindofsx/templates/configmap.yaml new file mode 100644 index 00000000000..d36d2fbf41f --- /dev/null +++ b/charts/fluid-dataloader/jindofsx/templates/configmap.yaml @@ -0,0 +1,139 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-data-load-script" .Release.Name }} + labels: + release: {{ .Release.Name }} + role: dataload-job +data: + dataloader.jindo.init: | + #!/usr/bin/env bash + set -xe + jindo_env_vars=( + STORAGE_ADDRESS + ) + function public::jindo::init_conf() { + local IFS=$'\n' # split by line instead of space + for keyvaluepair in $(env); do + # split around the first "=" + key=$(echo ${keyvaluepair} | cut -d= -f1) + value=$(echo ${keyvaluepair} | cut -d= -f2-) + if [[ "${jindo_env_vars[*]}" =~ "${key}" ]]; then + export ${key}=\"${value}\" + fi + done + } + main() { + public::jindo::init_conf + } + main + dataloader.distributedLoad: | + #!/usr/bin/env bash + set -xe + + function distributedLoad() { + local path=$1 + local replica=$2 + local default=$3 + local cmd="jindo fs -load" + + if [[ $needLoadMetadata == 'true' ]]; then + echo -e "--- enable metaCache" + cmd="$cmd -meta" + echo -e "now cmd $cmd" + else + echo -e "--- disable metaCache" + fi + + if [[ $loadMetadataOnly == 'true' ]]; then + echo -e "--- disable datacache" + else + echo -e "--- enable datacache" + cmd="$cmd -data" + echo -e "now cmd $cmd" + fi + + if [[ $atomicCache == 'true' ]]; then + echo -e "--- enable atomicCache" + cmd="$cmd -atomic" + echo -e "now cmd $cmd" + else + echo -e "--- disable atomicCache" + fi + + if [[ $loadMemorydata == 'true' ]]; then + echo -e "--- enable loadMemorydata" + cmd="$cmd -m" + echo -e "now cmd $cmd" + else + echo -e "--- disable loadMemorydata" + fi + + if [[ $enbaleCacheListLocation == 'false' ]]; then + cmd="$cmd -s -R -replica $replica $default$path" + echo -e "execute cmd $cmd" + time $cmd + else + echo -e "--- 
begin download ossutil" + apt-get install wget -y + wget http://gosspublic.alicdn.com/ossutil/1.7.5/ossutil64 + chmod 755 ossutil64 + ./ossutil64 -e $cacheListEndpoint -i $cacheListAccessKeyId -k $cacheListAccessKeySecret cp $cacheListUrl /cachelist.txt + echo -e "--- enable File List" + cmd="$cmd -R -replica $cacheListReplica -cachelist /cachelist.txt -thread $cacheListThread $default/" + echo -e "execute cmd $cmd" + time $cmd + fi + + #echo -e "distributedLoad and sleep start now" + #sleep 10m + } + + function main() { + needLoadMetadata="$NEED_LOAD_METADATA" + loadMemorydata="$LOAD_MEMORY_DATA" + loadMetadataOnly="$LOAD_METADATA_ONLY" + atomicCache="$ENABLE_ATOMIC_CACHE" + cacheListReplica=$CACHE_LIST_REPLICA + cacheListThread=$CACHE_LIST_THREAD + enbaleCacheListLocation=$Enable_CACHE_LIST_LOCATION + cacheListAccessKeyId=$CACHE_LIST_ACCESSKEYID + cacheListAccessKeySecret=$CACHE_LIST_ACCESSKEYSECRET + cacheListEndpoint=$CACHE_LIST_ENDPOINT + cacheListUrl=$CACHE_LIST_URL + #judge whether to use locaion list + if [[ -z "$cacheListAccessKeyId" ]] || [[ -z "$cacheListAccessKeySecret" ]] || [[ -z "$cacheListEndpoint" ]] || [[ -z "$cacheListUrl" ]]; then + enbaleCacheListLocation=false + else + enbaleCacheListLocation=true + fi + if [[ -z "$cacheListReplica" ]]; then + cacheListReplica=1 + else + echo -e "cacheListReplica $cacheListReplica" + fi + if [[ -z "$cacheListThread" ]]; then + cacheListThread=10 + else + echo -e "cacheListThread $cacheListThread" + fi + dafault="jindo://" + paths="$DATA_PATH" + paths=(${paths//:/ }) + replicas="$PATH_REPLICAS" + replicas=(${replicas//:/ }) + for((i=0;i<${#paths[@]};i++)) do + local path="${paths[i]}" + local replica="${replicas[i]}" + echo -e "distributedLoad on $path starts" + distributedLoad ${paths[i]} ${replicas[i]} ${dafault} + #echo -e "distributedLoad on $path ends" + done + } + + main "$@" + + + + + diff --git a/charts/fluid-dataloader/jindofsx/templates/dataloader.yaml 
b/charts/fluid-dataloader/jindofsx/templates/dataloader.yaml new file mode 100644 index 00000000000..b2755669599 --- /dev/null +++ b/charts/fluid-dataloader/jindofsx/templates/dataloader.yaml @@ -0,0 +1,157 @@ +# .Release.Name will be used to decide which dataset will be preload +# .Release.Name should be like `-load`(e.g. hbase-load for a PersistentVolumeClaim named `hbase`) +# TODO: the length of .Release.Name won't exceed 53(limited by Helm), which means length of `` can't exceed 48. This might be a problem. + {{/* {{ $datasetName := "" -}}*/}} + {{/* {{- $randomSuffix := "" -}}*/}} + {{/* {{- if regexMatch "^[A-Za-z0-9._-]+-load-[A-Za-z0-9]{5}$" .Release.Name -}}*/}} + {{/* {{- $arr := regexSplit "-load-" .Release.Name -1 -}}*/}} + {{/* {{- $datasetName = first $arr -}}*/}} + {{/* {{- $randomSuffix = last $arr -}}*/}} + {{/* {{- else -}}*/}} + {{/* {{- printf "Illegal release name. Should be like -load-. Current name: %s" .Release.Name | fail -}}*/}} + {{/* {{- end }}*/}} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ printf "%s-job" .Release.Name }} + labels: + release: {{ .Release.Name }} + role: dataload-job + targetDataset: {{ required "targetDataset should be set" .Values.dataloader.targetDataset }} +spec: + backoffLimit: {{ .Values.dataloader.backoffLimit | default "3" }} + completions: 1 + parallelism: 1 + template: + metadata: + name: {{ printf "%s-loader" .Release.Name }} + labels: + release: {{ .Release.Name }} + role: dataload-pod + targetDataset: {{ required "targetDataset should be set" .Values.dataloader.targetDataset }} + spec: + restartPolicy: OnFailure + containers: + - name: dataloader + image: {{ required "Dataloader image should be set" .Values.dataloader.image }} + imagePullPolicy: IfNotPresent + command: ["/bin/sh", "-c"] + args: ["/scripts/jindo_env_init.sh && /scripts/jindo_dataload.sh"] + {{- $targetPaths := "" }} + {{- range .Values.dataloader.targetPaths }} + {{- $targetPaths = cat $targetPaths (required "Path must be set" 
.path) ":" }} + {{- end }} + {{- $targetPaths = $targetPaths | nospace | trimSuffix ":" }} + + {{- $pathReplicas := ""}} + {{- range .Values.dataloader.targetPaths }} + {{- $pathReplicas = cat $pathReplicas ( default 1 .replicas ) ":"}} + {{- end }} + {{- $pathReplicas = $pathReplicas | nospace | trimSuffix ":"}} + env: + - name: STORAGE_ADDRESS + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NEED_LOAD_METADATA + value: {{ default false .Values.dataloader.loadMetadata | quote }} + {{- range $key, $val := .Values.dataloader.options }} + {{- if eq $key "loadMemorydata" }} + - name: LOAD_MEMORY_DATA + value: {{ default false $val | quote }} + {{- end }} + {{- if eq $key "atomicCache" }} + - name: ENABLE_ATOMIC_CACHE + value: {{ default false $val | quote }} + {{- end }} + {{- if eq $key "loadMetadataOnly" }} + - name: LOAD_METADATA_ONLY + value: {{ default false $val | quote }} + {{- end }} + {{- if eq $key "cacheListReplica" }} + - name: CACHE_LIST_REPLICA + value: {{ $val | quote }} + {{- end }} + {{- if eq $key "cacheListThread" }} + - name: CACHE_LIST_THREAD + value: {{ $val | quote }} + {{- end }} + {{- if eq $key "accessKeyId" }} + - name: CACHE_LIST_ACCESSKEYID + value: {{ $val | quote }} + {{- end }} + {{- if eq $key "accessKeySecret" }} + - name: CACHE_LIST_ACCESSKEYSECRET + value: {{ $val | quote }} + {{- end }} + {{- if eq $key "endpoint" }} + - name: CACHE_LIST_ENDPOINT + value: {{ $val | quote }} + {{- end }} + {{- if eq $key "url" }} + - name: CACHE_LIST_URL + value: {{ $val | quote }} + {{- end }} + {{- if eq $key "cacheListLocationEnable" }} + - name: Enable_CACHE_LIST_LOCATION + value: {{ default false $val | quote }} + {{- end }} + {{- end }} + - name: DATA_PATH + value: {{ $targetPaths | quote }} + - name: PATH_REPLICAS + value: {{ $pathReplicas | quote }} + envFrom: + - configMapRef: + name: {{ required "targetDataset should be set" .Values.dataloader.targetDataset }}-jindofs-client-config + volumeMounts: + - name: bigboot-config + 
mountPath: /bigboot.cfg + subPath: bigboot.cfg + - name: bigboot-config + mountPath: /hdfs-3.2.1/etc/hadoop/core-site.xml + subPath: core-site.xml + {{- range $key, $val := .Values.dataloader.options }} + {{- if eq $key "hdfsConfig" }} + - name: hdfs-confs + mountPath: /hdfs-site.xml + subPath: hdfs-site.xml + {{- end }} + {{- end }} + - mountPath: /scripts + name: data-load-script + {{- range .Values.dataloader.targetPaths }} + {{- if .fluidNative }} + - mountPath: {{ .path | trimAll "/" | replace "/" "-" | printf "/data/%s"}} + name: {{ .path | trimAll "/" | replace "/" "-" | printf "native-%s"}} + {{- end }} + {{- end }} + volumes: + - name: bigboot-config + configMap: + name: {{ required "targetDataset should be set" .Values.dataloader.targetDataset }}-jindofs-config + {{- range $key, $val := .Values.dataloader.options }} + {{- if eq $key "hdfsConfig" }} + - name: hdfs-confs + configMap: + name: {{ $val }} + {{- end }} + {{- end }} + - name: data-load-script + configMap: + name: {{ printf "%s-data-load-script" .Release.Name }} + items: + - key: dataloader.jindo.init + path: jindo_env_init.sh + mode: 365 + - key: dataloader.distributedLoad + path: jindo_dataload.sh + mode: 365 + {{- range .Values.dataloader.targetPaths }} + {{- if .fluidNative }} + - name: {{ .path | trimAll "/" | replace "/" "-" | printf "native-%s"}} + hostPath: + path: {{ .path }} + {{- end }} + {{- end }} + diff --git a/charts/fluid-dataloader/jindofsx/values.yaml b/charts/fluid-dataloader/jindofsx/values.yaml new file mode 100644 index 00000000000..611a9ec2d65 --- /dev/null +++ b/charts/fluid-dataloader/jindofsx/values.yaml @@ -0,0 +1,30 @@ +# Default values for fluid-dataloader. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +dataloader: + # Optional + # Default: 3 + # Description: how many times the prefetch job can fail, i.e. 
`Job.spec.backoffLimit` + backoffLimit: 3 + + # Required + # Description: the dataset that this DataLoad targets + targetDataset: #imagenet + + # Optional + # Default: false + # Description: should load metadata from UFS when doing data load + loadMetadata: false + + # Optional + # Default: (path: "/", replicas: 1, fluidNative: false) + # Description: which paths should the DataLoad load + targetPaths: + - path: "/" + replicas: 1 + fluidNative: false + + # Required + # Description: the image that the DataLoad job uses + image: # diff --git a/charts/jindofsx/.helmignore b/charts/jindofsx/.helmignore new file mode 100755 index 00000000000..f0c13194444 --- /dev/null +++ b/charts/jindofsx/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/charts/jindofsx/CHANGELOG.md b/charts/jindofsx/CHANGELOG.md new file mode 100644 index 00000000000..124f7d54510 --- /dev/null +++ b/charts/jindofsx/CHANGELOG.md @@ -0,0 +1,32 @@ +0.7.0 + +Support logConfig
+Support fuse lazy start
+Support fuse critical pod
+ +0.8.0 + +Change worker from Daemonset to Statefulset + +0.8.1 + +Repair fuse MountPoint leak issue + +0.8.2 + +Change podManagementPolicy from OrderedReady to Parallel + +0.8.3 + +Add owner Reference + +0.8.4 + +Support more Posix + +0.8.5 + +Add mountPropagation for registrar
+Add auto fuse recovery + + diff --git a/charts/jindofsx/Chart.yaml b/charts/jindofsx/Chart.yaml new file mode 100755 index 00000000000..48571915a8f --- /dev/null +++ b/charts/jindofsx/Chart.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +appVersion: 4.3.0 +version: 0.8.3 +description: FileSystem on the cloud based on Aliyun Object Storage aimed for data + acceleration. +home: https://help.aliyun.com/document_detail/164207.html +keywords: +- jindofs +- namespace:jindofs +- releaseName:jindofs +- category:data +maintainers: +- email: kunhui.skh@alibaba-inc.com + name: kunhui.skh +- email: cheyang@163.com + name: Yang Che +- email: frank.wt@alibaba-inc.com + name: Yang LI +name: jindofs diff --git a/charts/jindofsx/README.md b/charts/jindofsx/README.md new file mode 100755 index 00000000000..23bec75192b --- /dev/null +++ b/charts/jindofsx/README.md @@ -0,0 +1,61 @@ +# JindoFS +## JindoFS概述:云原生的大数据计算存储分离方案 +### JindoFS 之前 +在 JindoFS 之前,云上客户主要使用 HDFS 和 OSS/S3 作为大数据存储。HDFS 是 Hadoop 原生的存储系统,10 年来,HDFS 已经成为大数据生态的存储标准,但是我们也可以看到 HDFS 虽然不断优化,但是 JVM 的瓶颈也始终无法突破,社区后来重新设计了 OZone。OSS/S3 作为云上对象存储的代表,也在大数据生态进行了适配,但是由于对象存储设计上的特点,元数据相关操作无法达到 HDFS 一样的效率;对象存储给客户的带宽不断增加,但是也是有限的,一些时候较难完全满足用户大数据使用上的需求。 +### Jindo 的由来 +EMR Jindo 是阿里云基于 Apache Spark / Apache Hadoop 在云上定制的分布式计算和存储引擎。Jindo 原是内部的研发代号,取自筋斗(云)的谐音,EMR Jindo 在开源基础上做了大量优化和扩展,深度集成和连接了众多阿里云基础服务。阿里云 EMR (E-MapReduce) 在 TPC 官方提交的 TPCDS 成绩,也是使用 Jindo 提交的。 + +http://www.tpc.org/tpcds/results/tpcds_perf_results.asp?resulttype=all +### JindoFS +EMR Jindo 有计算和存储两大部分,存储的部分叫 JindoFS。JindoFS 是阿里云针对云上存储定制的自研大数据存储服务,完全兼容 Hadoop 文件系统接口,给客户带来更加灵活、高效的计算存储方案,目前已验证支持阿里云 EMR 中所有的计算服务和引擎:Spark、Flink、Hive、MapReduce、Presto、Impala 等。Jindo FS 有两种使用模式,块存储模式和缓存模式。下面我们来分析下,JindoFS 是如何来解决大数据上的存储问题的。 + +
+ +
+ +### 块存储模式 +计算和存储分离是业界的趋势,OSS 这样的云上存储能力是无限大的,成本上非常有优势,如何利用 OSS 提供的无限存储能力,同时又高效地操作文件系统的元数据。JindoFS 块存储模式提供了一套完整的云原生解决方案。 +JindoFS 的块存储模式,在元数据上使用 JindoNameService 服务管理 Jindo 文件系统元数据,元数据操作的性能和体验上可以对标 HDFS NameNode。同时,JindoStorageService 保障了数据可以始终有一份存在 OSS 上,即使数据节点被释放,数据也可以随时从 OSS 上拉取,成本上也可以做到更加灵活。 + +JindoFS 的块存储模式,也支持多种存储策略,比如,本地存两份,OSS上存一份;本地存两份,OSS上不存储;本地不存,OSS上存一份等等。用户可以充分利用不同的存储策略根据业务或者数据冷热进行使用。 + +块存储使用了全新的 jfs:// 格式,原始 HDFS/OSS 数据通过 distcp 方式即可完成数据导入,同时,JindoFS 提供了 SDK,在 EMR 集群外部,用户也可以读写 Jindo FS。 +### 缓存模式 +缓存模式,正如“缓存”本身的含义,通过缓存的方式,在本地集群基于 JindoFS 的存储能力构建了一个分布式缓存服务,远端的数据可以保存在本地集群,使远端数据变成“本地化”。简单地描述 JindoFS 缓存模式解决的问题 +就是“OSS / 远端HDFS 已经有了大量数据,每次读数据的时候网络带宽经常被打满,Jindo FS 就可以通过缓存模式优化网络带宽的限制。” + +“原来的文件路径是 oss://bucket1/file1 或 hdfs://namenode/file2,不想改作业的路径可以吗?”。是的,不需要修改。EMR 对 OSS 进行了适配(后续会支持远端 HDFS 的场景),可以通过配置的方式使用缓存模式。缓存对于上层的作业做到了完全无感。 + +但是缓存模式也不是万能的,为了保证多端数据一致性,rename 这种操作一定要同步刷新到远端的 OSS / HDFS,特别是 OSS 的Rename 操作比较耗时,缓存模式对 rename这种文件元数据操作暂时不能优化。 +### 附录:JindoFS参数说明 + +| Parameter | Description | Default | +| --- | --- | --- | +| properties.logDir | 容器内服务的日志目录,按照惯例请保存在默认位置,并且可以将该目录映射到宿主机,方便查看日志。 | /mnt/disk1/bigboot/log | +|
|
|
| +| namespace.rpc.port | namespace的rpc端口,请保留默认值。 | 8101 | +| namespace.meta-dir | 容器内master服务的元数据目录,按照惯例请保存在默认位置,并且可以将该目录映射到宿主机,持久化该数据。 | /mnt/disk1/bigboot/server | +| namespace.filelet.cache.size | Master服务上内存中Inode缓存数量,当内存足够时适当调大该值,可以利用内存缓存提高性能。 | 100000 | +| namespace.blocklet.cache.size | Master服务上内存中Blocklet缓存数量,当内存足够时适当调大该值,可以利用内存缓存提高性能。 | 1000000 | +| namespace.backend.type | Master服务的元数据存储类型。目前仅支持rocksdb的方式。请保留默认值。 | rocksdb | +| jfs.namespaces | test表示当前JindoFS支持的命名空间,多个命名空间时以逗号隔开。 | test | +| jfs.namespaces.test.mode | cache表示test命名空间为缓存模式。block表示块模式。 | cache | +| jfs.namespaces.test.oss.uri | 表示test命名空间的后端存储。 | oss://xxx/ | +| jfs.namespaces.test.oss.access.key | 表示存储后端OSS的AccessKey ID | xxx | +| jfs.namespaces.test.oss.access.secret | 表示存储后端OSS的AccessKey Secret | xxx | +| storage.rpc.port | worker的rpc端口,请保留默认值。 | 6101 | +| storage.data-dirs | worker容器内的缓存数据目录,多个目录用逗号隔开。 | /mnt/disk1/bigboot, /mnt/disk2/bigboot, /mnt/disk3/bigboot | +| storage.temp-data-dirs | worker容器内的临时文件目录,多个目录用逗号隔开。 | /mnt/disk1/bigboot/tmp | +| storage.watermark.high.ratio | worker使用的磁盘空间的水位上限百分比。假设500GB磁盘,0.4表示最大使用200GB | 0.4 | +| storage.watermark.low.ratio | worker使用的磁盘空间的水位下限百分比。假设500GB磁盘,0.2表示最少使用100GB | 0.2 | +| storage.data-dirs.capacities | 每块盘的容量大小,多个盘用逗号隔开。与storage.data-dirs的个数相对应。 | 80g,80g,80g | +| storage.meta-dir | worker的索引数据。按照惯例请保存在默认位置,并且可以将该目录映射到宿主机,方便持久化缓存信息。 | /mnt/disk1/bigboot/bignode | +| client.storage.rpc.port | worker的rpc端口,请保留默认值。 | 6101 | +| client.oss.retry | 客户端连接OSS失败时的重试次数 | 5 | +| client.oss.upload.threads | 客户端并行上传OSS的线程数 | 4 | +| client.oss.upload.queue.size | 客户端上传OSS的队列个数 | 5 | +| client.oss.upload.max.parallelism | 客户端并行上传OSS的最大线程数 | 16 | +| client.oss.timeout.millisecond | 客户端发送OSS请求的超时时间 | 30000 | +| client.oss.connection.timeout.millisecond | 客户端连接OSS的超时时间 | 3000 | +| mounts.master | master服务挂载的宿主机hostPath和容器内的mountPath,如需持久化,请按惯例请填写/mnt/disk1 | 无 | +| mounts.workersAndClients | 
worker服务挂载的宿主机hostPath和容器内的mountPath,如需持久化,请按惯例请填写/mnt/disk1到/mnt/diskN | 无 | \ No newline at end of file diff --git a/charts/jindofsx/templates/_helpers.tpl b/charts/jindofsx/templates/_helpers.tpl new file mode 100755 index 00000000000..90f77474492 --- /dev/null +++ b/charts/jindofsx/templates/_helpers.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "jindofs.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "jindofs.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "jindofs.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/charts/jindofsx/templates/config/jindofs-client-conf.yaml b/charts/jindofsx/templates/config/jindofs-client-conf.yaml new file mode 100755 index 00000000000..1823ef8e04e --- /dev/null +++ b/charts/jindofsx/templates/config/jindofs-client-conf.yaml @@ -0,0 +1,25 @@ +{{- $masterCount := int .Values.master.replicaCount }} +{{- $isSingleMaster := eq $masterCount 1 }} +{{- $isRaftHa := eq $masterCount 3 }} + +apiVersion: v1 +kind: ConfigMap +metadata: + annotations: + "helm.sh/hook": "pre-install" + "helm.sh/hook-delete-policy": before-hook-creation + name: {{ template "jindofs.fullname" . }}-client-config + labels: + name: {{ template "jindofs.fullname" . }}-client-config + app: {{ template "jindofs.name" . }}-client + chart: {{ template "jindofs.chart" . }}-client + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +data: + {{- if $isSingleMaster }} + STORAGE_NAMESPACE_RPC_ADDRESS: {{ template "jindofs.fullname" . }}-master-0:{{ .Values.master.ports.rpc }} + CLIENT_NAMESPACE_RPC_ADDRESS: {{ template "jindofs.fullname" . }}-master-0:{{ .Values.master.ports.rpc }} + {{- else if $isRaftHa }} + STORAGE_NAMESPACE_RPC_ADDRESS: {{ template "jindofs.fullname" . }}-master-0:{{ .Values.master.ports.rpc }},{{ template "jindofs.fullname" . }}-master-1:{{ .Values.master.ports.rpc }},{{ template "jindofs.fullname" . }}-master-2:{{ .Values.master.ports.rpc }} + CLIENT_NAMESPACE_RPC_ADDRESS: {{ template "jindofs.fullname" . }}-master-0:{{ .Values.master.ports.rpc }},{{ template "jindofs.fullname" . }}-master-1:{{ .Values.master.ports.rpc }},{{ template "jindofs.fullname" . 
}}-master-2:{{ .Values.master.ports.rpc }} + {{- end }} diff --git a/charts/jindofsx/templates/config/jindofs-conf.yaml b/charts/jindofsx/templates/config/jindofs-conf.yaml new file mode 100755 index 00000000000..7b12dbc2a2c --- /dev/null +++ b/charts/jindofsx/templates/config/jindofs-conf.yaml @@ -0,0 +1,97 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + annotations: + "helm.sh/hook": "pre-install" + "helm.sh/hook-delete-policy": before-hook-creation + name: {{ template "jindofs.fullname" . }}-config + labels: + name: {{ template "jindofs.fullname" . }}-config + app: {{ template "jindofs.name" . }} + chart: {{ template "jindofs.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +data: + jindofsx.cfg: | + [jindofsx-common] + logger.jnilogger=false + application.report.on=true + metric.report.on = true + logger.dir = /tmp/jindofsx-log + logger.sync = true + logger.verbose= 99 + logger.cleanner.enable = true + logger.consolelogger = true + {{- range $key, $val := .Values.master.secretProperties }} + {{ $key }} = {{ $val }} + {{- end}} + {{- range $key, $val := .Values.master.fileStoreProperties }} + {{ $key }} = {{ $val }} + {{- end}} + + [jindofsx-storage] + {{- range $key, $val := .Values.worker.properties }} + {{ $key }} = {{ $val }} + {{- end}} + + [jindofsx-namespace] + {{- range $key, $val := .Values.master.properties }} + {{ $key }} = {{ $val }} + {{- end}} + + core-site.xml: | + + + + fs.oss.impl + com.aliyun.jindodata.oss.JindoOssFileSystem + + + + fs.AbstractFileSystem.oss.impl + com.aliyun.jindodata.oss.OSS + + + + fs.jindo.impl + com.aliyun.jindodata.jindo.JindoFileSystem + + + + fs.AbstractFileSystem.jindo.impl + com.aliyun.jindodata.jindo.JINDO + + + + fs.xengine + jindofsx + + + + fs.jindofsx.namespace.rpc.address + {{ template "jindofs.fullname" . 
}}-master-0:{{ .Values.master.ports.rpc }} + + {{- range $key, $val := .Values.fuse.properties }} + + {{ $key }} + {{ $val }} + + {{- end}} + + jindosdk.cfg: | + [common] + logger.dir = /tmp/fuse-log + logger.sync = true + logger.consolelogger = true + logger.level = 2 + logger.verbose = 0 + logger.cleaner.enable = true + {{- range $key, $val := .Values.master.secretProperties }} + {{ $key }} = {{ $val }} + {{- end}} + + [jindosdk] + fs.jindofsx.namespace.rpc.address = {{ template "jindofs.fullname" . }}-master-0:{{ .Values.master.ports.rpc }} + {{- range $key, $val := .Values.fuse.properties }} + {{ $key }} = {{ $val }} + {{- end}} diff --git a/charts/jindofsx/templates/fuse/daemonset.yaml b/charts/jindofsx/templates/fuse/daemonset.yaml new file mode 100755 index 00000000000..ae7c9556e59 --- /dev/null +++ b/charts/jindofsx/templates/fuse/daemonset.yaml @@ -0,0 +1,204 @@ +{{- $masterCount := int .Values.master.replicaCount }} +{{- $isSingleMaster := eq $masterCount 1 }} +{{- $isRaftHa := eq $masterCount 3 }} +{{- $notEnableDnsConfig := (eq .Values.master.dnsServer "1.1.1.1") }} + +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ template "jindofs.fullname" . }}-fuse + labels: + app: {{ template "jindofs.name" . }} + chart: {{ template "jindofs.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + role: jindofs-fuse + ownerReferences: + {{- if .Values.owner.enabled }} + - apiVersion: {{ .Values.owner.apiVersion }} + blockOwnerDeletion: {{ .Values.owner.blockOwnerDeletion }} + controller: {{ .Values.owner.controller }} + kind: {{ .Values.owner.kind }} + name: {{ .Values.owner.name }} + uid: {{ .Values.owner.uid }} + {{- end }} +spec: + selector: + matchLabels: + app: {{ template "jindofs.name" . }} + chart: {{ template "jindofs.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + role: jindofs-fuse + template: + metadata: + labels: + app: {{ template "jindofs.name" . 
}} + chart: {{ template "jindofs.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + role: jindofs-fuse +{{- if .Values.labels }} +{{ toYaml .Values.labels | trim | indent 8 }} +{{- end }} +{{- if .Values.fuse.labels }} +{{ toYaml .Values.fuse.labels | trim | indent 8 }} +{{- end }} + spec: + {{- if .Values.fuse.criticalPod }} + priorityClassName: system-node-critical + {{- end }} + hostNetwork: {{ .Values.useHostNetwork }} + hostPID: {{ .Values.useHostPID }} + nodeSelector: +{{- if .Values.fuse.nodeSelector }} +{{ toYaml .Values.fuse.nodeSelector | trim | indent 8 }} +{{- else if .Values.nodeSelector }} +{{ toYaml .Values.nodeSelector | trim | indent 8 }} +{{- end }} + securityContext: + runAsUser: {{ .Values.user }} + runAsGroup: {{ .Values.group }} + fsGroup: {{ .Values.fsGroup }} + {{- if .Values.fuse.tolerations }} + tolerations: +{{ toYaml .Values.fuse.tolerations | indent 8 }} + {{- end }} + containers: + - name: jindofs-fuse + image: {{ .Values.fuseImage }}:{{ .Values.fuseImageTag }} + lifecycle: + preStop: + exec: + command: ["/prestop.sh"] + imagePullPolicy: {{ .Values.imagePullPolicy }} + {{- if .Values.fuse.resources }} + resources: + {{- if .Values.fuse.resources.limits }} + limits: + cpu: {{ .Values.fuse.resources.limits.cpu }} + memory: {{ .Values.fuse.resources.limits.memory }} + {{- end }} + {{- if .Values.fuse.resources.requests }} + requests: + cpu: {{ .Values.fuse.resources.requests.cpu }} + memory: {{ .Values.fuse.resources.requests.memory }} + {{- end }} + {{- end }} + command: ["/entrypoint.sh"] + {{- if .Values.fuse.args }} + args: +{{ toYaml .Values.fuse.args | indent 12 }} + {{- end }} + env: + {{- if $isSingleMaster }} + - name: CLIENT_NAMESPACE_RPC_ADDRESS + value: {{ template "jindofs.fullname" . }}-master-0:{{ .Values.master.ports.rpc }} + {{- else if $isRaftHa }} + - name: CLIENT_NAMESPACE_RPC_ADDRESS + value: {{ template "jindofs.fullname" . 
}}-master-0:{{ .Values.master.ports.rpc }},{{ template "jindofs.fullname" . }}-master-1:{{ .Values.master.ports.rpc }},{{ template "jindofs.fullname" . }}-master-2:{{ .Values.master.ports.rpc }} + {{- end }} + {{- if .Values.fuse.runAs }} + - name: RUN_AS_USER + value: {{ .Values.fuse.runAs }} + {{- end }} + - name: CLIENT_STORAGE_RPC_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: FLUID_FUSE_MOUNTPOINT + value: {{ .Values.fuse.mountPath }} + - name: FLUID_FUSE_MODE + value: {{ .Values.fuse.mode }} + securityContext: + privileged: true + capabilities: + add: + - SYS_ADMIN + volumeMounts: + - name: jindofs-fuse-device + mountPath: /dev/fuse + - name: jindofs-fuse-mount + mountPath: /jfs + mountPropagation: Bidirectional + - mountPath: /etc/localtime + name: volume-localtime + - name: bigboot-config + mountPath: /jindofsx.cfg + subPath: jindofsx.cfg + - name: bigboot-config + mountPath: /hdfs-3.2.1/etc/hadoop/core-site.xml + subPath: core-site.xml + - name: bigboot-config + mountPath: /jindosdk.cfg + subPath: jindosdk.cfg + {{- range $name, $path := .Values.mounts.workersAndClients }} + - name: datavolume-{{ $name }} + mountPath: "{{ $path }}" + {{- end }} + {{- if .Values.hadoopConfig }} + {{- if or .Values.hadoopConfig.includeCoreSite .Values.hadoopConfig.includeHdfsSite }} + - name: hdfs-confs + mountPath: /hdfs-site.xml + subPath: hdfs-site.xml + {{- end }} + {{- end }} + {{- if .Values.secret }} + - name: jindofs-secret-token + mountPath: /token + readOnly: true + {{- end }} + restartPolicy: Always + {{- if $notEnableDnsConfig }} + dnsPolicy: ClusterFirstWithHostNet + {{- else }} + dnsConfig: + nameservers: + - {{ .Values.master.dnsServer }} + options: + - name: ndots + value: "5" + searches: + - {{ .Values.master.namespace }}.svc.cluster.local + - svc.cluster.local + - cluster.local + - tbsite.net + - aliyun.com + dnsPolicy: None + enableServiceLinks: true + {{- end }} + terminationGracePeriodSeconds: 60 + volumes: + - name: 
jindofs-fuse-device + hostPath: + path: /dev/fuse + type: CharDevice + {{- if .Values.secret }} + - name: jindofs-secret-token + secret: + secretName: {{ .Values.secret }} + {{- end }} + - hostPath: + path: /etc/localtime + type: '' + name: volume-localtime + - name: jindofs-fuse-mount + hostPath: + path: {{ .Values.fuse.hostPath }} + type: DirectoryOrCreate + {{- range $name, $path := .Values.mounts.workersAndClients }} + - hostPath: + path: "{{ $path }}" + type: DirectoryOrCreate + name: datavolume-{{ $name }} + {{- end }} + {{- if .Values.hadoopConfig }} + {{- if or .Values.hadoopConfig.includeCoreSite .Values.hadoopConfig.includeHdfsSite }} + - name: hdfs-confs + configMap: + name: {{ .Values.hadoopConfig.configMap }} + {{- end }} + {{- end }} + - name: bigboot-config + configMap: + name: {{ template "jindofs.fullname" . }}-config diff --git a/charts/jindofsx/templates/master/service.yaml b/charts/jindofsx/templates/master/service.yaml new file mode 100755 index 00000000000..43e032b6d6e --- /dev/null +++ b/charts/jindofsx/templates/master/service.yaml @@ -0,0 +1,41 @@ +{{- $dot := . 
}} +{{ $masterCount := int .Values.master.replicaCount }} +{{- $isSingleMaster := eq $masterCount 1 }} +{{- $isRaftHa := eq $masterCount 3 }} +{{- range $i := until $masterCount }} + {{- $masterName := printf "master-%v" $i }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "jindofs.fullname" $dot }}-{{ $masterName }} + labels: + app: {{ template "jindofs.name" $dot }} + chart: {{ template "jindofs.chart" $dot }} + release: {{ $dot.Release.Name }} + heritage: {{ $dot.Release.Service }} + role: jindofs-master + ownerReferences: + {{- if $dot.Values.owner.enabled }} + - apiVersion: {{ $dot.Values.owner.apiVersion }} + blockOwnerDeletion: {{ $dot.Values.owner.blockOwnerDeletion }} + controller: {{ $dot.Values.owner.controller }} + kind: {{ $dot.Values.owner.kind }} + name: {{ $dot.Values.owner.name }} + uid: {{ $dot.Values.owner.uid }} + {{- end }} +spec: + ports: + - port: {{ $dot.Values.master.ports.rpc }} + name: rpc + {{- if $isRaftHa }} + - port: {{ $dot.Values.master.ports.raft }} + name: raft + {{- end }} + clusterIP: None + selector: + role: jindofs-master + app: {{ template "jindofs.name" $dot }} + release: {{ $dot.Release.Name }} + statefulset.kubernetes.io/pod-name: {{ template "jindofs.fullname" $dot }}-{{ $masterName }} +--- +{{- end }} diff --git a/charts/jindofsx/templates/master/statefulset.yaml b/charts/jindofsx/templates/master/statefulset.yaml new file mode 100755 index 00000000000..196925d185a --- /dev/null +++ b/charts/jindofsx/templates/master/statefulset.yaml @@ -0,0 +1,202 @@ +{{- $masterCount := int .Values.master.replicaCount }} +{{- $isSingleMaster := eq $masterCount 1 }} +{{- $isRaftHa := eq $masterCount 3 }} +{{- $notEnableDnsConfig := (eq .Values.master.dnsServer "1.1.1.1") }} + +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "jindofs.fullname" . }}-master + labels: + name: {{ template "jindofs.fullname" . }}-master + app: {{ template "jindofs.name" . }} + chart: {{ template "jindofs.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + role: jindofs-master + ownerReferences: + {{- if .Values.owner.enabled }} + - apiVersion: {{ .Values.owner.apiVersion }} + blockOwnerDeletion: {{ .Values.owner.blockOwnerDeletion }} + controller: {{ .Values.owner.controller }} + kind: {{ .Values.owner.kind }} + name: {{ .Values.owner.name }} + uid: {{ .Values.owner.uid }} + {{- end }} +spec: + selector: + matchLabels: + app: {{ template "jindofs.name" . }} + role: jindofs-master + name: {{ template "jindofs.fullname" . }}-master + serviceName: "jindofs-master" + replicas: {{ $masterCount }} + template: + metadata: + labels: + name: {{ template "jindofs.fullname" . }}-master + app: {{ template "jindofs.name" . }} + chart: {{ template "jindofs.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + role: jindofs-master +{{- if .Values.labels }} +{{ toYaml .Values.labels | trim | indent 8 }} +{{- end }} +{{- if .Values.master.labels }} +{{ toYaml .Values.master.labels | trim | indent 8 }} +{{- end }} + spec: + hostNetwork: {{ .Values.useHostNetwork }} + hostPID: {{ .Values.useHostPID }} + nodeSelector: +{{- if .Values.master.nodeSelector }} +{{ toYaml .Values.master.nodeSelector | trim | indent 8 }} +{{- else if .Values.nodeSelector }} +{{ toYaml .Values.nodeSelector | trim | indent 8 }} +{{- end }} + {{- if .Values.master.tolerations }} + tolerations: +{{ toYaml .Values.master.tolerations | indent 8 }} + {{- end }} + securityContext: + runAsUser: {{ .Values.user }} + runAsGroup: {{ .Values.group }} + fsGroup: {{ .Values.fsGroup }} + initContainers: + {{ if .Values.initPortCheck.enabled -}} + - name: init-port-check + image: {{ .Values.initPortCheck.image }}:{{ .Values.initPortCheck.imageTag }} + imagePullPolicy: {{ .Values.initPortCheck.imagePullPolicy }} + securityContext: + privileged: true + command: ["/entrypoint.sh"] + args: + - "check_port" + env: + - name: PORTS_TO_CHECK + value: {{ .Values.initPortCheck.portsToCheck 
| quote }} + {{- end }} + containers: + - name: jindofs-master + image: {{ .Values.image }}:{{ .Values.imageTag }} + imagePullPolicy: {{ .Values.imagePullPolicy }} + {{- if .Values.master.resources }} + resources: + limits: + {{- if .Values.master.resources }} + {{- if .Values.master.resources.limits }} + {{- if .Values.master.resources.limits.cpu }} + cpu: {{ .Values.master.resources.limits.cpu }} + memory: {{ .Values.master.resources.limits.memory }} + {{- end }} + {{- end }} + requests: + {{- if .Values.master.resources.requests }} + {{- if .Values.master.resources.requests.cpu }} + cpu: {{ .Values.master.resources.requests.cpu }} + memory: {{ .Values.master.resources.requests.memory }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + command: ["/entrypoint.sh"] + args: ["master"] + env: + {{- if $isSingleMaster }} + - name: CLIENT_NAMESPACE_RPC_ADDRESS + value: {{ template "jindofs.fullname" . }}-master-0:{{ .Values.master.ports.rpc }} + - name: NAMESPACE_BACKEND_RAFT_HOSTNAME + value: {{ template "jindofs.fullname" . }}-master-0 + {{- else if $isRaftHa }} + - name: CLIENT_NAMESPACE_RPC_ADDRESS + value: {{ template "jindofs.fullname" . }}-master-0:{{ .Values.master.ports.rpc }},{{ template "jindofs.fullname" . }}-master-1:{{ .Values.master.ports.rpc }},{{ template "jindofs.fullname" . }}-master-2:{{ .Values.master.ports.rpc }} + - name: NAMESPACE_BACKEND_RAFT_INITIAL_CONF + value: {{ template "jindofs.fullname" . }}-master-0:{{ .Values.master.ports.raft }}:0,{{ template "jindofs.fullname" . }}-master-1:{{ .Values.master.ports.raft }}:0,{{ template "jindofs.fullname" . }}-master-2:{{ .Values.master.ports.raft }}:0 + - name: NAMESPACE_BACKEND_RAFT_HOSTNAME + value: {{ template "jindofs.fullname" . }}-master-0,{{ template "jindofs.fullname" . }}-master-1,{{ template "jindofs.fullname" . 
}}-master-2 + {{- end }} + {{- if .Values.fuse.runAs }} + - name: RUN_AS_USER + value: {{ .Values.fuse.runAs }} + {{- end }} + ports: + - containerPort: {{ .Values.master.ports.rpc }} + name: rpc + {{- if $isRaftHa }} + - containerPort: {{ .Values.master.ports.raft }} + name: raft + {{- end }} + volumeMounts: + - name: bigboot-config + mountPath: /jindofsx.cfg + subPath: jindofsx.cfg + - name: bigboot-config + mountPath: /hdfs-3.2.1/etc/hadoop/core-site.xml + subPath: core-site.xml + - name: bigboot-config + mountPath: /jindosdk.cfg + subPath: jindosdk.cfg + {{- range $name, $path := .Values.mounts.master }} + - name: datavolume-{{ $name }} + mountPath: "{{ $path }}" + {{- end }} + {{- if .Values.hadoopConfig }} + {{- if or .Values.hadoopConfig.includeCoreSite .Values.hadoopConfig.includeHdfsSite }} + - name: hdfs-confs + mountPath: /hdfs-site.xml + subPath: hdfs-site.xml + {{- end }} + {{- end }} + {{- if .Values.secret }} + - name: jindofs-secret-token + mountPath: /token + readOnly: true + {{- end }} + - mountPath: /etc/localtime + name: volume-localtime + restartPolicy: Always + {{- if $notEnableDnsConfig }} + dnsPolicy: {{ .Values.useHostNetwork | ternary "ClusterFirstWithHostNet" "ClusterFirst" }} + {{- else }} + dnsConfig: + nameservers: + - {{ .Values.master.dnsServer }} + options: + - name: ndots + value: "5" + searches: + - {{ .Values.master.namespace }}.svc.cluster.local + - svc.cluster.local + - cluster.local + - tbsite.net + - aliyun.com + dnsPolicy: None + enableServiceLinks: true + {{- end }} + volumes: + - hostPath: + path: /etc/localtime + type: '' + name: volume-localtime + {{- range $name, $path := .Values.mounts.master }} + - hostPath: + path: "{{ $path }}" + type: DirectoryOrCreate + name: datavolume-{{ $name }} + {{- end }} + {{- if .Values.hadoopConfig }} + {{- if or .Values.hadoopConfig.includeCoreSite .Values.hadoopConfig.includeHdfsSite }} + - name: hdfs-confs + configMap: + name: {{ .Values.hadoopConfig.configMap }} + {{- end }} + {{- end 
}} + {{- if .Values.secret }} + - name: jindofs-secret-token + secret: + secretName: {{ .Values.secret }} + {{- end }} + - name: bigboot-config + configMap: + name: {{ template "jindofs.fullname" . }}-config diff --git a/charts/jindofsx/templates/worker/statefulset.yaml b/charts/jindofsx/templates/worker/statefulset.yaml new file mode 100755 index 00000000000..d1ed7fba0d3 --- /dev/null +++ b/charts/jindofsx/templates/worker/statefulset.yaml @@ -0,0 +1,189 @@ +{{- $masterCount := int .Values.master.replicaCount }} +{{- $isSingleMaster := eq $masterCount 1 }} +{{- $isRaftHa := eq $masterCount 3 }} +{{- $notEnableDnsConfig := (eq .Values.master.dnsServer "1.1.1.1") }} + +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "jindofs.fullname" . }}-worker + labels: + app: {{ template "jindofs.name" . }} + chart: {{ template "jindofs.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + role: jindofs-worker + fluid.io/dataset: {{ .Release.Namespace }}-{{ .Release.Name }} + fluid.io/dataset-placement: {{ .Values.placement }} + ownerReferences: + {{- if .Values.owner.enabled }} + - apiVersion: {{ .Values.owner.apiVersion }} + blockOwnerDeletion: {{ .Values.owner.blockOwnerDeletion }} + controller: {{ .Values.owner.controller }} + kind: {{ .Values.owner.kind }} + name: {{ .Values.owner.name }} + uid: {{ .Values.owner.uid }} + {{- end }} +spec: + selector: + matchLabels: + app: {{ template "jindofs.name" . }} + release: {{ .Release.Name }} + role: jindofs-worker + serviceName: {{ template "jindofs.fullname" . }}-worker + replicas: {{ .Values.worker.replicaCount }} + podManagementPolicy: {{ .Values.worker.podManagementPolicy }} + template: + metadata: + labels: + app: {{ template "jindofs.name" . }} + chart: {{ template "jindofs.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + role: jindofs-worker + fluid.io/dataset: {{ .Release.Namespace }}-{{ .Release.Name }} + fluid.io/dataset-placement: {{ .Values.placement }} +{{- if .Values.labels }} +{{ toYaml .Values.labels | trim | indent 8 }} +{{- end }} +{{- if .Values.worker.labels }} +{{ toYaml .Values.worker.labels | trim | indent 8 }} +{{- end }} + spec: + hostNetwork: {{ .Values.useHostNetwork }} + hostPID: {{ .Values.useHostPID }} + securityContext: + runAsUser: {{ .Values.user }} + runAsGroup: {{ .Values.group }} + fsGroup: {{ .Values.fsGroup }} + nodeSelector: +{{- if .Values.worker.nodeSelector }} +{{ toYaml .Values.worker.nodeSelector | trim | indent 8 }} +{{- else if .Values.nodeSelector }} +{{ toYaml .Values.nodeSelector | trim | indent 8 }} +{{- end }} + {{- if .Values.worker.tolerations }} + tolerations: +{{ toYaml .Values.worker.tolerations | indent 8 }} + {{- end }} + containers: + - name: jindofs-worker + image: {{ .Values.image }}:{{ .Values.imageTag }} + imagePullPolicy: {{ .Values.imagePullPolicy }} + {{- if .Values.worker.resources }} + resources: + limits: + {{- if .Values.worker.resources }} + {{- if .Values.worker.resources.limits }} + {{- if .Values.worker.resources.limits.cpu }} + cpu: {{ .Values.worker.resources.limits.cpu }} + memory: {{ .Values.worker.resources.limits.memory }} + {{- end }} + {{- end }} + requests: + {{- if .Values.worker.resources.requests }} + {{- if .Values.worker.resources.requests.cpu }} + cpu: {{ .Values.worker.resources.requests.cpu }} + memory: {{ .Values.worker.resources.requests.memory }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + command: ["/entrypoint.sh"] + args: ["worker"] + env: + {{- if $isSingleMaster }} + - name: STORAGE_NAMESPACE_RPC_ADDRESS + value: {{ template "jindofs.fullname" . }}-master-0:{{ .Values.master.ports.rpc }} + - name: CLIENT_NAMESPACE_RPC_ADDRESS + value: {{ template "jindofs.fullname" . 
}}-master-0:{{ .Values.master.ports.rpc }} + {{- else if $isRaftHa }} + - name: STORAGE_NAMESPACE_RPC_ADDRESS + value: {{ template "jindofs.fullname" . }}-master-0:{{ .Values.master.ports.rpc }},{{ template "jindofs.fullname" . }}-master-1:{{ .Values.master.ports.rpc }},{{ template "jindofs.fullname" . }}-master-2:{{ .Values.master.ports.rpc }} + - name: CLIENT_NAMESPACE_RPC_ADDRESS + value: {{ template "jindofs.fullname" . }}-master-0:{{ .Values.master.ports.rpc }},{{ template "jindofs.fullname" . }}-master-1:{{ .Values.master.ports.rpc }},{{ template "jindofs.fullname" . }}-master-2:{{ .Values.master.ports.rpc }} + {{- end }} + {{- if .Values.fuse.runAs }} + - name: RUN_AS_USER + value: {{ .Values.fuse.runAs }} + {{- end }} + - name: STORAGE_ADDRESS + valueFrom: + fieldRef: + fieldPath: status.hostIP + ports: + - containerPort: {{ .Values.worker.ports.rpc }} + name: rpc + volumeMounts: + - name: bigboot-config + mountPath: /jindofsx.cfg + subPath: jindofsx.cfg + - name: bigboot-config + mountPath: /hdfs-3.2.1/etc/hadoop/core-site.xml + subPath: core-site.xml + - name: bigboot-config + mountPath: /jindosdk.cfg + subPath: jindosdk.cfg + {{- range $name, $path := .Values.mounts.workersAndClients }} + - name: datavolume-{{ $name }} + mountPath: "{{ $path }}" + {{- end }} + {{- if .Values.hadoopConfig }} + {{- if or .Values.hadoopConfig.includeCoreSite .Values.hadoopConfig.includeHdfsSite }} + - name: hdfs-confs + mountPath: /hdfs-site.xml + subPath: hdfs-site.xml + {{- end }} + {{- end }} + {{- if .Values.secret }} + - name: jindofs-secret-token + mountPath: /token + readOnly: true + {{- end }} + - mountPath: /etc/localtime + name: volume-localtime + restartPolicy: Always + {{- if $notEnableDnsConfig }} + dnsPolicy: {{ .Values.useHostNetwork | ternary "ClusterFirstWithHostNet" "ClusterFirst" }} + {{- else }} + dnsConfig: + nameservers: + - {{ .Values.master.dnsServer }} + options: + - name: ndots + value: "5" + searches: + - {{ .Values.master.namespace 
}}.svc.cluster.local + - svc.cluster.local + - cluster.local + - tbsite.net + - aliyun.com + dnsPolicy: None + enableServiceLinks: true + {{- end }} + volumes: + - hostPath: + path: /etc/localtime + type: '' + name: volume-localtime + {{- range $name, $path := .Values.mounts.workersAndClients }} + - hostPath: + path: "{{ $path }}" + type: DirectoryOrCreate + name: datavolume-{{ $name }} + {{- end }} + {{- if .Values.hadoopConfig }} + {{- if or .Values.hadoopConfig.includeCoreSite .Values.hadoopConfig.includeHdfsSite }} + - name: hdfs-confs + configMap: + name: {{ .Values.hadoopConfig.configMap }} + {{- end }} + {{- end }} + {{- if .Values.secret }} + - name: jindofs-secret-token + secret: + secretName: {{ .Values.secret }} + {{- end }} + - name: bigboot-config + configMap: + name: {{ template "jindofs.fullname" . }}-config diff --git a/charts/jindofsx/values.yaml b/charts/jindofsx/values.yaml new file mode 100644 index 00000000000..986e8d5f7c4 --- /dev/null +++ b/charts/jindofsx/values.yaml @@ -0,0 +1,69 @@ +# Default values for JindoFS. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ + +image: registry-vpc.__ACK_REGION_ID__.aliyuncs.com/jindo/smartdata +imageTag: "4.3.0" +imagePullPolicy: Always + +initPortCheck: + enabled: false + image: registry.cn-hangzhou.aliyuncs.com/fluid/init-users + imageTag: v0.3.0-1467caa + imagePullPolicy: IfNotPresent + portsToCheck: "" + +fuseImage: registry-vpc.__ACK_REGION_ID__.aliyuncs.com/jindo/jindo-fuse +fuseImageTag: "4.3.0" + +user: 0 +group: 0 +fsGroup: 0 + +useHostNetwork: true +useHostPID: true + +labels: {} + +properties: + logDir: /mnt/disk1/bigboot/log + +master: + ports: + rpc: 8101 + dnsServer: "" + labels: {} + replicaCount: 1 + properties: + namespace.rpc.port: 8101 + namespace.filelet.cache.size: 100000 + namespace.blocklet.cache.size: 1000000 + namespace.backend.type: rocksdb + +worker: + ports: + rpc: 6101 + labels: {} + nodeSelector: {} + replicaCount: 0 + podManagementPolicy: Parallel + properties: + storage.rpc.port: 6101 + +fuse: + criticalPod: false + labels: {} + properties: + fs.jindofsx.request.user: root, + +mounts: + master: + # 1: /mnt/disk1 + workersAndClients: + # 1: /mnt/disk1 + # 2: /mnt/disk2 + # 3: /mnt/disk3 + +owner: + enabled: false diff --git a/cmd/jindo/app/jindo.go b/cmd/jindo/app/jindo.go index d38ac767354..e2d47536d04 100644 --- a/cmd/jindo/app/jindo.go +++ b/cmd/jindo/app/jindo.go @@ -17,6 +17,7 @@ limitations under the License. 
package app import ( + "github.com/fluid-cloudnative/fluid/pkg/ddc/jindofsx" "os" "github.com/fluid-cloudnative/fluid" @@ -42,6 +43,7 @@ var ( setupLog = ctrl.Log.WithName("setup") // Use compiler to check if the struct implements all the interface _ base.Implement = (*jindo.JindoEngine)(nil) + _ base.Implement = (*jindofsx.JindoFSxEngine)(nil) metricsAddr string enableLeaderElection bool diff --git a/pkg/common/jindo.go b/pkg/common/jindo.go index 1af413a2c6f..6e578809b81 100644 --- a/pkg/common/jindo.go +++ b/pkg/common/jindo.go @@ -28,8 +28,6 @@ const ( JINDO_FUSE_IMAGE_ENV = "JINDO_FUSE_IMAGE_ENV" - DEFAULT_JINDO_RUNTIME_IMAGE = "registry.cn-shanghai.aliyuncs.com/jindofs/smartdata:3.8.0" - JINDO_DNS_SERVER = "JINDO_DNS_SERVER_ENV" JindoFuseMountVolumeName = "jindofs-fuse-mount" diff --git a/pkg/controllers/v1alpha1/jindo/implement.go b/pkg/controllers/v1alpha1/jindo/implement.go index 82489e74a5b..d403d9aa173 100644 --- a/pkg/controllers/v1alpha1/jindo/implement.go +++ b/pkg/controllers/v1alpha1/jindo/implement.go @@ -18,13 +18,14 @@ package jindo import ( datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1" cruntime "github.com/fluid-cloudnative/fluid/pkg/runtime" + "os" "github.com/fluid-cloudnative/fluid/pkg/ddc" "github.com/fluid-cloudnative/fluid/pkg/ddc/base" ) const ( - runtimeType = "jindo" + engineType = "JINDO_ENGINE_TYPE" runtimeResourceFinalizerName = "jindo-runtime-controller-finalizer" ) @@ -66,3 +67,12 @@ func (r *RuntimeReconciler) RemoveEngine(ctx cruntime.ReconcileRequestContext) { id := ddc.GenerateEngineID(ctx.NamespacedName) delete(r.engines, id) } + +func (r *RuntimeReconciler) GetRuntimeType() string { + engine := "jindo" + if env := os.Getenv(engineType); env == "jindofsx" { + engine = env + } + r.Log.V(1).Info("Put Engine to engine map with engine type " + engine) + return engine +} diff --git a/pkg/controllers/v1alpha1/jindo/jindoruntime_controller.go b/pkg/controllers/v1alpha1/jindo/jindoruntime_controller.go index 
fc1b0c08f36..76cb3380a40 100644 --- a/pkg/controllers/v1alpha1/jindo/jindoruntime_controller.go +++ b/pkg/controllers/v1alpha1/jindo/jindoruntime_controller.go @@ -78,7 +78,7 @@ func (r *RuntimeReconciler) Reconcile(context context.Context, req ctrl.Request) NamespacedName: req.NamespacedName, Recorder: r.Recorder, Category: common.AccelerateCategory, - RuntimeType: runtimeType, + RuntimeType: r.GetRuntimeType(), Client: r.Client, FinalizerName: runtimeResourceFinalizerName, } diff --git a/pkg/ddc/factory.go b/pkg/ddc/factory.go index d60c8458324..815633e19e3 100644 --- a/pkg/ddc/factory.go +++ b/pkg/ddc/factory.go @@ -17,6 +17,7 @@ import ( "github.com/fluid-cloudnative/fluid/pkg/ddc/base" "github.com/fluid-cloudnative/fluid/pkg/ddc/goosefs" "github.com/fluid-cloudnative/fluid/pkg/ddc/jindo" + "github.com/fluid-cloudnative/fluid/pkg/ddc/jindofsx" "github.com/fluid-cloudnative/fluid/pkg/ddc/juicefs" cruntime "github.com/fluid-cloudnative/fluid/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -30,10 +31,11 @@ var buildFuncMap map[string]buildFunc func init() { buildFuncMap = map[string]buildFunc{ - "alluxio": alluxio.Build, - "jindo": jindo.Build, - "goosefs": goosefs.Build, - "juicefs": juicefs.Build, + "alluxio": alluxio.Build, + "jindo": jindo.Build, + "jindofsx": jindofsx.Build, + "goosefs": goosefs.Build, + "juicefs": juicefs.Build, } } diff --git a/pkg/ddc/jindo/const.go b/pkg/ddc/jindo/const.go index 6ea34c7e5f7..6681c8488ed 100644 --- a/pkg/ddc/jindo/const.go +++ b/pkg/ddc/jindo/const.go @@ -49,4 +49,6 @@ const ( runtimeFSType = "jindofs" JINDO_FUSE_MONNTPATH = "/jfs/jindofs-fuse" + + DEFAULT_JINDO_RUNTIME_IMAGE = "registry.cn-shanghai.aliyuncs.com/jindofs/smartdata:4.3.0" ) diff --git a/pkg/ddc/jindo/load_data.go b/pkg/ddc/jindo/load_data.go index fb131c9b2de..7705382e9de 100644 --- a/pkg/ddc/jindo/load_data.go +++ b/pkg/ddc/jindo/load_data.go @@ -79,7 +79,7 @@ func (e *JindoEngine) generateDataLoadValueFile(r cruntime.ReconcileRequestConte imageName, 
imageTag := docker.GetWorkerImage(r.Client, dataload.Spec.Dataset.Name, "jindo", dataload.Spec.Dataset.Namespace) if len(imageName) == 0 { - defaultImageInfo := strings.Split(common.DEFAULT_JINDO_RUNTIME_IMAGE, ":") + defaultImageInfo := strings.Split(DEFAULT_JINDO_RUNTIME_IMAGE, ":") if len(defaultImageInfo) < 1 { panic("invalid default dataload image!") } else { @@ -88,7 +88,7 @@ func (e *JindoEngine) generateDataLoadValueFile(r cruntime.ReconcileRequestConte } if len(imageTag) == 0 { - defaultImageInfo := strings.Split(common.DEFAULT_JINDO_RUNTIME_IMAGE, ":") + defaultImageInfo := strings.Split(DEFAULT_JINDO_RUNTIME_IMAGE, ":") if len(defaultImageInfo) < 2 { panic("invalid default dataload image!") } else { diff --git a/pkg/ddc/jindo/operations/base_test.go b/pkg/ddc/jindo/operations/base_test.go index 298975fa27b..b3aecc1af49 100644 --- a/pkg/ddc/jindo/operations/base_test.go +++ b/pkg/ddc/jindo/operations/base_test.go @@ -19,7 +19,6 @@ import ( "github.com/brahma-adshonor/gohook" "github.com/fluid-cloudnative/fluid/pkg/utils/fake" - "github.com/fluid-cloudnative/fluid/pkg/utils/kubeclient" ) func TestNewJindoFileUtils(t *testing.T) { @@ -73,78 +72,6 @@ func TestJindoFileUtils_exec(t *testing.T) { wrappedUnhookExec() } -func TestJindoFileUtils_execWithoutTimeout(t *testing.T) { - mockExecCommon := func(podName string, containerName string, namespace string, cmd []string) (stdout string, stderr string, e error) { - return "conf", "", nil - } - mockExecErr := func(podName string, containerName string, namespace string, cmd []string) (stdout string, stderr string, e error) { - return "err", "", errors.New("other error") - } - wrappedUnhook := func() { - err := gohook.UnHook(kubeclient.ExecCommandInContainer) - if err != nil { - t.Fatal(err.Error()) - } - } - - err := gohook.Hook(kubeclient.ExecCommandInContainer, mockExecErr, nil) - if err != nil { - t.Fatal(err.Error()) - } - a := &JindoFileUtils{log: fake.NullLogger()} - _, _, err = 
a.execWithoutTimeout([]string{"/sdk/bin/jindo", "jfs", "-report"}, false) - if err == nil { - t.Error("check failure, want err, got nil") - } - wrappedUnhook() - - err = gohook.Hook(kubeclient.ExecCommandInContainer, mockExecCommon, nil) - if err != nil { - t.Fatal(err.Error()) - } - _, _, err = a.execWithoutTimeout([]string{"/sdk/bin/jindo", "jfs", "-report"}, true) - if err != nil { - t.Errorf("check failure, want nil, got err: %v", err) - } - wrappedUnhook() -} - -func TestJindoFileUtils_ReportSummary(t *testing.T) { - ExecCommon := func(a JindoFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) { - return "Test stdout", "", nil - } - ExecErr := func(a JindoFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) { - return "", "", errors.New("fail to run the command") - } - wrappedUnhookExec := func() { - err := gohook.UnHook(JindoFileUtils.exec) - if err != nil { - t.Fatal(err.Error()) - } - } - - err := gohook.Hook(JindoFileUtils.exec, ExecErr, nil) - if err != nil { - t.Fatal(err.Error()) - } - a := JindoFileUtils{} - _, err = a.ReportSummary() - if err == nil { - t.Error("check failure, want err, got nil") - } - wrappedUnhookExec() - - err = gohook.Hook(JindoFileUtils.exec, ExecCommon, nil) - if err != nil { - t.Fatal(err.Error()) - } - _, err = a.ReportSummary() - if err != nil { - t.Errorf("check failure, want nil, got err: %v", err) - } - wrappedUnhookExec() -} - func TestJindoFileUtils_GetUfsTotalSize(t *testing.T) { ExecWithoutTimeoutCommon := func(a JindoFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) { return "2 1 108 testUrl", "", nil diff --git a/pkg/ddc/jindofsx/cache.go b/pkg/ddc/jindofsx/cache.go new file mode 100644 index 00000000000..fce1c9714f3 --- /dev/null +++ b/pkg/ddc/jindofsx/cache.go @@ -0,0 +1,113 @@ +/* +Copyright 2022 The Fluid Authors. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package jindofsx
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/fluid-cloudnative/fluid/pkg/ddc/jindofsx/operations"
+	"github.com/fluid-cloudnative/fluid/pkg/utils"
+	"github.com/fluid-cloudnative/fluid/pkg/utils/kubeclient"
+)
+
+// queryCacheStatus parses the Jindo report summary for cache capacity/usage and computes the cached percentage
+func (e *JindoFSxEngine) queryCacheStatus() (states cacheStates, err error) {
+	defer utils.TimeTrack(time.Now(), "JindoFSxEngine.queryCacheStatus", "name", e.name, "namespace", e.namespace)
+	summary, err := e.GetReportSummary()
+	if err != nil {
+		e.Log.Error(err, "Failed to get Jindo summary when query cache status")
+		return states, err
+	}
+	totalCapacityLabel := ""
+	usedCapacityLabel := ""
+	if len(e.runtime.Spec.TieredStore.Levels) > 0 && e.runtime.Spec.TieredStore.Levels[0].MediumType == "MEM" {
+		totalCapacityLabel = SUMMARY_PREFIX_TOTAL_MEM_CAPACITY
+		usedCapacityLabel = SUMMARY_PREFIX_USED_MEM_CAPACITY
+	} else {
+		totalCapacityLabel = SUMMARY_PREFIX_TOTAL_CAPACITY
+		usedCapacityLabel = SUMMARY_PREFIX_USED_CAPACITY
+	}
+	strs := strings.Split(summary, "\n")
+	for _, str := range strs {
+		str = strings.TrimSpace(str)
+		if strings.HasPrefix(str, totalCapacityLabel) {
+			totalCacheCapacityJindo, _ := utils.FromHumanSize(strings.TrimPrefix(str, totalCapacityLabel))
+			// Convert JindoFS's binary byte units to Fluid's binary byte units
+			// e.g. 10KB -> 10KiB, 2GB -> 2GiB
+			states.cacheCapacity = utils.BytesSize(float64(totalCacheCapacityJindo))
+		}
+		if strings.HasPrefix(str, usedCapacityLabel) {
+			usedCacheCapacityJindo, _ := utils.FromHumanSize(strings.TrimPrefix(str, usedCapacityLabel))
+			// Convert JindoFS's binary byte units to Fluid's binary byte units
+			// e.g. 10KB -> 10KiB, 2GB -> 2GiB
+			states.cached = utils.BytesSize(float64(usedCacheCapacityJindo))
+		}
+	}
+
+	dataset, err := utils.GetDataset(e.Client, e.name, e.namespace)
+	if err != nil {
+		e.Log.Info("Failed to get dataset when query cache status")
+		return states, err
+	}
+
+	// `dataset.Status.UfsTotal` probably hasn't been summed yet, in which case we won't compute the cache percentage
+	if dataset.Status.UfsTotal != "" && dataset.Status.UfsTotal != METADATA_SYNC_NOT_DONE_MSG {
+		usedInBytes, _ := utils.FromHumanSize(states.cached)
+		ufsTotalInBytes, _ := utils.FromHumanSize(dataset.Status.UfsTotal)
+		// JindoFS computes the cached byte size from block sums, so the ratio can exceed 100% when fully cached
+		percentage := 0.0
+		if ufsTotalInBytes != 0 {
+			percentage = float64(usedInBytes) / float64(ufsTotalInBytes)
+		}
+		// clamp to 100%: a Jindo block size greater than the UFS size must not overflow the percentage
+		if percentage > 1 {
+			percentage = 1.0
+		}
+		states.cachedPercentage = fmt.Sprintf("%.1f%%", percentage*100.0)
+	}
+
+	return states, nil
+}
+
+// invokeCleanCache cleans the JindoFS cache through the master pod
+func (e *JindoFSxEngine) invokeCleanCache() (err error) {
+	// 1. Check if the master is ready; if not, just return
+	masterName := e.getMasterName()
+	master, err := kubeclient.GetStatefulSet(e.Client, masterName, e.namespace)
+	if err != nil {
+		if utils.IgnoreNotFound(err) == nil {
+			e.Log.Info("Failed to get master", "err", err.Error())
+			return nil
+		}
+		// other error
+		return err
+	}
+	if master.Status.ReadyReplicas == 0 {
+		e.Log.Info("The master is not ready, just skip clean cache.", "master", masterName)
+		return nil
+	} else {
+		e.Log.Info("The master is ready, so start cleaning cache", "master", masterName)
+	}
+
+	// 2. run clean action
+	podName, containerName := e.getMasterPodInfo()
+	fileUtils := operations.NewJindoFileUtils(podName, containerName, e.namespace, e.Log)
+	e.Log.Info("cleaning cache and wait for a while")
+	return fileUtils.CleanCache()
+}
diff --git a/pkg/ddc/jindofsx/cache_test.go b/pkg/ddc/jindofsx/cache_test.go
new file mode 100644
index 00000000000..6eb07f18999
--- /dev/null
+++ b/pkg/ddc/jindofsx/cache_test.go
@@ -0,0 +1,230 @@
+package jindofsx
+
+import (
+	"github.com/fluid-cloudnative/fluid/pkg/common"
+	"reflect"
+	"testing"
+
+	"github.com/fluid-cloudnative/fluid/pkg/utils/fake"
+	appsv1 "k8s.io/api/apps/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+
+	. "github.com/agiledragon/gomonkey"
+	datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
+	"github.com/fluid-cloudnative/fluid/pkg/utils"
+	. 
"github.com/smartystreets/goconvey/convey" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func TestQueryCacheStatus(t *testing.T) { + Convey("test queryCacheStatus ", t, func() { + Convey("with dataset UFSTotal is not empty ", func() { + var engine *JindoFSxEngine + patch1 := ApplyMethod(reflect.TypeOf(engine), "GetReportSummary", + func(_ *JindoFSxEngine) (string, error) { + summary := mockJindoReportSummary() + return summary, nil + }) + defer patch1.Reset() + + patch2 := ApplyFunc(utils.GetDataset, + func(_ client.Client, _ string, _ string) (*datav1alpha1.Dataset, error) { + d := &datav1alpha1.Dataset{ + Status: datav1alpha1.DatasetStatus{ + UfsTotal: "52.18MiB", + }, + } + return d, nil + }) + defer patch2.Reset() + + e := &JindoFSxEngine{ + runtime: &datav1alpha1.JindoRuntime{Spec: datav1alpha1.JindoRuntimeSpec{ + TieredStore: datav1alpha1.TieredStore{ + Levels: []datav1alpha1.Level{ + { + Path: "/mnt/jindo0", + MediumType: common.HDD, + }, + }, + }}, + }, + } + got, err := e.queryCacheStatus() + want := cacheStates{ + cacheCapacity: "250.38GiB", + cached: "11.72GiB", + cachedPercentage: "100.0%", + } + + So(got, ShouldResemble, want) + So(err, ShouldEqual, nil) + }) + + Convey("with dataset UFSTotal is: [Calculating]", func() { + var engine *JindoFSxEngine + patch1 := ApplyMethod(reflect.TypeOf(engine), "GetReportSummary", + func(_ *JindoFSxEngine) (string, error) { + summary := mockJindoReportSummary() + return summary, nil + }) + defer patch1.Reset() + + patch2 := ApplyFunc(utils.GetDataset, + func(_ client.Client, _ string, _ string) (*datav1alpha1.Dataset, error) { + d := &datav1alpha1.Dataset{ + Status: datav1alpha1.DatasetStatus{ + UfsTotal: "[Calculating]", + }, + } + return d, nil + }) + defer patch2.Reset() + + e := &JindoFSxEngine{ + runtime: &datav1alpha1.JindoRuntime{Spec: datav1alpha1.JindoRuntimeSpec{ + TieredStore: datav1alpha1.TieredStore{ + Levels: []datav1alpha1.Level{ + { + Path: "/mnt/jindo0", + MediumType: common.HDD, + }, + }, + 
}}, + }, + } + got, err := e.queryCacheStatus() + want := cacheStates{ + cacheCapacity: "250.38GiB", + cached: "11.72GiB", + } + + So(got, ShouldResemble, want) + So(err, ShouldEqual, nil) + }) + + Convey("with dataset UFSTotal is empty", func() { + var engine *JindoFSxEngine + patch1 := ApplyMethod(reflect.TypeOf(engine), "GetReportSummary", + func(_ *JindoFSxEngine) (string, error) { + summary := mockJindoReportSummary() + return summary, nil + }) + defer patch1.Reset() + + patch2 := ApplyFunc(utils.GetDataset, + func(_ client.Client, _ string, _ string) (*datav1alpha1.Dataset, error) { + d := &datav1alpha1.Dataset{ + Status: datav1alpha1.DatasetStatus{ + UfsTotal: "", + }, + } + return d, nil + }) + defer patch2.Reset() + + e := &JindoFSxEngine{ + runtime: &datav1alpha1.JindoRuntime{Spec: datav1alpha1.JindoRuntimeSpec{ + TieredStore: datav1alpha1.TieredStore{ + Levels: []datav1alpha1.Level{ + { + Path: "/mnt/jindo0", + MediumType: common.HDD, + }, + }, + }}, + }, + } + got, err := e.queryCacheStatus() + want := cacheStates{ + cacheCapacity: "250.38GiB", + cached: "11.72GiB", + } + + So(got, ShouldResemble, want) + So(err, ShouldEqual, nil) + }) + }) +} + +func TestInvokeCleanCache(t *testing.T) { + masterInputs := []*appsv1.StatefulSet{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hadoop-jindofs-master", + Namespace: "fluid", + }, + Status: appsv1.StatefulSetStatus{ + ReadyReplicas: 0, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase-jindofs-master", + Namespace: "fluid", + }, + Status: appsv1.StatefulSetStatus{ + ReadyReplicas: 1, + }, + }, + } + objs := []runtime.Object{} + for _, masterInput := range masterInputs { + objs = append(objs, masterInput.DeepCopy()) + } + fakeClient := fake.NewFakeClientWithScheme(testScheme, objs...) 
+ testCases := []struct { + name string + namespace string + isErr bool + }{ + { + name: "hadoop", + namespace: "fluid", + isErr: false, + }, + { + name: "hbase", + namespace: "fluid", + isErr: true, + }, + { + name: "none", + namespace: "fluid", + isErr: false, + }, + } + for _, testCase := range testCases { + engine := &JindoFSxEngine{ + Client: fakeClient, + namespace: testCase.namespace, + name: testCase.name, + Log: fake.NullLogger(), + } + err := engine.invokeCleanCache() + isErr := err != nil + if isErr != testCase.isErr { + t.Errorf("test-name:%s want %t, got %t", testCase.name, testCase.isErr, isErr) + } + } +} + +// +// $ jindo fs -report +// +func mockJindoReportSummary() string { + s := `Namespace Address: localhost:18000 + Rpc Port: 8101 + Started: Mon Jul 19 07:41:39 2021 + Version: 3.6.1 + Live Nodes: 2 + Decommission Nodes: 0 + Mode: BLOCK + Total Disk Capacity: 250.38GB + Used Disk Capacity: 11.72GB + Total MEM Capacity: 250.38GB + Used MEM Capacity: 11.72GB + ` + return s +} diff --git a/pkg/ddc/jindofsx/const.go b/pkg/ddc/jindofsx/const.go new file mode 100644 index 00000000000..a8a2feb4080 --- /dev/null +++ b/pkg/ddc/jindofsx/const.go @@ -0,0 +1,58 @@ +/* +Copyright 2022 The Fluid Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package jindofsx
+
+const (
+	CSI_DRIVER = "fuse.csi.fluid.io"
+
+	//fluid_PATH = "fluid_path"
+
+	Mount_TYPE = "mount_type"
+
+	SUMMARY_PREFIX_TOTAL_CAPACITY = "Total Disk Capacity: " // label prefix parsed from the `jindo fs -report` summary
+
+	SUMMARY_PREFIX_USED_CAPACITY = "Used Disk Capacity: " // label prefix parsed from the `jindo fs -report` summary
+
+	SUMMARY_PREFIX_TOTAL_MEM_CAPACITY = "Total MEM Capacity: " // label prefix used when the first tier medium is MEM
+
+	SUMMARY_PREFIX_USED_MEM_CAPACITY = "Used MEM Capacity: " // label prefix used when the first tier medium is MEM
+
+	METADATA_SYNC_NOT_DONE_MSG = "[Calculating]" // placeholder value of Dataset.Status.UfsTotal while metadata sync is in progress
+
+	CHECK_METADATA_SYNC_DONE_TIMEOUT_MILLISEC = 500
+
+	HADOOP_CONF_HDFS_SITE_FILENAME = "hdfs-site.xml"
+
+	HADOOP_CONF_CORE_SITE_FILENAME = "core-site.xml"
+
+	JINDO_MASTERNUM_DEFAULT = 1
+	JINDO_HA_MASTERNUM      = 3
+
+	DEFAULT_MASTER_RPC_PORT = 8101
+	DEFAULT_WORKER_RPC_PORT = 6101
+	DEFAULT_RAFT_RPC_PORT   = 8103
+
+	POD_ROLE_TYPE = "role"
+
+	WOKRER_POD_ROLE = "jindo-worker" // NOTE(review): "WOKRER" misspells "WORKER"; name kept as-is since other packages reference it
+
+	runtimeFSType = "jindofs"
+
+	JINDO_FUSE_MONNTPATH = "/jfs/jindofs-fuse" // NOTE(review): "MONNTPATH" misspells "MOUNTPATH"; name kept as-is since other packages reference it
+
+	DEFAULT_JINDOFSX_RUNTIME_IMAGE = "registry.cn-shanghai.aliyuncs.com/jindofs/smartdata:4.3.0"
+)
diff --git a/pkg/ddc/jindofsx/create_volume.go b/pkg/ddc/jindofsx/create_volume.go
new file mode 100644
index 00000000000..e69d66f71d6
--- /dev/null
+++ b/pkg/ddc/jindofsx/create_volume.go
@@ -0,0 +1,71 @@
+/*
+Copyright 2022 The Fluid Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package jindofsx
+
+import (
+	"github.com/fluid-cloudnative/fluid/pkg/common"
+	volumeHelper "github.com/fluid-cloudnative/fluid/pkg/utils/dataset/volume"
+)
+
+// CreateVolume creates the fuse PersistentVolume and PersistentVolumeClaim for the runtime
+func (e *JindoFSxEngine) CreateVolume() (err error) {
+	if e.runtime == nil {
+		e.runtime, err = e.getRuntime()
+		if err != nil {
+			return
+		}
+	}
+
+	err = e.createFusePersistentVolume()
+	if err != nil {
+		return err
+	}
+
+	err = e.createFusePersistentVolumeClaim()
+	if err != nil {
+		return err
+	}
+
+	return nil
+
+}
+
+// createFusePersistentVolume creates the fuse PersistentVolume bound to the runtime's mount point
+func (e *JindoFSxEngine) createFusePersistentVolume() (err error) {
+
+	runtimeInfo, err := e.getRuntimeInfo()
+	if err != nil {
+		return err
+	}
+
+	return volumeHelper.CreatePersistentVolumeForRuntime(e.Client,
+		runtimeInfo,
+		e.getMountPoint(),
+		common.JindoRuntime,
+		e.Log)
+}
+
+// createFusePersistentVolumeClaim creates the fuse PersistentVolumeClaim for the runtime
+func (e *JindoFSxEngine) createFusePersistentVolumeClaim() (err error) {
+
+	runtimeInfo, err := e.getRuntimeInfo()
+	if err != nil {
+		return err
+	}
+
+	return volumeHelper.CreatePersistentVolumeClaimForRuntime(e.Client, runtimeInfo, e.Log)
+}
diff --git a/pkg/ddc/jindofsx/create_volume_test.go b/pkg/ddc/jindofsx/create_volume_test.go
new file mode 100644
index 00000000000..6b3aabc012b
--- /dev/null
+++ b/pkg/ddc/jindofsx/create_volume_test.go
@@ -0,0 +1,186 @@
+/*
+Copyright 2022 The Fluid Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package jindofsx + +import ( + "context" + "testing" + + datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1" + "github.com/fluid-cloudnative/fluid/pkg/ddc/base" + "github.com/fluid-cloudnative/fluid/pkg/utils/fake" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +func TestCreateVolume(t *testing.T) { + runtimeInfo, err := base.BuildRuntimeInfo("hbase", "fluid", "jindo", datav1alpha1.TieredStore{}) + if err != nil { + t.Errorf("fail to create the runtimeInfo with error %v", err) + } + runtimeInfo.SetupFuseDeployMode(false, nil) + + testDatasetInputs := []*datav1alpha1.Dataset{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase", + Namespace: "fluid", + }, + Spec: datav1alpha1.DatasetSpec{}, + }, + } + + testObjs := []runtime.Object{} + for _, datasetInput := range testDatasetInputs { + testObjs = append(testObjs, datasetInput.DeepCopy()) + } + client := fake.NewFakeClientWithScheme(testScheme, testObjs...) 
+ + engine := &JindoFSxEngine{ + Client: client, + Log: fake.NullLogger(), + namespace: "fluid", + name: "hbase", + runtimeInfo: runtimeInfo, + runtime: &datav1alpha1.JindoRuntime{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase", + Namespace: "fluid", + }, + }, + } + + err = engine.CreateVolume() + if err != nil { + t.Errorf("fail to exec CreateVolume with error %v", err) + } + + var pvs v1.PersistentVolumeList + err = client.List(context.TODO(), &pvs) + if err != nil { + t.Errorf("fail to exec the function with error %v", err) + return + } + if len(pvs.Items) != 1 { + t.Errorf("fail to create the pv") + } + + var pvcs v1.PersistentVolumeClaimList + err = client.List(context.TODO(), &pvcs) + if err != nil { + t.Errorf("fail to exec the function with error %v", err) + return + } + if len(pvcs.Items) != 1 { + t.Errorf("fail to create the pvc") + } +} + +func TestCreateFusePersistentVolume(t *testing.T) { + runtimeInfo, err := base.BuildRuntimeInfo("hbase", "fluid", "jindo", datav1alpha1.TieredStore{}) + if err != nil { + t.Errorf("fail to create the runtimeInfo with error %v", err) + } + runtimeInfo.SetupFuseDeployMode(false, nil) + + testDatasetInputs := []*datav1alpha1.Dataset{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase", + Namespace: "fluid", + }, + Spec: datav1alpha1.DatasetSpec{}, + }, + } + + testObjs := []runtime.Object{} + for _, datasetInput := range testDatasetInputs { + testObjs = append(testObjs, datasetInput.DeepCopy()) + } + client := fake.NewFakeClientWithScheme(testScheme, testObjs...) 
+ + engine := &JindoFSxEngine{ + Client: client, + Log: fake.NullLogger(), + namespace: "fluid", + name: "hbase", + runtimeInfo: runtimeInfo, + } + + err = engine.createFusePersistentVolume() + if err != nil { + t.Errorf("fail to exec createFusePersistentVolume with error %v", err) + } + + var pvs v1.PersistentVolumeList + err = client.List(context.TODO(), &pvs) + if err != nil { + t.Errorf("fail to exec the function with error %v", err) + return + } + if len(pvs.Items) != 1 { + t.Errorf("fail to create the pv") + } +} + +func TestCreateFusePersistentVolumeClaim(t *testing.T) { + runtimeInfo, err := base.BuildRuntimeInfo("hbase", "fluid", "jindo", datav1alpha1.TieredStore{}) + if err != nil { + t.Errorf("fail to create the runtimeInfo with error %v", err) + } + runtimeInfo.SetupFuseDeployMode(false, nil) + + testDatasetInputs := []*datav1alpha1.Dataset{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase", + Namespace: "fluid", + }, + Spec: datav1alpha1.DatasetSpec{}, + }, + } + + testObjs := []runtime.Object{} + for _, datasetInput := range testDatasetInputs { + testObjs = append(testObjs, datasetInput.DeepCopy()) + } + client := fake.NewFakeClientWithScheme(testScheme, testObjs...) + + engine := &JindoFSxEngine{ + Client: client, + Log: fake.NullLogger(), + namespace: "fluid", + name: "hbase", + runtimeInfo: runtimeInfo, + } + + err = engine.createFusePersistentVolumeClaim() + if err != nil { + t.Errorf("fail to exec createFusePersistentVolumeClaim with error %v", err) + } + + var pvcs v1.PersistentVolumeClaimList + err = client.List(context.TODO(), &pvcs) + if err != nil { + t.Errorf("fail to exec the function with error %v", err) + return + } + if len(pvcs.Items) != 1 { + t.Errorf("fail to create the pvc") + } +} diff --git a/pkg/ddc/jindofsx/dataset.go b/pkg/ddc/jindofsx/dataset.go new file mode 100644 index 00000000000..2bd64220d8f --- /dev/null +++ b/pkg/ddc/jindofsx/dataset.go @@ -0,0 +1,144 @@ +/* +Copyright 2022 The Fluid Authors. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package jindofsx
+
+import (
+	"context"
+	"reflect"
+	"time"
+
+	datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
+	"github.com/fluid-cloudnative/fluid/pkg/common"
+	"github.com/fluid-cloudnative/fluid/pkg/utils"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/client-go/util/retry"
+)
+
+// UpdateDatasetStatus sets the bound dataset's phase (with a matching Ready condition) and syncs the runtime's cache states onto it
+func (e *JindoFSxEngine) UpdateDatasetStatus(phase datav1alpha1.DatasetPhase) (err error) {
+	// 1. get the runtime to read its reported cache states
+	runtime, err := e.getRuntime()
+	if err != nil {
+		return err
+	}
+
+	// 2. update the dataset status, retrying on update conflicts
+	err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+		dataset, err := utils.GetDataset(e.Client, e.name, e.namespace)
+		if err != nil {
+			return err
+		}
+		datasetToUpdate := dataset.DeepCopy()
+		var cond datav1alpha1.DatasetCondition
+
+		if phase != dataset.Status.Phase {
+			switch phase {
+			case datav1alpha1.BoundDatasetPhase:
+				cond = utils.NewDatasetCondition(datav1alpha1.DatasetReady, datav1alpha1.DatasetReadyReason,
+					"The ddc runtime is ready.",
+					corev1.ConditionTrue)
+			case datav1alpha1.FailedDatasetPhase:
+				cond = utils.NewDatasetCondition(datav1alpha1.DatasetReady, datav1alpha1.DatasetReadyReason,
+					"The ddc runtime is not ready.",
+					corev1.ConditionFalse)
+			default:
+				cond = utils.NewDatasetCondition(datav1alpha1.DatasetReady, datav1alpha1.DatasetReadyReason,
+					"The ddc runtime is unknown.",
+					corev1.ConditionFalse)
+			}
+			datasetToUpdate.Status.Phase = phase
+			datasetToUpdate.Status.Conditions = utils.UpdateDatasetCondition(datasetToUpdate.Status.Conditions,
+				cond)
+		}
+		datasetToUpdate.Status.CacheStates = runtime.Status.CacheStates
+
+		e.Log.Info("the dataset status", "status", datasetToUpdate.Status)
+
+		if !reflect.DeepEqual(dataset.Status, datasetToUpdate.Status) {
+			err = e.Client.Status().Update(context.TODO(), datasetToUpdate)
+			if err != nil {
+				e.Log.Error(err, "Update dataset")
+				return err
+			}
+		}
+
+		return nil
+	})
+
+	if err != nil {
+		e.Log.Error(err, "Update dataset")
+		return err
+	}
+
+	return
+}
+
+// UpdateCacheOfDataset copies the runtime's cache states to the dataset and registers this runtime in the dataset's runtime list
+func (e *JindoFSxEngine) UpdateCacheOfDataset() (err error) {
+	defer utils.TimeTrack(time.Now(), "JindoFSxEngine.UpdateCacheOfDataset", "name", e.name, "namespace", e.namespace)
+	// 1. get the runtime to read its reported cache states
+	runtime, err := e.getRuntime()
+	if err != nil {
+		return err
+	}
+
+	// 2. update the dataset status, retrying on update conflicts
+	err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+		dataset, err := utils.GetDataset(e.Client, e.name, e.namespace)
+		if err != nil {
+			return err
+		}
+		datasetToUpdate := dataset.DeepCopy()
+
+		datasetToUpdate.Status.CacheStates = runtime.Status.CacheStates
+		// propagate the runtime-reported cache states to the bound dataset
+
+		if len(datasetToUpdate.Status.Runtimes) == 0 {
+			datasetToUpdate.Status.Runtimes = []datav1alpha1.Runtime{}
+		}
+
+		datasetToUpdate.Status.Runtimes = utils.AddRuntimesIfNotExist(datasetToUpdate.Status.Runtimes, utils.NewRuntime(e.name,
+			e.namespace,
+			common.AccelerateCategory,
+			common.JindoRuntime,
+			e.runtime.Spec.Master.Replicas))
+
+		e.Log.Info("the dataset status", "status", datasetToUpdate.Status)
+
+		if !reflect.DeepEqual(dataset.Status, datasetToUpdate.Status) {
+			err = e.Client.Status().Update(context.TODO(), datasetToUpdate)
+			if err != nil {
+				e.Log.Error(err, "Update dataset")
+				return err
+			}
+		} else {
+			e.Log.Info("No need to update the cache of the data")
+		}
+
+		return nil
+	})
+
+	if err != nil {
+		e.Log.Error(err, "Update dataset")
+		return err
+	}
+
+	return
+}
+
+func (e *JindoFSxEngine) 
BindToDataset() (err error) { + return e.UpdateDatasetStatus(datav1alpha1.BoundDatasetPhase) +} diff --git a/pkg/ddc/jindofsx/dataset_test.go b/pkg/ddc/jindofsx/dataset_test.go new file mode 100644 index 00000000000..028a64587e1 --- /dev/null +++ b/pkg/ddc/jindofsx/dataset_test.go @@ -0,0 +1,335 @@ +/* +Copyright 2022 The Fluid Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package jindofsx + +import ( + "context" + "reflect" + "testing" + + datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1" + "github.com/fluid-cloudnative/fluid/pkg/common" + "github.com/fluid-cloudnative/fluid/pkg/utils/fake" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +func TestUpdateCacheOfDataset(t *testing.T) { + testDatasetInputs := []*datav1alpha1.Dataset{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase", + Namespace: "fluid", + }, + Spec: datav1alpha1.DatasetSpec{}, + }, + } + testObjs := []runtime.Object{} + for _, datasetInput := range testDatasetInputs { + testObjs = append(testObjs, datasetInput.DeepCopy()) + } + + testRuntimeInputs := []*datav1alpha1.JindoRuntime{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase", + Namespace: "fluid", + }, + Spec: datav1alpha1.JindoRuntimeSpec{ + Master: datav1alpha1.JindoCompTemplateSpec{ + Replicas: 1, + }, + }, + Status: datav1alpha1.RuntimeStatus{ + CacheStates: map[common.CacheStateName]string{ + common.Cached: "true", + }, + }, + }, + } + for _, runtimeInput := range testRuntimeInputs { + 
testObjs = append(testObjs, runtimeInput.DeepCopy()) + } + client := fake.NewFakeClientWithScheme(testScheme, testObjs...) + + engine := &JindoFSxEngine{ + Client: client, + Log: fake.NullLogger(), + name: "hbase", + namespace: "fluid", + runtime: testRuntimeInputs[0], + } + + err := engine.UpdateCacheOfDataset() + if err != nil { + t.Errorf("fail to exec UpdateCacheOfDataset with error %v", err) + return + } + + expectedDataset := datav1alpha1.Dataset{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase", + Namespace: "fluid", + }, + Status: datav1alpha1.DatasetStatus{ + CacheStates: map[common.CacheStateName]string{ + common.Cached: "true", + }, + Runtimes: []datav1alpha1.Runtime{ + { + Name: "hbase", + Namespace: "fluid", + Category: common.AccelerateCategory, + Type: common.JindoRuntime, + MasterReplicas: 1, + }, + }, + }, + } + + var datasets datav1alpha1.DatasetList + err = client.List(context.TODO(), &datasets) + if err != nil { + t.Errorf("fail to list the datasets with error %v", err) + return + } + if !reflect.DeepEqual(datasets.Items[0].Status, expectedDataset.Status) { + t.Errorf("fail to exec the function with error %v", err) + return + } +} + +func TestUpdateDatasetStatus(t *testing.T) { + testDatasetInputs := []*datav1alpha1.Dataset{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase", + Namespace: "fluid", + }, + Spec: datav1alpha1.DatasetSpec{}, + Status: datav1alpha1.DatasetStatus{ + HCFSStatus: &datav1alpha1.HCFSStatus{ + Endpoint: "test Endpoint", + UnderlayerFileSystemVersion: "Underlayer HCFS Compatible Version", + }, + }, + }, + } + testObjs := []runtime.Object{} + for _, datasetInput := range testDatasetInputs { + testObjs = append(testObjs, datasetInput.DeepCopy()) + } + + testRuntimeInputs := []*datav1alpha1.JindoRuntime{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase", + Namespace: "fluid", + }, + Spec: datav1alpha1.JindoRuntimeSpec{ + Master: datav1alpha1.JindoCompTemplateSpec{ + Replicas: 1, + }, + }, + Status: 
datav1alpha1.RuntimeStatus{ + CacheStates: map[common.CacheStateName]string{ + common.Cached: "true", + }, + }, + }, + } + for _, runtimeInput := range testRuntimeInputs { + testObjs = append(testObjs, runtimeInput.DeepCopy()) + } + client := fake.NewFakeClientWithScheme(testScheme, testObjs...) + + engine := &JindoFSxEngine{ + Client: client, + Log: fake.NullLogger(), + name: "hbase", + namespace: "fluid", + runtime: testRuntimeInputs[0], + } + + var testCase = []struct { + phase datav1alpha1.DatasetPhase + expectedResult datav1alpha1.Dataset + }{ + { + phase: datav1alpha1.BoundDatasetPhase, + expectedResult: datav1alpha1.Dataset{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase", + Namespace: "fluid", + }, + Status: datav1alpha1.DatasetStatus{ + Phase: datav1alpha1.BoundDatasetPhase, + CacheStates: map[common.CacheStateName]string{ + common.Cached: "true", + }, + HCFSStatus: &datav1alpha1.HCFSStatus{ + Endpoint: "test Endpoint", + UnderlayerFileSystemVersion: "Underlayer HCFS Compatible Version", + }, + }, + }, + }, + { + phase: datav1alpha1.FailedDatasetPhase, + expectedResult: datav1alpha1.Dataset{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase", + Namespace: "fluid", + }, + Status: datav1alpha1.DatasetStatus{ + Phase: datav1alpha1.FailedDatasetPhase, + CacheStates: map[common.CacheStateName]string{ + common.Cached: "true", + }, + HCFSStatus: &datav1alpha1.HCFSStatus{ + Endpoint: "test Endpoint", + UnderlayerFileSystemVersion: "Underlayer HCFS Compatible Version", + }, + }, + }, + }, + { + phase: datav1alpha1.NoneDatasetPhase, + expectedResult: datav1alpha1.Dataset{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase", + Namespace: "fluid", + }, + Status: datav1alpha1.DatasetStatus{ + Phase: datav1alpha1.NoneDatasetPhase, + CacheStates: map[common.CacheStateName]string{ + common.Cached: "true", + }, + HCFSStatus: &datav1alpha1.HCFSStatus{ + Endpoint: "test Endpoint", + UnderlayerFileSystemVersion: "Underlayer HCFS Compatible Version", + }, + }, + }, + }, + } + + 
for _, test := range testCase { + err := engine.UpdateDatasetStatus(test.phase) + if err != nil { + t.Errorf("fail to exec UpdateCacheOfDataset with error %v", err) + return + } + + var datasets datav1alpha1.DatasetList + err = client.List(context.TODO(), &datasets) + if err != nil { + t.Errorf("fail to list the datasets with error %v", err) + return + } + if !reflect.DeepEqual(datasets.Items[0].Status.Phase, test.expectedResult.Status.Phase) || + !reflect.DeepEqual(datasets.Items[0].Status.CacheStates, test.expectedResult.Status.CacheStates) || + !reflect.DeepEqual(datasets.Items[0].Status.HCFSStatus, test.expectedResult.Status.HCFSStatus) { + t.Errorf("fail to exec the function with error %v", err) + return + } + } +} + +func TestBindToDataset(t *testing.T) { + testDatasetInputs := []*datav1alpha1.Dataset{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase", + Namespace: "fluid", + }, + Spec: datav1alpha1.DatasetSpec{}, + Status: datav1alpha1.DatasetStatus{ + HCFSStatus: &datav1alpha1.HCFSStatus{ + Endpoint: "test Endpoint", + UnderlayerFileSystemVersion: "Underlayer HCFS Compatible Version", + }, + }, + }, + } + testObjs := []runtime.Object{} + for _, datasetInput := range testDatasetInputs { + testObjs = append(testObjs, datasetInput.DeepCopy()) + } + + testRuntimeInputs := []*datav1alpha1.JindoRuntime{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase", + Namespace: "fluid", + }, + Spec: datav1alpha1.JindoRuntimeSpec{}, + Status: datav1alpha1.RuntimeStatus{ + CacheStates: map[common.CacheStateName]string{ + common.Cached: "true", + }, + }, + }, + } + for _, runtimeInput := range testRuntimeInputs { + testObjs = append(testObjs, runtimeInput.DeepCopy()) + } + client := fake.NewFakeClientWithScheme(testScheme, testObjs...) 
+ + engine := &JindoFSxEngine{ + Client: client, + Log: fake.NullLogger(), + name: "hbase", + namespace: "fluid", + runtime: testRuntimeInputs[0], + } + + var expectedResult = datav1alpha1.Dataset{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase", + Namespace: "fluid", + }, + Status: datav1alpha1.DatasetStatus{ + Phase: datav1alpha1.BoundDatasetPhase, + CacheStates: map[common.CacheStateName]string{ + common.Cached: "true", + }, + HCFSStatus: &datav1alpha1.HCFSStatus{ + Endpoint: "test Endpoint", + UnderlayerFileSystemVersion: "Underlayer HCFS Compatible Version", + }, + }, + } + err := engine.BindToDataset() + if err != nil { + t.Errorf("fail to exec UpdateCacheOfDataset with error %v", err) + return + } + + var datasets datav1alpha1.DatasetList + err = client.List(context.TODO(), &datasets) + if err != nil { + t.Errorf("fail to list the datasets with error %v", err) + return + } + if !reflect.DeepEqual(datasets.Items[0].Status.Phase, expectedResult.Status.Phase) || + !reflect.DeepEqual(datasets.Items[0].Status.CacheStates, expectedResult.Status.CacheStates) || + !reflect.DeepEqual(datasets.Items[0].Status.HCFSStatus, expectedResult.Status.HCFSStatus) { + t.Errorf("fail to exec the function with error %v", err) + return + } +} diff --git a/pkg/ddc/jindofsx/delete_volume.go b/pkg/ddc/jindofsx/delete_volume.go new file mode 100644 index 00000000000..e09619d9b8b --- /dev/null +++ b/pkg/ddc/jindofsx/delete_volume.go @@ -0,0 +1,65 @@ +/* +Copyright 2022 The Fluid Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package jindofsx
+
+import (
+	volumeHelper "github.com/fluid-cloudnative/fluid/pkg/utils/dataset/volume"
+)
+
+// DeleteVolume deletes the fuse PersistentVolumeClaim first and then the PersistentVolume
+func (e *JindoFSxEngine) DeleteVolume() (err error) {
+
+	if e.runtime == nil {
+		e.runtime, err = e.getRuntime()
+		if err != nil {
+			return
+		}
+	}
+
+	err = e.deleteFusePersistentVolumeClaim()
+	if err != nil {
+		return
+	}
+
+	err = e.deleteFusePersistentVolume()
+	if err != nil {
+		return
+	}
+
+	return
+
+}
+
+// deleteFusePersistentVolume removes the fuse PersistentVolume of this runtime
+func (e *JindoFSxEngine) deleteFusePersistentVolume() (err error) {
+	runtimeInfo, err := e.getRuntimeInfo()
+	if err != nil {
+		return err
+	}
+
+	return volumeHelper.DeleteFusePersistentVolume(e.Client, runtimeInfo, e.Log)
+}
+
+// deleteFusePersistentVolumeClaim removes the fuse PersistentVolumeClaim of this runtime
+func (e *JindoFSxEngine) deleteFusePersistentVolumeClaim() (err error) {
+	runtimeInfo, err := e.getRuntimeInfo()
+	if err != nil {
+		return err
+	}
+
+	return volumeHelper.DeleteFusePersistentVolumeClaim(e.Client, runtimeInfo, e.Log)
+}
diff --git a/pkg/ddc/jindofsx/delete_volume_test.go b/pkg/ddc/jindofsx/delete_volume_test.go
new file mode 100644
index 00000000000..3b4e09210b2
--- /dev/null
+++ b/pkg/ddc/jindofsx/delete_volume_test.go
@@ -0,0 +1,218 @@
+/*
+Copyright 2022 The Fluid Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package jindofsx + +import ( + "context" + "reflect" + "testing" + + datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1" + "github.com/fluid-cloudnative/fluid/pkg/ddc/base" + "github.com/fluid-cloudnative/fluid/pkg/utils/fake" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type TestCase struct { + engine *JindoFSxEngine + isDeleted bool + isErr bool +} + +func newTestJindoFSxEngine(client client.Client, name string, namespace string, withRunTime bool) *JindoFSxEngine { + runTime := &datav1alpha1.JindoRuntime{} + runTimeInfo, _ := base.BuildRuntimeInfo(name, namespace, "jindo", datav1alpha1.TieredStore{}) + if !withRunTime { + runTimeInfo = nil + runTime = nil + } + engine := &JindoFSxEngine{ + runtime: runTime, + name: name, + namespace: namespace, + Client: client, + runtimeInfo: runTimeInfo, + Log: fake.NullLogger(), + } + return engine +} + +func doTestCases(testCases []TestCase, t *testing.T) { + for _, test := range testCases { + err := test.engine.DeleteVolume() + pv := &v1.PersistentVolume{} + nullPV := v1.PersistentVolume{} + key := types.NamespacedName{ + Namespace: test.engine.namespace, + Name: test.engine.name, + } + _ = test.engine.Client.Get(context.TODO(), key, pv) + if test.isDeleted != reflect.DeepEqual(nullPV, *pv) { + t.Errorf("PV/PVC still exist after delete.") + } + isErr := err != nil + if isErr != test.isErr { + t.Errorf("expected %t, got %t.", test.isErr, isErr) + } + } +} + +func TestJindoFSxEngine_DeleteVolume(t *testing.T) { + testPVInputs := []*v1.PersistentVolume{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fluid-hbase", + Annotations: map[string]string{ + "CreatedBy": "fluid", + }, + }, + Spec: v1.PersistentVolumeSpec{}, + }, + } + + tests := []runtime.Object{} + + for _, pvInput := range testPVInputs { + tests = append(tests, pvInput.DeepCopy()) + } + + testPVCInputs 
:= []*v1.PersistentVolumeClaim{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase", + Namespace: "fluid", + Finalizers: []string{"kubernetes.io/pvc-protection"}, // no err + }, + Spec: v1.PersistentVolumeClaimSpec{}, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "error", + Namespace: "fluid", + Finalizers: []string{"kubernetes.io/pvc-protection"}, + Annotations: map[string]string{ + "CreatedBy": "fluid", // have err + }, + }, + Spec: v1.PersistentVolumeClaimSpec{}, + }, + } + + for _, pvcInput := range testPVCInputs { + tests = append(tests, pvcInput.DeepCopy()) + } + + fakeClient := fake.NewFakeClientWithScheme(testScheme, tests...) + JindoFSxEngineCommon := newTestJindoFSxEngine(fakeClient, "hbase", "fluid", true) + JindoFSxEngineErr := newTestJindoFSxEngine(fakeClient, "error", "fluid", true) + JindoFSxEngineNoRunTime := newTestJindoFSxEngine(fakeClient, "hbase", "fluid", false) + var testCases = []TestCase{ + { + engine: JindoFSxEngineCommon, + isDeleted: true, + isErr: false, + }, + { + engine: JindoFSxEngineErr, + isDeleted: true, + isErr: true, + }, + { + engine: JindoFSxEngineNoRunTime, + isDeleted: true, + isErr: true, + }, + } + doTestCases(testCases, t) +} + +func TestJindoFSxEngine_DeleteFusePersistentVolume(t *testing.T) { + testPVInputs := []*v1.PersistentVolume{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fluid-hbase", + Annotations: map[string]string{ + "CreatedBy": "fluid", + }, + }, + Spec: v1.PersistentVolumeSpec{}, + }, + } + + tests := []runtime.Object{} + + for _, pvInput := range testPVInputs { + tests = append(tests, pvInput.DeepCopy()) + } + + fakeClient := fake.NewFakeClientWithScheme(testScheme, tests...) 
+ JindoFSxEngine := newTestJindoFSxEngine(fakeClient, "hbase", "fluid", true) + JindoFSxEngineNoRuntime := newTestJindoFSxEngine(fakeClient, "hbase", "fluid", false) + testCases := []TestCase{ + { + engine: JindoFSxEngine, + isDeleted: true, + isErr: false, + }, + { + engine: JindoFSxEngineNoRuntime, + isDeleted: true, + isErr: true, + }, + } + doTestCases(testCases, t) +} + +func TestJindoFSxEngine_DeleteFusePersistentVolumeClaim(t *testing.T) { + testPVCInputs := []*v1.PersistentVolumeClaim{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase", + Namespace: "fluid", + Finalizers: []string{"kubernetes.io/pvc-protection"}, // no err + }, + Spec: v1.PersistentVolumeClaimSpec{}, + }, + } + + tests := []runtime.Object{} + + for _, pvcInput := range testPVCInputs { + tests = append(tests, pvcInput.DeepCopy()) + } + + fakeClient := fake.NewFakeClientWithScheme(testScheme, tests...) + JindoFSxEngine := newTestJindoFSxEngine(fakeClient, "hbase", "fluid", true) + JindoFSxEngineNoRuntime := newTestJindoFSxEngine(fakeClient, "hbase", "fluid", false) + testCases := []TestCase{ + { + engine: JindoFSxEngine, + isDeleted: true, + isErr: false, + }, + { + engine: JindoFSxEngineNoRuntime, + isDeleted: true, + isErr: true, + }, + } + doTestCases(testCases, t) +} diff --git a/pkg/ddc/jindofsx/deprecated_label.go b/pkg/ddc/jindofsx/deprecated_label.go new file mode 100644 index 00000000000..f7637057a0c --- /dev/null +++ b/pkg/ddc/jindofsx/deprecated_label.go @@ -0,0 +1,64 @@ +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package jindofsx + +import ( + "github.com/fluid-cloudnative/fluid/pkg/common/deprecated" + apierrs "k8s.io/apimachinery/pkg/api/errors" +) + +func (e *JindoFSxEngine) getDeprecatedCommonLabelname() string { + return deprecated.LabelAnnotationStorageCapacityPrefix + e.namespace + "-" + e.name +} + +func (e *JindoFSxEngine) HasDeprecatedCommonLabelname() (deprecated bool, err error) { + + // return deprecated.LabelAnnotationStorageCapacityPrefix + e.namespace + "-" + e.name + + var ( + workerName string = e.getWorkerName() + namespace string = e.namespace + ) + + // runtime, err := e.getRuntime() + // if err != nil { + // return + // } + + workers, err := e.getDaemonset(workerName, namespace) + if err != nil { + if apierrs.IsNotFound(err) { + e.Log.Info("Workers with deprecated label not found") + deprecated = false + err = nil + return + } + e.Log.Error(err, "Failed to get worker", "workerName", workerName) + return deprecated, err + } + + nodeSelectors := workers.Spec.Template.Spec.NodeSelector + e.Log.Info("The current node selectors for worker", "workerName", workerName, "nodeSelector", nodeSelectors) + + if _, deprecated = nodeSelectors[e.getDeprecatedCommonLabelname()]; deprecated { + // + e.Log.Info("the deprecated node selector exists", "nodeselector", e.getDeprecatedCommonLabelname()) + } else { + e.Log.Info("The deprecated node selector doesn't exist", "nodeselector", e.getDeprecatedCommonLabelname()) + } + + return +} diff --git a/pkg/ddc/jindofsx/deprecated_label_test.go b/pkg/ddc/jindofsx/deprecated_label_test.go new file mode 100644 index 00000000000..c5014f53830 --- /dev/null +++ b/pkg/ddc/jindofsx/deprecated_label_test.go @@ -0,0 +1,148 @@ +/* +Copyright 2022 The Fluid Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package jindofsx + +import ( + "testing" + + datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1" + "github.com/fluid-cloudnative/fluid/pkg/ddc/base" + "github.com/fluid-cloudnative/fluid/pkg/utils/fake" + v1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func getTestJindoFSxEngine(client client.Client, name string, namespace string) *JindoFSxEngine { + runTime := &datav1alpha1.JindoRuntime{} + runTimeInfo, _ := base.BuildRuntimeInfo(name, namespace, "jindo", datav1alpha1.TieredStore{}) + engine := &JindoFSxEngine{ + runtime: runTime, + name: name, + namespace: namespace, + Client: client, + runtimeInfo: runTimeInfo, + Log: fake.NullLogger(), + } + return engine +} + +func TestJindoFSxEngine_GetDeprecatedCommonLabelname(t *testing.T) { + testCases := []struct { + name string + namespace string + out string + }{ + { + name: "hbase", + namespace: "fluid", + out: "data.fluid.io/storage-fluid-hbase", + }, + { + name: "hadoop", + namespace: "fluid", + out: "data.fluid.io/storage-fluid-hadoop", + }, + { + name: "fluid", + namespace: "test", + out: "data.fluid.io/storage-test-fluid", + }, + } + fakeClient := fake.NewFakeClientWithScheme(testScheme) + for _, test := range testCases { + engine := getTestJindoFSxEngine(fakeClient, test.name, test.namespace) + out := engine.getDeprecatedCommonLabelname() + if out != test.out { + t.Errorf("input parameter is %s-%s,expected %s, got %s", test.namespace, test.name, test.out, out) + 
} + } + +} + +func TestJindoFSxEngine_HasDeprecatedCommonLabelname(t *testing.T) { + + // worker-name = e.name+"-worker" + daemonSetWithSelector := &v1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase-worker", + Namespace: "fluid", + }, + Spec: v1.DaemonSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{NodeSelector: map[string]string{"data.fluid.io/storage-fluid-hbase": "selector"}}, + }, + }, + } + daemonSetWithoutSelector := &v1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hadoop-worker", + Namespace: "fluid", + }, + Spec: v1.DaemonSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{NodeSelector: map[string]string{"data.fluid.io/storage-fluid-hbase": "selector"}}, + }, + }, + } + runtimeObjs := []runtime.Object{} + runtimeObjs = append(runtimeObjs, daemonSetWithSelector) + runtimeObjs = append(runtimeObjs, daemonSetWithoutSelector) + scheme := runtime.NewScheme() + scheme.AddKnownTypes(v1.SchemeGroupVersion, daemonSetWithSelector) + fakeClient := fake.NewFakeClientWithScheme(scheme, runtimeObjs...) 
+ + testCases := []struct { + name string + namespace string + out bool + isErr bool + }{ + { + name: "hbase", + namespace: "fluid", + out: false, + isErr: false, + }, + { + name: "none", + namespace: "fluid", + out: false, + isErr: false, + }, + { + name: "hadoop", + namespace: "fluid", + out: false, + isErr: false, + }, + } + + for _, test := range testCases { + engine := getTestJindoFSxEngine(fakeClient, test.name, test.namespace) + out, err := engine.HasDeprecatedCommonLabelname() + if out != test.out { + t.Errorf("input parameter is %s-%s,expected %t, got %t", test.namespace, test.name, test.out, out) + } + isErr := err != nil + if isErr != test.isErr { + t.Errorf("input parameter is %s-%s,expected %t, got %t", test.namespace, test.name, test.isErr, isErr) + } + } +} diff --git a/pkg/ddc/jindofsx/engine.go b/pkg/ddc/jindofsx/engine.go new file mode 100644 index 00000000000..0152ff2cf44 --- /dev/null +++ b/pkg/ddc/jindofsx/engine.go @@ -0,0 +1,87 @@ +/* +Copyright 2022 The Fluid Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package jindofsx + +import ( + "fmt" + + datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1" + "github.com/fluid-cloudnative/fluid/pkg/ctrl" + "github.com/fluid-cloudnative/fluid/pkg/ddc/base" + cruntime "github.com/fluid-cloudnative/fluid/pkg/runtime" + "github.com/fluid-cloudnative/fluid/pkg/utils/kubeclient" + "github.com/go-logr/logr" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type JindoFSxEngine struct { + runtime *datav1alpha1.JindoRuntime + name string + namespace string + runtimeType string + Log logr.Logger + client.Client + //When reaching this gracefulShutdownLimits, the system is forced to clean up. + gracefulShutdownLimits int32 + retryShutdown int32 + //initImage string + runtimeInfo base.RuntimeInfoInterface + MetadataSyncDoneCh chan MetadataSyncResult + cacheNodeNames []string + Recorder record.EventRecorder + *ctrl.Helper +} + +func Build(id string, ctx cruntime.ReconcileRequestContext) (base.Engine, error) { + engine := &JindoFSxEngine{ + name: ctx.Name, + namespace: ctx.Namespace, + Client: ctx.Client, + Log: ctx.Log, + runtimeType: ctx.RuntimeType, + gracefulShutdownLimits: 5, + retryShutdown: 0, + cacheNodeNames: []string{}, + Recorder: ctx.Recorder, + } + // var implement base.Implement = engine + // engine.TemplateEngine = template + if ctx.Runtime != nil { + runtime, ok := ctx.Runtime.(*datav1alpha1.JindoRuntime) + if !ok { + return nil, fmt.Errorf("engine %s is failed to parse", ctx.Name) + } + engine.runtime = runtime + } else { + return nil, fmt.Errorf("engine %s is failed to parse", ctx.Name) + } + + // Build and setup runtime info + runtimeInfo, err := engine.getRuntimeInfo() + if err != nil { + return nil, fmt.Errorf("engine %s failed to get runtime info", ctx.Name) + } + + // Build the helper + engine.Helper = ctrl.BuildHelper(runtimeInfo, ctx.Client, engine.Log) + + template := base.NewTemplateEngine(engine, id, ctx) + + err = kubeclient.EnsureNamespace(ctx.Client, 
ctx.Namespace) + return template, err +} diff --git a/pkg/ddc/jindofsx/engine_test.go b/pkg/ddc/jindofsx/engine_test.go new file mode 100644 index 00000000000..7db03e050ff --- /dev/null +++ b/pkg/ddc/jindofsx/engine_test.go @@ -0,0 +1,92 @@ +/* +Copyright 2021 The Fluid Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package jindofsx + +import ( + "testing" + + datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1" + "github.com/fluid-cloudnative/fluid/pkg/common" + cruntime "github.com/fluid-cloudnative/fluid/pkg/runtime" + "github.com/fluid-cloudnative/fluid/pkg/utils/fake" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" +) + +func TestBuild(t *testing.T) { + var namespace = v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "fluid", + }, + } + testObjs := []runtime.Object{} + testObjs = append(testObjs, namespace.DeepCopy()) + + var dataset = datav1alpha1.Dataset{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase", + Namespace: "fluid", + }, + } + testObjs = append(testObjs, dataset.DeepCopy()) + + var runtime = datav1alpha1.JindoRuntime{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase", + Namespace: "fluid", + }, + Spec: datav1alpha1.JindoRuntimeSpec{ + Master: datav1alpha1.JindoCompTemplateSpec{ + Replicas: 1, + }, + Fuse: datav1alpha1.JindoFuseSpec{ + Global: false, + }, + }, + Status: datav1alpha1.RuntimeStatus{ + CacheStates: 
map[common.CacheStateName]string{ + common.Cached: "true", + }, + }, + } + testObjs = append(testObjs, runtime.DeepCopy()) + + var daemonset = appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase-worker", + Namespace: "fluid", + }, + } + testObjs = append(testObjs, daemonset.DeepCopy()) + client := fake.NewFakeClientWithScheme(testScheme, testObjs...) + + var ctx = cruntime.ReconcileRequestContext{ + NamespacedName: types.NamespacedName{ + Name: "hbase", + Namespace: "fluid", + }, + Client: client, + Log: fake.NullLogger(), + RuntimeType: common.JindoRuntime, + Runtime: &runtime, + } + + engine, err := Build("testId", ctx) + if err != nil || engine == nil { + t.Errorf("fail to exec the build function with the eror %v", err) + } + +} diff --git a/pkg/ddc/jindofsx/health_check.go b/pkg/ddc/jindofsx/health_check.go new file mode 100644 index 00000000000..5f5715681d1 --- /dev/null +++ b/pkg/ddc/jindofsx/health_check.go @@ -0,0 +1,153 @@ +/* +Copyright 2022 The Fluid Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package jindofsx + +import ( + data "github.com/fluid-cloudnative/fluid/api/v1alpha1" + "github.com/fluid-cloudnative/fluid/pkg/common" + "github.com/fluid-cloudnative/fluid/pkg/ctrl" + fluiderrs "github.com/fluid-cloudnative/fluid/pkg/errors" + "github.com/fluid-cloudnative/fluid/pkg/utils/kubeclient" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/retry" +) + +func (e *JindoFSxEngine) CheckRuntimeHealthy() (err error) { + // 1. Check the healthy of the master + err = e.checkMasterHealthy() + if err != nil { + e.Log.Error(err, "The master is not healthy") + updateErr := e.UpdateDatasetStatus(data.FailedDatasetPhase) + if updateErr != nil { + e.Log.Error(updateErr, "Failed to update dataset") + } + return + } + + // 2. Check the healthy of the workers + err = e.checkWorkersHealthy() + if err != nil { + e.Log.Error(err, "The worker is not healthy") + updateErr := e.UpdateDatasetStatus(data.FailedDatasetPhase) + if updateErr != nil { + e.Log.Error(updateErr, "Failed to update dataset") + } + return + } + + // 3. Check the healthy of the fuse + err = e.checkFuseHealthy() + if err != nil { + e.Log.Error(err, "The fuse is not healthy") + updateErr := e.UpdateDatasetStatus(data.FailedDatasetPhase) + if updateErr != nil { + e.Log.Error(updateErr, "Failed to update dataset") + } + return + } + + // 4. 
Update the dataset as Bounded + return e.UpdateDatasetStatus(data.BoundDatasetPhase) +} + +// checkMasterHealthy checks the master healthy +func (e *JindoFSxEngine) checkMasterHealthy() (err error) { + master, err := kubeclient.GetStatefulSet(e.Client, e.getMasterName(), e.namespace) + if err != nil { + return err + } + + err = retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) { + runtime, err := e.getRuntime() + if err != nil { + return + } + runtimeToUpdate := runtime.DeepCopy() + err = e.Helper.CheckMasterHealthy(e.Recorder, runtimeToUpdate, runtimeToUpdate.Status, master) + if err != nil { + e.Log.Error(err, "Failed to check master healthy") + } + return + }) + + if err != nil { + e.Log.Error(err, "Failed to check master healthy") + } + + return +} + +// checkWorkerHealthy checks the Worker healthy +func (e *JindoFSxEngine) checkWorkersHealthy() (err error) { + workers, err := ctrl.GetWorkersAsStatefulset(e.Client, + types.NamespacedName{Namespace: e.namespace, Name: e.getWorkerName()}) + if err != nil { + if fluiderrs.IsDeprecated(err) { + e.Log.Info("Warning: the current runtime is created by runtime controller before v0.7.0, checking worker health state is not supported. 
To support these features, please create a new dataset", "details", err) + e.Recorder.Event(e.runtime, corev1.EventTypeWarning, common.RuntimeDeprecated, "The runtime is created by controllers before v0.7.0, to fully enable latest capabilities, please delete the runtime and create a new one") + return nil + } + return + } + + err = retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) { + runtime, err := e.getRuntime() + if err != nil { + return + } + runtimeToUpdate := runtime.DeepCopy() + err = e.Helper.CheckWorkersHealthy(e.Recorder, runtimeToUpdate, runtimeToUpdate.Status, workers) + if err != nil { + e.Log.Error(err, "Failed to check Worker healthy") + } + return + }) + + if err != nil { + e.Log.Error(err, "Failed to check Worker healthy") + } + + return +} + +// checkFuseHealthy checks the Fuse healthy +func (e *JindoFSxEngine) checkFuseHealthy() (err error) { + Fuse, err := kubeclient.GetDaemonset(e.Client, e.getFuseName(), e.namespace) + if err != nil { + return err + } + + err = retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) { + runtime, err := e.getRuntime() + if err != nil { + return + } + runtimeToUpdate := runtime.DeepCopy() + err = e.Helper.CheckFuseHealthy(e.Recorder, runtimeToUpdate, runtimeToUpdate.Status, Fuse) + if err != nil { + e.Log.Error(err, "Failed to check Fuse healthy") + } + return + }) + + if err != nil { + e.Log.Error(err, "Failed to check Fuse healthy") + } + + return +} diff --git a/pkg/ddc/jindofsx/health_check_test.go b/pkg/ddc/jindofsx/health_check_test.go new file mode 100644 index 00000000000..54c41d1de61 --- /dev/null +++ b/pkg/ddc/jindofsx/health_check_test.go @@ -0,0 +1,369 @@ +/* +Copyright 2022 The Fluid Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package jindofsx + +import ( + "testing" + + datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1" + "github.com/fluid-cloudnative/fluid/pkg/ddc/base" + appsv1 "k8s.io/api/apps/v1" + "k8s.io/client-go/tools/record" + + "github.com/fluid-cloudnative/fluid/pkg/utils/fake" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + + ctrlhelper "github.com/fluid-cloudnative/fluid/pkg/ctrl" +) + +func TestCheckRuntimeHealthy(t *testing.T) { + type fields struct { + runtime *datav1alpha1.JindoRuntime + worker *appsv1.StatefulSet + master *appsv1.StatefulSet + fuse *appsv1.DaemonSet + name string + namespace string + } + tests := []struct { + name string + fields fields + wantErr bool + }{ + { + name: "healthy", + fields: fields{ + name: "health-data", + namespace: "big-data", + runtime: &datav1alpha1.JindoRuntime{ + ObjectMeta: metav1.ObjectMeta{ + Name: "health-data", + Namespace: "big-data", + }, + Spec: datav1alpha1.JindoRuntimeSpec{ + Replicas: 1, + Fuse: datav1alpha1.JindoFuseSpec{ + Global: true, + }, + }, + }, + worker: &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "health-data-jindofs-worker", + Namespace: "big-data", + }, + Status: appsv1.StatefulSetStatus{ + Replicas: 1, + ReadyReplicas: 1, + }, + }, + master: &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "health-data-jindofs-master", + Namespace: "big-data", + }, + Status: appsv1.StatefulSetStatus{ + Replicas: 1, + ReadyReplicas: 1, + }, + }, + fuse: &appsv1.DaemonSet{ + 
ObjectMeta: metav1.ObjectMeta{ + Name: "health-data-jindofs-fuse", + Namespace: "big-data", + }, + Status: appsv1.DaemonSetStatus{ + NumberUnavailable: 0, + }, + }, + }, + wantErr: false, + }, + { + name: "master-nohealthy", + fields: fields{ + name: "unhealthy-master", + namespace: "big-data", + runtime: &datav1alpha1.JindoRuntime{ + ObjectMeta: metav1.ObjectMeta{ + Name: "unhealthy-master", + Namespace: "big-data", + }, + Spec: datav1alpha1.JindoRuntimeSpec{ + Replicas: 1, + Fuse: datav1alpha1.JindoFuseSpec{ + Global: true, + }, + }, + }, + worker: &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "unhealthy-master-jindofs-worker", + Namespace: "big-data", + }, + Status: appsv1.StatefulSetStatus{ + ReadyReplicas: 0, + Replicas: 1, + }, + }, master: &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "unhealthy-master-jindofs-master", + Namespace: "big-data", + }, + Status: appsv1.StatefulSetStatus{ + ReadyReplicas: 0, + }, + }, fuse: &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "unhealthy-master-jindofs-fuse", + Namespace: "big-data", + }, + Status: appsv1.DaemonSetStatus{ + NumberUnavailable: 1, + }, + }, + }, + wantErr: true, + }, { + name: "worker-nohealthy", + fields: fields{ + name: "unhealthy-worker", + namespace: "big-data", + runtime: &datav1alpha1.JindoRuntime{ + ObjectMeta: metav1.ObjectMeta{ + Name: "unhealthy-worker", + Namespace: "big-data", + }, + Spec: datav1alpha1.JindoRuntimeSpec{ + Replicas: 1, + Fuse: datav1alpha1.JindoFuseSpec{ + Global: true, + }, + }, + }, + worker: &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "unhealthy-worker-jindofs-worker", + Namespace: "big-data", + }, + Status: appsv1.StatefulSetStatus{ + ReadyReplicas: 0, + Replicas: 1, + }, + }, master: &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "unhealthy-worker-jindofs-master", + Namespace: "big-data", + }, + Status: appsv1.StatefulSetStatus{ + ReadyReplicas: 1, + Replicas: 1, + }, + }, fuse: 
&appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "unhealthy-worker-jindofs-fuse", + Namespace: "big-data", + }, + Status: appsv1.DaemonSetStatus{ + NumberUnavailable: 1, + }, + }, + }, + wantErr: true, + }, { + name: "fuse-nohealthy", + fields: fields{ + name: "unhealthy-fuse", + namespace: "big-data", + runtime: &datav1alpha1.JindoRuntime{ + ObjectMeta: metav1.ObjectMeta{ + Name: "unhealthy-fuse", + Namespace: "big-data", + }, + Spec: datav1alpha1.JindoRuntimeSpec{ + Replicas: 1, + Fuse: datav1alpha1.JindoFuseSpec{ + Global: true, + }, + }, + }, + worker: &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "unhealthy-fuse-jindofs-worker", + Namespace: "big-data", + }, + Status: appsv1.StatefulSetStatus{ + ReadyReplicas: 1, + Replicas: 1, + }, + }, master: &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "unhealthy-fuse-jindofs-master", + Namespace: "big-data", + }, + Status: appsv1.StatefulSetStatus{ + ReadyReplicas: 1, + Replicas: 1, + }, + }, fuse: &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "unhealthy-fuse-jindofs-fuse", + Namespace: "big-data", + }, + Status: appsv1.DaemonSetStatus{ + NumberUnavailable: 1, + }, + }, + }, + wantErr: true, + }, { + name: "no-master-nohealthy", + fields: fields{ + name: "unhealthy-no-master", + namespace: "big-data", + runtime: &datav1alpha1.JindoRuntime{ + ObjectMeta: metav1.ObjectMeta{ + Name: "unhealthy-no-master", + Namespace: "big-data", + }, + Spec: datav1alpha1.JindoRuntimeSpec{ + Replicas: 1, + }, + }, + worker: &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "unhealthy-no-master-jindofs-worker", + Namespace: "big-data", + }, + Status: appsv1.StatefulSetStatus{ + ReadyReplicas: 1, + Replicas: 1, + }, + }, master: &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "unhealthy-no-master-jindofs", + Namespace: "big-data", + }, + Status: appsv1.StatefulSetStatus{ + ReadyReplicas: 1, + Replicas: 1, + }, + }, fuse: &appsv1.DaemonSet{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: "unhealthy-no-master-jindofs-no-master", + Namespace: "big-data", + }, + Status: appsv1.DaemonSetStatus{ + NumberUnavailable: 1, + }, + }, + }, + wantErr: true, + }, { + name: "no-worker-nohealthy", + fields: fields{ + name: "unhealthy-no-worker", + namespace: "big-data", + runtime: &datav1alpha1.JindoRuntime{ + ObjectMeta: metav1.ObjectMeta{ + Name: "unhealthy-no-worker", + Namespace: "big-data", + }, + Spec: datav1alpha1.JindoRuntimeSpec{ + Replicas: 1, + }, + }, + worker: &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "unhealthy-no-worker-jindofs-master", + Namespace: "big-data", + }, + Status: appsv1.StatefulSetStatus{ + ReadyReplicas: 1, + Replicas: 1, + }, + }, master: &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "unhealthy-no-worker-jindofs", + Namespace: "big-data", + }, + Status: appsv1.StatefulSetStatus{ + ReadyReplicas: 1, + Replicas: 1, + }, + }, fuse: &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "unhealthy-no-worker-jindofs-no-worker", + Namespace: "big-data", + }, + Status: appsv1.DaemonSetStatus{ + NumberUnavailable: 1, + }, + }, + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + runtimeObjs := []runtime.Object{} + data := &datav1alpha1.Dataset{ + ObjectMeta: metav1.ObjectMeta{ + Name: tt.fields.name, + Namespace: tt.fields.namespace, + }, + } + + s := runtime.NewScheme() + s.AddKnownTypes(datav1alpha1.GroupVersion, tt.fields.runtime) + s.AddKnownTypes(datav1alpha1.GroupVersion, data) + s.AddKnownTypes(appsv1.SchemeGroupVersion, tt.fields.worker) + s.AddKnownTypes(appsv1.SchemeGroupVersion, tt.fields.fuse) + + _ = v1.AddToScheme(s) + + runtimeObjs = append(runtimeObjs, tt.fields.runtime, data, tt.fields.worker, tt.fields.master, tt.fields.fuse) + mockClient := fake.NewFakeClientWithScheme(s, runtimeObjs...) 
+ e := &JindoFSxEngine{ + runtime: tt.fields.runtime, + name: tt.fields.name, + namespace: tt.fields.namespace, + Client: mockClient, + Log: ctrl.Log.WithName(tt.fields.name), + Recorder: record.NewFakeRecorder(300), + } + + runtimeInfo, err := base.BuildRuntimeInfo(tt.fields.name, tt.fields.namespace, "jindo", datav1alpha1.TieredStore{}) + if err != nil { + t.Errorf("JindoFSxEngine.CheckWorkersReady() error = %v", err) + } + + e.Helper = ctrlhelper.BuildHelper(runtimeInfo, mockClient, e.Log) + + healthError := e.CheckRuntimeHealthy() + hasErr := (healthError != nil) + if tt.wantErr != hasErr { + t.Errorf("testcase %s check runtime healthy ,hasErr %v, wantErr %v, err:%s", tt.name, hasErr, tt.wantErr, healthError) + } + + }) + } + +} diff --git a/pkg/ddc/jindofsx/label.go b/pkg/ddc/jindofsx/label.go new file mode 100644 index 00000000000..bbd204a24ed --- /dev/null +++ b/pkg/ddc/jindofsx/label.go @@ -0,0 +1,27 @@ +/* +Copyright 2022 The Fluid Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package jindofsx + +import "github.com/fluid-cloudnative/fluid/pkg/common" + +func (e *JindoFSxEngine) getCommonLabelname() string { + return common.LabelAnnotationStorageCapacityPrefix + e.namespace + "-" + e.name +} + +func (e *JindoFSxEngine) getFuseLabelname() string { + return common.LabelAnnotationFusePrefix + e.namespace + "-" + e.name +} diff --git a/pkg/ddc/jindofsx/label_test.go b/pkg/ddc/jindofsx/label_test.go new file mode 100644 index 00000000000..b69b2f640cb --- /dev/null +++ b/pkg/ddc/jindofsx/label_test.go @@ -0,0 +1,53 @@ +/* +Copyright 2022 The Fluid Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package jindofsx + +import "testing" + +func TestGetCommonLabelname(t *testing.T) { + testCases := []struct { + name string + namespace string + out string + }{ + { + name: "hbase", + namespace: "fluid", + out: "fluid.io/s-fluid-hbase", + }, + { + name: "hadoop", + namespace: "fluid", + out: "fluid.io/s-fluid-hadoop", + }, + { + name: "common", + namespace: "default", + out: "fluid.io/s-default-common", + }, + } + for _, testCase := range testCases { + engine := &JindoFSxEngine{ + name: testCase.name, + namespace: testCase.namespace, + } + out := engine.getCommonLabelname() + if out != testCase.out { + t.Errorf("in: %s-%s, expect: %s, got: %s", testCase.namespace, testCase.name, testCase.out, out) + } + } +} diff --git a/pkg/ddc/jindofsx/load_data.go b/pkg/ddc/jindofsx/load_data.go new file mode 100644 index 00000000000..fbfb6d44f99 --- /dev/null +++ b/pkg/ddc/jindofsx/load_data.go @@ -0,0 +1,190 @@ +/* +Copyright 2022 The Fluid Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package jindofsx + +import ( + "fmt" + "io/ioutil" + "os" + "strings" + + datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1" + "github.com/fluid-cloudnative/fluid/pkg/common" + cdataload "github.com/fluid-cloudnative/fluid/pkg/dataload" + "github.com/fluid-cloudnative/fluid/pkg/ddc/jindo/operations" + cruntime "github.com/fluid-cloudnative/fluid/pkg/runtime" + "github.com/fluid-cloudnative/fluid/pkg/utils" + "github.com/fluid-cloudnative/fluid/pkg/utils/docker" + "github.com/fluid-cloudnative/fluid/pkg/utils/helm" + "gopkg.in/yaml.v2" + v1 "k8s.io/api/core/v1" +) + +// CreateDataLoadJob creates the job to load data +func (e *JindoFSxEngine) CreateDataLoadJob(ctx cruntime.ReconcileRequestContext, targetDataload datav1alpha1.DataLoad) (err error) { + log := ctx.Log.WithName("createDataLoadJob") + + // 1. Check if the helm release already exists + releaseName := utils.GetDataLoadReleaseName(targetDataload.Name) + jobName := utils.GetDataLoadJobName(releaseName) + var existed bool + existed, err = helm.CheckRelease(releaseName, targetDataload.Namespace) + if err != nil { + log.Error(err, "failed to check if release exists", "releaseName", releaseName, "namespace", targetDataload.Namespace) + return err + } + + // 2. 
install the helm chart if not exists + if !existed { + log.Info("DataLoad job helm chart not installed yet, will install") + valueFileName, err := e.generateDataLoadValueFile(ctx, targetDataload) + if err != nil { + log.Error(err, "failed to generate dataload chart's value file") + return err + } + chartName := utils.GetChartsDirectory() + "/" + cdataload.DATALOAD_CHART + "/" + common.JindoRuntime + err = helm.InstallRelease(releaseName, targetDataload.Namespace, valueFileName, chartName) + if err != nil { + log.Error(err, "failed to install dataload chart") + return err + } + log.Info("DataLoad job helm chart successfully installed", "namespace", targetDataload.Namespace, "releaseName", releaseName) + ctx.Recorder.Eventf(&targetDataload, v1.EventTypeNormal, common.DataLoadJobStarted, "The DataLoad job %s started", jobName) + } + return err +} + +// generateDataLoadValueFile builds a DataLoadValue by extracted specifications from the given DataLoad, and +// marshals the DataLoadValue to a temporary yaml file where stores values that'll be used by fluid dataloader helm chart +func (e *JindoFSxEngine) generateDataLoadValueFile(r cruntime.ReconcileRequestContext, dataload datav1alpha1.DataLoad) (valueFileName string, err error) { + targetDataset, err := utils.GetDataset(r.Client, dataload.Spec.Dataset.Name, dataload.Spec.Dataset.Namespace) + if err != nil { + return "", err + } + + imageName, imageTag := docker.GetWorkerImage(r.Client, dataload.Spec.Dataset.Name, "jindo", dataload.Spec.Dataset.Namespace) + + if len(imageName) == 0 { + defaultImageInfo := strings.Split(DEFAULT_JINDOFSX_RUNTIME_IMAGE, ":") + if len(defaultImageInfo) < 1 { + panic("invalid default dataload image!") + } else { + imageName = defaultImageInfo[0] + } + } + + if len(imageTag) == 0 { + defaultImageInfo := strings.Split(DEFAULT_JINDOFSX_RUNTIME_IMAGE, ":") + if len(defaultImageInfo) < 2 { + panic("invalid default dataload image!") + } else { + imageTag = defaultImageInfo[1] + } + } + + image := 
fmt.Sprintf("%s:%s", imageName, imageTag) + + runtime, err := utils.GetJindoRuntime(r.Client, dataload.Spec.Dataset.Name, dataload.Spec.Dataset.Namespace) + if err != nil { + return + } + hadoopConfig := runtime.Spec.HadoopConfig + loadMemorydata := false + if len(runtime.Spec.TieredStore.Levels) == 0 { + err = fmt.Errorf("the TieredStore is null") + return + } + if runtime.Spec.TieredStore.Levels[0].MediumType == "MEM" { + loadMemorydata = true + } + + dataloadInfo := cdataload.DataLoadInfo{ + BackoffLimit: 3, + TargetDataset: dataload.Spec.Dataset.Name, + LoadMetadata: dataload.Spec.LoadMetadata, + Image: image, + } + + targetPaths := []cdataload.TargetPath{} + for _, target := range dataload.Spec.Target { + fluidNative := utils.IsTargetPathUnderFluidNativeMounts(target.Path, *targetDataset) + targetPaths = append(targetPaths, cdataload.TargetPath{ + Path: target.Path, + Replicas: target.Replicas, + FluidNative: fluidNative, + }) + } + dataloadInfo.TargetPaths = targetPaths + options := map[string]string{} + if loadMemorydata { + options["loadMemorydata"] = "true" + } else { + options["loadMemorydata"] = "false" + } + if hadoopConfig != "" { + options["hdfsConfig"] = hadoopConfig + } + // resolve spec options + if dataload.Spec.Options != nil { + for key, value := range dataload.Spec.Options { + options[key] = value + } + } + dataloadInfo.Options = options + + dataLoadValue := cdataload.DataLoadValue{DataLoadInfo: dataloadInfo} + data, err := yaml.Marshal(dataLoadValue) + if err != nil { + return + } + + valueFile, err := ioutil.TempFile(os.TempDir(), fmt.Sprintf("%s-%s-loader-values.yaml", dataload.Namespace, dataload.Name)) + if err != nil { + return + } + err = ioutil.WriteFile(valueFile.Name(), data, 0400) + if err != nil { + return + } + return valueFile.Name(), nil +} + +func (e *JindoFSxEngine) CheckRuntimeReady() (ready bool) { + podName, containerName := e.getMasterPodInfo() + fileUtils := operations.NewJindoFileUtils(podName, containerName, e.namespace, 
e.Log) + ready = fileUtils.Ready() + if !ready { + e.Log.Info("runtime not ready", "runtime", ready) + return false + } + return true +} + +func (e *JindoFSxEngine) CheckExistenceOfPath(targetDataload datav1alpha1.DataLoad) (notExist bool, err error) { + podName, containerName := e.getMasterPodInfo() + fileUtils := operations.NewJindoFileUtils(podName, containerName, e.namespace, e.Log) + for _, target := range targetDataload.Spec.Target { + isExist, err := fileUtils.IsExist(target.Path) + if err != nil { + return true, err + } + if !isExist { + return true, nil + } + } + return false, nil +} diff --git a/pkg/ddc/jindofsx/load_data_test.go b/pkg/ddc/jindofsx/load_data_test.go new file mode 100644 index 00000000000..b4376952d34 --- /dev/null +++ b/pkg/ddc/jindofsx/load_data_test.go @@ -0,0 +1,457 @@ +package jindofsx + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/fluid-cloudnative/fluid/pkg/common" + "k8s.io/apimachinery/pkg/api/resource" + + "github.com/brahma-adshonor/gohook" + datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1" + cruntime "github.com/fluid-cloudnative/fluid/pkg/runtime" + "github.com/fluid-cloudnative/fluid/pkg/utils/fake" + "github.com/fluid-cloudnative/fluid/pkg/utils/helm" + "github.com/fluid-cloudnative/fluid/pkg/utils/kubeclient" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" +) + +func TestCreateDataLoadJob(t *testing.T) { + mockExecCheckReleaseCommon := func(name string, namespace string) (exist bool, err error) { + return false, nil + } + mockExecCheckReleaseErr := func(name string, namespace string) (exist bool, err error) { + return false, errors.New("fail to check release") + } + mockExecInstallReleaseErr := func(name string, namespace string, valueFile string, chartName string) error { + return errors.New("fail to install dataload chart") + } + + wrappedUnhookCheckRelease := func() { + err := 
gohook.UnHook(helm.CheckRelease) + if err != nil { + t.Fatal(err.Error()) + } + } + + targetDataLoad := datav1alpha1.DataLoad{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase", + Namespace: "fluid", + }, + Spec: datav1alpha1.DataLoadSpec{ + Dataset: datav1alpha1.TargetDataset{ + Name: "test-dataset", + Namespace: "fluid", + }, + }, + } + datasetInputs := []datav1alpha1.Dataset{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-dataset", + Namespace: "fluid", + }, + }, + } + testObjs := []runtime.Object{} + for _, datasetInput := range datasetInputs { + testObjs = append(testObjs, datasetInput.DeepCopy()) + } + client := fake.NewFakeClientWithScheme(testScheme, testObjs...) + + engine := JindoFSxEngine{ + name: "hbase", + } + ctx := cruntime.ReconcileRequestContext{ + Log: fake.NullLogger(), + Client: client, + Recorder: record.NewFakeRecorder(1), + } + + err := gohook.Hook(helm.CheckRelease, mockExecCheckReleaseErr, nil) + if err != nil { + t.Fatal(err.Error()) + } + err = engine.CreateDataLoadJob(ctx, targetDataLoad) + if err == nil { + t.Errorf("fail to catch the error") + } + wrappedUnhookCheckRelease() + + err = gohook.Hook(helm.CheckRelease, mockExecCheckReleaseCommon, nil) + if err != nil { + t.Fatal(err.Error()) + } + err = gohook.Hook(helm.InstallRelease, mockExecInstallReleaseErr, nil) + if err != nil { + t.Fatal(err.Error()) + } +} + +func TestCreateDataLoadJobWithOption(t *testing.T) { + mockExecCheckReleaseCommon := func(name string, namespace string) (exist bool, err error) { + return false, nil + } + mockExecCheckReleaseErr := func(name string, namespace string) (exist bool, err error) { + return false, errors.New("fail to check release") + } + mockExecInstallReleaseErr := func(name string, namespace string, valueFile string, chartName string) error { + return errors.New("fail to install dataload chart") + } + + wrappedUnhookCheckRelease := func() { + err := gohook.UnHook(helm.CheckRelease) + if err != nil { + t.Fatal(err.Error()) + } + } + + 
targetDataLoad := datav1alpha1.DataLoad{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase", + Namespace: "fluid", + }, + Spec: datav1alpha1.DataLoadSpec{ + Dataset: datav1alpha1.TargetDataset{ + Name: "test-dataset", + Namespace: "fluid", + }, + LoadMetadata: true, + Options: map[string]string{ + "atomicCache": "true", + "loadMetadataOnly": "true", + }, + }, + } + datasetInputs := []datav1alpha1.Dataset{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-dataset", + Namespace: "fluid", + }, + }, + } + testObjs := []runtime.Object{} + for _, datasetInput := range datasetInputs { + testObjs = append(testObjs, datasetInput.DeepCopy()) + } + client := fake.NewFakeClientWithScheme(testScheme, testObjs...) + + engine := JindoFSxEngine{ + name: "hbase", + } + ctx := cruntime.ReconcileRequestContext{ + Log: fake.NullLogger(), + Client: client, + Recorder: record.NewFakeRecorder(1), + } + + err := gohook.Hook(helm.CheckRelease, mockExecCheckReleaseErr, nil) + if err != nil { + t.Fatal(err.Error()) + } + err = engine.CreateDataLoadJob(ctx, targetDataLoad) + if err == nil { + t.Errorf("fail to catch the error") + } + wrappedUnhookCheckRelease() + + err = gohook.Hook(helm.CheckRelease, mockExecCheckReleaseCommon, nil) + if err != nil { + t.Fatal(err.Error()) + } + err = gohook.Hook(helm.InstallRelease, mockExecInstallReleaseErr, nil) + if err != nil { + t.Fatal(err.Error()) + } +} + +func TestGenerateDataLoadValueFile(t *testing.T) { + datasetInputs := []datav1alpha1.Dataset{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-dataset", + Namespace: "fluid", + }, + Spec: datav1alpha1.DatasetSpec{}, + }, + } + jindo := &datav1alpha1.JindoRuntime{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-dataset", + Namespace: "fluid", + }, + } + + jindo.Spec = datav1alpha1.JindoRuntimeSpec{ + Secret: "secret", + TieredStore: datav1alpha1.TieredStore{ + Levels: []datav1alpha1.Level{{ + MediumType: common.Memory, + Quota: resource.NewQuantity(1, resource.BinarySI), + High: "0.8", + Low: 
"0.1", + }}, + }, + } + + testScheme.AddKnownTypes(datav1alpha1.GroupVersion, jindo) + testObjs := []runtime.Object{} + for _, datasetInput := range datasetInputs { + testObjs = append(testObjs, datasetInput.DeepCopy()) + } + testObjs = append(testObjs, jindo.DeepCopy()) + client := fake.NewFakeClientWithScheme(testScheme, testObjs...) + + context := cruntime.ReconcileRequestContext{ + Client: client, + } + + dataLoadNoTarget := datav1alpha1.DataLoad{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-dataload", + Namespace: "fluid", + }, + Spec: datav1alpha1.DataLoadSpec{ + Dataset: datav1alpha1.TargetDataset{ + Name: "test-dataset", + Namespace: "fluid", + }, + }, + } + dataLoadWithTarget := datav1alpha1.DataLoad{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-dataload", + Namespace: "fluid", + }, + Spec: datav1alpha1.DataLoadSpec{ + Dataset: datav1alpha1.TargetDataset{ + Name: "test-dataset", + Namespace: "fluid", + }, + Target: []datav1alpha1.TargetPath{ + { + Path: "/test", + Replicas: 1, + }, + }, + }, + } + + var testCases = []struct { + dataLoad datav1alpha1.DataLoad + expectFileName string + }{ + { + dataLoad: dataLoadNoTarget, + expectFileName: filepath.Join(os.TempDir(), "fluid-test-dataload-loader-values.yaml"), + }, + { + dataLoad: dataLoadWithTarget, + expectFileName: filepath.Join(os.TempDir(), "fluid-test-dataload-loader-values.yaml"), + }, + } + + for _, test := range testCases { + engine := JindoFSxEngine{} + if fileName, _ := engine.generateDataLoadValueFile(context, test.dataLoad); !strings.Contains(fileName, test.expectFileName) { + t.Errorf("fail to generate the dataload value file") + } + } +} + +func TestGenerateDataLoadValueFileWithRuntimeHDD(t *testing.T) { + datasetInputs := []datav1alpha1.Dataset{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-dataset", + Namespace: "fluid", + }, + Spec: datav1alpha1.DatasetSpec{}, + }, + } + jindo := &datav1alpha1.JindoRuntime{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-dataset", + Namespace: 
"fluid", + }, + } + + jindo.Spec = datav1alpha1.JindoRuntimeSpec{ + Secret: "secret", + TieredStore: datav1alpha1.TieredStore{ + Levels: []datav1alpha1.Level{{ + MediumType: common.HDD, + Quota: resource.NewQuantity(1, resource.BinarySI), + High: "0.8", + Low: "0.1", + }}, + }, + } + + testScheme.AddKnownTypes(datav1alpha1.GroupVersion, jindo) + testObjs := []runtime.Object{} + for _, datasetInput := range datasetInputs { + testObjs = append(testObjs, datasetInput.DeepCopy()) + } + testObjs = append(testObjs, jindo.DeepCopy()) + client := fake.NewFakeClientWithScheme(testScheme, testObjs...) + + context := cruntime.ReconcileRequestContext{ + Client: client, + } + + dataLoadNoTarget := datav1alpha1.DataLoad{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-dataload", + Namespace: "fluid", + }, + Spec: datav1alpha1.DataLoadSpec{ + Dataset: datav1alpha1.TargetDataset{ + Name: "test-dataset", + Namespace: "fluid", + }, + LoadMetadata: true, + Options: map[string]string{ + "atomicCache": "true", + "loadMetadataOnly": "true", + }, + }, + } + dataLoadWithTarget := datav1alpha1.DataLoad{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-dataload", + Namespace: "fluid", + }, + Spec: datav1alpha1.DataLoadSpec{ + Dataset: datav1alpha1.TargetDataset{ + Name: "test-dataset", + Namespace: "fluid", + }, + Target: []datav1alpha1.TargetPath{ + { + Path: "/test", + Replicas: 1, + }, + }, + }, + } + + var testCases = []struct { + dataLoad datav1alpha1.DataLoad + expectFileName string + }{ + { + dataLoad: dataLoadNoTarget, + expectFileName: filepath.Join(os.TempDir(), "fluid-test-dataload-loader-values.yaml"), + }, + { + dataLoad: dataLoadWithTarget, + expectFileName: filepath.Join(os.TempDir(), "fluid-test-dataload-loader-values.yaml"), + }, + } + + for _, test := range testCases { + engine := JindoFSxEngine{} + if fileName, _ := engine.generateDataLoadValueFile(context, test.dataLoad); !strings.Contains(fileName, test.expectFileName) { + t.Errorf("fail to generate the dataload value 
file") + } + } +} + +func TestCheckRuntimeReady(t *testing.T) { + mockExecCommon := func(podName string, containerName string, namespace string, cmd []string) (stdout string, stderr string, e error) { + return "", "", nil + } + mockExecErr := func(podName string, containerName string, namespace string, cmd []string) (stdout string, stderr string, e error) { + return "err", "", errors.New("error") + } + wrappedUnhook := func() { + err := gohook.UnHook(kubeclient.ExecCommandInContainer) + if err != nil { + t.Fatal(err.Error()) + } + } + + engine := JindoFSxEngine{ + namespace: "fluid", + name: "hbase", + Log: fake.NullLogger(), + } + + err := gohook.Hook(kubeclient.ExecCommandInContainer, mockExecCommon, nil) + if err != nil { + t.Fatal(err.Error()) + } + + err = gohook.Hook(kubeclient.ExecCommandInContainer, mockExecErr, nil) + if err != nil { + t.Fatal(err.Error()) + } + if ready := engine.CheckRuntimeReady(); ready != false { + fmt.Println(ready) + t.Errorf("fail to exec the function CheckRuntimeReady") + } + wrappedUnhook() +} + +func TestCheckExistenceOfPath(t *testing.T) { + mockExecCommon := func(podName string, containerName string, namespace string, cmd []string) (stdout string, stderr string, e error) { + return "conf", "", nil + } + mockExecNotExist := func(podName string, containerName string, namespace string, cmd []string) (stdout string, stderr string, e error) { + return "does not exist", "", errors.New("other error") + } + mockExecErr := func(podName string, containerName string, namespace string, cmd []string) (stdout string, stderr string, e error) { + return "err", "", errors.New("other error") + } + wrappedUnhook := func() { + err := gohook.UnHook(kubeclient.ExecCommandInContainer) + if err != nil { + t.Fatal(err.Error()) + } + } + + engine := JindoFSxEngine{ + namespace: "fluid", + name: "hbase", + Log: fake.NullLogger(), + } + + err := gohook.Hook(kubeclient.ExecCommandInContainer, mockExecErr, nil) + if err != nil { + t.Fatal(err.Error()) + } 
+ targetDataload := datav1alpha1.DataLoad{ + Spec: datav1alpha1.DataLoadSpec{ + Target: []datav1alpha1.TargetPath{ + { + Path: "/tmp", + Replicas: 1, + }, + }, + }, + } + notExist, err := engine.CheckExistenceOfPath(targetDataload) + if !(err != nil && notExist == true) { + t.Errorf("fail to exec the function") + } + wrappedUnhook() + + err = gohook.Hook(kubeclient.ExecCommandInContainer, mockExecNotExist, nil) + if err != nil { + t.Fatal(err.Error()) + } + + err = gohook.Hook(kubeclient.ExecCommandInContainer, mockExecCommon, nil) + if err != nil { + t.Fatal(err.Error()) + } +} diff --git a/pkg/ddc/jindofsx/master.go b/pkg/ddc/jindofsx/master.go new file mode 100644 index 00000000000..5f7aa4dd244 --- /dev/null +++ b/pkg/ddc/jindofsx/master.go @@ -0,0 +1,161 @@ +package jindofsx + +import ( + "context" + "reflect" + + corev1 "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/client-go/util/retry" + + datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1" + "github.com/fluid-cloudnative/fluid/pkg/utils" + "github.com/fluid-cloudnative/fluid/pkg/utils/kubeclient" +) + +func (e *JindoFSxEngine) CheckMasterReady() (ready bool, err error) { + masterName := e.getMasterName() + // 1. Check the status + runtime, err := e.getRuntime() + if err != nil { + return + } + + master, err := kubeclient.GetStatefulSet(e.Client, masterName, e.namespace) + if err != nil { + return + } + + masterReplicas := runtime.Spec.Master.Replicas + if masterReplicas == 0 { + masterReplicas = 1 + } + if masterReplicas == master.Status.ReadyReplicas { + ready = true + } else { + e.Log.Info("The master is not ready.", "replicas", masterReplicas, + "readyReplicas", master.Status.ReadyReplicas) + } + + // 2. 
Update the phase + if ready { + err = retry.RetryOnConflict(retry.DefaultBackoff, func() error { + runtime, err := e.getRuntime() + if err != nil { + return err + } + runtimeToUpdate := runtime.DeepCopy() + + runtimeToUpdate.Status.CurrentMasterNumberScheduled = int32(master.Status.ReadyReplicas) + + runtimeToUpdate.Status.MasterPhase = datav1alpha1.RuntimePhaseReady + + if len(runtimeToUpdate.Status.Conditions) == 0 { + runtimeToUpdate.Status.Conditions = []datav1alpha1.RuntimeCondition{} + } + cond := utils.NewRuntimeCondition(datav1alpha1.RuntimeMasterReady, datav1alpha1.RuntimeMasterReadyReason, + "The master is ready.", corev1.ConditionTrue) + runtimeToUpdate.Status.Conditions = + utils.UpdateRuntimeCondition(runtimeToUpdate.Status.Conditions, + cond) + + if !reflect.DeepEqual(runtime.Status, runtimeToUpdate.Status) { + return e.Client.Status().Update(context.TODO(), runtimeToUpdate) + } + + return nil + }) + + if err != nil { + e.Log.Error(err, "Update runtime status") + return + } + } + + if err != nil { + return + } + + return +} + +// ShouldSetupMaster checks if we need setup the master +func (e *JindoFSxEngine) ShouldSetupMaster() (should bool, err error) { + runtime, err := e.getRuntime() + if err != nil { + return + } + + switch runtime.Status.MasterPhase { + case datav1alpha1.RuntimePhaseNone: + should = true + default: + should = false + } + + return +} + +// SetupMaster setups the master and updates the status +// It will print the information in the Debug window according to the Master status +// It return any cache error encountered +func (e *JindoFSxEngine) SetupMaster() (err error) { + + // Setup the Jindo cluster + masterName := e.getMasterName() + master, err := kubeclient.GetStatefulSet(e.Client, masterName, e.namespace) + if err != nil && apierrs.IsNotFound(err) { + //1. Is not found error + e.Log.V(1).Info("SetupMaster", "master", e.name+"-master") + return e.setupMasterInernal() + } else if err != nil { + //2. 
Other errors + return + } else { + //3.The master has been set up + e.Log.V(1).Info("The master has been set.", "replicas", master.Status.ReadyReplicas) + } + + // 2. Update the status of the runtime + err = retry.RetryOnConflict(retry.DefaultBackoff, func() error { + runtime, err := e.getRuntime() + if err != nil { + return err + } + runtimeToUpdate := runtime.DeepCopy() + + runtimeToUpdate.Status.MasterPhase = datav1alpha1.RuntimePhaseNotReady + replicas := runtimeToUpdate.Spec.Master.Replicas + if replicas == 0 { + replicas = 1 + } + + // Init selector for worker + runtimeToUpdate.Status.Selector = e.getWorkerSelectors() + + runtimeToUpdate.Status.DesiredMasterNumberScheduled = replicas + runtimeToUpdate.Status.ValueFileConfigmap = e.getConfigmapName() + + if len(runtimeToUpdate.Status.Conditions) == 0 { + runtimeToUpdate.Status.Conditions = []datav1alpha1.RuntimeCondition{} + } + cond := utils.NewRuntimeCondition(datav1alpha1.RuntimeMasterInitialized, datav1alpha1.RuntimeMasterInitializedReason, + "The master is initialized.", corev1.ConditionTrue) + runtimeToUpdate.Status.Conditions = + utils.UpdateRuntimeCondition(runtimeToUpdate.Status.Conditions, + cond) + + if !reflect.DeepEqual(runtime.Status, runtimeToUpdate.Status) { + return e.Client.Status().Update(context.TODO(), runtimeToUpdate) + } + + return nil + }) + + if err != nil { + e.Log.Error(err, "Update runtime status") + return err + } + + return +} diff --git a/pkg/ddc/jindofsx/master_internal.go b/pkg/ddc/jindofsx/master_internal.go new file mode 100644 index 00000000000..1914ddfbfc5 --- /dev/null +++ b/pkg/ddc/jindofsx/master_internal.go @@ -0,0 +1,88 @@ +/* +Copyright 2022 The Fluid Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package jindofsx + +import ( + "fmt" + "io/ioutil" + "os" + + "github.com/fluid-cloudnative/fluid/pkg/utils" + "github.com/fluid-cloudnative/fluid/pkg/utils/helm" + "github.com/fluid-cloudnative/fluid/pkg/utils/kubeclient" + "github.com/fluid-cloudnative/fluid/pkg/utils/kubectl" + "gopkg.in/yaml.v2" +) + +func (e *JindoFSxEngine) setupMasterInernal() (err error) { + var ( + chartName = utils.GetChartsDirectory() + "/jindofsx" + ) + valuefileName, err := e.generateJindoValueFile() + if err != nil { + return + } + found, err := helm.CheckRelease(e.name, e.namespace) + if err != nil { + return + } + if found { + e.Log.Info("The release is already installed", "name", e.name, "namespace", e.namespace) + return + } + + return helm.InstallRelease(e.name, e.namespace, valuefileName, chartName) +} + +func (e *JindoFSxEngine) generateJindoValueFile() (valueFileName string, err error) { + // why need to delete configmap e.name+"-jindofs-config" ? 
Or it should be + // err = kubeclient.DeleteConfigMap(e.Client, e.name+"-jindofs-config", e.namespace) + err = kubeclient.DeleteConfigMap(e.Client, e.getConfigmapName(), e.namespace) + if err != nil { + e.Log.Error(err, "Failed to clean value files") + } + value, err := e.transform(e.runtime) + if err != nil { + return + } + data, err := yaml.Marshal(value) + if err != nil { + return + } + valueFile, err := ioutil.TempFile(os.TempDir(), fmt.Sprintf("%s-%s-values.yaml", e.name, e.runtimeType)) + if err != nil { + e.Log.Error(err, "failed to create value file", "valueFile", valueFile.Name()) + return valueFileName, err + } + valueFileName = valueFile.Name() + e.Log.V(1).Info("Save the values file", "valueFile", valueFileName) + + err = ioutil.WriteFile(valueFileName, data, 0400) + if err != nil { + return + } + + err = kubectl.CreateConfigMapFromFile(e.getConfigmapName(), "data", valueFileName, e.namespace) + if err != nil { + return + } + return valueFileName, err +} + +func (e *JindoFSxEngine) getConfigmapName() string { + return e.name + "-" + e.runtimeType + "-values" +} diff --git a/pkg/ddc/jindofsx/master_internal_test.go b/pkg/ddc/jindofsx/master_internal_test.go new file mode 100644 index 00000000000..1e471be02b0 --- /dev/null +++ b/pkg/ddc/jindofsx/master_internal_test.go @@ -0,0 +1,264 @@ +/* +Copyright 2022 The Fluid Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package jindofsx + +import ( + "testing" + + "github.com/fluid-cloudnative/fluid/pkg/common" + "k8s.io/apimachinery/pkg/api/resource" + + "github.com/brahma-adshonor/gohook" + datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1" + "github.com/fluid-cloudnative/fluid/pkg/ddc/base/portallocator" + "github.com/fluid-cloudnative/fluid/pkg/utils/fake" + "github.com/fluid-cloudnative/fluid/pkg/utils/helm" + "github.com/fluid-cloudnative/fluid/pkg/utils/kubectl" + "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/net" +) + +func TestSetupMasterInternal(t *testing.T) { + mockExecCreateConfigMapFromFileCommon := func(name string, key, fileName string, namespace string) (err error) { + return nil + } + mockExecCreateConfigMapFromFileErr := func(name string, key, fileName string, namespace string) (err error) { + return errors.New("fail to exec command") + } + mockExecCheckReleaseCommonFound := func(name string, namespace string) (exist bool, err error) { + return true, nil + } + mockExecCheckReleaseCommonNotFound := func(name string, namespace string) (exist bool, err error) { + return false, nil + } + mockExecCheckReleaseErr := func(name string, namespace string) (exist bool, err error) { + return false, errors.New("fail to check release") + } + mockExecInstallReleaseCommon := func(name string, namespace string, valueFile string, chartName string) error { + return nil + } + mockExecInstallReleaseErr := func(name string, namespace string, valueFile string, chartName string) error { + return errors.New("fail to install dataload chart") + } + + wrappedUnhookCreateConfigMapFromFile := func() { + err := gohook.UnHook(kubectl.CreateConfigMapFromFile) + if err != nil { + t.Fatal(err.Error()) + } + } + wrappedUnhookCheckRelease := func() { + err := gohook.UnHook(helm.CheckRelease) + if err != nil { + t.Fatal(err.Error()) + } + } + wrappedUnhookInstallRelease := func() { + err 
:= gohook.UnHook(helm.InstallRelease) + if err != nil { + t.Fatal(err.Error()) + } + } + + allixioruntime := &datav1alpha1.JindoRuntime{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase", + Namespace: "fluid", + }, + } + testObjs := []runtime.Object{} + testObjs = append(testObjs, (*allixioruntime).DeepCopy()) + + datasetInputs := []datav1alpha1.Dataset{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase", + Namespace: "fluid", + }, + }, + } + for _, datasetInput := range datasetInputs { + testObjs = append(testObjs, datasetInput.DeepCopy()) + } + client := fake.NewFakeClientWithScheme(testScheme, testObjs...) + + engine := JindoFSxEngine{ + name: "hbase", + namespace: "fluid", + Client: client, + Log: fake.NullLogger(), + runtime: &datav1alpha1.JindoRuntime{ + Spec: datav1alpha1.JindoRuntimeSpec{ + Master: datav1alpha1.JindoCompTemplateSpec{ + Replicas: 2, + }, + }, + }, + } + portallocator.SetupRuntimePortAllocator(client, &net.PortRange{Base: 10, Size: 100}, GetReservedPorts) + err := gohook.Hook(kubectl.CreateConfigMapFromFile, mockExecCreateConfigMapFromFileErr, nil) + if err != nil { + t.Fatal(err.Error()) + } + err = engine.setupMasterInernal() + if err == nil { + t.Errorf("fail to catch the error") + } + wrappedUnhookCreateConfigMapFromFile() + + // create configmap successfully + err = gohook.Hook(kubectl.CreateConfigMapFromFile, mockExecCreateConfigMapFromFileCommon, nil) + if err != nil { + t.Fatal(err.Error()) + } + + // check release found + err = gohook.Hook(helm.CheckRelease, mockExecCheckReleaseCommonFound, nil) + if err != nil { + t.Fatal(err.Error()) + } + _ = engine.setupMasterInernal() + wrappedUnhookCheckRelease() + + // check release error + err = gohook.Hook(helm.CheckRelease, mockExecCheckReleaseErr, nil) + if err != nil { + t.Fatal(err.Error()) + } + err = engine.setupMasterInernal() + if err == nil { + t.Errorf("fail to catch the error") + } + wrappedUnhookCheckRelease() + + // check release not found + err = gohook.Hook(helm.CheckRelease, 
mockExecCheckReleaseCommonNotFound, nil) + if err != nil { + t.Fatal(err.Error()) + } + + // install release with error + err = gohook.Hook(helm.InstallRelease, mockExecInstallReleaseErr, nil) + if err != nil { + t.Fatal(err.Error()) + } + err = engine.setupMasterInernal() + if err == nil { + t.Errorf("fail to catch the error") + } + wrappedUnhookInstallRelease() + + // install release successfully + err = gohook.Hook(helm.InstallRelease, mockExecInstallReleaseCommon, nil) + if err != nil { + t.Fatal(err.Error()) + } + _ = engine.setupMasterInernal() + wrappedUnhookInstallRelease() + wrappedUnhookCreateConfigMapFromFile() +} + +func TestGenerateJindoValueFile(t *testing.T) { + mockExecCreateConfigMapFromFileCommon := func(name string, key, fileName string, namespace string) (err error) { + return nil + } + mockExecCreateConfigMapFromFileErr := func(name string, key, fileName string, namespace string) (err error) { + return errors.New("fail to exec command") + } + + wrappedUnhookCreateConfigMapFromFile := func() { + err := gohook.UnHook(kubectl.CreateConfigMapFromFile) + if err != nil { + t.Fatal(err.Error()) + } + } + + jindoruntime := &datav1alpha1.JindoRuntime{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase", + Namespace: "fluid", + }, + } + testObjs := []runtime.Object{} + testObjs = append(testObjs, (*jindoruntime).DeepCopy()) + + datasetInputs := []datav1alpha1.Dataset{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase", + Namespace: "fluid", + }, + }, + } + for _, datasetInput := range datasetInputs { + testObjs = append(testObjs, datasetInput.DeepCopy()) + } + + client := fake.NewFakeClientWithScheme(testScheme, testObjs...) 
+ result := resource.MustParse("20Gi") + engine := JindoFSxEngine{ + name: "hbase", + namespace: "fluid", + Client: client, + Log: fake.NullLogger(), + runtime: &datav1alpha1.JindoRuntime{ + Spec: datav1alpha1.JindoRuntimeSpec{ + Master: datav1alpha1.JindoCompTemplateSpec{ + Replicas: 2, + }, + TieredStore: datav1alpha1.TieredStore{ + Levels: []datav1alpha1.Level{{ + MediumType: common.Memory, + Quota: &result, + High: "0.8", + Low: "0.1", + }}, + }, + }, + }, + } + + portallocator.SetupRuntimePortAllocator(client, &net.PortRange{Base: 10, Size: 50}, GetReservedPorts) + err := gohook.Hook(kubectl.CreateConfigMapFromFile, mockExecCreateConfigMapFromFileErr, nil) + if err != nil { + t.Fatal(err.Error()) + } + _, err = engine.generateJindoValueFile() + if err == nil { + t.Errorf("fail to catch the error") + } + wrappedUnhookCreateConfigMapFromFile() + + err = gohook.Hook(kubectl.CreateConfigMapFromFile, mockExecCreateConfigMapFromFileCommon, nil) + if err != nil { + t.Fatal(err.Error()) + } + wrappedUnhookCreateConfigMapFromFile() +} + +func TestGetConfigmapName(t *testing.T) { + engine := JindoFSxEngine{ + name: "hbase", + runtimeType: "Jindo", + } + expectedResult := "hbase-Jindo-values" + if engine.getConfigmapName() != expectedResult { + t.Errorf("fail to get the configmap name") + } +} diff --git a/pkg/ddc/jindofsx/master_test.go b/pkg/ddc/jindofsx/master_test.go new file mode 100644 index 00000000000..cbe27205230 --- /dev/null +++ b/pkg/ddc/jindofsx/master_test.go @@ -0,0 +1,270 @@ +package jindofsx + +import ( + "testing" + + datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1" + "github.com/fluid-cloudnative/fluid/pkg/utils/fake" + v1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +func TestCheckMasterReady(t *testing.T) { + statefulsetInputs := []v1.StatefulSet{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "spark-master", + Namespace: "fluid", + }, + Status: v1.StatefulSetStatus{ + 
ReadyReplicas: 1, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase-master", + Namespace: "fluid", + }, + Status: v1.StatefulSetStatus{ + ReadyReplicas: 1, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hadoop-master", + Namespace: "fluid", + }, + Status: v1.StatefulSetStatus{ + ReadyReplicas: 1, + }, + }, + } + testObjs := []runtime.Object{} + for _, statefulset := range statefulsetInputs { + testObjs = append(testObjs, statefulset.DeepCopy()) + } + + JindoRuntimeInputs := []datav1alpha1.JindoRuntime{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "spark", + Namespace: "fluid", + }, + Spec: datav1alpha1.JindoRuntimeSpec{ + Master: datav1alpha1.JindoCompTemplateSpec{ + Replicas: 1, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase", + Namespace: "fluid", + }, + Spec: datav1alpha1.JindoRuntimeSpec{ + Master: datav1alpha1.JindoCompTemplateSpec{ + Replicas: 2, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hadoop", + Namespace: "fluid", + }, + Spec: datav1alpha1.JindoRuntimeSpec{ + Master: datav1alpha1.JindoCompTemplateSpec{ + Replicas: 1, + }, + }, + Status: datav1alpha1.RuntimeStatus{ + APIGatewayStatus: &datav1alpha1.APIGatewayStatus{ + Endpoint: "test-endpoint", + }, + }, + }, + } + for _, JindoRuntime := range JindoRuntimeInputs { + testObjs = append(testObjs, JindoRuntime.DeepCopy()) + } + client := fake.NewFakeClientWithScheme(testScheme, testObjs...) 
+ + engines := []JindoFSxEngine{ + { + name: "spark", + namespace: "fluid", + Client: client, + Log: fake.NullLogger(), + }, + { + name: "hbase", + namespace: "fluid", + Client: client, + Log: fake.NullLogger(), + }, + { + name: "hadoop", + namespace: "fluid", + Client: client, + Log: fake.NullLogger(), + }, + } + + var testCases = []struct { + engine JindoFSxEngine + expectedResult bool + }{ + { + engine: engines[0], + expectedResult: false, + }, + { + engine: engines[1], + expectedResult: false, + }, + } + + for _, test := range testCases { + if ready, _ := test.engine.CheckMasterReady(); ready != test.expectedResult { + t.Errorf("fail to exec the function") + return + } + if !test.expectedResult { + continue + } + JindoRuntime, err := test.engine.getRuntime() + if err != nil { + t.Errorf("fail to get runtime %v", err) + return + } + if len(JindoRuntime.Status.Conditions) == 0 { + t.Errorf("fail to update the runtime conditions") + return + } + } +} + +func TestShouldSetupMaster(t *testing.T) { + JindoRuntimeInputs := []datav1alpha1.JindoRuntime{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "spark", + Namespace: "fluid", + }, + Status: datav1alpha1.RuntimeStatus{ + MasterPhase: datav1alpha1.RuntimePhaseNotReady, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase", + Namespace: "fluid", + }, + Status: datav1alpha1.RuntimeStatus{ + MasterPhase: datav1alpha1.RuntimePhaseNone, + }, + }, + } + testObjs := []runtime.Object{} + for _, JindoRuntime := range JindoRuntimeInputs { + testObjs = append(testObjs, JindoRuntime.DeepCopy()) + } + client := fake.NewFakeClientWithScheme(testScheme, testObjs...) 
+ + engines := []JindoFSxEngine{ + { + name: "spark", + namespace: "fluid", + Client: client, + }, + { + name: "hbase", + namespace: "fluid", + Client: client, + }, + } + + var testCases = []struct { + engine JindoFSxEngine + expectedResult bool + }{ + { + engine: engines[0], + expectedResult: false, + }, + { + engine: engines[1], + expectedResult: true, + }, + } + + for _, test := range testCases { + if should, _ := test.engine.ShouldSetupMaster(); should != test.expectedResult { + t.Errorf("fail to exec the function") + return + } + } +} + +func TestSetupMaster(t *testing.T) { + statefulSetInputs := []v1.StatefulSet{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "spark-master", + Namespace: "fluid", + }, + Status: v1.StatefulSetStatus{ + ReadyReplicas: 1, + }, + }, + } + + testObjs := []runtime.Object{} + for _, statefulSet := range statefulSetInputs { + testObjs = append(testObjs, statefulSet.DeepCopy()) + } + + JindoRuntimeInputs := []datav1alpha1.JindoRuntime{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "spark", + Namespace: "fluid", + }, + }, + } + for _, JindoRuntime := range JindoRuntimeInputs { + testObjs = append(testObjs, JindoRuntime.DeepCopy()) + } + client := fake.NewFakeClientWithScheme(testScheme, testObjs...) 
+ + engines := []JindoFSxEngine{ + { + name: "spark", + namespace: "fluid", + Client: client, + Log: fake.NullLogger(), + }, + } + + var testCases = []struct { + engine JindoFSxEngine + expectedSelector string + expectedConfigMapName string + }{ + { + engine: engines[0], + expectedConfigMapName: "spark--values", + expectedSelector: "app=jindo,release=spark,role=jindo-worker", + }, + } + + for _, test := range testCases { + _ = test.engine.SetupMaster() + JindoRuntime, _ := test.engine.getRuntime() + if len(JindoRuntime.Status.Conditions) != 0 { + t.Errorf("fail to update the runtime") + return + } + } +} diff --git a/pkg/ddc/jindofsx/metadata.go b/pkg/ddc/jindofsx/metadata.go new file mode 100644 index 00000000000..47992a44de3 --- /dev/null +++ b/pkg/ddc/jindofsx/metadata.go @@ -0,0 +1,159 @@ +/* +Copyright 2022 The Fluid Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package jindofsx + +import ( + "context" + "errors" + "reflect" + "time" + + "github.com/fluid-cloudnative/fluid/pkg/utils" + "k8s.io/client-go/util/retry" +) + +// MetadataSyncResult describes result for asynchronous metadata sync +type MetadataSyncResult struct { + Done bool + StartTime time.Time + UfsTotal string + Err error +} + +func (e *JindoFSxEngine) SyncMetadata() (err error) { + defer utils.TimeTrack(time.Now(), "JindoFSxEngine.SyncMetadata", "name", e.name, "namespace", e.namespace) + defer e.Log.V(1).Info("End to sync metadata", "name", e.name, "namespace", e.namespace) + e.Log.V(1).Info("Start to sync metadata", "name", e.name, "namespace", e.namespace) + should, err := e.shouldSyncMetadata() + if err != nil { + e.Log.Error(err, "Failed to check if should sync metadata") + return + } + // should sync metadata + if should { + // load metadata again + return e.syncMetadataInternal() + } + return +} + +// shouldSyncMetadata checks dataset's UfsTotal to decide whether should sync metadata +func (e *JindoFSxEngine) shouldSyncMetadata() (should bool, err error) { + dataset, err := utils.GetDataset(e.Client, e.name, e.namespace) + if err != nil { + should = false + return should, err + } + + if dataset.Status.UfsTotal != "" && dataset.Status.UfsTotal != METADATA_SYNC_NOT_DONE_MSG { + e.Log.V(1).Info("dataset ufs is ready", + "dataset name", dataset.Name, + "dataset namespace", dataset.Namespace, + "ufstotal", dataset.Status.UfsTotal) + should = false + return should, nil + } + should = true + return should, nil +} + +func (e *JindoFSxEngine) syncMetadataInternal() (err error) { + if e.MetadataSyncDoneCh != nil { + // Either get result from channel or timeout + select { + case result := <-e.MetadataSyncDoneCh: + defer func() { + e.MetadataSyncDoneCh = nil + }() + e.Log.Info("Get result from MetadataSyncDoneCh", "result", result) + if result.Done { + e.Log.Info("Metadata sync succeeded", "period", time.Since(result.StartTime)) + err = 
retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) { + dataset, err := utils.GetDataset(e.Client, e.name, e.namespace) + if err != nil { + return + } + datasetToUpdate := dataset.DeepCopy() + datasetToUpdate.Status.UfsTotal = result.UfsTotal + if !reflect.DeepEqual(datasetToUpdate, dataset) { + err = e.Client.Status().Update(context.TODO(), datasetToUpdate) + if err != nil { + return + } + } + return + }) + if err != nil { + e.Log.Error(err, "Failed to update UfsTotal and FileNum of the dataset") + return err + } + } else { + e.Log.Error(result.Err, "Metadata sync failed") + return result.Err + } + case <-time.After(CHECK_METADATA_SYNC_DONE_TIMEOUT_MILLISEC * time.Millisecond): + e.Log.V(1).Info("Metadata sync still in progress") + } + } else { + // Metadata sync haven't started + err = retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) { + dataset, err := utils.GetDataset(e.Client, e.name, e.namespace) + if err != nil { + return + } + datasetToUpdate := dataset.DeepCopy() + datasetToUpdate.Status.UfsTotal = METADATA_SYNC_NOT_DONE_MSG + datasetToUpdate.Status.FileNum = METADATA_SYNC_NOT_DONE_MSG + if !reflect.DeepEqual(dataset, datasetToUpdate) { + err = e.Client.Status().Update(context.TODO(), datasetToUpdate) + if err != nil { + return + } + } + return + }) + if err != nil { + e.Log.Error(err, "Failed to set UfsTotal to METADATA_SYNC_NOT_DONE_MSG") + } + e.MetadataSyncDoneCh = make(chan MetadataSyncResult) + go func(resultChan chan MetadataSyncResult) { + defer close(resultChan) + result := MetadataSyncResult{ + StartTime: time.Now(), + UfsTotal: "", + } + + if err != nil { + e.Log.Error(err, "Can't get dataset when syncing metadata", "name", e.name, "namespace", e.namespace) + result.Err = err + result.Done = false + resultChan <- result + return + } + + result.Done = true + + if !result.Done { + result.Err = errors.New("GetMetadataInfoFailed") + } else { + result.Err = nil + } + resultChan <- result + }(e.MetadataSyncDoneCh) + } + return 
+} diff --git a/pkg/ddc/jindofsx/metadata_test.go b/pkg/ddc/jindofsx/metadata_test.go new file mode 100644 index 00000000000..08c06fda851 --- /dev/null +++ b/pkg/ddc/jindofsx/metadata_test.go @@ -0,0 +1,275 @@ +/* +Copyright 2022 The Fluid Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package jindofsx + +import ( + "context" + "testing" + "time" + + datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1" + "github.com/fluid-cloudnative/fluid/pkg/utils/fake" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" +) + +func TestSyncMetadata(t *testing.T) { + datasetInputs := []datav1alpha1.Dataset{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase", + Namespace: "fluid", + }, + Status: datav1alpha1.DatasetStatus{ + UfsTotal: "2Gi", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "spark", + Namespace: "fluid", + }, + Status: datav1alpha1.DatasetStatus{ + UfsTotal: "", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hadoop", + Namespace: "fluid", + }, + Spec: datav1alpha1.DatasetSpec{ + DataRestoreLocation: &datav1alpha1.DataRestoreLocation{ + Path: "local:///host1/erf", + NodeName: "test-node", + }, + }, + Status: datav1alpha1.DatasetStatus{ + UfsTotal: "", + }, + }, + } + + testObjs := []runtime.Object{} + for _, datasetInput := range datasetInputs { + testObjs = append(testObjs, datasetInput.DeepCopy()) + } + client := fake.NewFakeClientWithScheme(testScheme, testObjs...) 
+ + runtime := &datav1alpha1.JindoRuntime{ + Spec: datav1alpha1.JindoRuntimeSpec{ + Secret: "1", + }, + } + + engines := []JindoFSxEngine{ + { + name: "hbase", + namespace: "fluid", + Client: client, + Log: fake.NullLogger(), + runtime: runtime, + }, + { + name: "spark", + namespace: "fluid", + Client: client, + Log: fake.NullLogger(), + runtime: runtime, + }, + } + + for _, engine := range engines { + err := engine.SyncMetadata() + if err != nil { + t.Errorf("fail to exec the function") + } + } + + engine := JindoFSxEngine{ + name: "hadoop", + namespace: "fluid", + Client: client, + Log: fake.NullLogger(), + runtime: runtime, + } + + err := engine.SyncMetadata() + if err != nil { + t.Errorf("fail to exec function RestoreMetadataInternal") + } +} + +func TestShouldSyncMetadata(t *testing.T) { + datasetInputs := []datav1alpha1.Dataset{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase", + Namespace: "fluid", + }, + Status: datav1alpha1.DatasetStatus{ + UfsTotal: "2Gi", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "spark", + Namespace: "fluid", + }, + Status: datav1alpha1.DatasetStatus{ + UfsTotal: "", + }, + }, + } + testObjs := []runtime.Object{} + for _, datasetInput := range datasetInputs { + testObjs = append(testObjs, datasetInput.DeepCopy()) + } + client := fake.NewFakeClientWithScheme(testScheme, testObjs...) 
+ + engines := []JindoFSxEngine{ + { + name: "hbase", + namespace: "fluid", + Client: client, + Log: fake.NullLogger(), + }, + { + name: "spark", + namespace: "fluid", + Client: client, + Log: fake.NullLogger(), + }, + } + + var testCases = []struct { + engine JindoFSxEngine + expectedShould bool + }{ + { + engine: engines[0], + expectedShould: false, + }, + { + engine: engines[1], + expectedShould: true, + }, + } + + for _, test := range testCases { + should, err := test.engine.shouldSyncMetadata() + if err != nil || should != test.expectedShould { + t.Errorf("fail to exec the function") + } + } +} + +func TestSyncMetadataInternal(t *testing.T) { + datasetInputs := []datav1alpha1.Dataset{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "spark", + Namespace: "fluid", + }, + Spec: datav1alpha1.DatasetSpec{}, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase", + Namespace: "fluid", + }, + Spec: datav1alpha1.DatasetSpec{}, + }, + } + testObjs := []runtime.Object{} + for _, datasetInput := range datasetInputs { + testObjs = append(testObjs, datasetInput.DeepCopy()) + } + client := fake.NewFakeClientWithScheme(testScheme, testObjs...) 
+ + runtime := &datav1alpha1.JindoRuntime{ + Spec: datav1alpha1.JindoRuntimeSpec{ + Secret: "1", + }, + } + + engines := []JindoFSxEngine{ + { + name: "hbase", + namespace: "fluid", + Client: client, + Log: fake.NullLogger(), + MetadataSyncDoneCh: make(chan MetadataSyncResult), + runtime: runtime, + }, + { + name: "spark", + namespace: "fluid", + Client: client, + Log: fake.NullLogger(), + MetadataSyncDoneCh: nil, + runtime: runtime, + }, + } + + result := MetadataSyncResult{ + StartTime: time.Now(), + UfsTotal: "2GB", + Done: true, + } + + var testCase = []struct { + engine JindoFSxEngine + expectedResult bool + expectedUfsTotal string + }{ + { + engine: engines[0], + expectedUfsTotal: "2GB", + }, + } + + for index, test := range testCase { + if index == 0 { + go func() { + test.engine.MetadataSyncDoneCh <- result + }() + } + + err := test.engine.syncMetadataInternal() + // fmt.Println(index) + if err != nil { + t.Errorf("fail to exec the function with error %v", err) + } + + key := types.NamespacedName{ + Namespace: test.engine.namespace, + Name: test.engine.name, + } + + dataset := &datav1alpha1.Dataset{} + err = client.Get(context.TODO(), key, dataset) + if err != nil { + t.Errorf("failt to get the dataset with error %v", err) + } + + if dataset.Status.UfsTotal != test.expectedUfsTotal { + t.Errorf("expected UfsTotal %s, get UfsTotal %s", test.expectedUfsTotal, dataset.Status.UfsTotal) + } + } +} diff --git a/pkg/ddc/jindofsx/node.go b/pkg/ddc/jindofsx/node.go new file mode 100644 index 00000000000..6cfe9fc6b22 --- /dev/null +++ b/pkg/ddc/jindofsx/node.go @@ -0,0 +1,186 @@ +/* +Copyright 2022 The Fluid Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package jindofsx + +import ( + "context" + "fmt" + "time" + + "github.com/fluid-cloudnative/fluid/pkg/ctrl" + "github.com/fluid-cloudnative/fluid/pkg/utils" + datasetSchedule "github.com/fluid-cloudnative/fluid/pkg/utils/dataset/lifecycle" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + fluiderrs "github.com/fluid-cloudnative/fluid/pkg/errors" + "github.com/fluid-cloudnative/fluid/pkg/utils/kubeclient" +) + +func (e *JindoFSxEngine) AssignNodesToCache(desiredNum int32) (currentScheduleNum int32, err error) { + runtimeInfo, err := e.getRuntimeInfo() + if err != nil { + return currentScheduleNum, err + } + + dataset, err := utils.GetDataset(e.Client, e.name, e.namespace) + e.Log.Info("AssignNodesToCache", "dataset", dataset) + if err != nil { + return + } + + return datasetSchedule.AssignDatasetToNodes(runtimeInfo, + dataset, + e.Client, + desiredNum) +} + +// SyncScheduleInfoToCacheNodes syncs the cache info of the nodes by labeling the nodes +// And the Application pod can leverage such info for scheduling +func (e *JindoFSxEngine) SyncScheduleInfoToCacheNodes() (err error) { + defer utils.TimeTrack(time.Now(), "SyncScheduleInfoToCacheNodes", "name", e.name, "namespace", e.namespace) + + var ( + currentCacheNodenames []string + previousCacheNodenames []string + ) + + workers, err := ctrl.GetWorkersAsStatefulset(e.Client, + types.NamespacedName{Namespace: e.namespace, Name: e.getWorkerName()}) + if err != nil { + if fluiderrs.IsDeprecated(err) { + e.Log.Info("Warning: 
Deprecated mode is not support, so skip handling", "details", err) + return nil + } + return err + } + + workerSelector, err := labels.Parse(fmt.Sprintf("fluid.io/dataset=%s-%s,app=jindofs,role=jindofs-worker", e.namespace, e.name)) + if err != nil { + return err + } + + workerPods, err := kubeclient.GetPodsForStatefulSet(e.Client, workers, workerSelector) + if err != nil { + return err + } + + // find the nodes which should have the runtime label + for _, pod := range workerPods { + nodeName := pod.Spec.NodeName + node := &v1.Node{} + if err := e.Get(context.TODO(), types.NamespacedName{Name: nodeName}, node); err != nil { + return err + } + // nodesShouldHaveLabel = append(nodesShouldHaveLabel, node) + currentCacheNodenames = append(currentCacheNodenames, nodeName) + } + + // find the nodes which already have the runtime label + previousCacheNodenames, err = e.getAssignedNodes() + if err != nil { + return err + } + + // runtimeLabel indicates the specific runtime pod is on the node + // e.g. 
fluid.io/s-alluxio-default-hbase=true + // runtimeLabel := e.runtimeInfo.GetRuntimeLabelName() + // runtimeLabel := e.runtimeInfo.GetRuntimeLabelName() + + currentCacheNodenames = utils.RemoveDuplicateStr(currentCacheNodenames) + previousCacheNodenames = utils.RemoveDuplicateStr(previousCacheNodenames) + + addedCacheNodenames := utils.SubtractString(currentCacheNodenames, previousCacheNodenames) + removedCacheNodenames := utils.SubtractString(previousCacheNodenames, currentCacheNodenames) + + if len(addedCacheNodenames) > 0 { + + for _, nodeName := range addedCacheNodenames { + node := v1.Node{} + err = e.Get(context.TODO(), types.NamespacedName{ + Name: nodeName, + }, &node) + if err != nil { + e.Log.Error(err, "Failed to find new cache node", "node", nodeName) + return err + } + if !datasetSchedule.CheckIfRuntimeInNode(node, e.runtimeInfo) { + err = datasetSchedule.LabelCacheNode(node, e.runtimeInfo, e.Client) + if err != nil { + e.Log.Error(err, "Failed to label new cache node", "node", nodeName) + return err + } + } else { + e.Log.Info("The node is already added to cache", "node", nodeName) + } + } + } + + if len(removedCacheNodenames) > 0 { + for _, nodeName := range removedCacheNodenames { + node := v1.Node{} + err = e.Get(context.TODO(), types.NamespacedName{ + Name: nodeName, + }, &node) + if utils.IgnoreNotFound(err) != nil { + e.Log.Error(err, "Failed to find new cache node", "node", nodeName) + return err + } + if datasetSchedule.CheckIfRuntimeInNode(node, e.runtimeInfo) { + err = datasetSchedule.UnlabelCacheNode(node, e.runtimeInfo, e.Client) + if err != nil { + e.Log.Error(err, "Failed to unlabel cache node", "node", nodeName) + return err + } + } else { + e.Log.Info("The node is already removed from cache", "node", nodeName) + } + + } + } + + return err +} + +// getAssignedNodes gets the node which is already +func (e *JindoFSxEngine) getAssignedNodes() (nodeNames []string, err error) { + var ( + nodeList = &v1.NodeList{} + runtimeLabel = 
e.runtimeInfo.GetRuntimeLabelName() + ) + + nodeNames = []string{} + datasetLabels, err := labels.Parse(fmt.Sprintf("%s=true", runtimeLabel)) + if err != nil { + return + } + + err = e.List(context.TODO(), nodeList, &client.ListOptions{ + LabelSelector: datasetLabels, + }) + if err != nil { + return + } + + for _, node := range nodeList.Items { + nodeNames = append(nodeNames, node.Name) + } + + return +} diff --git a/pkg/ddc/jindofsx/node_test.go b/pkg/ddc/jindofsx/node_test.go new file mode 100644 index 00000000000..c1313d3149f --- /dev/null +++ b/pkg/ddc/jindofsx/node_test.go @@ -0,0 +1,425 @@ +/* +Copyright 2022 The Fluid Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package jindofsx + +import ( + "context" + "fmt" + "reflect" + "testing" + + "github.com/fluid-cloudnative/fluid/api/v1alpha1" + "github.com/fluid-cloudnative/fluid/pkg/ddc/base" + "github.com/fluid-cloudnative/fluid/pkg/utils/fake" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + utilpointer "k8s.io/utils/pointer" +) + +func getTestJindoFSxEngineNode(client client.Client, name string, namespace string, withRunTime bool) *JindoFSxEngine { + engine := &JindoFSxEngine{ + runtime: nil, + name: name, + namespace: namespace, + Client: client, + runtimeInfo: nil, + Log: fake.NullLogger(), + } + if withRunTime { + engine.runtime = &v1alpha1.JindoRuntime{} + engine.runtimeInfo, _ = base.BuildRuntimeInfo(name, namespace, "Jindo", v1alpha1.TieredStore{}) + } + return engine +} + +func TestAssignNodesToCache(t *testing.T) { + dataSet := &v1alpha1.Dataset{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase", + Namespace: "fluid", + }, + Spec: v1alpha1.DatasetSpec{}, + Status: v1alpha1.DatasetStatus{}, + } + nodeInputs := []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-node-spark", + Labels: map[string]string{ + "fluid.io/dataset-num": "1", + "fluid.io/s-Jindo-fluid-spark": "true", + "fluid.io/s-fluid-spark": "true", + "fluid.io/s-h-Jindo-d-fluid-spark": "5B", + "fluid.io/s-h-Jindo-m-fluid-spark": "1B", + "fluid.io/s-h-Jindo-t-fluid-spark": "6B", + "fluid_exclusive": "fluid_spark", + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-node-share", + Labels: map[string]string{ + "fluid.io/dataset-num": "2", + "fluid.io/s-Jindo-fluid-hadoop": "true", + "fluid.io/s-fluid-hadoop": "true", + "fluid.io/s-h-Jindo-d-fluid-hadoop": "5B", + "fluid.io/s-h-Jindo-m-fluid-hadoop": "1B", + "fluid.io/s-h-Jindo-t-fluid-hadoop": "6B", + 
"fluid.io/s-Jindo-fluid-hbase": "true", + "fluid.io/s-fluid-hbase": "true", + "fluid.io/s-h-Jindo-d-fluid-hbase": "5B", + "fluid.io/s-h-Jindo-m-fluid-hbase": "1B", + "fluid.io/s-h-Jindo-t-fluid-hbase": "6B", + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-node-hadoop", + Labels: map[string]string{ + "fluid.io/dataset-num": "1", + "fluid.io/s-Jindo-fluid-hadoop": "true", + "fluid.io/s-fluid-hadoop": "true", + "fluid.io/s-h-Jindo-d-fluid-hadoop": "5B", + "fluid.io/s-h-Jindo-m-fluid-hadoop": "1B", + "fluid.io/s-h-Jindo-t-fluid-hadoop": "6B", + "node-select": "true", + }, + }, + }, + } + runtimeObjs := []runtime.Object{} + runtimeObjs = append(runtimeObjs, dataSet) + for _, nodeInput := range nodeInputs { + runtimeObjs = append(runtimeObjs, nodeInput.DeepCopy()) + } + fakeClient := fake.NewFakeClientWithScheme(testScheme, runtimeObjs...) + + testCases := []struct { + withRunTime bool + name string + namespace string + out int32 + isErr bool + }{ + { + withRunTime: true, + name: "hbase", + namespace: "fluid", + out: 2, + isErr: false, + }, + { + withRunTime: false, + name: "hbase", + namespace: "fluid", + out: 0, + isErr: true, + }, + { + withRunTime: true, + name: "not-found", + namespace: "fluid", + out: 0, + isErr: true, + }, + } + for _, testCase := range testCases { + engine := getTestJindoFSxEngineNode(fakeClient, testCase.name, testCase.namespace, testCase.withRunTime) + out, err := engine.AssignNodesToCache(3) // num: 2 err: nil + if out != testCase.out { + t.Errorf("expected %d, got %d.", testCase.out, out) + } + isErr := err != nil + if isErr != testCase.isErr { + t.Errorf("expected %t, got %t.", testCase.isErr, isErr) + } + } +} + +func TestSyncScheduleInfoToCacheNodes(t *testing.T) { + type fields struct { + // runtime *datav1alpha1.JindoRuntime + worker *appsv1.StatefulSet + pods []*v1.Pod + ds *appsv1.DaemonSet + nodes []*v1.Node + name string + namespace string + } + testcases := []struct { + name string + fields fields + nodeNames 
[]string + }{ + { + name: "create", + fields: fields{ + name: "spark", + namespace: "big-data", + worker: &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "spark-jindofs-worker", + Namespace: "big-data", + UID: "uid1", + }, + Spec: appsv1.StatefulSetSpec{}, + }, + pods: []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "spark-jindofs-worker-0", + Namespace: "big-data", + OwnerReferences: []metav1.OwnerReference{{ + Kind: "StatefulSet", + APIVersion: "apps/v1", + Name: "spark-jindofs-worker", + UID: "uid1", + Controller: utilpointer.BoolPtr(true), + }}, + Labels: map[string]string{ + "app": "jindofs", + "role": "jindofs-worker", + "fluid.io/dataset": "big-data-spark", + }, + }, + Spec: v1.PodSpec{ + NodeName: "node1", + }, + }, + }, + nodes: []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node1", + }, + }, + }, + }, + nodeNames: []string{"node1"}, + }, { + name: "add", + fields: fields{ + name: "hbase", + namespace: "big-data", + worker: &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase-jindofs-worker", + Namespace: "big-data", + UID: "uid2", + }, + Spec: appsv1.StatefulSetSpec{}, + }, + pods: []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase-jindofs-worker-0", + Namespace: "big-data", + OwnerReferences: []metav1.OwnerReference{{ + Kind: "StatefulSet", + APIVersion: "apps/v1", + Name: "hbase-jindofs-worker", + UID: "uid2", + Controller: utilpointer.BoolPtr(true), + }}, + Labels: map[string]string{ + "app": "jindofs", + "role": "jindofs-worker", + "fluid.io/dataset": "big-data-hbase", + }, + }, + Spec: v1.PodSpec{ + NodeName: "node3", + }, + }, + }, + nodes: []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node3", + }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Name: "node2", + Labels: map[string]string{ + "fluid.io/s-default-hbase": "true", + }, + }, + }, + }, + }, + nodeNames: []string{"node3"}, + }, { + name: "noController", + fields: fields{ + name: "hbase-a", + namespace: "big-data", + worker: 
&appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase-a-jindofs-worker", + Namespace: "big-data", + UID: "uid3", + }, + Spec: appsv1.StatefulSetSpec{}, + }, + pods: []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase-a-jindofs-worker-0", + Namespace: "big-data", + Labels: map[string]string{ + "app": "jindofs", + "role": "jindofs-worker", + "fluid.io/dataset": "big-data-hbase-a", + }, + }, + Spec: v1.PodSpec{ + NodeName: "node5", + }, + }, + }, + nodes: []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node5", + }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Name: "node4", + Labels: map[string]string{ + "fluid.io/s-default-hbase-a": "true", + }, + }, + }, + }, + }, + nodeNames: []string{}, + }, { + name: "deprecated", + fields: fields{ + name: "deprecated", + namespace: "big-data", + worker: &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deprecated-worker", + Namespace: "big-data", + UID: "uid3", + }, + Spec: appsv1.StatefulSetSpec{}, + }, + ds: &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{ + Name: "deprecated-jindofs-worker", + Namespace: "big-data", + UID: "uid3", + }}, + pods: []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "deprecated-jindofs-worker-0", + Namespace: "big-data", + Labels: map[string]string{ + "app": "jindofs", + "role": "jindofs-worker", + "fluid.io/dataset": "big-data-hbase-a", + }, + }, + Spec: v1.PodSpec{ + NodeName: "node5", + }, + }, + }, + nodes: []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node6", + }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Name: "node7", + Labels: map[string]string{ + "fluid.io/s-default-hbase-a": "true", + }, + }, + }, + }, + }, + nodeNames: []string{}, + }, + } + + runtimeObjs := []runtime.Object{} + + for _, testcase := range testcases { + runtimeObjs = append(runtimeObjs, testcase.fields.worker) + + if testcase.fields.ds != nil { + runtimeObjs = append(runtimeObjs, testcase.fields.ds) + } + for _, pod := range testcase.fields.pods { + 
runtimeObjs = append(runtimeObjs, pod) + } + + for _, node := range testcase.fields.nodes { + runtimeObjs = append(runtimeObjs, node) + } + // runtimeObjs = append(runtimeObjs, testcase.fields.pods) + } + c := fake.NewFakeClientWithScheme(testScheme, runtimeObjs...) + + for _, testcase := range testcases { + engine := getTestJindoFSxEngineNode(c, testcase.fields.name, testcase.fields.namespace, true) + err := engine.SyncScheduleInfoToCacheNodes() + if err != nil { + t.Errorf("Got error %t.", err) + } + + nodeList := &v1.NodeList{} + datasetLabels, err := labels.Parse(fmt.Sprintf("%s=true", engine.getCommonLabelname())) + if err != nil { + return + } + + err = c.List(context.TODO(), nodeList, &client.ListOptions{ + LabelSelector: datasetLabels, + }) + + if err != nil { + t.Errorf("Got error %t.", err) + } + + nodeNames := []string{} + for _, node := range nodeList.Items { + nodeNames = append(nodeNames, node.Name) + } + + if len(testcase.nodeNames) == 0 && len(nodeNames) == 0 { + continue + } + + if !reflect.DeepEqual(testcase.nodeNames, nodeNames) { + t.Errorf("test case %v fail to sync node labels, wanted %v, got %v", testcase.name, testcase.nodeNames, nodeNames) + } + + } +} diff --git a/pkg/ddc/jindofsx/operations/base.go b/pkg/ddc/jindofsx/operations/base.go new file mode 100644 index 00000000000..ff0456afbd6 --- /dev/null +++ b/pkg/ddc/jindofsx/operations/base.go @@ -0,0 +1,202 @@ +/* +Copyright 2022 The Fluid Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package operations + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/fluid-cloudnative/fluid/pkg/utils/kubeclient" + "github.com/go-logr/logr" +) + +type JindoFileUtils struct { + podName string + namespace string + container string + log logr.Logger +} + +func NewJindoFileUtils(podName string, containerName string, namespace string, log logr.Logger) JindoFileUtils { + + return JindoFileUtils{ + podName: podName, + namespace: namespace, + container: containerName, + log: log, + } +} + +// exec with timeout +func (a JindoFileUtils) exec(command []string, verbose bool) (stdout string, stderr string, err error) { + ctx, cancel := context.WithTimeout(context.TODO(), time.Second*1500) + ch := make(chan string, 1) + defer cancel() + + go func() { + stdout, stderr, err = a.execWithoutTimeout(command, verbose) + ch <- "done" + }() + + select { + case <-ch: + a.log.V(1).Info("execute in time", "command", command) + case <-ctx.Done(): + err = fmt.Errorf("timeout when executing %v", command) + } + + return +} + +// execWithoutTimeout +func (a JindoFileUtils) execWithoutTimeout(command []string, verbose bool) (stdout string, stderr string, err error) { + stdout, stderr, err = kubeclient.ExecCommandInContainer(a.podName, a.container, a.namespace, command) + if err != nil { + a.log.Info("Stdout", "Command", command, "Stdout", stdout) + a.log.Error(err, "Failed", "Command", command, "FailedReason", stderr) + return + } + if verbose { + a.log.Info("Stdout", "Command", command, "Stdout", stdout) + } + return +} + +// Get summary info of the jindo Engine +func (a JindoFileUtils) ReportSummary() (summary string, err error) { + var ( + command = []string{"jindo", "fs", "-report"} + stdout string + stderr string + ) + + stdout, stderr, err = a.exec(command, false) + if err != nil { + err = fmt.Errorf("execute command %v with expectedErr: %v stdout %s and stderr %s", command, err, stdout, stderr) + return stdout, err + } + return stdout, err +} + +func (a 
JindoFileUtils) IsMounted(mountPoint string) (mounted bool, err error) { + var ( + command = []string{"jindo", "admin", "-mount"} + stdout string + stderr string + ) + + stdout, stderr, err = a.exec(command, true) + if err != nil { + return mounted, fmt.Errorf("execute command %v with expectedErr: %v stdout %s and stderr %s", command, err, stdout, stderr) + } + + results := strings.Split(stdout, "\n") + + for _, line := range results { + fields := strings.Fields(line) + if len(fields) > 2 && fields[2] == mountPoint { + mounted = true + return mounted, nil + } + } + + return mounted, err +} + +func (a JindoFileUtils) Mount(mountName string, ufsPath string) (err error) { + + var ( + command = []string{"jindo", "admin", "-mount"} + ) + // jindo fsxadmin -mount /path oss://xyz/ + if strings.HasPrefix(mountName, "/") { + command = append(command, mountName, ufsPath) + } else { + command = append(command, "/"+mountName, ufsPath) + } + + _, _, _ = a.exec(command, false) + /*if err != nil { + err = fmt.Errorf("execute command %v with expectedErr: %v stdout %s and stderr %s", command, err, stdout, stderr) + return + }*/ + + return nil +} + +func (a JindoFileUtils) GetUfsTotalSize(url string, useStsSecret bool) (summary string, err error) { + var ( + command = []string{"hadoop", "fs", "-count", url} + stdout string + stderr string + ) + + stdout, stderr, err = a.execWithoutTimeout(command, false) + + str := strings.Fields(stdout) + + if len(str) < 3 { + err = fmt.Errorf("failed to parse %s in Count method", str) + return + } + + stdout = str[2] + + if err != nil { + err = fmt.Errorf("execute command %v with expectedErr: %v stdout %s and stderr %s", command, err, stdout, stderr) + return stdout, err + } + return stdout, err +} + +// Check if the JIndo is ready by running `jindo jfs -report` command +func (a JindoFileUtils) Ready() (ready bool) { + var ( + command = []string{"jindo", "fs", "-report"} + ) + + _, _, err := a.exec(command, true) + if err == nil { + ready = true + 
} + + return ready +} + +// IsExist checks if the JindoPath exists +func (a JindoFileUtils) IsExist(jindoPath string) (found bool, err error) { + var ( + command = []string{"hadoop", "fs", "-ls", "jindo://" + jindoPath} + stdout string + stderr string + ) + + stdout, stderr, err = a.exec(command, true) + if err != nil { + if strings.Contains(stdout, "No such file or directory") { + err = nil + } else { + err = fmt.Errorf("execute command %v with expectedErr: %v stdout %s and stderr %s", command, err, stdout, stderr) + return + } + } else { + found = true + } + return +} diff --git a/pkg/ddc/jindofsx/operations/base_test.go b/pkg/ddc/jindofsx/operations/base_test.go new file mode 100644 index 00000000000..d2fb363847a --- /dev/null +++ b/pkg/ddc/jindofsx/operations/base_test.go @@ -0,0 +1,218 @@ +/* +Copyright 2021 The Fluid Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package operations + +import ( + "errors" + "reflect" + "testing" + + "github.com/brahma-adshonor/gohook" + "github.com/fluid-cloudnative/fluid/pkg/utils/fake" +) + +func TestNewJindoFileUtils(t *testing.T) { + expectedResult := JindoFileUtils{ + podName: "hadoop", + namespace: "default", + container: "hadoop", + log: fake.NullLogger(), + } + + result := NewJindoFileUtils("hadoop", "default", "hadoop", fake.NullLogger()) + if reflect.DeepEqual(result, expectedResult) { + t.Errorf("check failure, expected %v, get %v", expectedResult, result) + } +} + +func TestJindoFileUtils_exec(t *testing.T) { + ExecWithoutTimeoutCommon := func(a JindoFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) { + return "Test stdout", "", nil + } + ExecWithoutTimeoutErr := func(a JindoFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) { + return "", "", errors.New("fail to run the command") + } + + wrappedUnhookExec := func() { + err := gohook.UnHook(JindoFileUtils.execWithoutTimeout) + if err != nil { + t.Fatal(err.Error()) + } + } + + err := gohook.Hook(JindoFileUtils.execWithoutTimeout, ExecWithoutTimeoutErr, nil) + if err != nil { + t.Fatal(err.Error()) + } + a := &JindoFileUtils{log: fake.NullLogger()} + _, _, err = a.exec([]string{"jindo", "fs", "-report"}, false) + if err == nil { + t.Error("check failure, want err, got nil") + } + wrappedUnhookExec() + + err = gohook.Hook(JindoFileUtils.execWithoutTimeout, ExecWithoutTimeoutCommon, nil) + if err != nil { + t.Fatal(err.Error()) + } + _, _, err = a.exec([]string{"jindo", "fs", "-report"}, true) + if err != nil { + t.Errorf("check failure, want nil, got err: %v", err) + } + wrappedUnhookExec() +} + +func TestJindoFileUtils_ReportSummary(t *testing.T) { + ExecCommon := func(a JindoFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) { + return "Test stdout", "", nil + } + ExecErr := func(a JindoFileUtils, command []string, verbose 
bool) (stdout string, stderr string, err error) { + return "", "", errors.New("fail to run the command") + } + wrappedUnhookExec := func() { + err := gohook.UnHook(JindoFileUtils.exec) + if err != nil { + t.Fatal(err.Error()) + } + } + + err := gohook.Hook(JindoFileUtils.exec, ExecErr, nil) + if err != nil { + t.Fatal(err.Error()) + } + a := JindoFileUtils{} + _, err = a.ReportSummary() + if err == nil { + t.Error("check failure, want err, got nil") + } + wrappedUnhookExec() + + err = gohook.Hook(JindoFileUtils.exec, ExecCommon, nil) + if err != nil { + t.Fatal(err.Error()) + } + _, err = a.ReportSummary() + if err != nil { + t.Errorf("check failure, want nil, got err: %v", err) + } + wrappedUnhookExec() +} + +func TestJindoFileUtils_GetUfsTotalSize(t *testing.T) { + ExecWithoutTimeoutCommon := func(a JindoFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) { + return "2 1 108 testUrl", "", nil + } + ExecWithoutTimeoutErr := func(a JindoFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) { + return "", "", errors.New("fail to run the command") + } + + wrappedUnhookExec := func() { + err := gohook.UnHook(JindoFileUtils.execWithoutTimeout) + if err != nil { + t.Fatal(err.Error()) + } + } + + err := gohook.Hook(JindoFileUtils.execWithoutTimeout, ExecWithoutTimeoutErr, nil) + if err != nil { + t.Fatal(err.Error()) + } + a := &JindoFileUtils{log: fake.NullLogger()} + _, err = a.GetUfsTotalSize("/tmpDictionary", false) + if err == nil { + t.Error("check failure, want err, got nil") + } + wrappedUnhookExec() + + err = gohook.Hook(JindoFileUtils.execWithoutTimeout, ExecWithoutTimeoutCommon, nil) + if err != nil { + t.Fatal(err.Error()) + } + _, err = a.GetUfsTotalSize("/tmpDictionary", false) + if err != nil { + t.Errorf("check failure, want nil, got err: %v", err) + } + wrappedUnhookExec() +} + +func TestJindoFileUtils_Ready(t *testing.T) { + ExecCommon := func(a JindoFileUtils, command []string, verbose 
bool) (stdout string, stderr string, err error) { + return "Test stdout ", "", nil + } + ExecErr := func(a JindoFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) { + return "", "", errors.New("fail to run the command") + } + wrappedUnhookExec := func() { + err := gohook.UnHook(JindoFileUtils.exec) + if err != nil { + t.Fatal(err.Error()) + } + } + + err := gohook.Hook(JindoFileUtils.exec, ExecErr, nil) + if err != nil { + t.Fatal(err.Error()) + } + a := &JindoFileUtils{log: fake.NullLogger()} + ready := a.Ready() + if ready != false { + t.Errorf("check failure, want false, got %t", ready) + } + wrappedUnhookExec() + + err = gohook.Hook(JindoFileUtils.exec, ExecCommon, nil) + if err != nil { + t.Fatal(err.Error()) + } + ready = a.Ready() + if ready != true { + t.Errorf("check failure, want true, got %t", ready) + } + wrappedUnhookExec() +} + +func TestJindoFileUtils_IsExist(t *testing.T) { + ExecCommon := func(a JindoFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) { + return "Test stdout", "", nil + } + ExecErr := func(a JindoFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) { + return "", "", errors.New("fail to run the command") + } + wrappedUnhookExec := func() { + err := gohook.UnHook(JindoFileUtils.exec) + if err != nil { + t.Fatal(err.Error()) + } + } + + err := gohook.Hook(JindoFileUtils.exec, ExecErr, nil) + if err != nil { + t.Fatal(err.Error()) + } + a := JindoFileUtils{} + _, err = a.IsExist("/data") + if err == nil { + t.Error("check failure, want err, got nil") + } + wrappedUnhookExec() + + err = gohook.Hook(JindoFileUtils.exec, ExecCommon, nil) + if err != nil { + t.Fatal(err.Error()) + } + _, err = a.IsExist("/data") + if err != nil { + t.Errorf("check failure, want nil, got err: %v", err) + } + wrappedUnhookExec() +} diff --git a/pkg/ddc/jindofsx/operations/cached.go b/pkg/ddc/jindofsx/operations/cached.go new file mode 100644 index 
00000000000..2855ba1c8fa --- /dev/null +++ b/pkg/ddc/jindofsx/operations/cached.go @@ -0,0 +1,32 @@ +package operations + +import ( + "fmt" + "github.com/fluid-cloudnative/fluid/pkg/utils" + "time" +) + +// clean cache with a preset timeout of 60s +func (a JindoFileUtils) CleanCache() (err error) { + var ( + // jindo jfs -formatCache -force + command = []string{"jindo", "fs", "-formatCache", "-force"} + stdout string + stderr string + ) + + stdout, stderr, err = a.exec(command, false) + + if err != nil { + err = fmt.Errorf("execute command %v with expectedErr: %v stdout %s and stderr %s", command, err, stdout, stderr) + if utils.IgnoreNotFound(err) == nil { + fmt.Printf("Failed to clean cache due to %v", err) + return nil + } + return + } else { + time.Sleep(30 * time.Second) + } + + return +} diff --git a/pkg/ddc/jindofsx/operations/cached_test.go b/pkg/ddc/jindofsx/operations/cached_test.go new file mode 100644 index 00000000000..9b9ede0b2e4 --- /dev/null +++ b/pkg/ddc/jindofsx/operations/cached_test.go @@ -0,0 +1,57 @@ +/* +Copyright 2021 The Fluid Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package operations + +import ( + "errors" + "testing" + + "github.com/brahma-adshonor/gohook" + "github.com/fluid-cloudnative/fluid/pkg/utils/fake" +) + +func TestJindoFIlUtils_CleanCache(t *testing.T) { + ExecCommon := func(a JindoFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) { + return "Test stout", "", nil + } + ExecErr := func(a JindoFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) { + return "", "", errors.New("fail to run the command") + } + wrappedUnhookExec := func() { + err := gohook.UnHook(JindoFileUtils.exec) + if err != nil { + t.Fatal(err.Error()) + } + } + + err := gohook.Hook(JindoFileUtils.exec, ExecErr, nil) + if err != nil { + t.Fatal(err.Error()) + } + a := &JindoFileUtils{log: fake.NullLogger()} + err = a.CleanCache() + if err == nil { + t.Error("check failure, want err, got nil") + } + wrappedUnhookExec() + + err = gohook.Hook(JindoFileUtils.exec, ExecCommon, nil) + if err != nil { + t.Fatal(err.Error()) + } + err = a.CleanCache() + if err != nil { + t.Errorf("check failure, want nil, got err: %v", err) + } + wrappedUnhookExec() +} diff --git a/pkg/ddc/jindofsx/port_parser.go b/pkg/ddc/jindofsx/port_parser.go new file mode 100644 index 00000000000..86278aae54e --- /dev/null +++ b/pkg/ddc/jindofsx/port_parser.go @@ -0,0 +1,89 @@ +/* +Copyright 2022 The Fluid Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package jindofsx + +import ( + "context" + "fmt" + "github.com/fluid-cloudnative/fluid/api/v1alpha1" + "github.com/fluid-cloudnative/fluid/pkg/utils/kubeclient" + v1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "strconv" + "strings" +) + +var propertiesToCheck = []string{ + // "client.storage.rpc.port" not included here cause it is same with "storage.rpc.port" + "storage.rpc.port", + "namespace.rpc.port", +} + +// GetReservedPorts defines restoration logic for JindoRuntime +func GetReservedPorts(client client.Client) (ports []int, err error) { + var datasets v1alpha1.DatasetList + err = client.List(context.TODO(), &datasets) + if err != nil { + return nil, err + } + + for _, dataset := range datasets.Items { + if len(dataset.Status.Runtimes) != 0 { + // Assume there is only one runtime and it is in category "Accelerate" + accelerateRuntime := dataset.Status.Runtimes[0] + if accelerateRuntime.Type != "jindo" { + continue + } + configMapName := fmt.Sprintf("%s-jindofs-config", accelerateRuntime.Name) + configMap, err := kubeclient.GetConfigmapByName(client, configMapName, accelerateRuntime.Namespace) + if err != nil { + return nil, err + } + + if configMap == nil { + continue + } + + reservedPorts, err := parsePortsFromConfigMap(configMap) + if err != nil { + return nil, err + } + ports = append(ports, reservedPorts...) 
+ } + } + return ports, nil +} + +// parsePortsFromConfigMap extracts port usage information given a configMap +func parsePortsFromConfigMap(configMap *v1.ConfigMap) (ports []int, err error) { + if conf, ok := configMap.Data["bigboot.cfg"]; ok { + cfgConfs := strings.Split(conf, "\n") + for _, cfgConf := range cfgConfs { + for _, toCheck := range propertiesToCheck { + if strings.HasPrefix(cfgConf, toCheck) { + portStr := strings.Split(cfgConf, " = ")[1] + portInt, err := strconv.Atoi(portStr) + if err != nil { + return nil, err + } + ports = append(ports, portInt) + } + } + } + } + return ports, nil +} diff --git a/pkg/ddc/jindofsx/port_parser_test.go b/pkg/ddc/jindofsx/port_parser_test.go new file mode 100644 index 00000000000..d5936d2a488 --- /dev/null +++ b/pkg/ddc/jindofsx/port_parser_test.go @@ -0,0 +1,70 @@ +package jindofsx + +import ( + v1 "k8s.io/api/core/v1" + "reflect" + "testing" +) + +var cfg = ` +[bigboot] +logger.dir = /dev/shm/default/oss-tf-dataset/bigboot/log +logger.cleanner.enable = true + +[bigboot-namespace] +jfs.namespaces = spark +jfs.namespaces.spark.oss.uri = oss://tensorflow-datasets.oss-cn-shanghai-internal.aliyuncs.com/ +namespace.backend.type = rocksdb +namespace.blocklet.cache.size = 1000000 +namespace.filelet.cache.size = 100000 +namespace.meta-dir = /dev/shm/default/oss-tf-dataset/bigboot/server +namespace.rpc.port = 18000 +namespace.filelet.atime.enable = false + +[bigboot-storage] +namespace.meta-dir = /dev/shm/default/oss-tf-dataset/bigboot/bignode +storage.data-dirs = /dev/shm/default/oss-tf-dataset/bigboot +storage.data-dirs.capacities = 10g +storage.ram.cache.size = 10g +storage.rpc.port = 18001 +namespace.meta-dir = /dev/shm/default/oss-tf-dataset/bigboot/bignode +storage.compaction.enable = false + +[bigboot-client] +client.oss.upload.queue.size = 5 +client.oss.upload.threads = 4 +client.storage.rpc.port = 18001 +` + +func Test_parsePortsFromConfigMap(t *testing.T) { + type args struct { + configMap *v1.ConfigMap + } + tests 
:= []struct { + name string + args args + wantPorts []int + wantErr bool + }{ + { + name: "parse configMap", + args: args{configMap: &v1.ConfigMap{Data: map[string]string{ + "bigboot.cfg": cfg, + }}}, + wantPorts: []int{18000, 18001}, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotPorts, err := parsePortsFromConfigMap(tt.args.configMap) + if (err != nil) != tt.wantErr { + t.Errorf("parsePortsFromConfigMap() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(gotPorts, tt.wantPorts) { + t.Errorf("parsePortsFromConfigMap() gotPorts = %v, want %v", gotPorts, tt.wantPorts) + } + }) + } +} diff --git a/pkg/ddc/jindofsx/replicas.go b/pkg/ddc/jindofsx/replicas.go new file mode 100644 index 00000000000..b16a1ffed77 --- /dev/null +++ b/pkg/ddc/jindofsx/replicas.go @@ -0,0 +1,57 @@ +/* +Copyright 2022 The Fluid Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package jindofsx + +import ( + "github.com/fluid-cloudnative/fluid/pkg/ctrl" + fluiderrs "github.com/fluid-cloudnative/fluid/pkg/errors" + cruntime "github.com/fluid-cloudnative/fluid/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/retry" +) + +func (e JindoFSxEngine) SyncReplicas(ctx cruntime.ReconcileRequestContext) (err error) { + + err = retry.RetryOnConflict(retry.DefaultBackoff, func() error { + workers, err := ctrl.GetWorkersAsStatefulset(e.Client, + types.NamespacedName{Namespace: e.namespace, Name: e.getWorkerName()}) + if err != nil { + if fluiderrs.IsDeprecated(err) { + e.Log.Info("Warning: the current runtime is created by runtime controller before v0.7.0, scale out/in are not supported. To support these features, please create a new dataset", "details", err) + return nil + } + return err + } + + runtime, err := e.getRuntime() + if err != nil { + return err + } + runtimeToUpdate := runtime.DeepCopy() + // err = e.Helper.SetupWorkers(runtimeToUpdate, runtimeToUpdate.Status, workers) + err = e.Helper.SyncReplicas(ctx, runtimeToUpdate, runtimeToUpdate.Status, workers) + if err != nil { + e.Log.Error(err, "Failed to sync the replicas") + } + return nil + }) + if err != nil { + e.Log.Error(err, "Failed to sync the replicas") + } + + return +} diff --git a/pkg/ddc/jindofsx/replicas_test.go b/pkg/ddc/jindofsx/replicas_test.go new file mode 100644 index 00000000000..ef8cad5874e --- /dev/null +++ b/pkg/ddc/jindofsx/replicas_test.go @@ -0,0 +1,325 @@ +/* +Copyright 2022 The Fluid Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package jindofsx + +import ( + "testing" + + datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1" + "github.com/fluid-cloudnative/fluid/pkg/ctrl" + "github.com/fluid-cloudnative/fluid/pkg/ddc/base" + cruntime "github.com/fluid-cloudnative/fluid/pkg/runtime" + "github.com/fluid-cloudnative/fluid/pkg/utils" + appsv1 "k8s.io/api/apps/v1" + + "github.com/fluid-cloudnative/fluid/pkg/utils/fake" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" + utilpointer "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func newJindoFSxEngineREP(client client.Client, name string, namespace string) *JindoFSxEngine { + + runTimeInfo, _ := base.BuildRuntimeInfo(name, namespace, "jindo", datav1alpha1.TieredStore{}) + engine := &JindoFSxEngine{ + runtime: &datav1alpha1.JindoRuntime{}, + name: name, + namespace: namespace, + Client: client, + runtimeInfo: runTimeInfo, + Log: fake.NullLogger(), + } + engine.Helper = ctrl.BuildHelper(runTimeInfo, client, engine.Log) + return engine +} + +func TestSyncReplicas(t *testing.T) { + nodeInputs := []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-node-spark", + Labels: map[string]string{ + "fluid.io/dataset-num": "1", + "fluid.io/s-Jindo-fluid-spark": "true", + "fluid.io/s-fluid-spark": "true", + "fluid.io/s-h-Jindo-d-fluid-spark": "5B", + "fluid.io/s-h-Jindo-m-fluid-spark": "1B", + "fluid.io/s-h-Jindo-t-fluid-spark": "6B", + "fluid_exclusive": "fluid_spark", + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-node-share", + Labels: map[string]string{ + "fluid.io/dataset-num": "2", + "fluid.io/s-Jindo-fluid-hadoop": "true", + "fluid.io/s-fluid-hadoop": "true", + "fluid.io/s-h-Jindo-d-fluid-hadoop": "5B", + "fluid.io/s-h-Jindo-m-fluid-hadoop": "1B", + "fluid.io/s-h-Jindo-t-fluid-hadoop": 
"6B", + "fluid.io/s-Jindo-fluid-hbase": "true", + "fluid.io/s-fluid-hbase": "true", + "fluid.io/s-h-Jindo-d-fluid-hbase": "5B", + "fluid.io/s-h-Jindo-m-fluid-hbase": "1B", + "fluid.io/s-h-Jindo-t-fluid-hbase": "6B", + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-node-hadoop", + Labels: map[string]string{ + "fluid.io/dataset-num": "1", + "fluid.io/s-Jindo-fluid-hadoop": "true", + "fluid.io/s-fluid-hadoop": "true", + "fluid.io/s-h-Jindo-d-fluid-hadoop": "5B", + "fluid.io/s-h-Jindo-m-fluid-hadoop": "1B", + "fluid.io/s-h-Jindo-t-fluid-hadoop": "6B", + "node-select": "true", + }, + }, + }, + } + runtimeInputs := []*datav1alpha1.JindoRuntime{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase", + Namespace: "fluid", + }, + Spec: datav1alpha1.JindoRuntimeSpec{ + Replicas: 3, // 2 + }, + Status: datav1alpha1.RuntimeStatus{ + CurrentWorkerNumberScheduled: 2, + CurrentMasterNumberScheduled: 2, // 0 + CurrentFuseNumberScheduled: 2, + DesiredMasterNumberScheduled: 3, + DesiredWorkerNumberScheduled: 2, + DesiredFuseNumberScheduled: 3, + Conditions: []datav1alpha1.RuntimeCondition{ + utils.NewRuntimeCondition(datav1alpha1.RuntimeWorkersInitialized, datav1alpha1.RuntimeWorkersInitializedReason, "The workers are initialized.", v1.ConditionTrue), + utils.NewRuntimeCondition(datav1alpha1.RuntimeFusesInitialized, datav1alpha1.RuntimeFusesInitializedReason, "The fuses are initialized.", v1.ConditionTrue), + }, + WorkerPhase: "NotReady", + FusePhase: "NotReady", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hadoop", + Namespace: "fluid", + }, + Spec: datav1alpha1.JindoRuntimeSpec{ + Replicas: 2, + }, + Status: datav1alpha1.RuntimeStatus{ + CurrentWorkerNumberScheduled: 3, + CurrentMasterNumberScheduled: 3, + CurrentFuseNumberScheduled: 3, + DesiredMasterNumberScheduled: 2, + DesiredWorkerNumberScheduled: 3, + DesiredFuseNumberScheduled: 2, + Conditions: []datav1alpha1.RuntimeCondition{ + utils.NewRuntimeCondition(datav1alpha1.RuntimeWorkersInitialized, 
datav1alpha1.RuntimeWorkersInitializedReason, "The workers are initialized.", v1.ConditionTrue), + utils.NewRuntimeCondition(datav1alpha1.RuntimeFusesInitialized, datav1alpha1.RuntimeFusesInitializedReason, "The fuses are initialized.", v1.ConditionTrue), + }, + WorkerPhase: "NotReady", + FusePhase: "NotReady", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "obj", + Namespace: "fluid", + }, + Spec: datav1alpha1.JindoRuntimeSpec{ + Replicas: 2, + }, + Status: datav1alpha1.RuntimeStatus{ + CurrentWorkerNumberScheduled: 2, + CurrentMasterNumberScheduled: 2, + CurrentFuseNumberScheduled: 2, + DesiredMasterNumberScheduled: 2, + DesiredWorkerNumberScheduled: 2, + DesiredFuseNumberScheduled: 2, + WorkerPhase: "NotReady", + FusePhase: "NotReady", + }, + }, + } + workersInputs := []*appsv1.StatefulSet{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase-jindofs-worker", + Namespace: "fluid", + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: utilpointer.Int32Ptr(2), + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hadoop-jindofs-worker", + Namespace: "fluid", + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: utilpointer.Int32Ptr(2), + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "obj-jindofs-worker", + Namespace: "fluid", + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: utilpointer.Int32Ptr(2), + }, + }, + } + dataSetInputs := []*datav1alpha1.Dataset{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase", + Namespace: "fluid", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hadoop", + Namespace: "fluid", + }, + }, + } + fuseInputs := []*appsv1.DaemonSet{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase-jindofs-fuse", + Namespace: "fluid", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hadoop-jindofs-fuse", + Namespace: "fluid", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "obj-jindofs-fuse", + Namespace: "fluid", + }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Name: "deprecated-jindofs-worker", + Namespace: "fluid", + }, + }, 
+ } + + objs := []runtime.Object{} + for _, nodeInput := range nodeInputs { + objs = append(objs, nodeInput.DeepCopy()) + } + for _, runtimeInput := range runtimeInputs { + objs = append(objs, runtimeInput.DeepCopy()) + } + for _, workerInput := range workersInputs { + objs = append(objs, workerInput.DeepCopy()) + } + for _, fuseInput := range fuseInputs { + objs = append(objs, fuseInput.DeepCopy()) + } + for _, dataSetInput := range dataSetInputs { + objs = append(objs, dataSetInput.DeepCopy()) + } + + fakeClient := fake.NewFakeClientWithScheme(testScheme, objs...) + testCases := []struct { + testName string + name string + namespace string + Type datav1alpha1.RuntimeConditionType + isErr bool + condtionLength int + deprecated bool + }{ + { + testName: "scaleout", + name: "hbase", + namespace: "fluid", + Type: datav1alpha1.RuntimeWorkerScaledOut, + isErr: false, + condtionLength: 3, + }, + { + testName: "scalein", + name: "hadoop", + namespace: "fluid", + Type: datav1alpha1.RuntimeWorkerScaledIn, + isErr: false, + condtionLength: 3, + }, + { + testName: "noscale", + name: "obj", + namespace: "fluid", + Type: "", + isErr: false, + condtionLength: 0, + }, { + testName: "deprecated", + name: "deprecated", + namespace: "fluid", + Type: "", + isErr: false, + condtionLength: 0, + deprecated: true, + }, + } + for _, testCase := range testCases { + engine := newJindoFSxEngineREP(fakeClient, testCase.name, testCase.namespace) + err := engine.SyncReplicas(cruntime.ReconcileRequestContext{ + Log: fake.NullLogger(), + Recorder: record.NewFakeRecorder(300), + }) + if err != nil { + t.Errorf("sync replicas failed,err:%s", err.Error()) + } + rt, _ := engine.getRuntime() + found := false + if testCase.deprecated { + break + } + + for _, cond := range rt.Status.Conditions { + + if cond.Type == testCase.Type { + found = true + break + } + } + + if !found && testCase.condtionLength > 0 { + t.Errorf("testCase: %s runtime condition want conditionType %v, got conditions %v", 
testCase.testName, testCase.Type, rt.Status.Conditions) + } + } +} diff --git a/pkg/ddc/jindofsx/runtime_info.go b/pkg/ddc/jindofsx/runtime_info.go new file mode 100644 index 00000000000..d1e335c3a31 --- /dev/null +++ b/pkg/ddc/jindofsx/runtime_info.go @@ -0,0 +1,74 @@ +/* +Copyright 2022 The Fluid Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package jindofsx + +import ( + "github.com/fluid-cloudnative/fluid/pkg/ddc/base" + "github.com/fluid-cloudnative/fluid/pkg/utils" + "github.com/fluid-cloudnative/fluid/pkg/utils/dataset/volume" +) + +// getRuntimeInfo gets runtime info +func (e *JindoFSxEngine) getRuntimeInfo() (base.RuntimeInfoInterface, error) { + if e.runtimeInfo == nil { + runtime, err := e.getRuntime() + if err != nil { + return e.runtimeInfo, err + } + e.runtimeInfo, err = base.BuildRuntimeInfo(e.name, e.namespace, e.runtimeType, runtime.Spec.TieredStore) + if err != nil { + return e.runtimeInfo, err + } + + // Setup Fuse Deploy Mode + e.runtimeInfo.SetupFuseDeployMode(runtime.Spec.Fuse.Global, runtime.Spec.Fuse.NodeSelector) + + // Check if the runtime is using deprecated labels + isLabelDeprecated, err := e.HasDeprecatedCommonLabelname() + if err != nil { + return e.runtimeInfo, err + } + e.runtimeInfo.SetDeprecatedNodeLabel(isLabelDeprecated) + + // Check if the runtime is using deprecated naming style for PersistentVolumes + isPVNameDeprecated, err := volume.HasDeprecatedPersistentVolumeName(e.Client, e.runtimeInfo, e.Log) + if err != nil { + return 
e.runtimeInfo, err + } + e.runtimeInfo.SetDeprecatedPVName(isPVNameDeprecated) + + e.Log.Info("Deprecation check finished", "isLabelDeprecated", e.runtimeInfo.IsDeprecatedNodeLabel(), "isPVNameDeprecated", e.runtimeInfo.IsDeprecatedPVName()) + + // Setup with Dataset Info + dataset, err := utils.GetDataset(e.Client, e.name, e.namespace) + if err != nil { + if utils.IgnoreNotFound(err) == nil { + e.Log.Info("Dataset is notfound", "name", e.name, "namespace", e.namespace) + return e.runtimeInfo, nil + } + + e.Log.Info("Failed to get dataset when getruntimeInfo") + return e.runtimeInfo, err + } + + e.runtimeInfo.SetupWithDataset(dataset) + + e.Log.Info("Setup with dataset done", "exclusive", e.runtimeInfo.IsExclusive()) + } + + return e.runtimeInfo, nil +} diff --git a/pkg/ddc/jindofsx/runtime_info_test.go b/pkg/ddc/jindofsx/runtime_info_test.go new file mode 100644 index 00000000000..800cb95ca2d --- /dev/null +++ b/pkg/ddc/jindofsx/runtime_info_test.go @@ -0,0 +1,166 @@ +/* +Copyright 2021 The Fluid Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package jindofsx + +import ( + "testing" + + datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1" + "github.com/fluid-cloudnative/fluid/pkg/ddc/base" + "github.com/fluid-cloudnative/fluid/pkg/utils/fake" + v1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func newJindoFSxEngineRT(client client.Client, name string, namespace string, withRuntimeInfo bool) *JindoFSxEngine { + runTimeInfo, _ := base.BuildRuntimeInfo(name, namespace, "GooseFS", datav1alpha1.TieredStore{}) + engine := &JindoFSxEngine{ + runtime: &datav1alpha1.JindoRuntime{}, + name: name, + namespace: namespace, + Client: client, + runtimeInfo: nil, + Log: fake.NullLogger(), + } + + if withRuntimeInfo { + engine.runtimeInfo = runTimeInfo + } + return engine +} + +func TestGetRuntimeInfo(t *testing.T) { + runtimeInputs := []*datav1alpha1.JindoRuntime{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase", + Namespace: "fluid", + }, + Spec: datav1alpha1.JindoRuntimeSpec{ + Fuse: datav1alpha1.JindoFuseSpec{ + Global: true, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hadoop", + Namespace: "fluid", + }, + Spec: datav1alpha1.JindoRuntimeSpec{ + Fuse: datav1alpha1.JindoFuseSpec{ + Global: false, + }, + }, + }, + } + daemonSetInputs := []*v1.DaemonSet{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase-worker", + Namespace: "fluid", + }, + Spec: v1.DaemonSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{NodeSelector: map[string]string{"data.fluid.io/storage-fluid-hbase": "selector"}}, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hadoop-worker", + Namespace: "fluid", + }, + Spec: v1.DaemonSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{NodeSelector: map[string]string{"data.fluid.io/storage-fluid-hadoop": "selector"}}, + }, + }, + }, + } + dataSetInputs := []*datav1alpha1.Dataset{ + { + 
ObjectMeta: metav1.ObjectMeta{ + Name: "hadoop", + Namespace: "fluid", + }, + }, + } + objs := []runtime.Object{} + for _, runtimeInput := range runtimeInputs { + objs = append(objs, runtimeInput.DeepCopy()) + } + for _, daemonSetInput := range daemonSetInputs { + objs = append(objs, daemonSetInput.DeepCopy()) + } + for _, dataSetInput := range dataSetInputs { + objs = append(objs, dataSetInput.DeepCopy()) + } + //scheme := runtime.NewScheme() + //scheme.AddKnownTypes(v1.SchemeGroupVersion, daemonSetWithSelector) + //scheme.AddKnownTypes(v1alpha1.GroupVersion,runtimeInput) + fakeClient := fake.NewFakeClientWithScheme(testScheme, objs...) + + testCases := []struct { + name string + namespace string + withRuntimeInfo bool + isErr bool + isNil bool + }{ + { + name: "hbase", + namespace: "fluid", + withRuntimeInfo: false, + isErr: false, + isNil: false, + }, + { + name: "hbase", + namespace: "fluid", + withRuntimeInfo: false, + isErr: false, + isNil: false, + }, + { + name: "hbase", + namespace: "fluid", + withRuntimeInfo: true, + isErr: false, + isNil: false, + }, + { + name: "hadoop", + namespace: "fluid", + withRuntimeInfo: false, + isErr: false, + isNil: false, + }, + } + for _, testCase := range testCases { + engine := newJindoFSxEngineRT(fakeClient, testCase.name, testCase.namespace, testCase.withRuntimeInfo) + runtimeInfo, err := engine.getRuntimeInfo() + isNil := runtimeInfo == nil + isErr := err != nil + if isNil != testCase.isNil { + t.Errorf(" want %t, got %t", testCase.isNil, isNil) + } + if isErr != testCase.isErr { + t.Errorf(" want %t, got %t", testCase.isErr, isErr) + } + } +} diff --git a/pkg/ddc/jindofsx/shutdown.go b/pkg/ddc/jindofsx/shutdown.go new file mode 100644 index 00000000000..80958d7eb3f --- /dev/null +++ b/pkg/ddc/jindofsx/shutdown.go @@ -0,0 +1,289 @@ +/* +Copyright 2022 The Fluid Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package jindofsx + +import ( + "context" + "fmt" + + "github.com/fluid-cloudnative/fluid/pkg/common" + "github.com/fluid-cloudnative/fluid/pkg/ddc/base/portallocator" + "github.com/fluid-cloudnative/fluid/pkg/utils/dataset/lifecycle" + "github.com/pkg/errors" + "k8s.io/client-go/util/retry" + + "github.com/fluid-cloudnative/fluid/pkg/utils" + "github.com/fluid-cloudnative/fluid/pkg/utils/helm" + "github.com/fluid-cloudnative/fluid/pkg/utils/kubeclient" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Shutdown shuts down the Jindo engine +func (e *JindoFSxEngine) Shutdown() (err error) { + + err = e.invokeCleanCache() + if err != nil { + return + } + + _, err = e.destroyWorkers(-1) + if err != nil { + return + } + + err = e.releasePorts() + if err != nil { + return + } + + err = e.destroyMaster() + if err != nil { + return + } + + err = e.cleanAll() + return err +} + +// destroyMaster destroys the master +func (e *JindoFSxEngine) destroyMaster() (err error) { + var found bool + found, err = helm.CheckRelease(e.name, e.namespace) + if err != nil { + return err + } + + if found { + err = helm.DeleteRelease(e.name, e.namespace) + if err != nil { + return + } + } + return +} + +func (e *JindoFSxEngine) releasePorts() (err error) { + var valueConfigMapname = e.name + "-jindofs-config" + + allocator, err := portallocator.GetRuntimePortAllocator() + if err != nil { + return errors.Wrap(err, "GetRuntimePortAllocator when releasePorts") + } + + cm, err := kubeclient.GetConfigmapByName(e.Client, 
valueConfigMapname, e.namespace) + if err != nil { + return errors.Wrap(err, "GetConfigmapByName when releasePorts") + } + + // The value configMap is not found + if cm == nil { + e.Log.Info("value configMap not found, there might be some unreleased ports", "valueConfigMapName", valueConfigMapname) + return nil + } + + portsToRelease, err := parsePortsFromConfigMap(cm) + if err != nil { + return errors.Wrap(err, "parsePortsFromConfigMap when releasePorts") + } + + allocator.ReleaseReservedPorts(portsToRelease) + return nil +} + +// cleanAll cleans up the all +func (e *JindoFSxEngine) cleanAll() (err error) { + count, err := e.Helper.CleanUpFuse() + if err != nil { + e.Log.Error(err, "Err in cleaning fuse") + return err + } + e.Log.Info("clean up fuse count", "n", count) + + err = e.cleanConfigMap() + if err != nil { + e.Log.Error(err, "Err in cleaning configMap") + return err + } + return +} + +// cleanConfigmap cleans up the configmaps, such as: +// {dataset name}-jindo-values, {dataset name}-jindofs-client-config, {dataset name}-jindofs-config +func (e *JindoFSxEngine) cleanConfigMap() (err error) { + var ( + valueConfigmapName = e.name + "-" + e.runtimeType + "-values" + configmapName = e.name + "-" + runtimeFSType + "-config" + clientConfigmapName = e.name + "-" + runtimeFSType + "-client-config" + namespace = e.namespace + ) + + cms := []string{valueConfigmapName, configmapName, clientConfigmapName} + + for _, cm := range cms { + err = kubeclient.DeleteConfigMap(e.Client, cm, namespace) + if err != nil { + return + } + } + + return nil +} + +// destroyWorkers will delete the workers by number of the workers, if workers is -1, it means all the workers are deleted +func (e *JindoFSxEngine) destroyWorkers(expectedWorkers int32) (currentWorkers int32, err error) { + // SchedulerMutex only for patch mode + lifecycle.SchedulerMutex.Lock() + defer lifecycle.SchedulerMutex.Unlock() + + runtimeInfo, err := e.getRuntimeInfo() + if err != nil { + return currentWorkers, 
err + } + + var ( + nodeList = &corev1.NodeList{} + labelExclusiveName = utils.GetExclusiveKey() + labelName = runtimeInfo.GetRuntimeLabelName() + labelCommonName = runtimeInfo.GetCommonLabelName() + labelMemoryName = runtimeInfo.GetLabelNameForMemory() + labelDiskName = runtimeInfo.GetLabelNameForDisk() + labelTotalName = runtimeInfo.GetLabelNameForTotal() + ) + + labelNames := []string{labelName, labelTotalName, labelDiskName, labelMemoryName, labelCommonName} + e.Log.Info("check node labels", "labelNames", labelNames) + datasetLabels, err := labels.Parse(fmt.Sprintf("%s=true", labelCommonName)) + if err != nil { + return + } + + err = e.List(context.TODO(), nodeList, &client.ListOptions{ + LabelSelector: datasetLabels, + }) + if err != nil { + return currentWorkers, err + } + + currentWorkers = int32(len(nodeList.Items)) + if expectedWorkers >= currentWorkers { + e.Log.Info("No need to scale in. Skip.") + return currentWorkers, nil + } + + var nodes []corev1.Node + if expectedWorkers >= 0 { + e.Log.Info("Scale in Jindo workers", "expectedWorkers", expectedWorkers) + // This is a scale in operation + runtimeInfo, err := e.getRuntimeInfo() + if err != nil { + e.Log.Error(err, "getRuntimeInfo when scaling in") + return currentWorkers, err + } + + fuseGlobal, _ := runtimeInfo.GetFuseDeployMode() + nodes, err = e.sortNodesToShutdown(nodeList.Items, fuseGlobal) + if err != nil { + return currentWorkers, err + } + + } else { + // Destroy all workers. 
This is a subprocess during deletion of JindoRuntime + nodes = nodeList.Items + } + + // 1.select the nodes + for _, node := range nodes { + if expectedWorkers == currentWorkers { + break + } + + if len(node.Labels) == 0 { + continue + } + + nodeName := node.Name + var labelsToModify common.LabelsToModify + err = retry.RetryOnConflict(retry.DefaultBackoff, func() error { + node, err := kubeclient.GetNode(e.Client, nodeName) + if err != nil { + e.Log.Error(err, "Fail to get node", "nodename", nodeName) + return err + } + + toUpdate := node.DeepCopy() + for _, label := range labelNames { + labelsToModify.Delete(label) + } + + exclusiveLabelValue := utils.GetExclusiveValue(e.namespace, e.name) + if val, exist := toUpdate.Labels[labelExclusiveName]; exist && val == exclusiveLabelValue { + labelsToModify.Delete(labelExclusiveName) + + } + + err = lifecycle.DecreaseDatasetNum(toUpdate, runtimeInfo, &labelsToModify) + if err != nil { + return err + } + + // Update the toUpdate in UPDATE mode + // modifiedLabels, err := utils.ChangeNodeLabelWithUpdateMode(e.Client, toUpdate, labelToModify) + // Update the toUpdate in PATCH mode + modifiedLabels, err := utils.ChangeNodeLabelWithPatchMode(e.Client, toUpdate, labelsToModify) + if err != nil { + return err + } + e.Log.Info("Destroy worker", "Dataset", e.name, "deleted worker node", node.Name, "removed or updated labels", modifiedLabels) + return nil + + }) + + if err != nil { + return currentWorkers, err + } + currentWorkers-- + } + + return currentWorkers, nil +} + +func (e *JindoFSxEngine) sortNodesToShutdown(candidateNodes []corev1.Node, fuseGlobal bool) (nodes []corev1.Node, err error) { + if !fuseGlobal { + // If fuses are deployed in non-global mode, workers and fuses will be scaled in together. + // It can be dangerous if we scale in nodes where there are pods using the related pvc. 
+ // So firstly we filter out such nodes + pvcMountNodes, err := kubeclient.GetPvcMountNodes(e.Client, e.name, e.namespace) + if err != nil { + e.Log.Error(err, "GetPvcMountNodes when scaling in") + return nil, err + } + + for _, node := range candidateNodes { + if _, found := pvcMountNodes[node.Name]; !found { + nodes = append(nodes, node) + } + } + } else { + // If fuses are deployed in global mode. Scaling in workers has nothing to do with fuses. + // All nodes with related label can be candidate nodes. + nodes = candidateNodes + } + // TODO support jindo calculate node usedCapacity + return nodes, nil +} diff --git a/pkg/ddc/jindofsx/shutdown_test.go b/pkg/ddc/jindofsx/shutdown_test.go new file mode 100644 index 00000000000..a6b691e511d --- /dev/null +++ b/pkg/ddc/jindofsx/shutdown_test.go @@ -0,0 +1,339 @@ +/* +Copyright 2022 The Fluid Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package jindofsx + +import ( + "reflect" + "testing" + + . 
"github.com/agiledragon/gomonkey" + datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1" + "github.com/fluid-cloudnative/fluid/pkg/ctrl" + "github.com/fluid-cloudnative/fluid/pkg/ddc/base" + "github.com/fluid-cloudnative/fluid/pkg/utils/fake" + "github.com/fluid-cloudnative/fluid/pkg/utils/kubeclient" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +var ( + testScheme *runtime.Scheme +) + +func init() { + testScheme = runtime.NewScheme() + _ = v1.AddToScheme(testScheme) + _ = datav1alpha1.AddToScheme(testScheme) + _ = appsv1.AddToScheme(testScheme) +} + +func TestDestroyWorker(t *testing.T) { + runtimeInfoSpark, err := base.BuildRuntimeInfo("spark", "fluid", "jindo", datav1alpha1.TieredStore{}) + if err != nil { + t.Errorf("fail to create the runtimeInfo with error %v", err) + } + runtimeInfoSpark.SetupWithDataset(&datav1alpha1.Dataset{ + Spec: datav1alpha1.DatasetSpec{PlacementMode: datav1alpha1.ExclusiveMode}, + }) + + runtimeInfoHadoop, err := base.BuildRuntimeInfo("hadoop", "fluid", "jindo", datav1alpha1.TieredStore{}) + if err != nil { + t.Errorf("fail to create the runtimeInfo with error %v", err) + } + runtimeInfoHadoop.SetupWithDataset(&datav1alpha1.Dataset{ + Spec: datav1alpha1.DatasetSpec{PlacementMode: datav1alpha1.ExclusiveMode}, + }) + nodeSelector := map[string]string{ + "node-select": "true", + } + runtimeInfoHadoop.SetupFuseDeployMode(true, nodeSelector) + + var nodeInputs = []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ // 里面只有fluid的spark + Name: "test-node-spark", + Labels: map[string]string{ + "fluid.io/dataset-num": "1", + "fluid.io/s-jindo-fluid-spark": "true", + "fluid.io/s-fluid-spark": "true", + "fluid.io/s-h-jindo-d-fluid-spark": "5B", + "fluid.io/s-h-jindo-m-fluid-spark": "1B", + "fluid.io/s-h-jindo-t-fluid-spark": "6B", + "fluid_exclusive": "fluid_spark", + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: 
"test-node-share", + Labels: map[string]string{ + "fluid.io/dataset-num": "2", + "fluid.io/s-jindo-fluid-hadoop": "true", + "fluid.io/s-fluid-hadoop": "true", + "fluid.io/s-h-jindo-d-fluid-hadoop": "5B", + "fluid.io/s-h-jindo-m-fluid-hadoop": "1B", + "fluid.io/s-h-jindo-t-fluid-hadoop": "6B", + "fluid.io/s-jindo-fluid-hbase": "true", + "fluid.io/s-fluid-hbase": "true", + "fluid.io/s-h-jindo-d-fluid-hbase": "5B", + "fluid.io/s-h-jindo-m-fluid-hbase": "1B", + "fluid.io/s-h-jindo-t-fluid-hbase": "6B", + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-node-hadoop", + Labels: map[string]string{ + "fluid.io/dataset-num": "1", + "fluid.io/s-jindo-fluid-hadoop": "true", + "fluid.io/s-fluid-hadoop": "true", + "fluid.io/s-h-jindo-d-fluid-hadoop": "5B", + "fluid.io/s-h-jindo-m-fluid-hadoop": "1B", + "fluid.io/s-h-jindo-t-fluid-hadoop": "6B", + "node-select": "true", + }, + }, + }, + } + + testNodes := []runtime.Object{} + for _, nodeInput := range nodeInputs { + testNodes = append(testNodes, nodeInput.DeepCopy()) + } + + client := fake.NewFakeClientWithScheme(testScheme, testNodes...) 
+ + var testCase = []struct { + expectedWorkers int32 + runtimeInfo base.RuntimeInfoInterface + wantedNodeNumber int32 + wantedNodeLabels map[string]map[string]string + }{ + { + expectedWorkers: -1, + runtimeInfo: runtimeInfoSpark, + wantedNodeNumber: 0, + wantedNodeLabels: map[string]map[string]string{ + "test-node-spark": {}, + "test-node-share": { + "fluid.io/dataset-num": "2", + "fluid.io/s-jindo-fluid-hadoop": "true", + "fluid.io/s-fluid-hadoop": "true", + "fluid.io/s-h-jindo-d-fluid-hadoop": "5B", + "fluid.io/s-h-jindo-m-fluid-hadoop": "1B", + "fluid.io/s-h-jindo-t-fluid-hadoop": "6B", + "fluid.io/s-jindo-fluid-hbase": "true", + "fluid.io/s-fluid-hbase": "true", + "fluid.io/s-h-jindo-d-fluid-hbase": "5B", + "fluid.io/s-h-jindo-m-fluid-hbase": "1B", + "fluid.io/s-h-jindo-t-fluid-hbase": "6B", + }, + "test-node-hadoop": { + "fluid.io/dataset-num": "1", + "fluid.io/s-jindo-fluid-hadoop": "true", + "fluid.io/s-fluid-hadoop": "true", + "fluid.io/s-h-jindo-d-fluid-hadoop": "5B", + "fluid.io/s-h-jindo-m-fluid-hadoop": "1B", + "fluid.io/s-h-jindo-t-fluid-hadoop": "6B", + "node-select": "true", + }, + }, + }, + { + expectedWorkers: -1, + runtimeInfo: runtimeInfoHadoop, + wantedNodeNumber: 0, + wantedNodeLabels: map[string]map[string]string{ + "test-node-spark": {}, + "test-node-share": { + "fluid.io/dataset-num": "1", + "fluid.io/s-jindo-fluid-hbase": "true", + "fluid.io/s-fluid-hbase": "true", + "fluid.io/s-h-jindo-d-fluid-hbase": "5B", + "fluid.io/s-h-jindo-m-fluid-hbase": "1B", + "fluid.io/s-h-jindo-t-fluid-hbase": "6B", + }, + "test-node-hadoop": { + "node-select": "true", + }, + }, + }, + } + for _, test := range testCase { + engine := &JindoFSxEngine{Log: fake.NullLogger(), runtimeInfo: test.runtimeInfo} + engine.Client = client + engine.name = test.runtimeInfo.GetName() + engine.namespace = test.runtimeInfo.GetNamespace() + if err != nil { + t.Errorf("fail to exec the function with the error %v", err) + } + currentWorkers, err := 
engine.destroyWorkers(test.expectedWorkers) + if err != nil { + t.Errorf("fail to exec the function with the error %v", err) + } + if currentWorkers != test.wantedNodeNumber { + t.Errorf("shutdown the worker with the wrong number of the workers") + } + for _, node := range nodeInputs { + newNode, err := kubeclient.GetNode(client, node.Name) + if err != nil { + t.Errorf("fail to get the node with the error %v", err) + } + + if len(newNode.Labels) != len(test.wantedNodeLabels[node.Name]) { + t.Errorf("fail to decrease the labels") + } + if len(newNode.Labels) != 0 && !reflect.DeepEqual(newNode.Labels, test.wantedNodeLabels[node.Name]) { + t.Errorf("fail to decrease the labels") + } + } + + } +} + +func TestCleanConfigmap(t *testing.T) { + + namespace := "default" + runtimeType := "jindo" + + configMapInputs := []*v1.ConfigMap{ + { + ObjectMeta: metav1.ObjectMeta{Name: "hbase-alluxio-values", Namespace: namespace}, + Data: map[string]string{ + "data": "image: fluid\nimageTag: 0.6.0", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "hbase-alluxio-config", Namespace: namespace}, + Data: map[string]string{}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "spark-alluxio-values", Namespace: namespace}, + Data: map[string]string{ + "test-data": "image: fluid\n imageTag: 0.6.0", + }, + }, { + ObjectMeta: metav1.ObjectMeta{Name: "hadoop-alluxio-config", Namespace: namespace}, + }, + } + + testConfigMaps := []runtime.Object{} + for _, cm := range configMapInputs { + testConfigMaps = append(testConfigMaps, cm.DeepCopy()) + } + + client := fake.NewFakeClientWithScheme(testScheme, testConfigMaps...) 
+ type args struct { + name string + namespace string + } + tests := []struct { + name string + args args + }{ + { + name: "ConfigMap doesn't exist", + args: args{ + name: "notExist", + namespace: namespace, + }, + }, + { + name: "ConfigMap value exists", + args: args{ + name: "test1", + namespace: namespace, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + engine := &JindoFSxEngine{ + Log: fake.NullLogger(), + name: tt.args.name, + namespace: tt.args.namespace, + runtimeType: runtimeType, + Client: client} + err := engine.cleanConfigMap() + if err != nil { + t.Errorf("fail to clean configmap due to %v", err) + } + }) + } +} + +func TestCleanAll(t *testing.T) { + var nodeInputs = []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "no-fuse", + Labels: map[string]string{}, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fuse", + Labels: map[string]string{ + "fluid.io/f-jindo-fluid-hadoop": "true", + "node-select": "true", + "fluid.io/f-jindo-fluid-hbase": "true", + "fluid.io/s-fluid-hbase": "true", + "fluid.io/s-h-jindo-d-fluid-hbase": "5B", + "fluid.io/s-h-jindo-m-fluid-hbase": "1B", + "fluid.io/s-h-jindo-t-fluid-hbase": "6B", + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "multiple-fuse", + Labels: map[string]string{ + "fluid.io/dataset-num": "1", + "fluid.io/f-jindo-fluid-hadoop": "true", + "fluid.io/f-jindo-fluid-hadoop-1": "true", + "node-select": "true", + }, + }, + }, + } + + testNodes := []runtime.Object{} + for _, nodeInput := range nodeInputs { + testNodes = append(testNodes, nodeInput.DeepCopy()) + } + + client := fake.NewFakeClientWithScheme(testScheme, testNodes...) 
+ + helper := &ctrl.Helper{} + patch1 := ApplyMethod(reflect.TypeOf(helper), "CleanUpFuse", func(_ *ctrl.Helper) (int, error) { + return 0, nil + }) + defer patch1.Reset() + + engine := &JindoFSxEngine{Log: fake.NullLogger()} + engine.Client = client + engine.name = "fluid-hadoop" + engine.namespace = "default" + err := engine.cleanAll() + if err != nil { + t.Errorf("failed to cleanAll due to %v", err) + } + +} diff --git a/pkg/ddc/jindofsx/status.go b/pkg/ddc/jindofsx/status.go new file mode 100644 index 00000000000..cc2eea19f57 --- /dev/null +++ b/pkg/ddc/jindofsx/status.go @@ -0,0 +1,136 @@ +/* +Copyright 2022 The Fluid Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package jindofsx + +import ( + "context" + "reflect" + "time" + + data "github.com/fluid-cloudnative/fluid/api/v1alpha1" + "github.com/fluid-cloudnative/fluid/pkg/common" + "github.com/fluid-cloudnative/fluid/pkg/ctrl" + fluiderrs "github.com/fluid-cloudnative/fluid/pkg/errors" + "github.com/fluid-cloudnative/fluid/pkg/utils" + "github.com/fluid-cloudnative/fluid/pkg/utils/kubeclient" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/retry" +) + +// CheckAndUpdateRuntimeStatus checks the related runtime status and updates it. 
+func (e *JindoFSxEngine) CheckAndUpdateRuntimeStatus() (ready bool, err error) { + defer utils.TimeTrack(time.Now(), "JindoFSxEngine.CheckAndUpdateRuntimeStatus", "name", e.name, "namespace", e.namespace) + var ( + masterReady, workerReady bool + masterName string = e.getMasterName() + workerName string = e.getWorkerName() + namespace string = e.namespace + ) + + // 1. Master should be ready + master, err := kubeclient.GetStatefulSet(e.Client, masterName, namespace) + if err != nil { + return ready, err + } + + // 2. Worker should be ready + workers, err := ctrl.GetWorkersAsStatefulset(e.Client, + types.NamespacedName{Namespace: e.namespace, Name: workerName}) + if err != nil { + if fluiderrs.IsDeprecated(err) { + e.Log.Info("Warning: Deprecated mode is not support, so skip handling", "details", err) + return ready, nil + } + return ready, err + } + + err = retry.RetryOnConflict(retry.DefaultBackoff, func() error { + runtime, err := e.getRuntime() + if err != nil { + return err + } + + runtimeToUpdate := runtime.DeepCopy() + // if reflect.DeepEqual(runtime.Status, runtimeToUpdate.Status) { + // e.Log.V(1).Info("The runtime is equal after deepcopy") + // } + + states, err := e.queryCacheStatus() + if err != nil { + return err + } + + // 0. 
Update the cache status + // runtimeToUpdate.Status.CacheStates[data.Cacheable] = states.cacheable + if len(runtime.Status.CacheStates) == 0 { + runtimeToUpdate.Status.CacheStates = map[common.CacheStateName]string{} + } + + runtimeToUpdate.Status.CacheStates[common.CacheCapacity] = states.cacheCapacity + runtimeToUpdate.Status.CacheStates[common.CachedPercentage] = states.cachedPercentage + runtimeToUpdate.Status.CacheStates[common.Cached] = states.cached + + runtimeToUpdate.Status.CurrentMasterNumberScheduled = int32(master.Status.Replicas) + runtimeToUpdate.Status.MasterNumberReady = int32(master.Status.ReadyReplicas) + + if *master.Spec.Replicas == master.Status.ReadyReplicas { + runtimeToUpdate.Status.MasterPhase = data.RuntimePhaseReady + masterReady = true + } else { + runtimeToUpdate.Status.MasterPhase = data.RuntimePhaseNotReady + } + + runtimeToUpdate.Status.WorkerNumberReady = int32(workers.Status.ReadyReplicas) + runtimeToUpdate.Status.WorkerNumberUnavailable = int32(*workers.Spec.Replicas - workers.Status.ReadyReplicas) + runtimeToUpdate.Status.WorkerNumberAvailable = int32(workers.Status.CurrentReplicas) + if workers.Status.ReadyReplicas > 0 { + if runtime.Replicas() == workers.Status.ReadyReplicas { + runtimeToUpdate.Status.WorkerPhase = data.RuntimePhaseReady + workerReady = true + } else if workers.Status.ReadyReplicas >= 1 { + runtimeToUpdate.Status.WorkerPhase = data.RuntimePhasePartialReady + workerReady = true + } + } else { + runtimeToUpdate.Status.WorkerPhase = data.RuntimePhaseNotReady + } + + if masterReady && workerReady { + ready = true + } + + if !reflect.DeepEqual(runtime.Status, runtimeToUpdate.Status) { + err = e.Client.Status().Update(context.TODO(), runtimeToUpdate) + } else { + e.Log.Info("Do nothing because the runtime status is not changed.") + } + + return err + }) + + if err != nil { + _ = utils.LoggingErrorExceptConflict(e.Log, + err, + "Failed to update the runtime", + types.NamespacedName{ + Namespace: e.namespace, + Name: 
e.name, + }) + } + + return +} diff --git a/pkg/ddc/jindofsx/status_test.go b/pkg/ddc/jindofsx/status_test.go new file mode 100644 index 00000000000..52dd73ea85e --- /dev/null +++ b/pkg/ddc/jindofsx/status_test.go @@ -0,0 +1,209 @@ +/* +Copyright 2021 The Fluid Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package jindofsx + +import ( + "testing" + + datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1" + "github.com/fluid-cloudnative/fluid/pkg/utils" + "github.com/fluid-cloudnative/fluid/pkg/utils/fake" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +func TestCheckAndUpdateRuntimeStatus(t *testing.T) { + + masterInputs := []*appsv1.StatefulSet{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hadoop-master", + Namespace: "fluid", + }, + Status: appsv1.StatefulSetStatus{ + ReadyReplicas: 0, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase-master", + Namespace: "fluid", + }, + Status: appsv1.StatefulSetStatus{ + ReadyReplicas: 1, + }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Name: "deprecated-jindofs-master", + Namespace: "fluid", + }, + Status: appsv1.StatefulSetStatus{ + ReadyReplicas: 1, + }, + }, + } + + var deprecatedWorkerInputs = []appsv1.DaemonSet{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "deprecated-jindofs-worker", + Namespace: "fluid", + }, + }, + } + + var workerInputs = []appsv1.StatefulSet{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: 
"hbase-worker", + Namespace: "fluid", + }, + Status: appsv1.StatefulSetStatus{ + Replicas: 3, + ReadyReplicas: 3, + }, + }, + } + + runtimeInputs := []*datav1alpha1.JindoRuntime{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase", + Namespace: "fluid", + }, + Spec: datav1alpha1.JindoRuntimeSpec{ + Replicas: 3, // 2 + }, + Status: datav1alpha1.RuntimeStatus{ + CurrentWorkerNumberScheduled: 2, + CurrentMasterNumberScheduled: 2, // 0 + CurrentFuseNumberScheduled: 2, + DesiredMasterNumberScheduled: 3, + DesiredWorkerNumberScheduled: 2, + DesiredFuseNumberScheduled: 3, + Conditions: []datav1alpha1.RuntimeCondition{ + utils.NewRuntimeCondition(datav1alpha1.RuntimeWorkersInitialized, datav1alpha1.RuntimeWorkersInitializedReason, "The workers are initialized.", v1.ConditionTrue), + utils.NewRuntimeCondition(datav1alpha1.RuntimeFusesInitialized, datav1alpha1.RuntimeFusesInitializedReason, "The fuses are initialized.", v1.ConditionTrue), + }, + WorkerPhase: "NotReady", + FusePhase: "NotReady", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hadoop", + Namespace: "fluid", + }, + Spec: datav1alpha1.JindoRuntimeSpec{ + Replicas: 2, + }, + Status: datav1alpha1.RuntimeStatus{ + CurrentWorkerNumberScheduled: 3, + CurrentMasterNumberScheduled: 3, + CurrentFuseNumberScheduled: 3, + DesiredMasterNumberScheduled: 2, + DesiredWorkerNumberScheduled: 3, + DesiredFuseNumberScheduled: 2, + Conditions: []datav1alpha1.RuntimeCondition{ + utils.NewRuntimeCondition(datav1alpha1.RuntimeWorkersInitialized, datav1alpha1.RuntimeWorkersInitializedReason, "The workers are initialized.", v1.ConditionTrue), + utils.NewRuntimeCondition(datav1alpha1.RuntimeFusesInitialized, datav1alpha1.RuntimeFusesInitializedReason, "The fuses are initialized.", v1.ConditionTrue), + }, + WorkerPhase: "NotReady", + FusePhase: "NotReady", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "obj", + Namespace: "fluid", + }, + Spec: datav1alpha1.JindoRuntimeSpec{ + Replicas: 2, + }, + Status: 
datav1alpha1.RuntimeStatus{ + CurrentWorkerNumberScheduled: 2, + CurrentMasterNumberScheduled: 2, + CurrentFuseNumberScheduled: 2, + DesiredMasterNumberScheduled: 2, + DesiredWorkerNumberScheduled: 2, + DesiredFuseNumberScheduled: 2, + WorkerPhase: "NotReady", + FusePhase: "NotReady", + }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Name: "deprecated", + Namespace: "fluid", + }, + Spec: datav1alpha1.JindoRuntimeSpec{ + Replicas: 2, + }, + Status: datav1alpha1.RuntimeStatus{ + CurrentWorkerNumberScheduled: 2, + CurrentMasterNumberScheduled: 2, + CurrentFuseNumberScheduled: 2, + DesiredMasterNumberScheduled: 2, + DesiredWorkerNumberScheduled: 2, + DesiredFuseNumberScheduled: 2, + WorkerPhase: "NotReady", + FusePhase: "NotReady", + }, + }, + } + + objs := []runtime.Object{} + for _, masterInput := range masterInputs { + objs = append(objs, masterInput.DeepCopy()) + } + + for _, workerInput := range workerInputs { + objs = append(objs, workerInput.DeepCopy()) + } + + for _, runtimeInput := range runtimeInputs { + objs = append(objs, runtimeInput.DeepCopy()) + } + + for _, deprecatedWorkerInput := range deprecatedWorkerInputs { + objs = append(objs, deprecatedWorkerInput.DeepCopy()) + } + fakeClient := fake.NewFakeClientWithScheme(testScheme, objs...) 
+ // engine := newJindoFSxEngineREP(fakeClient, testCase.name, testCase.namespace) + + testCases := []struct { + testName string + name string + namespace string + isErr bool + deprecated bool + }{ + {testName: "deprecated", + name: "deprecated", + namespace: "fluid"}, + } + + for _, testCase := range testCases { + engine := newJindoFSxEngineREP(fakeClient, testCase.name, testCase.namespace) + + _, err := engine.CheckAndUpdateRuntimeStatus() + if err != nil { + t.Errorf("testcase %s Failed due to %v", testCase.testName, err) + } + } +} diff --git a/pkg/ddc/jindofsx/transform.go b/pkg/ddc/jindofsx/transform.go new file mode 100644 index 00000000000..7f97dfdaf1b --- /dev/null +++ b/pkg/ddc/jindofsx/transform.go @@ -0,0 +1,661 @@ +/* +Copyright 2022 The Fluid Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package jindofsx + +import ( + "fmt" + "github.com/fluid-cloudnative/fluid/pkg/utils/kubeclient" + "os" + "regexp" + "strconv" + "strings" + + datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1" + "github.com/fluid-cloudnative/fluid/pkg/common" + "github.com/fluid-cloudnative/fluid/pkg/ddc/base/portallocator" + "github.com/fluid-cloudnative/fluid/pkg/utils" + "github.com/fluid-cloudnative/fluid/pkg/utils/docker" + "github.com/fluid-cloudnative/fluid/pkg/utils/transfromer" + corev1 "k8s.io/api/core/v1" +) + +func (e *JindoFSxEngine) transform(runtime *datav1alpha1.JindoRuntime) (value *Jindo, err error) { + if runtime == nil { + err = fmt.Errorf("the jindoRuntime is null") + return + } + + if len(runtime.Spec.TieredStore.Levels) == 0 { + err = fmt.Errorf("the TieredStore is null") + return + } + + dataset, err := utils.GetDataset(e.Client, e.name, e.namespace) + if err != nil { + return + } + + var cachePaths []string // /mnt/disk1/bigboot or /mnt/disk1/bigboot,/mnt/disk2/bigboot + stroagePath := runtime.Spec.TieredStore.Levels[0].Path + originPath := strings.Split(stroagePath, ",") + for _, value := range originPath { + cachePaths = append(cachePaths, strings.TrimRight(value, "/")+"/"+ + e.namespace+"/"+e.name+"/bigboot") + } + metaPath := cachePaths[0] + dataPath := strings.Join(cachePaths, ",") + + var userSetQuota []string // 1Gi or 1Gi,2Gi,3Gi + if runtime.Spec.TieredStore.Levels[0].Quota != nil { + userSetQuota = append(userSetQuota, utils.TransformQuantityToJindoUnit(runtime.Spec.TieredStore.Levels[0].Quota)) + } + + if runtime.Spec.TieredStore.Levels[0].QuotaList != "" { + quotaList := runtime.Spec.TieredStore.Levels[0].QuotaList + quotas := strings.Split(quotaList, ",") + if len(quotas) != len(originPath) { + err = fmt.Errorf("the num of cache path and quota must be equal") + return + } + for _, value := range quotas { + if strings.HasSuffix(value, "Gi") { + value = strings.ReplaceAll(value, "Gi", "g") + } + userSetQuota = 
append(userSetQuota, value) + } + } + userQuotas := strings.Join(userSetQuota, ",") // 1g or 1g,2g + + jindoSmartdataImage, smartdataTag, dnsServer := e.getSmartDataConfigs() + jindoFuseImage, fuseTag := e.parseFuseImage() + + value = &Jindo{ + Image: jindoSmartdataImage, + ImageTag: smartdataTag, + ImagePullPolicy: "Always", + FuseImage: jindoFuseImage, + FuseImageTag: fuseTag, + User: 0, + Group: 0, + FsGroup: 0, + UseHostNetwork: true, + UseHostPID: true, + Properties: e.transformPriority(metaPath), + Master: Master{ + ReplicaCount: e.transformReplicasCount(runtime), + NodeSelector: e.transformMasterSelector(runtime), + }, + Worker: Worker{ + NodeSelector: e.transformNodeSelector(runtime), + }, + Fuse: Fuse{ + Args: e.transformFuseArg(runtime, dataset), + HostPath: e.getHostMountPoint(), + }, + Mounts: Mounts{ + Master: e.transformMasterMountPath(metaPath), + WorkersAndClients: e.transformWorkerMountPath(originPath), + }, + Owner: transfromer.GenerateOwnerReferenceFromObject(runtime), + } + err = e.transformHadoopConfig(runtime, value) + if err != nil { + return + } + err = e.allocatePorts(value) + if err != nil { + return + } + e.transformNetworkMode(runtime, value) + e.transformFuseNodeSelector(runtime, value) + e.transformSecret(runtime, value) + e.transformToken(runtime, value) + err = e.transformMaster(runtime, metaPath, value, dataset) + if err != nil { + return + } + e.transformWorker(runtime, dataPath, userQuotas, value) + e.transformFuse(runtime, value) + e.transformInitPortCheck(value) + e.transformLabels(runtime, value) + e.transformPlacementMode(dataset, value) + e.transformRunAsUser(runtime, value) + e.transformTolerations(dataset, runtime, value) + e.transformResources(runtime, value) + e.transformLogConfig(runtime, value) + value.Master.DnsServer = dnsServer + value.Master.NameSpace = e.namespace + value.Fuse.MountPath = JINDO_FUSE_MONNTPATH + return value, err +} + +func (e *JindoFSxEngine) transformMaster(runtime *datav1alpha1.JindoRuntime, 
metaPath string, value *Jindo, dataset *datav1alpha1.Dataset) (err error) { + properties := map[string]string{ + "namespace.cluster.id": "local", + "namespace.oss.copy.size": "1073741824", + "namespace.filelet.threads": "10", + "namespace.blocklet.threads": "10", + "namespace.long-running.threads": "4", + "namespace.filelet.cache.size": "100000", + "namespace.blocklet.cache.size": "1000000", + "namespace.filelet.atime.enable": "false", + "namespace.permission.root.inode.perm.bits": "511", + "namespace.delete.scan.interval.second": "20", + "namespace.delete.scan.batch.size": "5000", + "namespace.backend.type": "rocksdb", + } + if value.Master.ReplicaCount == 3 { + properties["namespace.backend.type"] = "raft" + } + properties["namespace.rpc.port"] = strconv.Itoa(value.Master.Port.Rpc) + properties["namespace.meta-dir"] = metaPath + "/server" + // combine properties together + if len(runtime.Spec.Master.Properties) > 0 { + for k, v := range runtime.Spec.Master.Properties { + properties[k] = v + } + } + value.Master.MasterProperties = properties + // to set filestore properties with confvalue + propertiesFileStore := map[string]string{} + + for _, mount := range dataset.Spec.Mounts { + if !strings.HasSuffix(mount.MountPoint, "/") { + mount.MountPoint = mount.MountPoint + "/" + } + // support nas storage + if strings.HasPrefix(mount.MountPoint, "local:///") { + value.Mounts.Master[mount.Name] = mount.MountPoint[8:] + value.Mounts.WorkersAndClients[mount.Name] = mount.MountPoint[8:] + continue + } + + // TODO support s3 and cos storage + if strings.HasPrefix(mount.MountPoint, "oss://") { + var re = regexp.MustCompile(`(oss://(.*?))(/)`) + rm := re.FindStringSubmatch(mount.MountPoint) + if len(rm) < 3 { + err = fmt.Errorf("incorrect oss mountPoint with %v, please check your path is dir or file ", mount.MountPoint) + e.Log.Error(err, "mount.MountPoint", mount.MountPoint) + return + } + bucketName := rm[2] + if mount.Options["fs.oss.accessKeyId"] != "" { + 
propertiesFileStore["jindofsx.oss.bucket."+bucketName+".accessKeyId"] = mount.Options["fs.oss.accessKeyId"] + } + if mount.Options["fs.oss.accessKeySecret"] != "" { + propertiesFileStore["jindofsx.oss.bucket."+bucketName+".accessKeySecret"] = mount.Options["fs.oss.accessKeySecret"] + } + if mount.Options["fs.oss.endpoint"] == "" { + err = fmt.Errorf("oss endpoint can not be null, please check option") + e.Log.Error(err, "oss endpoint can not be null") + return + } + propertiesFileStore["jindofsx.oss.bucket."+bucketName+".endpoint"] = mount.Options["fs.oss.endpoint"] + if strings.Contains(mount.Options["fs.oss.endpoint"], "dls") { + propertiesFileStore["jindofsx.oss.bucket."+bucketName+".data.lake.storage.enable"] = "true" + } + } + // to check whether encryptOptions exist + for _, encryptOption := range mount.EncryptOptions { + key := encryptOption.Name + secretKeyRef := encryptOption.ValueFrom.SecretKeyRef + secret, err := kubeclient.GetSecret(e.Client, secretKeyRef.Name, e.namespace) + if err != nil { + e.Log.Info("can't get the secret") + break + } + value := secret.Data[secretKeyRef.Key] + if err != nil { + e.Log.Info("decode value failed") + } + if key == "fs.oss.accessKeyId" { + propertiesFileStore["jindofsx.oss.accessKeyId"] = string(value) + } + if key == "fs.oss.accessKeySecret" { + propertiesFileStore["jindofsx.oss.accessKeySecret"] = string(value) + } + e.Log.Info("Get Credential From Secret Successfully") + } + } + value.Master.FileStoreProperties = propertiesFileStore + + return nil +} + +func (e *JindoFSxEngine) transformWorker(runtime *datav1alpha1.JindoRuntime, dataPath string, userQuotas string, value *Jindo) { + + properties := map[string]string{ + "storage.cluster.id": "local", + "storage.compaction.enable": "true", + "storage.compaction.period.minute": "2", + "storage.maintainence.period.minute": "2", + "storage.compaction.threshold": "16", + "storage.cache.filelet.worker.threads": "200", + "storage.address": "localhost", + } + + if 
e.getTieredStoreType(runtime) == 0 { + // MEM + properties["storage.ram.cache.size"] = userQuotas + //properties["storage.ram.cache.size"] = "90g" + + properties["storage.slicelet.buffer.size"] = userQuotas + //properties["storage.slicelet.buffer.size"] = "90g" + } + + properties["storage.rpc.port"] = strconv.Itoa(value.Worker.Port.Rpc) + + properties["storage.data-dirs"] = dataPath + //properties["storage.data-dirs"] = "/mnt/disk1/bigboot, /mnt/disk2/bigboot, /mnt/disk3/bigboot" + + if len(runtime.Spec.TieredStore.Levels) == 0 { + properties["storage.watermark.high.ratio"] = "0.8" + } else { + properties["storage.watermark.high.ratio"] = runtime.Spec.TieredStore.Levels[0].High + } + + if len(runtime.Spec.TieredStore.Levels) == 0 { + properties["storage.watermark.low.ratio"] = "0.6" + } else { + properties["storage.watermark.low.ratio"] = runtime.Spec.TieredStore.Levels[0].Low + } + + properties["storage.data-dirs.capacities"] = userQuotas + ///properties["storage.data-dirs.capacities"] = "80g,80g,80g" + + if len(runtime.Spec.Worker.Properties) > 0 { + for k, v := range runtime.Spec.Worker.Properties { + properties[k] = v + } + } + value.Worker.WorkerProperties = properties +} + +func (e *JindoFSxEngine) transformResources(runtime *datav1alpha1.JindoRuntime, value *Jindo) { + + if runtime.Spec.Master.Resources.Limits != nil { + e.Log.Info("setting Resources limit") + if runtime.Spec.Master.Resources.Limits.Cpu() != nil { + value.Master.Resources.Limits.CPU = runtime.Spec.Master.Resources.Limits.Cpu().String() + } + if runtime.Spec.Master.Resources.Limits.Memory() != nil { + value.Master.Resources.Limits.Memory = runtime.Spec.Master.Resources.Limits.Memory().String() + } + } + + if runtime.Spec.Master.Resources.Requests != nil { + e.Log.Info("setting Resources request") + if runtime.Spec.Master.Resources.Requests.Cpu() != nil { + value.Master.Resources.Requests.CPU = runtime.Spec.Master.Resources.Requests.Cpu().String() + } + if 
runtime.Spec.Master.Resources.Requests.Memory() != nil { + value.Master.Resources.Requests.Memory = runtime.Spec.Master.Resources.Requests.Memory().String() + } + } + + if runtime.Spec.Fuse.Resources.Limits != nil { + e.Log.Info("setting Resources limit") + if runtime.Spec.Fuse.Resources.Limits.Cpu() != nil { + value.Fuse.Resources.Limits.CPU = runtime.Spec.Fuse.Resources.Limits.Cpu().String() + } + if runtime.Spec.Fuse.Resources.Limits.Memory() != nil { + value.Fuse.Resources.Limits.Memory = runtime.Spec.Fuse.Resources.Limits.Memory().String() + } + } + + if runtime.Spec.Fuse.Resources.Requests != nil { + e.Log.Info("setting Resources request") + if runtime.Spec.Fuse.Resources.Requests.Cpu() != nil { + value.Fuse.Resources.Requests.CPU = runtime.Spec.Fuse.Resources.Requests.Cpu().String() + } + if runtime.Spec.Fuse.Resources.Requests.Memory() != nil { + value.Fuse.Resources.Requests.Memory = runtime.Spec.Fuse.Resources.Requests.Memory().String() + } + } + + if runtime.Spec.Worker.Resources.Limits != nil { + e.Log.Info("setting Resources limit") + if runtime.Spec.Worker.Resources.Limits.Cpu() != nil { + value.Worker.Resources.Limits.CPU = runtime.Spec.Worker.Resources.Limits.Cpu().String() + } + if runtime.Spec.Worker.Resources.Limits.Memory() != nil { + value.Worker.Resources.Limits.Memory = runtime.Spec.Worker.Resources.Limits.Memory().String() + } + } + + if runtime.Spec.Worker.Resources.Requests != nil { + e.Log.Info("setting Resources request") + if runtime.Spec.Worker.Resources.Requests.Cpu() != nil { + value.Worker.Resources.Requests.CPU = runtime.Spec.Worker.Resources.Requests.Cpu().String() + } + if runtime.Spec.Worker.Resources.Requests.Memory() != nil { + value.Worker.Resources.Requests.Memory = runtime.Spec.Worker.Resources.Requests.Memory().String() + } + } +} + +func (e *JindoFSxEngine) transformFuse(runtime *datav1alpha1.JindoRuntime, value *Jindo) { + // default enable data-cache and disable meta-cache + properties := map[string]string{ + 
"fs.jindofsx.request.user": "root", + "fs.jindofsx.data.cache.enable": "true", + "fs.jindofsx.meta.cache.enable": "false", + "fs.jindofsx.tmp.data.dir": "/tmp", + "fs.jindofsx.client.metrics.enable": "true", + } + + for k, v := range value.Master.FileStoreProperties { + // to transform jindofsx.oss.bucket to fs.jindofsx.oss.bucket + properties[strings.Replace(k, "jindofsx", "fs", 1)] = v + } + + // "client.storage.rpc.port": "6101", + properties["fs.jindofsx.storage.rpc.port"] = strconv.Itoa(value.Worker.Port.Rpc) + + if e.getTieredStoreType(runtime) == 0 { + // MEM + properties["fs.jindofsx.ram.cache.enable"] = "true" + } else if e.getTieredStoreType(runtime) == 1 || e.getTieredStoreType(runtime) == 2 { + // HDD and SSD + properties["fs.jindofsx.ram.cache.enable"] = "false" + } + // set secret + if len(runtime.Spec.Secret) != 0 { + properties["fs.oss.credentials.provider"] = "com.aliyun.jindodata.oss.auth.CustomCredentialsProvider" + properties["aliyun.oss.provider.url"] = "secrets:///token/" + properties["fs.oss.provider.endpoint"] = "secrets:///token/" + } + + if len(runtime.Spec.Fuse.Properties) > 0 { + for k, v := range runtime.Spec.Fuse.Properties { + properties[k] = v + } + } + value.Fuse.FuseProperties = properties + + // set critical fuse pod to avoid eviction + value.Fuse.CriticalPod = common.CriticalFusePodEnabled() +} + +func (e *JindoFSxEngine) transformLogConfig(runtime *datav1alpha1.JindoRuntime, value *Jindo) { + if len(runtime.Spec.LogConfig) > 0 { + value.LogConfig = runtime.Spec.LogConfig + } else { + value.LogConfig = map[string]string{} + } +} + +func (e *JindoFSxEngine) transformFuseNodeSelector(runtime *datav1alpha1.JindoRuntime, value *Jindo) { + if len(runtime.Spec.Fuse.NodeSelector) > 0 { + value.Fuse.NodeSelector = runtime.Spec.Fuse.NodeSelector + } else { + value.Fuse.NodeSelector = map[string]string{} + } + + // The label will be added by CSI Plugin when any workload pod is scheduled on the node. 
+ value.Fuse.NodeSelector[e.getFuseLabelname()] = "true" +} + +func (e *JindoFSxEngine) transformNodeSelector(runtime *datav1alpha1.JindoRuntime) map[string]string { + properties := map[string]string{} + if runtime.Spec.Worker.NodeSelector != nil { + properties = runtime.Spec.Worker.NodeSelector + } + return properties +} + +func (e *JindoFSxEngine) transformReplicasCount(runtime *datav1alpha1.JindoRuntime) int { + if runtime.Spec.Master.Replicas == JINDO_HA_MASTERNUM { + return JINDO_HA_MASTERNUM + } + return JINDO_MASTERNUM_DEFAULT +} + +func (e *JindoFSxEngine) transformMasterSelector(runtime *datav1alpha1.JindoRuntime) map[string]string { + properties := map[string]string{} + if runtime.Spec.Master.NodeSelector != nil { + properties = runtime.Spec.Master.NodeSelector + } + return properties +} + +func (e *JindoFSxEngine) transformPriority(metaPath string) map[string]string { + properties := map[string]string{} + properties["logDir"] = metaPath + "/log" + return properties +} + +func (e *JindoFSxEngine) transformMasterMountPath(metaPath string) map[string]string { + properties := map[string]string{} + properties["1"] = metaPath + return properties +} + +func (e *JindoFSxEngine) transformWorkerMountPath(originPath []string) map[string]string { + properties := map[string]string{} + for index, value := range originPath { + properties[strconv.Itoa(index+1)] = strings.TrimRight(value, "/") + } + return properties +} + +func (e *JindoFSxEngine) transformFuseArg(runtime *datav1alpha1.JindoRuntime, dataset *datav1alpha1.Dataset) []string { + fuseArgs := []string{} + if len(runtime.Spec.Fuse.Args) > 0 { + fuseArgs = runtime.Spec.Fuse.Args + } + return fuseArgs +} + +func (e *JindoFSxEngine) getSmartDataConfigs() (image, tag, dnsServer string) { + var ( + defaultImage = "registry.cn-shanghai.aliyuncs.com/jindofs/smartdata" + defaultTag = "4.3.0" + defaultDnsServer = "1.1.1.1" + ) + + image = docker.GetImageRepoFromEnv(common.JINDO_SMARTDATA_IMAGE_ENV) + tag = 
docker.GetImageTagFromEnv(common.JINDO_SMARTDATA_IMAGE_ENV) + dnsServer = os.Getenv(common.JINDO_DNS_SERVER) + if len(image) == 0 { + image = defaultImage + } + if len(tag) == 0 { + tag = defaultTag + } + if len(dnsServer) == 0 { + dnsServer = defaultDnsServer + } + e.Log.Info("Set image", "image", image, "tag", tag, "dnsServer", dnsServer) + + return +} + +func (e *JindoFSxEngine) parseFuseImage() (image, tag string) { + var ( + defaultImage = "registry.cn-shanghai.aliyuncs.com/jindofs/jindo-fuse" + defaultTag = "4.3.0" + ) + + image = docker.GetImageRepoFromEnv(common.JINDO_FUSE_IMAGE_ENV) + tag = docker.GetImageTagFromEnv(common.JINDO_FUSE_IMAGE_ENV) + if len(image) == 0 { + image = defaultImage + } + if len(tag) == 0 { + tag = defaultTag + } + e.Log.Info("Set image", "image", image, "tag", tag) + + return +} + +func (e *JindoFSxEngine) transformSecret(runtime *datav1alpha1.JindoRuntime, value *Jindo) { + if len(runtime.Spec.Secret) != 0 { + value.Secret = runtime.Spec.Secret + } +} + +func (e *JindoFSxEngine) transformToken(runtime *datav1alpha1.JindoRuntime, value *Jindo) { + properties := map[string]string{} + if len(runtime.Spec.Secret) != 0 { + properties["default.credential.provider"] = "secrets:///token/" + } else { + properties["default.credential.provider"] = "none" + } + value.Master.TokenProperties = properties +} + +func (e *JindoFSxEngine) allocatePorts(value *Jindo) error { + + // if not usehostnetwork then use default port + // usehostnetwork to choose port from port allocator + expectedPortNum := 2 + if !value.UseHostNetwork { + value.Master.Port.Rpc = DEFAULT_MASTER_RPC_PORT + value.Worker.Port.Rpc = DEFAULT_WORKER_RPC_PORT + if value.Master.ReplicaCount == JINDO_HA_MASTERNUM { + value.Master.Port.Raft = DEFAULT_RAFT_RPC_PORT + } + return nil + } + + if value.Master.ReplicaCount == JINDO_HA_MASTERNUM { + expectedPortNum = 3 + } + + allocator, err := portallocator.GetRuntimePortAllocator() + if err != nil { + e.Log.Error(err, "can't get runtime 
port allocator") + return err + } + + allocatedPorts, err := allocator.GetAvailablePorts(expectedPortNum) + if err != nil { + e.Log.Error(err, "can't get available ports", "expected port num", expectedPortNum) + return err + } + + index := 0 + value.Master.Port.Rpc = allocatedPorts[index] + index++ + value.Worker.Port.Rpc = allocatedPorts[index] + if value.Master.ReplicaCount == JINDO_HA_MASTERNUM { + index++ + value.Master.Port.Raft = allocatedPorts[index] + } + return nil +} + +func (e *JindoFSxEngine) transformInitPortCheck(value *Jindo) { + // This function should be called after port allocation + + if !common.PortCheckEnabled() { + return + } + + e.Log.Info("Enabled port check") + value.InitPortCheck.Enabled = true + + // Always use the default init image defined in env + value.InitPortCheck.Image, value.InitPortCheck.ImageTag, value.InitPortCheck.ImagePullPolicy = docker.ParseInitImage("", "", "", common.DEFAULT_INIT_IMAGE_ENV) + + // Inject ports to be checked to a init container which reports the usage status of the ports for easier debugging. + // The jindo master container will always start even when some of the ports is in use. + var ports []string + + ports = append(ports, strconv.Itoa(value.Master.Port.Rpc)) + if value.Master.ReplicaCount == JINDO_HA_MASTERNUM { + ports = append(ports, strconv.Itoa(value.Master.Port.Raft)) + } + + // init container takes "PORT1:PORT2:PORT3..." 
as input + value.InitPortCheck.PortsToCheck = strings.Join(ports, ":") +} + +func (e *JindoFSxEngine) transformRunAsUser(runtime *datav1alpha1.JindoRuntime, value *Jindo) { + if len(runtime.Spec.User) != 0 { + value.Fuse.RunAs = runtime.Spec.User + } +} + +func (e *JindoFSxEngine) transformTolerations(dataset *datav1alpha1.Dataset, runtime *datav1alpha1.JindoRuntime, value *Jindo) { + + if len(dataset.Spec.Tolerations) > 0 { + // value.Tolerations = dataset.Spec.Tolerations + value.Tolerations = []corev1.Toleration{} + for _, toleration := range dataset.Spec.Tolerations { + toleration.TolerationSeconds = nil + value.Tolerations = append(value.Tolerations, toleration) + } + value.Master.Tolerations = value.Tolerations + value.Worker.Tolerations = value.Tolerations + value.Fuse.Tolerations = value.Tolerations + } + + if len(runtime.Spec.Master.Tolerations) > 0 { + for _, toleration := range runtime.Spec.Master.Tolerations { + toleration.TolerationSeconds = nil + value.Master.Tolerations = append(value.Master.Tolerations, toleration) + } + } + + if len(runtime.Spec.Worker.Tolerations) > 0 { + for _, toleration := range runtime.Spec.Worker.Tolerations { + toleration.TolerationSeconds = nil + value.Worker.Tolerations = append(value.Worker.Tolerations, toleration) + } + } + + if len(runtime.Spec.Fuse.Tolerations) > 0 { + for _, toleration := range runtime.Spec.Fuse.Tolerations { + toleration.TolerationSeconds = nil + value.Fuse.Tolerations = append(value.Fuse.Tolerations, toleration) + } + } +} + +func (e *JindoFSxEngine) transformLabels(runtime *datav1alpha1.JindoRuntime, value *Jindo) { + // the labels will not be merged here because they will be sequentially added into yaml templates + // If two labels share the same label key, the last one in yaml templates overrides the former ones + // and takes effect. 
+ value.Labels = runtime.Spec.Labels + value.Master.Labels = runtime.Spec.Master.Labels + value.Worker.Labels = runtime.Spec.Worker.Labels + value.Fuse.Labels = runtime.Spec.Fuse.Labels +} + +func (e *JindoFSxEngine) transformNetworkMode(runtime *datav1alpha1.JindoRuntime, value *Jindo) { + // to set hostnetwork + switch runtime.Spec.NetworkMode { + case datav1alpha1.HostNetworkMode: + value.UseHostNetwork = true + case datav1alpha1.ContainerNetworkMode: + value.UseHostNetwork = false + case datav1alpha1.DefaultNetworkMode: + value.UseHostNetwork = true + } +} + +func (e *JindoFSxEngine) transformPlacementMode(dataset *datav1alpha1.Dataset, value *Jindo) { + + value.PlacementMode = string(dataset.Spec.PlacementMode) + if len(value.PlacementMode) == 0 { + value.PlacementMode = string(datav1alpha1.ExclusiveMode) + } +} diff --git a/pkg/ddc/jindofsx/transform_fuse_test.go b/pkg/ddc/jindofsx/transform_fuse_test.go new file mode 100644 index 00000000000..ca2751e5537 --- /dev/null +++ b/pkg/ddc/jindofsx/transform_fuse_test.go @@ -0,0 +1,168 @@ +/* +Copyright 2022 The Fluid Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package jindofsx + +import ( + "testing" + + datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1" + "github.com/fluid-cloudnative/fluid/pkg/utils/fake" +) + +func TestTransformFuseWithNoArgs(t *testing.T) { + var tests = []struct { + runtime *datav1alpha1.JindoRuntime + dataset *datav1alpha1.Dataset + jindoValue *Jindo + expect string + }{ + {&datav1alpha1.JindoRuntime{ + Spec: datav1alpha1.JindoRuntimeSpec{}, + }, &datav1alpha1.Dataset{ + Spec: datav1alpha1.DatasetSpec{ + Mounts: []datav1alpha1.Mount{{ + MountPoint: "local:///mnt/test", + Name: "test", + }}, + }}, &Jindo{}, "true"}, + } + for _, test := range tests { + engine := &JindoFSxEngine{Log: fake.NullLogger()} + engine.transformFuse(test.runtime, test.jindoValue) + if test.jindoValue.Fuse.FuseProperties["fs.jindofsx.data.cache.enable"] != test.expect { + t.Errorf("expected value %v, but got %v", test.expect, test.jindoValue.Fuse.FuseProperties["fs.jindofsx.data.cache.enable"]) + } + } +} + +func TestTransformRunAsUser(t *testing.T) { + var tests = []struct { + runtime *datav1alpha1.JindoRuntime + dataset *datav1alpha1.Dataset + jindoValue *Jindo + expect string + }{ + {&datav1alpha1.JindoRuntime{ + Spec: datav1alpha1.JindoRuntimeSpec{ + User: "user", + }, + }, &datav1alpha1.Dataset{ + Spec: datav1alpha1.DatasetSpec{ + Mounts: []datav1alpha1.Mount{{ + MountPoint: "local:///mnt/test", + Name: "test", + }}, + }}, &Jindo{}, "user"}, + } + for _, test := range tests { + engine := &JindoFSxEngine{Log: fake.NullLogger()} + engine.transformRunAsUser(test.runtime, test.jindoValue) + if test.jindoValue.Fuse.RunAs != test.expect { + t.Errorf("expected value %v, but got %v", test.expect, test.jindoValue.Fuse.RunAs) + } + } +} + +func TestTransformSecret(t *testing.T) { + var tests = []struct { + runtime *datav1alpha1.JindoRuntime + dataset *datav1alpha1.Dataset + jindoValue *Jindo + expect string + }{ + {&datav1alpha1.JindoRuntime{ + Spec: datav1alpha1.JindoRuntimeSpec{ + Secret: "secret", + }, + }, 
&datav1alpha1.Dataset{ + Spec: datav1alpha1.DatasetSpec{ + Mounts: []datav1alpha1.Mount{{ + MountPoint: "local:///mnt/test", + Name: "test", + }}, + }}, &Jindo{}, "secret"}, + } + for _, test := range tests { + engine := &JindoFSxEngine{Log: fake.NullLogger()} + engine.transformSecret(test.runtime, test.jindoValue) + if test.jindoValue.Secret != test.expect { + t.Errorf("expected value %v, but got %v", test.expect, test.jindoValue.Fuse.RunAs) + } + } +} + +func TestTransformFuseArg(t *testing.T) { + var tests = []struct { + runtime *datav1alpha1.JindoRuntime + dataset *datav1alpha1.Dataset + jindoValue *Jindo + expect string + }{ + {&datav1alpha1.JindoRuntime{ + Spec: datav1alpha1.JindoRuntimeSpec{ + Secret: "secret", + Fuse: datav1alpha1.JindoFuseSpec{ + Args: []string{"-okernel_cache"}, + }, + }, + }, &datav1alpha1.Dataset{ + Spec: datav1alpha1.DatasetSpec{ + Mounts: []datav1alpha1.Mount{{ + MountPoint: "local:///mnt/test", + Name: "test", + Path: "/", + }}, + }}, &Jindo{}, "-okernel_cache"}, + } + for _, test := range tests { + engine := &JindoFSxEngine{Log: fake.NullLogger()} + properties := engine.transformFuseArg(test.runtime, test.dataset) + if properties[0] != test.expect { + t.Errorf("expected value %v, but got %v", test.expect, test.jindoValue.Fuse.RunAs) + } + } +} + +func TestParseFuseImage(t *testing.T) { + var tests = []struct { + runtime *datav1alpha1.JindoRuntime + dataset *datav1alpha1.Dataset + jindoValue *Jindo + expect string + }{ + {&datav1alpha1.JindoRuntime{ + Spec: datav1alpha1.JindoRuntimeSpec{ + Secret: "secret", + }, + }, &datav1alpha1.Dataset{ + Spec: datav1alpha1.DatasetSpec{ + Mounts: []datav1alpha1.Mount{{ + MountPoint: "local:///mnt/test", + Name: "test", + Path: "/", + }}, + }}, &Jindo{}, "registry.cn-shanghai.aliyuncs.com/jindofs/jindo-fuse:4.3.0"}, + } + for _, test := range tests { + engine := &JindoFSxEngine{Log: fake.NullLogger()} + imageR, tagR := engine.parseFuseImage() + registryVersion := imageR + ":" + tagR + if 
registryVersion != test.expect { + t.Errorf("expected value %v, but got %v", test.expect, test.jindoValue.Fuse.RunAs) + } + } +} diff --git a/pkg/ddc/jindofsx/transform_hadoop_config.go b/pkg/ddc/jindofsx/transform_hadoop_config.go new file mode 100644 index 00000000000..403e8046b0b --- /dev/null +++ b/pkg/ddc/jindofsx/transform_hadoop_config.go @@ -0,0 +1,67 @@ +/* +Copyright 2022 The Fluid Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package jindofsx + +import ( + "context" + "fmt" + + datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1" + v1 "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" +) + +// transformHadoopConfig transforms the given value by checking existence of user-specific hadoop configurations +func (e *JindoFSxEngine) transformHadoopConfig(runtime *datav1alpha1.JindoRuntime, value *Jindo) (err error) { + if len(runtime.Spec.HadoopConfig) == 0 { + return nil + } + + key := types.NamespacedName{ + Namespace: runtime.Namespace, + Name: runtime.Spec.HadoopConfig, + } + + hadoopConfigMap := &v1.ConfigMap{} + + if err = e.Client.Get(context.TODO(), key, hadoopConfigMap); err != nil { + if apierrs.IsNotFound(err) { + err = fmt.Errorf("specified hadoopConfig \"%v\" is not found", runtime.Spec.HadoopConfig) + } + return err + } + + for k := range hadoopConfigMap.Data { + switch k { + case HADOOP_CONF_HDFS_SITE_FILENAME: + value.HadoopConfig.IncludeHdfsSite = true + case 
HADOOP_CONF_CORE_SITE_FILENAME: + value.HadoopConfig.IncludeCoreSite = true + } + } + + // Neither hdfs-site.xml nor core-site.xml is found in the configMap + if !value.HadoopConfig.IncludeCoreSite && !value.HadoopConfig.IncludeHdfsSite { + err = fmt.Errorf("neither \"%v\" nor \"%v\" is found in the specified configMap \"%v\" ", HADOOP_CONF_HDFS_SITE_FILENAME, HADOOP_CONF_CORE_SITE_FILENAME, runtime.Spec.HadoopConfig) + return err + } + + value.HadoopConfig.ConfigMap = runtime.Spec.HadoopConfig + + return nil +} diff --git a/pkg/ddc/jindofsx/transform_master_test.go b/pkg/ddc/jindofsx/transform_master_test.go new file mode 100644 index 00000000000..077eabe6f4b --- /dev/null +++ b/pkg/ddc/jindofsx/transform_master_test.go @@ -0,0 +1,80 @@ +/* +Copyright 2022 The Fluid Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package jindofsx + +import ( + "testing" + + datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1" + "github.com/fluid-cloudnative/fluid/pkg/utils/fake" +) + +func TestTransformToken(t *testing.T) { + var tests = []struct { + runtime *datav1alpha1.JindoRuntime + dataset *datav1alpha1.Dataset + jindoValue *Jindo + expect string + }{ + {&datav1alpha1.JindoRuntime{ + Spec: datav1alpha1.JindoRuntimeSpec{ + Secret: "secret", + }, + }, &datav1alpha1.Dataset{ + Spec: datav1alpha1.DatasetSpec{ + Mounts: []datav1alpha1.Mount{{ + MountPoint: "local:///mnt/test", + Name: "test", + }}, + }}, &Jindo{}, "secrets:///token/"}, + } + for _, test := range tests { + engine := &JindoFSxEngine{Log: fake.NullLogger()} + engine.transformToken(test.runtime, test.jindoValue) + if test.jindoValue.Master.TokenProperties["default.credential.provider"] != test.expect { + t.Errorf("expected value %v, but got %v", test.expect, test.jindoValue.Master.MasterProperties["default.credential.provider"]) + } + } +} + +func TestTransformMasterMountPath(t *testing.T) { + var tests = []struct { + runtime *datav1alpha1.JindoRuntime + dataset *datav1alpha1.Dataset + jindoValue *Jindo + expect string + }{ + {&datav1alpha1.JindoRuntime{ + Spec: datav1alpha1.JindoRuntimeSpec{ + Secret: "secret", + }, + }, &datav1alpha1.Dataset{ + Spec: datav1alpha1.DatasetSpec{ + Mounts: []datav1alpha1.Mount{{ + MountPoint: "local:///mnt/test", + Name: "test", + }}, + }}, &Jindo{}, "/mnt/disk1"}, + } + for _, test := range tests { + engine := &JindoFSxEngine{Log: fake.NullLogger()} + properties := engine.transformMasterMountPath("/mnt/disk1") + if properties["1"] != test.expect { + t.Errorf("expected value %v, but got %v", test.expect, properties["1"]) + } + } +} diff --git a/pkg/ddc/jindofsx/transform_test.go b/pkg/ddc/jindofsx/transform_test.go new file mode 100644 index 00000000000..c4a0585ecc9 --- /dev/null +++ b/pkg/ddc/jindofsx/transform_test.go @@ -0,0 +1,319 @@ +/* +Copyright 2022 The Fluid Authors. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package jindofsx
+
+import (
+	"testing"
+
+	"github.com/fluid-cloudnative/fluid/pkg/common"
+	"github.com/fluid-cloudnative/fluid/pkg/utils/fake"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+
+	datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
+)
+
+func TestTransformTolerations(t *testing.T) {
+	resources := corev1.ResourceRequirements{}
+	resources.Limits = make(corev1.ResourceList)
+	resources.Limits[corev1.ResourceMemory] = resource.MustParse("2Gi")
+
+	result := resource.MustParse("20Gi")
+	var tests = []struct {
+		runtime    *datav1alpha1.JindoRuntime
+		dataset    *datav1alpha1.Dataset
+		jindoValue *Jindo
+		expect     int
+	}{
+		{&datav1alpha1.JindoRuntime{
+			Spec: datav1alpha1.JindoRuntimeSpec{
+				Secret: "secret",
+				TieredStore: datav1alpha1.TieredStore{
+					Levels: []datav1alpha1.Level{{
+						MediumType: common.Memory,
+						Quota:      &result,
+						High:       "0.8",
+						Low:        "0.1",
+					}},
+				},
+				Master: datav1alpha1.JindoCompTemplateSpec{
+					Tolerations: []corev1.Toleration{{
+						Key:      "master",
+						Operator: "Equals",
+						Value:    "true",
+					}},
+				},
+				Worker: datav1alpha1.JindoCompTemplateSpec{
+					Tolerations: []corev1.Toleration{{
+						Key:      "worker",
+						Operator: "Equals",
+						Value:    "true",
+					}},
+				},
+				Fuse: datav1alpha1.JindoFuseSpec{
+					Tolerations: []corev1.Toleration{{
+						Key:      "fuse",
+						Operator: "Equals",
+						Value:    "true",
+					}},
+				},
+			},
+		}, &datav1alpha1.Dataset{
+			Spec: datav1alpha1.DatasetSpec{
+				Mounts: []datav1alpha1.Mount{{
+					MountPoint: "local:///mnt/test",
+					Name:       "test",
+				}},
+				Tolerations: []corev1.Toleration{{
+					Key:      "jindo",
+					Operator: "Equals",
+					Value:    "true",
+				}},
+			}}, &Jindo{}, 2,
+		},
+	}
+	for _, test := range tests {
+		engine := &JindoFSxEngine{Log: fake.NullLogger()}
+		engine.transformTolerations(test.dataset, test.runtime, test.jindoValue)
+		if len(test.jindoValue.Master.Tolerations) != test.expect {
+			t.Errorf("expected value %v, but got %v", test.expect, test.jindoValue.Master.Tolerations)
+		}
+		if len(test.jindoValue.Worker.Tolerations) != test.expect {
+			t.Errorf("expected value %v, but got %v", test.expect, test.jindoValue.Worker.Tolerations)
+		}
+		if len(test.jindoValue.Fuse.Tolerations) != test.expect {
+			t.Errorf("expected value %v, but got %v", test.expect, test.jindoValue.Fuse.Tolerations)
+		}
+	}
+}
+
+func TestParseSmartDataImage(t *testing.T) {
+	var tests = []struct {
+		runtime    *datav1alpha1.JindoRuntime
+		dataset    *datav1alpha1.Dataset
+		jindoValue *Jindo
+		expect     string
+	}{
+		{&datav1alpha1.JindoRuntime{
+			Spec: datav1alpha1.JindoRuntimeSpec{
+				Secret: "secret",
+			},
+		}, &datav1alpha1.Dataset{
+			Spec: datav1alpha1.DatasetSpec{
+				Mounts: []datav1alpha1.Mount{{
+					MountPoint: "local:///mnt/test",
+					Name:       "test",
+					Path:       "/",
+				}},
+			}}, &Jindo{}, "registry.cn-shanghai.aliyuncs.com/jindofs/smartdata:4.3.0"},
+	}
+	for _, test := range tests {
+		engine := &JindoFSxEngine{Log: fake.NullLogger()}
+		imageR, tagR, _ := engine.getSmartDataConfigs()
+		registryVersion := imageR + ":" + tagR
+		if registryVersion != test.expect {
+			t.Errorf("expected value %v, but got %v", test.expect, registryVersion)
+		}
+	}
+}
+
+func TestTransformHostNetWork(t *testing.T) {
+	resources := corev1.ResourceRequirements{}
+	resources.Limits = make(corev1.ResourceList)
+	resources.Limits[corev1.ResourceMemory] = resource.MustParse("2Gi")
+
+	result := resource.MustParse("20Gi")
+	var tests = []struct {
+		runtime    *datav1alpha1.JindoRuntime
+		dataset    *datav1alpha1.Dataset
+		jindoValue *Jindo
+		expect     bool
+	}{
+		{&datav1alpha1.JindoRuntime{
+			Spec: datav1alpha1.JindoRuntimeSpec{
+				Secret: "secret",
+				TieredStore: datav1alpha1.TieredStore{
+					Levels: []datav1alpha1.Level{{
+						MediumType: common.Memory,
+						Quota:      &result,
+						High:       "0.8",
+						Low:        "0.1",
+					}},
+				},
+			},
+		}, &datav1alpha1.Dataset{
+			Spec: datav1alpha1.DatasetSpec{
+				Mounts: []datav1alpha1.Mount{{
+					MountPoint: "local:///mnt/test",
+					Name:       "test",
+				}},
+			}}, &Jindo{}, true,
+		},
+		{&datav1alpha1.JindoRuntime{
+			Spec: datav1alpha1.JindoRuntimeSpec{
+				Secret: "secret",
+				TieredStore: datav1alpha1.TieredStore{
+					Levels: []datav1alpha1.Level{{
+						MediumType: common.Memory,
+						Quota:      &result,
+						High:       "0.8",
+						Low:        "0.1",
+					}},
+				},
+				NetworkMode: "HostNetwork",
+			},
+		}, &datav1alpha1.Dataset{
+			Spec: datav1alpha1.DatasetSpec{
+				Mounts: []datav1alpha1.Mount{{
+					MountPoint: "local:///mnt/test",
+					Name:       "test",
+				}},
+			}}, &Jindo{}, true,
+		},
+		{&datav1alpha1.JindoRuntime{
+			Spec: datav1alpha1.JindoRuntimeSpec{
+				Secret: "secret",
+				TieredStore: datav1alpha1.TieredStore{
+					Levels: []datav1alpha1.Level{{
+						MediumType: common.Memory,
+						Quota:      &result,
+						High:       "0.8",
+						Low:        "0.1",
+					}},
+				},
+				NetworkMode: "ContainerNetwork",
+			},
+		}, &datav1alpha1.Dataset{
+			Spec: datav1alpha1.DatasetSpec{
+				Mounts: []datav1alpha1.Mount{{
+					MountPoint: "local:///mnt/test",
+					Name:       "test",
+				}},
+			}}, &Jindo{}, false,
+		},
+	}
+	for _, test := range tests {
+		engine := &JindoFSxEngine{Log: fake.NullLogger()}
+		engine.transformNetworkMode(test.runtime, test.jindoValue)
+		if test.jindoValue.UseHostNetwork != test.expect {
+			t.Errorf("expected value %v, but got %v", test.expect, test.jindoValue.UseHostNetwork)
+		}
+	}
+
+	var errortests = []struct {
+		runtime    *datav1alpha1.JindoRuntime
+		dataset    *datav1alpha1.Dataset
+		jindoValue *Jindo
+		expect     bool
+	}{
+		{&datav1alpha1.JindoRuntime{
+			Spec: datav1alpha1.JindoRuntimeSpec{
+				Secret: "secret",
+				TieredStore: datav1alpha1.TieredStore{
+					Levels: []datav1alpha1.Level{{
+						MediumType: common.Memory,
+						Quota:      &result,
+						High:       "0.8",
+						Low:        "0.1",
+					}},
+				},
+				NetworkMode: "Non",
+			},
+		}, &datav1alpha1.Dataset{
+			Spec: datav1alpha1.DatasetSpec{
+				Mounts: []datav1alpha1.Mount{{
+					MountPoint: "local:///mnt/test",
+					Name:       "test",
+				}},
+			}}, &Jindo{}, false,
+		},
+	}
+	for _, test := range errortests {
+		engine := &JindoFSxEngine{Log: fake.NullLogger()}
+		engine.transformNetworkMode(test.runtime, test.jindoValue)
+		if test.jindoValue.UseHostNetwork != test.expect {
+			t.Errorf("expected value %v, but got %v", test.expect, test.jindoValue.UseHostNetwork)
+		}
+	}
+}
+
+func TestTransformAllocatePorts(t *testing.T) {
+	resources := corev1.ResourceRequirements{}
+	resources.Limits = make(corev1.ResourceList)
+	resources.Limits[corev1.ResourceMemory] = resource.MustParse("2Gi")
+
+	result := resource.MustParse("20Gi")
+	var tests = []struct {
+		runtime    *datav1alpha1.JindoRuntime
+		dataset    *datav1alpha1.Dataset
+		jindoValue *Jindo
+		expect     int
+	}{
+		{&datav1alpha1.JindoRuntime{
+			Spec: datav1alpha1.JindoRuntimeSpec{
+				Secret: "secret",
+				TieredStore: datav1alpha1.TieredStore{
+					Levels: []datav1alpha1.Level{{
+						MediumType: common.Memory,
+						Quota:      &result,
+						High:       "0.8",
+						Low:        "0.1",
+					}},
+				},
+				NetworkMode: "ContainerNetwork",
+			},
+		}, &datav1alpha1.Dataset{
+			Spec: datav1alpha1.DatasetSpec{
+				Mounts: []datav1alpha1.Mount{{
+					MountPoint: "local:///mnt/test",
+					Name:       "test",
+				}},
+			}}, &Jindo{}, 8101,
+		},
+		{&datav1alpha1.JindoRuntime{
+			Spec: datav1alpha1.JindoRuntimeSpec{
+				Secret: "secret",
+				TieredStore: datav1alpha1.TieredStore{
+					Levels: []datav1alpha1.Level{{
+						MediumType: common.Memory,
+						Quota:      &result,
+						High:       "0.8",
+						Low:        "0.1",
+					}},
+				},
+				NetworkMode: "ContainerNetwork",
+				Replicas:    3,
+			},
+		}, &datav1alpha1.Dataset{
+			Spec: datav1alpha1.DatasetSpec{
+				Mounts: []datav1alpha1.Mount{{
+					MountPoint: "local:///mnt/test",
+					Name:       "test",
+				}},
+			}}, &Jindo{}, 8101,
+		},
+	}
+	for _, test := range tests {
+		engine := &JindoFSxEngine{Log: fake.NullLogger()}
+		engine.transformNetworkMode(test.runtime, test.jindoValue)
+		test.jindoValue.Master.ReplicaCount = 3
+		err := engine.allocatePorts(test.jindoValue)
+		if test.jindoValue.Master.Port.Rpc != test.expect || err != nil {
+			t.Errorf("expected value %v, but got %v, and err %v", test.expect, test.jindoValue.Master.Port.Rpc, err)
+		}
+	}
+}
diff --git a/pkg/ddc/jindofsx/transform_worker_test.go b/pkg/ddc/jindofsx/transform_worker_test.go
new file mode 100644
index 00000000000..a57049606ac
--- /dev/null
+++ b/pkg/ddc/jindofsx/transform_worker_test.go
@@ -0,0 +1,183 @@
+/*
+Copyright 2022 The Fluid Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package jindofsx + +import ( + "strings" + "testing" + + "github.com/fluid-cloudnative/fluid/pkg/common" + "github.com/fluid-cloudnative/fluid/pkg/utils/fake" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + + datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1" +) + +func TestTransformWorker(t *testing.T) { + resources := corev1.ResourceRequirements{} + resources.Limits = make(corev1.ResourceList) + resources.Limits[corev1.ResourceMemory] = resource.MustParse("2Gi") + + result := resource.MustParse("20Gi") + var tests = []struct { + runtime *datav1alpha1.JindoRuntime + dataset *datav1alpha1.Dataset + jindoValue *Jindo + expect string + }{ + {&datav1alpha1.JindoRuntime{ + Spec: datav1alpha1.JindoRuntimeSpec{ + Secret: "secret", + TieredStore: datav1alpha1.TieredStore{ + Levels: []datav1alpha1.Level{{ + MediumType: common.Memory, + Quota: &result, + High: "0.8", + Low: "0.1", + }}, + }, + }, + }, &datav1alpha1.Dataset{ + Spec: datav1alpha1.DatasetSpec{ + Mounts: []datav1alpha1.Mount{{ + MountPoint: "local:///mnt/test", + Name: "test", + }}, + }}, &Jindo{}, "1G"}, + } + for _, test := range tests { + engine := &JindoFSxEngine{Log: fake.NullLogger()} + test.jindoValue.Worker.Port.Rpc = 8001 + test.jindoValue.Worker.Port.Raft = 8002 + dataPath := "/var/lib/docker/data" + userQuotas := "1G" + engine.transformWorker(test.runtime, dataPath, userQuotas, test.jindoValue) + if test.jindoValue.Worker.WorkerProperties["storage.data-dirs.capacities"] != test.expect { + t.Errorf("expected value %v, but got %v", test.expect, test.jindoValue.Worker.WorkerProperties["storage.data-dirs.capacities"]) + } + } +} + +func TestTransformWorkerMountPath(t *testing.T) { + var tests = []struct { + runtime *datav1alpha1.JindoRuntime + dataset *datav1alpha1.Dataset + jindoValue *Jindo + expect string + }{ + {&datav1alpha1.JindoRuntime{ + Spec: datav1alpha1.JindoRuntimeSpec{ + Secret: "secret", + }, + }, &datav1alpha1.Dataset{ + Spec: 
datav1alpha1.DatasetSpec{ + Mounts: []datav1alpha1.Mount{{ + MountPoint: "local:///mnt/test", + Name: "test", + }}, + }}, &Jindo{}, "/mnt/disk2"}, + } + for _, test := range tests { + engine := &JindoFSxEngine{Log: fake.NullLogger()} + stroagePath := "/mnt/disk1,/mnt/disk2" + originPath := strings.Split(stroagePath, ",") + properties := engine.transformWorkerMountPath(originPath) + if properties["2"] != test.expect { + t.Errorf("expected value %v, but got %v", test.expect, properties["2"]) + } + } +} + +func TestTransformResourcesForWorkerNoValue(t *testing.T) { + var tests = []struct { + runtime *datav1alpha1.JindoRuntime + jindoValue *Jindo + }{ + {&datav1alpha1.JindoRuntime{ + Spec: datav1alpha1.JindoRuntimeSpec{}, + }, &Jindo{ + Properties: map[string]string{}, + }}, + } + for _, test := range tests { + engine := &JindoFSxEngine{Log: fake.NullLogger()} + engine.transformResources(test.runtime, test.jindoValue) + if test.jindoValue.Worker.Resources.Requests.Memory != "" { + t.Errorf("expected nil, got %v", test.jindoValue.Worker.Resources.Requests.Memory) + } + if test.jindoValue.Worker.Resources.Requests.CPU != "" { + t.Errorf("expected nil, got %v", test.jindoValue.Worker.Resources.Requests.CPU) + } + if test.jindoValue.Worker.Resources.Limits.Memory != "" { + t.Errorf("expected nil, got %v", test.jindoValue.Worker.Resources.Limits.Memory) + } + if test.jindoValue.Worker.Resources.Limits.CPU != "" { + t.Errorf("expected nil, got %v", test.jindoValue.Worker.Resources.Limits.CPU) + } + } +} + +func TestTransformResourcesForWorkerWithValue(t *testing.T) { + + resources := corev1.ResourceRequirements{} + resources.Limits = make(corev1.ResourceList) + resources.Limits[corev1.ResourceMemory] = resource.MustParse("2Gi") + resources.Limits[corev1.ResourceCPU] = resource.MustParse("2") + resources.Requests = make(corev1.ResourceList) + resources.Requests[corev1.ResourceMemory] = resource.MustParse("1Gi") + resources.Requests[corev1.ResourceCPU] = 
resource.MustParse("1") + result := resource.MustParse("20Gi") + + var tests = []struct { + runtime *datav1alpha1.JindoRuntime + jindoValue *Jindo + }{ + {&datav1alpha1.JindoRuntime{ + Spec: datav1alpha1.JindoRuntimeSpec{ + Worker: datav1alpha1.JindoCompTemplateSpec{ + Resources: resources, + }, + TieredStore: datav1alpha1.TieredStore{ + Levels: []datav1alpha1.Level{{ + MediumType: common.Memory, + Quota: &result, + }}, + }, + }, + }, &Jindo{ + Properties: map[string]string{}, + Master: Master{}, + }}, + } + for _, test := range tests { + engine := &JindoFSxEngine{Log: fake.NullLogger()} + engine.transformResources(test.runtime, test.jindoValue) + if test.jindoValue.Worker.Resources.Requests.Memory != "1Gi" { + t.Errorf("expected nil, got %v", test.jindoValue.Worker.Resources.Requests.Memory) + } + if test.jindoValue.Worker.Resources.Requests.CPU != "1" { + t.Errorf("expected nil, got %v", test.jindoValue.Worker.Resources.Requests.CPU) + } + if test.jindoValue.Worker.Resources.Limits.Memory != "2Gi" { + t.Errorf("expected nil, got %v", test.jindoValue.Worker.Resources.Limits.Memory) + } + if test.jindoValue.Worker.Resources.Limits.CPU != "2" { + t.Errorf("expected nil, got %v", test.jindoValue.Worker.Resources.Limits.CPU) + } + } +} diff --git a/pkg/ddc/jindofsx/types.go b/pkg/ddc/jindofsx/types.go new file mode 100644 index 00000000000..8d6ab7c2b3c --- /dev/null +++ b/pkg/ddc/jindofsx/types.go @@ -0,0 +1,124 @@ +/* +Copyright 2022 The Fluid Author. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package jindofsx + +import ( + "github.com/fluid-cloudnative/fluid/pkg/common" + v1 "k8s.io/api/core/v1" +) + +type Jindo struct { + Image string `yaml:"image"` + ImageTag string `yaml:"imageTag"` + ImagePullPolicy string `yaml:"imagePullPolicy"` + FuseImage string `yaml:"fuseImage"` + FuseImageTag string `yaml:"fuseImageTag"` + User int `yaml:"user"` + Group int `yaml:"group"` + FsGroup int `yaml:"fsGroup"` + UseHostNetwork bool `yaml:"useHostNetwork"` + UseHostPID bool `yaml:"useHostPID"` + Properties map[string]string `yaml:"properties"` + Master Master `yaml:"master"` + Worker Worker `yaml:"worker"` + Fuse Fuse `yaml:"fuse"` + Mounts Mounts `yaml:"mounts"` + HadoopConfig HadoopConfig `yaml:"hadoopConfig,omitempty"` + Secret string `yaml:"secret,omitempty"` + Tolerations []v1.Toleration `yaml:"tolerations,omitempty"` + InitPortCheck common.InitPortCheck `yaml:"initPortCheck,omitempty"` + Labels map[string]string `yaml:"labels,omitempty"` + LogConfig map[string]string `yaml:"logConfig,omitempty"` + PlacementMode string `yaml:"placement,omitempty"` + Owner *common.OwnerReference `yaml:"owner,omitempty"` +} + +type HadoopConfig struct { + ConfigMap string `yaml:"configMap"` + IncludeHdfsSite bool `yaml:"includeHdfsSite"` + IncludeCoreSite bool `yaml:"includeCoreSite"` +} + +type Master struct { + ReplicaCount int `yaml:"replicaCount"` + Resources Resources `yaml:"resources"` + NodeSelector map[string]string `yaml:"nodeSelector,omitempty"` + MasterProperties map[string]string `yaml:"properties"` + FileStoreProperties map[string]string `yaml:"fileStoreProperties"` + TokenProperties map[string]string `yaml:"secretProperties"` + Port Ports `yaml:"ports,omitempty"` + OssKey string `yaml:"osskey,omitempty"` + OssSecret string `yaml:"osssecret,omitempty"` + Tolerations []v1.Toleration `yaml:"tolerations,omitempty"` + DnsServer string `yaml:"dnsServer,omitempty"` + 
NameSpace string `yaml:"namespace,omitempty"` + Labels map[string]string `yaml:"labels,omitempty"` +} + +type Worker struct { + Resources Resources `yaml:"resources,omitempty"` + NodeSelector map[string]string `yaml:"nodeSelector,omitempty"` + WorkerProperties map[string]string `yaml:"properties"` + Port Ports `yaml:"ports,omitempty"` + Tolerations []v1.Toleration `yaml:"tolerations,omitempty"` + // Affinity v1.Affinity `yaml:"affinity,omitempty"` + Labels map[string]string `yaml:"labels,omitempty"` +} + +type Ports struct { + Rpc int `yaml:"rpc,omitempty"` + Raft int `yaml:"raft,omitempty"` +} + +type Fuse struct { + Args []string `yaml:"args"` + HostPath string `yaml:"hostPath"` + NodeSelector map[string]string `yaml:"nodeSelector,omitempty"` + FuseProperties map[string]string `yaml:"properties"` + Global bool `yaml:"global,omitempty"` + RunAs string `yaml:"runAs,omitempty"` + Tolerations []v1.Toleration `yaml:"tolerations,omitempty"` + Labels map[string]string `yaml:"labels,omitempty"` + CriticalPod bool `yaml:"criticalPod,omitempty"` + Resources Resources `yaml:"resources,omitempty"` + MountPath string `yaml:"mountPath,omitempty"` +} + +type Mounts struct { + Master map[string]string `yaml:"master"` + WorkersAndClients map[string]string `yaml:"workersAndClients"` +} + +type Resources struct { + Limits Resource `yaml:"limits"` + Requests Resource `yaml:"requests"` +} + +type Resource struct { + CPU string `yaml:"cpu"` + Memory string `yaml:"memory"` +} + +type cacheStates struct { + cacheCapacity string + // cacheable string + // lowWaterMark string + // highWaterMark string + cached string + cachedPercentage string + // nonCacheable string +} diff --git a/pkg/ddc/jindofsx/ufs.go b/pkg/ddc/jindofsx/ufs.go new file mode 100644 index 00000000000..18f6c5d419e --- /dev/null +++ b/pkg/ddc/jindofsx/ufs.go @@ -0,0 +1,92 @@ +/* +Copyright 2022 The Fluid Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package jindofsx + +import ( + "github.com/fluid-cloudnative/fluid/pkg/ddc/jindofsx/operations" + "github.com/fluid-cloudnative/fluid/pkg/utils" +) + +// ShouldCheckUFS checks if it requires checking UFS +func (e *JindoFSxEngine) ShouldCheckUFS() (should bool, err error) { + should = true + return +} + +// PrepareUFS do all the UFS preparations +func (e *JindoFSxEngine) PrepareUFS() (err error) { + // 1. Mount UFS (Synchronous Operation) + shouldMountUfs, err := e.shouldMountUFS() + if err != nil { + return + } + e.Log.Info("shouldMountUFS", "should", shouldMountUfs) + + if shouldMountUfs { + err = e.mountUFS() + if err != nil { + return + } + } + e.Log.Info("mountUFS") + + err = e.SyncMetadata() + if err != nil { + // just report this error and ignore it because SyncMetadata isn't on the critical path of Setup + e.Log.Error(err, "SyncMetadata") + return nil + } + + return +} + +// UsedStorageBytes returns used storage size of Jindo in bytes +func (e *JindoFSxEngine) UsedStorageBytes() (value int64, err error) { + + return +} + +// FreeStorageBytes returns free storage size of Jindo in bytes +func (e *JindoFSxEngine) FreeStorageBytes() (value int64, err error) { + return +} + +// return total storage size of Jindo in bytes +func (e *JindoFSxEngine) TotalStorageBytes() (value int64, err error) { + return +} + +// return the total num of files in Jindo +func (e *JindoFSxEngine) TotalFileNums() (value int64, err error) { + return +} + 
+// report jindo summary +func (e *JindoFSxEngine) GetReportSummary() (summary string, err error) { + podName, containerName := e.getMasterPodInfo() + fileUtils := operations.NewJindoFileUtils(podName, containerName, e.namespace, e.Log) + return fileUtils.ReportSummary() +} + +// JindoFSxEngine hasn't support UpdateOnUFSChange +func (e *JindoFSxEngine) ShouldUpdateUFS() (ufsToUpdate *utils.UFSToUpdate) { + return +} + +func (e *JindoFSxEngine) UpdateOnUFSChange(*utils.UFSToUpdate) (updateReady bool, err error) { + return +} diff --git a/pkg/ddc/jindofsx/ufs_internal.go b/pkg/ddc/jindofsx/ufs_internal.go new file mode 100644 index 00000000000..547e78a9af8 --- /dev/null +++ b/pkg/ddc/jindofsx/ufs_internal.go @@ -0,0 +1,96 @@ +/* +Copyright 2022 The Fluid Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package jindofsx + +import ( + "fmt" + "github.com/fluid-cloudnative/fluid/pkg/ddc/jindofsx/operations" + "github.com/fluid-cloudnative/fluid/pkg/utils" +) + +// shouldMountUFS checks if there's any UFS that need to be mounted +func (e *JindoFSxEngine) shouldMountUFS() (should bool, err error) { + dataset, err := utils.GetDataset(e.Client, e.name, e.namespace) + e.Log.Info("get dataset info", "dataset", dataset) + if err != nil { + return should, err + } + + podName, containerName := e.getMasterPodInfo() + fileUtils := operations.NewJindoFileUtils(podName, containerName, e.namespace, e.Log) + + ready := fileUtils.Ready() + if !ready { + should = false + err = fmt.Errorf("the UFS is not ready") + return should, err + } + + // Check if any of the Mounts has not been mounted in Alluxio + for _, mount := range dataset.Spec.Mounts { + mounted, err := fileUtils.IsMounted("/" + mount.Name) + if err != nil { + should = false + return should, err + } + if !mounted { + e.Log.Info("Found dataset that is not mounted.", "dataset", dataset) + should = true + return should, err + } + } + + return should, err +} + +// mountUFS() mount all UFSs to Alluxio according to mount points in `dataset.Spec`. If a mount point is Fluid-native, mountUFS() will skip it. +func (e *JindoFSxEngine) mountUFS() (err error) { + dataset, err := utils.GetDataset(e.Client, e.name, e.namespace) + if err != nil { + return err + } + + podName, containerName := e.getMasterPodInfo() + fileUitls := operations.NewJindoFileUtils(podName, containerName, e.namespace, e.Log) + + ready := fileUitls.Ready() + if !ready { + return fmt.Errorf("the UFS is not ready") + } + + // Iterate all the mount points, do mount if the mount point is not Fluid-native(e.g. 
Hostpath or PVC) + for _, mount := range dataset.Spec.Mounts { + + // first to check the path isMounted + mounted := false + if !mounted { + if mount.Path != "" { + err = fileUitls.Mount(mount.Path, mount.MountPoint) + if err != nil { + return err + } + continue + } + err = fileUitls.Mount(mount.Name, mount.MountPoint) + if err != nil { + return err + } + } + + } + return nil +} diff --git a/pkg/ddc/jindofsx/utils.go b/pkg/ddc/jindofsx/utils.go new file mode 100644 index 00000000000..9929a8d6e11 --- /dev/null +++ b/pkg/ddc/jindofsx/utils.go @@ -0,0 +1,119 @@ +/* +Copyright 2022 The Fluid Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package jindofsx + +import ( + "context" + "fmt" + "strconv" + + datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1" + "github.com/fluid-cloudnative/fluid/pkg/common" + "github.com/fluid-cloudnative/fluid/pkg/ddc/jindo/operations" + "github.com/fluid-cloudnative/fluid/pkg/utils" + appsv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/types" +) + +func (e *JindoFSxEngine) getTieredStoreType(runtime *datav1alpha1.JindoRuntime) int { + var mediumType int + for _, level := range runtime.Spec.TieredStore.Levels { + mediumType = common.GetDefaultTieredStoreOrder(level.MediumType) + } + return mediumType +} + +func (e *JindoFSxEngine) getMountPoint() (mountPath string) { + mountRoot := getMountRoot() + e.Log.Info("mountRoot", "path", mountRoot) + return fmt.Sprintf("%s/%s/%s/jindofs-fuse", mountRoot, e.namespace, e.name) +} + +func (j *JindoFSxEngine) getHostMountPoint() (mountPath string) { + mountRoot := getMountRoot() + j.Log.Info("mountRoot", "path", mountRoot) + return fmt.Sprintf("%s/%s/%s", mountRoot, j.namespace, j.name) +} + +// getMountRoot returns the default path, if it's not set +func getMountRoot() (path string) { + path, err := utils.GetMountRoot() + if err != nil { + path = "/" + common.JindoRuntime + } else { + path = path + "/" + common.JindoRuntime + } + // e.Log.Info("Mount root", "path", path) + return +} + +// getRuntime gets the jindo runtime +func (e *JindoFSxEngine) getRuntime() (*datav1alpha1.JindoRuntime, error) { + + key := types.NamespacedName{ + Name: e.name, + Namespace: e.namespace, + } + + var runtime datav1alpha1.JindoRuntime + if err := e.Get(context.TODO(), key, &runtime); err != nil { + return nil, err + } + return &runtime, nil +} + +func (e *JindoFSxEngine) getMasterName() (dsName string) { + return e.name + "-jindofs-master" +} + +func (e *JindoFSxEngine) getWorkerName() (dsName string) { + return e.name + "-jindofs-worker" +} + +func (e *JindoFSxEngine) getFuseName() (dsName string) { + return e.name + 
"-jindofs-fuse" +} + +func (e *JindoFSxEngine) getDaemonset(name string, namespace string) (daemonset *appsv1.DaemonSet, err error) { + daemonset = &appsv1.DaemonSet{} + err = e.Client.Get(context.TODO(), types.NamespacedName{ + Name: name, + Namespace: namespace, + }, daemonset) + + return daemonset, err +} + +func (e *JindoFSxEngine) getMasterPodInfo() (podName string, containerName string) { + podName = e.name + "-jindofs-master-0" + containerName = "jindofs-master" + + return +} + +// return total storage size of Jindo in bytes +func (e *JindoFSxEngine) TotalJindoStorageBytes(useStsSecret bool) (value int64, err error) { + podName, containerName := e.getMasterPodInfo() + fileUtils := operations.NewJindoFileUtils(podName, containerName, e.namespace, e.Log) + url := "jfs://jindo/" + ufsSize, err := fileUtils.GetUfsTotalSize(url, useStsSecret) + e.Log.Info("jindo storage ufsSize", "ufsSize", ufsSize) + if err != nil { + e.Log.Error(err, "get total size") + } + return strconv.ParseInt(ufsSize, 10, 64) +} diff --git a/pkg/ddc/jindofsx/utils_test.go b/pkg/ddc/jindofsx/utils_test.go new file mode 100644 index 00000000000..d04460ed07d --- /dev/null +++ b/pkg/ddc/jindofsx/utils_test.go @@ -0,0 +1,123 @@ +/* +Copyright 2022 The Fluid Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package jindofsx + +import ( + "os" + "testing" + + "github.com/go-logr/logr" + + "github.com/fluid-cloudnative/fluid/pkg/common" + "github.com/fluid-cloudnative/fluid/pkg/utils" + "github.com/fluid-cloudnative/fluid/pkg/utils/fake" +) + +func TestIsFluidNativeScheme(t *testing.T) { + + var tests = []struct { + mountPoint string + expect bool + }{ + {"local:///test", + true}, + { + "pvc://test", + true, + }, { + "oss://test", + false, + }, + } + for _, test := range tests { + result := common.IsFluidNativeScheme(test.mountPoint) + if result != test.expect { + t.Errorf("expect %v for %s, but got %v", test.expect, test.mountPoint, result) + } + } +} + +func TestMountRootWithEnvSet(t *testing.T) { + var testCases = []struct { + input string + expected string + }{ + {"/var/lib/mymount", "/var/lib/mymount/jindo"}, + } + for _, tc := range testCases { + os.Setenv(utils.MountRoot, tc.input) + if tc.expected != getMountRoot() { + t.Errorf("expected %#v, got %#v", + tc.expected, getMountRoot()) + } + } +} + +func TestMountRootWithoutEnvSet(t *testing.T) { + var testCases = []struct { + input string + expected string + }{ + {"/var/lib/mymount", "/jindo"}, + } + + for _, tc := range testCases { + os.Unsetenv(utils.MountRoot) + if tc.expected != getMountRoot() { + t.Errorf("expected %#v, got %#v", + tc.expected, getMountRoot()) + } + } +} + +func TestJindoFSEngine_getHostMountPoint(t *testing.T) { + type fields struct { + name string + namespace string + Log logr.Logger + MountRoot string + } + var tests = []struct { + name string + fields fields + wantMountPath string + }{ + { + name: "test", + fields: fields{ + name: "jindofs", + namespace: "default", + Log: fake.NullLogger(), + MountRoot: "/tmp", + }, + wantMountPath: "/tmp/jindo/default/jindofs", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + j := &JindoFSxEngine{ + name: tt.fields.name, + namespace: tt.fields.namespace, + Log: tt.fields.Log, + } + os.Setenv("MOUNT_ROOT", 
tt.fields.MountRoot) + if gotMountPath := j.getHostMountPoint(); gotMountPath != tt.wantMountPath { + t.Errorf("getHostMountPoint() = %v, want %v", gotMountPath, tt.wantMountPath) + } + }) + } +} diff --git a/pkg/ddc/jindofsx/worker.go b/pkg/ddc/jindofsx/worker.go new file mode 100644 index 00000000000..db7cea5b8b7 --- /dev/null +++ b/pkg/ddc/jindofsx/worker.go @@ -0,0 +1,236 @@ +package jindofsx + +import ( + "context" + + "github.com/fluid-cloudnative/fluid/pkg/common" + "github.com/fluid-cloudnative/fluid/pkg/ctrl" + fluiderrs "github.com/fluid-cloudnative/fluid/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + + datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1" + "github.com/fluid-cloudnative/fluid/pkg/utils" + v1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/util/retry" +) + +// SetupWorkers checks the desired and current replicas of workers and makes an update +// over the status by setting phases and conditions. The function +// calls for a status update and finally returns error if anything unexpected happens. 
+func (e *JindoFSxEngine) SetupWorkers() (err error) { + + err = retry.RetryOnConflict(retry.DefaultBackoff, func() error { + workers, err := ctrl.GetWorkersAsStatefulset(e.Client, + types.NamespacedName{Namespace: e.namespace, Name: e.getWorkerName()}) + if err != nil { + if fluiderrs.IsDeprecated(err) { + e.Log.Info("Warning: Deprecated mode is not support, so skip handling", "details", err) + return nil + } + return err + } + + runtime, err := e.getRuntime() + if err != nil { + return err + } + runtimeToUpdate := runtime.DeepCopy() + return e.Helper.SetupWorkers(runtimeToUpdate, runtimeToUpdate.Status, workers) + }) + + if err != nil { + _ = utils.LoggingErrorExceptConflict(e.Log, + err, + "Failed to setup worker", + types.NamespacedName{ + Namespace: e.namespace, + Name: e.name, + }) + } + return +} + +// ShouldSetupWorkers checks if we need setup the workers +func (e *JindoFSxEngine) ShouldSetupWorkers() (should bool, err error) { + runtime, err := e.getRuntime() + if err != nil { + return + } + + switch runtime.Status.WorkerPhase { + case datav1alpha1.RuntimePhaseNone: + should = true + default: + should = false + } + + return +} + +// CheckWorkersReady checks if the workers are ready +func (e *JindoFSxEngine) CheckWorkersReady() (ready bool, err error) { + + workers, err := ctrl.GetWorkersAsStatefulset(e.Client, + types.NamespacedName{Namespace: e.namespace, Name: e.getWorkerName()}) + if err != nil { + if fluiderrs.IsDeprecated(err) { + e.Log.Info("Warning: Deprecated mode is not support, so skip handling", "details", err) + ready = true + return ready, nil + } + return ready, err + } + + err = retry.RetryOnConflict(retry.DefaultBackoff, func() error { + runtime, err := e.getRuntime() + if err != nil { + return err + } + runtimeToUpdate := runtime.DeepCopy() + ready, err = e.Helper.CheckWorkersReady(runtimeToUpdate, runtimeToUpdate.Status, workers) + if err != nil { + _ = utils.LoggingErrorExceptConflict(e.Log, + err, + "Failed to setup worker", + 
types.NamespacedName{ + Namespace: e.namespace, + Name: e.name, + }) + } + return err + }) + + return +} + +// getWorkerSelectors gets the selector of the worker +func (e *JindoFSxEngine) getWorkerSelectors() string { + labels := map[string]string{ + "release": e.name, + POD_ROLE_TYPE: WOKRER_POD_ROLE, + "app": common.JindoRuntime, + } + labelSelector := &metav1.LabelSelector{ + MatchLabels: labels, + } + + selectorValue := "" + selector, err := metav1.LabelSelectorAsSelector(labelSelector) + if err != nil { + e.Log.Error(err, "Failed to parse the labelSelector of the runtime", "labels", labels) + } else { + selectorValue = selector.String() + } + return selectorValue +} + +// buildWorkersAffinity builds workers affinity if it doesn't have +func (e *JindoFSxEngine) buildWorkersAffinity(workers *v1.StatefulSet) (workersToUpdate *v1.StatefulSet, err error) { + // TODO: for now, runtime affinity can't be set by user, so we can assume the affinity is nil in the first time. + // We need to enhance it in future + workersToUpdate = workers.DeepCopy() + + if workersToUpdate.Spec.Template.Spec.Affinity == nil { + workersToUpdate.Spec.Template.Spec.Affinity = &corev1.Affinity{} + dataset, err := utils.GetDataset(e.Client, e.name, e.namespace) + if err != nil { + return workersToUpdate, err + } + // 1. Set pod anti affinity(required) for same dataset (Current using port conflict for scheduling, no need to do) + + // 2. 
Set pod anti affinity for the different dataset + if dataset.IsExclusiveMode() { + workersToUpdate.Spec.Template.Spec.Affinity.PodAntiAffinity = &corev1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "fluid.io/dataset", + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "kubernetes.io/hostname", + }, + }, + } + } else { + workersToUpdate.Spec.Template.Spec.Affinity.PodAntiAffinity = &corev1.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{ + { + // The default weight is 50 + Weight: 50, + PodAffinityTerm: corev1.PodAffinityTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "fluid.io/dataset", + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "kubernetes.io/hostname", + }, + }, + }, + RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "fluid.io/dataset-placement", + Operator: metav1.LabelSelectorOpIn, + Values: []string{string(datav1alpha1.ExclusiveMode)}, + }, + }, + }, + TopologyKey: "kubernetes.io/hostname", + }, + }, + } + } + + // 3. 
Prefer to locate on the node which already has fuse on it + if workersToUpdate.Spec.Template.Spec.Affinity.NodeAffinity == nil { + workersToUpdate.Spec.Template.Spec.Affinity.NodeAffinity = &corev1.NodeAffinity{} + } + + if len(workersToUpdate.Spec.Template.Spec.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution) == 0 { + workersToUpdate.Spec.Template.Spec.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution = []corev1.PreferredSchedulingTerm{} + } + + workersToUpdate.Spec.Template.Spec.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution = + append(workersToUpdate.Spec.Template.Spec.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution, + corev1.PreferredSchedulingTerm{ + Weight: 100, + Preference: corev1.NodeSelectorTerm{ + MatchExpressions: []corev1.NodeSelectorRequirement{ + { + Key: e.getFuseLabelname(), + Operator: corev1.NodeSelectorOpIn, + Values: []string{"true"}, + }, + }, + }, + }) + + // 3. set node affinity if possible + if dataset.Spec.NodeAffinity != nil { + if dataset.Spec.NodeAffinity.Required != nil { + workersToUpdate.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = + dataset.Spec.NodeAffinity.Required + } + } + + err = e.Client.Update(context.TODO(), workersToUpdate) + if err != nil { + return workersToUpdate, err + } + + } + + return +} diff --git a/pkg/ddc/jindofsx/worker_test.go b/pkg/ddc/jindofsx/worker_test.go new file mode 100644 index 00000000000..7e0a4ba13ca --- /dev/null +++ b/pkg/ddc/jindofsx/worker_test.go @@ -0,0 +1,839 @@ +/* +Copyright 2022 The Fluid Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package jindofsx + +import ( + "reflect" + "testing" + + datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1" + "github.com/fluid-cloudnative/fluid/pkg/ddc/base" + appsv1 "k8s.io/api/apps/v1" + + "github.com/fluid-cloudnative/fluid/pkg/utils/fake" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + utilpointer "k8s.io/utils/pointer" + ctrl "sigs.k8s.io/controller-runtime" + + ctrlhelper "github.com/fluid-cloudnative/fluid/pkg/ctrl" +) + +func TestSetupWorkers(t *testing.T) { + + // runtimeInfoSpark tests create worker in exclusive mode. + + runtimeInfoSpark, err := base.BuildRuntimeInfo("spark", "big-data", "jindo", datav1alpha1.TieredStore{}) + + if err != nil { + t.Errorf("fail to create the runtimeInfo with error %v", err) + } + runtimeInfoSpark.SetupWithDataset(&datav1alpha1.Dataset{ + Spec: datav1alpha1.DatasetSpec{PlacementMode: datav1alpha1.ExclusiveMode}, + }) + + // runtimeInfoSpark tests create worker in shareMode mode. 
+ runtimeInfoHadoop, err := base.BuildRuntimeInfo("hadoop", "big-data", "jindo", datav1alpha1.TieredStore{}) + if err != nil { + t.Errorf("fail to create the runtimeInfo with error %v", err) + } + runtimeInfoHadoop.SetupWithDataset(&datav1alpha1.Dataset{ + Spec: datav1alpha1.DatasetSpec{PlacementMode: datav1alpha1.ShareMode}, + }) + nodeSelector := map[string]string{ + "node-select": "true", + } + runtimeInfoHadoop.SetupFuseDeployMode(true, nodeSelector) + + type fields struct { + replicas int32 + nodeInputs []*v1.Node + worker *appsv1.StatefulSet + deprecatedWorker *appsv1.DaemonSet + runtime *datav1alpha1.JindoRuntime + runtimeInfo base.RuntimeInfoInterface + name string + namespace string + deprecated bool + } + tests := []struct { + name string + fields fields + wantedNodeLabels map[string]map[string]string + }{ + { + name: "test0", + fields: fields{ + replicas: 1, + nodeInputs: []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-node-spark", + }, + }, + }, + worker: &appsv1.StatefulSet{ + + ObjectMeta: metav1.ObjectMeta{ + Name: "spark-jindofs-worker", + Namespace: "big-data", + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: utilpointer.Int32Ptr(1), + }, + }, + runtime: &datav1alpha1.JindoRuntime{ + ObjectMeta: metav1.ObjectMeta{ + Name: "spark", + Namespace: "big-data", + }, + Spec: datav1alpha1.JindoRuntimeSpec{ + Replicas: 1, + }, + }, + runtimeInfo: runtimeInfoSpark, + name: "spark", + namespace: "big-data", + }, + wantedNodeLabels: map[string]map[string]string{ + "test-node-spark": { + "fluid.io/dataset-num": "1", + "fluid.io/s-jindo-big-data-spark": "true", + "fluid.io/s-big-data-spark": "true", + "fluid.io/s-h-jindo-t-big-data-spark": "0B", + "fluid_exclusive": "big-data_spark", + }, + }, + }, + { + name: "test1", + fields: fields{ + replicas: 1, + worker: &appsv1.StatefulSet{ + + ObjectMeta: metav1.ObjectMeta{ + Name: "hadoop-jindofs-worker", + Namespace: "big-data", + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: utilpointer.Int32Ptr(1), + 
}, + }, + runtime: &datav1alpha1.JindoRuntime{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hadoop", + Namespace: "big-data", + }, + Spec: datav1alpha1.JindoRuntimeSpec{ + Replicas: 1, + }, + }, + runtimeInfo: runtimeInfoHadoop, + name: "hadoop", + namespace: "big-data", + }, + wantedNodeLabels: map[string]map[string]string{ + "test-node-hadoop": { + "fluid.io/dataset-num": "1", + "fluid.io/s-jindo-big-data-hadoop": "true", + "fluid.io/s-big-data-hadoop": "true", + "fluid.io/s-h-jindo-t-big-data-hadoop": "0B", + }, + }, + }, { + name: "deprecated", + fields: fields{ + replicas: 0, + worker: &appsv1.StatefulSet{}, + deprecatedWorker: &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{ + Name: "deprecated-jindofs-worker", + Namespace: "big-data", + }}, + runtime: &datav1alpha1.JindoRuntime{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deprecated", + Namespace: "big-data", + }, + Spec: datav1alpha1.JindoRuntimeSpec{ + Replicas: 1, + }, + }, + runtimeInfo: runtimeInfoHadoop, + name: "deprecated", + namespace: "big-data", + deprecated: true, + }, + wantedNodeLabels: map[string]map[string]string{ + "test-node-hadoop": { + "fluid.io/dataset-num": "1", + "fluid.io/s-jindo-big-data-hadoop": "true", + "fluid.io/s-big-data-hadoop": "true", + "fluid.io/s-h-jindo-t-big-data-hadoop": "0B", + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + runtimeObjs := []runtime.Object{} + for _, nodeInput := range tt.fields.nodeInputs { + runtimeObjs = append(runtimeObjs, nodeInput.DeepCopy()) + } + runtimeObjs = append(runtimeObjs, tt.fields.worker.DeepCopy()) + + s := runtime.NewScheme() + data := &datav1alpha1.Dataset{ + ObjectMeta: metav1.ObjectMeta{ + Name: tt.fields.name, + Namespace: tt.fields.namespace, + }, + } + s.AddKnownTypes(datav1alpha1.GroupVersion, tt.fields.runtime) + s.AddKnownTypes(datav1alpha1.GroupVersion, data) + s.AddKnownTypes(appsv1.SchemeGroupVersion, tt.fields.worker) + if tt.fields.deprecatedWorker != nil { + 
s.AddKnownTypes(appsv1.SchemeGroupVersion, tt.fields.deprecatedWorker) + } + _ = v1.AddToScheme(s) + runtimeObjs = append(runtimeObjs, tt.fields.runtime) + if tt.fields.deprecatedWorker != nil { + runtimeObjs = append(runtimeObjs, tt.fields.deprecatedWorker) + } + runtimeObjs = append(runtimeObjs, data) + mockClient := fake.NewFakeClientWithScheme(s, runtimeObjs...) + + e := &JindoFSxEngine{ + runtime: tt.fields.runtime, + runtimeInfo: tt.fields.runtimeInfo, + Client: mockClient, + name: tt.fields.name, + namespace: tt.fields.namespace, + Log: ctrl.Log.WithName(tt.fields.name), + } + + e.Helper = ctrlhelper.BuildHelper(tt.fields.runtimeInfo, mockClient, e.Log) + err := e.SetupWorkers() + if err != nil { + t.Errorf("testCase %s JindoFSxEngine.SetupWorkers() error = %v", tt.name, err) + } + + if !tt.fields.deprecated { + if tt.fields.replicas != *tt.fields.worker.Spec.Replicas { + t.Errorf("Failed to scale %v for %v", tt.name, tt.fields) + } + } + + // for _, node := range tt.fields.nodeInputs { + // newNode, err := kubeclient.GetNode(mockClient, node.Name) + // if err != nil { + // t.Errorf("fail to get the node with the error %v", err) + // } + + // if len(newNode.Labels) != len(tt.wantedNodeLabels[node.Name]) { + // t.Errorf("fail to decrease the labels, newNode labels is %v", newNode.Labels) + // } + // if len(newNode.Labels) != 0 && !reflect.DeepEqual(newNode.Labels, tt.wantedNodeLabels[node.Name]) { + // t.Errorf("fail to decrease the labels, newNode labels is %v", newNode.Labels) + // } + // } + }) + } +} + +func TestShouldSetupWorkers(t *testing.T) { + type fields struct { + name string + namespace string + runtime *datav1alpha1.JindoRuntime + } + tests := []struct { + name string + fields fields + wantShould bool + wantErr bool + }{ + { + name: "test0", + fields: fields{ + name: "spark", + namespace: "big-data", + runtime: &datav1alpha1.JindoRuntime{ + ObjectMeta: metav1.ObjectMeta{ + Name: "spark", + Namespace: "big-data", + }, + Status: 
datav1alpha1.RuntimeStatus{ + WorkerPhase: datav1alpha1.RuntimePhaseNone, + }, + }, + }, + wantShould: true, + wantErr: false, + }, + { + name: "test1", + fields: fields{ + name: "hadoop", + namespace: "big-data", + runtime: &datav1alpha1.JindoRuntime{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hadoop", + Namespace: "big-data", + }, + Status: datav1alpha1.RuntimeStatus{ + WorkerPhase: datav1alpha1.RuntimePhaseNotReady, + }, + }, + }, + wantShould: false, + wantErr: false, + }, + { + name: "test2", + fields: fields{ + name: "hbase", + namespace: "big-data", + runtime: &datav1alpha1.JindoRuntime{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase", + Namespace: "big-data", + }, + Status: datav1alpha1.RuntimeStatus{ + WorkerPhase: datav1alpha1.RuntimePhasePartialReady, + }, + }, + }, + wantShould: false, + wantErr: false, + }, + { + name: "test3", + fields: fields{ + name: "tensorflow", + namespace: "ml", + runtime: &datav1alpha1.JindoRuntime{ + ObjectMeta: metav1.ObjectMeta{ + Name: "tensorflow", + Namespace: "ml", + }, + Status: datav1alpha1.RuntimeStatus{ + WorkerPhase: datav1alpha1.RuntimePhaseReady, + }, + }, + }, + wantShould: false, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + runtimeObjs := []runtime.Object{} + data := &datav1alpha1.Dataset{ + ObjectMeta: metav1.ObjectMeta{ + Name: tt.fields.name, + Namespace: tt.fields.namespace, + }, + } + + s := runtime.NewScheme() + s.AddKnownTypes(datav1alpha1.GroupVersion, tt.fields.runtime) + s.AddKnownTypes(datav1alpha1.GroupVersion, data) + _ = v1.AddToScheme(s) + runtimeObjs = append(runtimeObjs, tt.fields.runtime, data) + mockClient := fake.NewFakeClientWithScheme(s, runtimeObjs...) 
+ e := &JindoFSxEngine{ + name: tt.fields.name, + namespace: tt.fields.namespace, + runtime: tt.fields.runtime, + Client: mockClient, + } + + gotShould, err := e.ShouldSetupWorkers() + if (err != nil) != tt.wantErr { + t.Errorf("JindoFSxEngine.ShouldSetupWorkers() error = %v, wantErr %v", err, tt.wantErr) + return + } + if gotShould != tt.wantShould { + t.Errorf("JindoFSxEngine.ShouldSetupWorkers() = %v, want %v", gotShould, tt.wantShould) + } + }) + } +} + +func TestCheckWorkersReady(t *testing.T) { + type fields struct { + runtime *datav1alpha1.JindoRuntime + worker *appsv1.StatefulSet + fuse *appsv1.DaemonSet + name string + namespace string + } + tests := []struct { + name string + fields fields + wantReady bool + wantErr bool + }{ + { + name: "test0", + fields: fields{ + name: "spark", + namespace: "big-data", + runtime: &datav1alpha1.JindoRuntime{ + ObjectMeta: metav1.ObjectMeta{ + Name: "spark", + Namespace: "big-data", + }, + Spec: datav1alpha1.JindoRuntimeSpec{ + Replicas: 1, + Fuse: datav1alpha1.JindoFuseSpec{ + Global: true, + }, + }, + }, + worker: &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "spark-jindofs-worker", + Namespace: "big-data", + }, + Status: appsv1.StatefulSetStatus{ + ReadyReplicas: 1, + }, + }, + fuse: &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "spark-jindofs-fuse", + Namespace: "big-data", + }, + Status: appsv1.DaemonSetStatus{ + NumberAvailable: 1, + DesiredNumberScheduled: 1, + CurrentNumberScheduled: 1, + }, + }, + }, + wantReady: true, + wantErr: false, + }, + { + name: "test1", + fields: fields{ + name: "hbase", + namespace: "big-data", + runtime: &datav1alpha1.JindoRuntime{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase", + Namespace: "big-data", + }, + Spec: datav1alpha1.JindoRuntimeSpec{ + Replicas: 1, + Fuse: datav1alpha1.JindoFuseSpec{ + Global: true, + }, + }, + }, + worker: &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase-jindofs-worker", + Namespace: "big-data", + }, + 
Status: appsv1.StatefulSetStatus{ + ReadyReplicas: 0, + }, + }, + fuse: &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hbase-jindofs-fuse", + Namespace: "big-data", + }, + Status: appsv1.DaemonSetStatus{ + NumberAvailable: 0, + DesiredNumberScheduled: 1, + CurrentNumberScheduled: 0, + }, + }, + }, + wantReady: false, + wantErr: false, + }, { + name: "deprecated", + fields: fields{ + name: "deprecated", + namespace: "big-data", + runtime: &datav1alpha1.JindoRuntime{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deprecated", + Namespace: "big-data", + }, + Spec: datav1alpha1.JindoRuntimeSpec{ + Replicas: 1, + Fuse: datav1alpha1.JindoFuseSpec{ + Global: true, + }, + }, + }, + worker: &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deprecated-jindofs-worker-0", + Namespace: "big-data", + }, + Status: appsv1.StatefulSetStatus{ + ReadyReplicas: 0, + }, + }, + fuse: &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deprecated-jindofs-worker", + Namespace: "big-data", + }, + Status: appsv1.DaemonSetStatus{ + NumberAvailable: 0, + DesiredNumberScheduled: 1, + CurrentNumberScheduled: 0, + }, + }, + }, + wantReady: true, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + runtimeObjs := []runtime.Object{} + data := &datav1alpha1.Dataset{ + ObjectMeta: metav1.ObjectMeta{ + Name: tt.fields.name, + Namespace: tt.fields.namespace, + }, + } + + s := runtime.NewScheme() + s.AddKnownTypes(datav1alpha1.GroupVersion, tt.fields.runtime) + s.AddKnownTypes(datav1alpha1.GroupVersion, data) + s.AddKnownTypes(appsv1.SchemeGroupVersion, tt.fields.worker) + s.AddKnownTypes(appsv1.SchemeGroupVersion, tt.fields.fuse) + _ = v1.AddToScheme(s) + + runtimeObjs = append(runtimeObjs, tt.fields.runtime, data, tt.fields.worker, tt.fields.fuse) + mockClient := fake.NewFakeClientWithScheme(s, runtimeObjs...) 
+ e := &JindoFSxEngine{ + runtime: tt.fields.runtime, + name: tt.fields.name, + namespace: tt.fields.namespace, + Client: mockClient, + Log: ctrl.Log.WithName(tt.fields.name), + } + + runtimeInfo, err := base.BuildRuntimeInfo(tt.fields.name, tt.fields.namespace, "jindo", datav1alpha1.TieredStore{}) + if err != nil { + t.Errorf("JindoFSxEngine.CheckWorkersReady() error = %v", err) + } + + e.Helper = ctrlhelper.BuildHelper(runtimeInfo, mockClient, e.Log) + + gotReady, err := e.CheckWorkersReady() + if (err != nil) != tt.wantErr { + t.Errorf("JindoFSxEngine.CheckWorkersReady() error = %v, wantErr %v", err, tt.wantErr) + return + } + if gotReady != tt.wantReady { + t.Errorf("JindoFSxEngine.CheckWorkersReady() = %v, want %v", gotReady, tt.wantReady) + } + }) + } +} + +func TestGetWorkerSelectors(t *testing.T) { + type fields struct { + name string + } + tests := []struct { + name string + fields fields + want string + }{ + { + name: "test0", + fields: fields{ + name: "spark", + }, + want: "app=jindo,release=spark,role=jindo-worker", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + e := &JindoFSxEngine{ + name: tt.fields.name, + } + if got := e.getWorkerSelectors(); got != tt.want { + t.Errorf("JindoFSxEngine.getWorkerSelectors() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestBuildWorkersAffinity(t *testing.T) { + type fields struct { + dataset *datav1alpha1.Dataset + worker *appsv1.StatefulSet + want *v1.Affinity + } + tests := []struct { + name string + fields fields + want *v1.Affinity + }{ + {name: "exlusive", + fields: fields{ + dataset: &datav1alpha1.Dataset{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test1", + Namespace: "big-data", + }, + Spec: datav1alpha1.DatasetSpec{ + PlacementMode: datav1alpha1.ExclusiveMode, + }, + }, + worker: &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test1-jindofs-worker", + Namespace: "big-data", + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: utilpointer.Int32Ptr(1), + }, + }, 
+ want: &v1.Affinity{ + PodAntiAffinity: &v1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "fluid.io/dataset", + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "kubernetes.io/hostname", + }, + }, + }, + NodeAffinity: &v1.NodeAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{ + { + Weight: 100, + Preference: v1.NodeSelectorTerm{ + MatchExpressions: []v1.NodeSelectorRequirement{ + { + Key: "fluid.io/f-big-data-test1", + Operator: v1.NodeSelectorOpIn, + Values: []string{"true"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, {name: "shared", + fields: fields{ + dataset: &datav1alpha1.Dataset{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test2", + Namespace: "big-data", + }, + Spec: datav1alpha1.DatasetSpec{ + PlacementMode: datav1alpha1.ShareMode, + }, + }, + worker: &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test2-jindofs-worker", + Namespace: "big-data", + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: utilpointer.Int32Ptr(1), + }, + }, + want: &v1.Affinity{ + PodAntiAffinity: &v1.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{ + { + // The default weight is 50 + Weight: 50, + PodAffinityTerm: v1.PodAffinityTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "fluid.io/dataset", + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "kubernetes.io/hostname", + }, + }, + }, + RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "fluid.io/dataset-placement", + Operator: metav1.LabelSelectorOpIn, + Values: []string{"Exclusive"}, + }, + }, + }, + TopologyKey: "kubernetes.io/hostname", + }, + }, 
+ }, + NodeAffinity: &v1.NodeAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{ + { + Weight: 100, + Preference: v1.NodeSelectorTerm{ + MatchExpressions: []v1.NodeSelectorRequirement{ + { + Key: "fluid.io/f-big-data-test2", + Operator: v1.NodeSelectorOpIn, + Values: []string{"true"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, {name: "dataset-with-affinity", + fields: fields{ + dataset: &datav1alpha1.Dataset{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test3", + Namespace: "big-data", + }, + Spec: datav1alpha1.DatasetSpec{ + NodeAffinity: &datav1alpha1.CacheableNodeAffinity{ + Required: &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + { + Key: "nodeA", + Operator: v1.NodeSelectorOpIn, + Values: []string{"true"}, + }, + }, + }, + }, + }, + }, + }, + }, + worker: &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test3-jindofs-worker", + Namespace: "big-data", + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: utilpointer.Int32Ptr(1), + }, + }, + want: &v1.Affinity{ + PodAntiAffinity: &v1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "fluid.io/dataset", + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "kubernetes.io/hostname", + }, + }, + }, + NodeAffinity: &v1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + { + Key: "nodeA", + Operator: v1.NodeSelectorOpIn, + Values: []string{"true"}, + }, + }, + }, + }, + }, + PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{ + { + Weight: 100, + Preference: v1.NodeSelectorTerm{ + MatchExpressions: []v1.NodeSelectorRequirement{ + { + Key: "fluid.io/f-big-data-test3", + Operator: 
v1.NodeSelectorOpIn, + Values: []string{"true"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := runtime.NewScheme() + s.AddKnownTypes(datav1alpha1.GroupVersion, tt.fields.dataset) + s.AddKnownTypes(appsv1.SchemeGroupVersion, tt.fields.worker) + _ = v1.AddToScheme(s) + runtimeObjs := []runtime.Object{} + runtimeObjs = append(runtimeObjs, tt.fields.dataset) + runtimeObjs = append(runtimeObjs, tt.fields.worker) + mockClient := fake.NewFakeClientWithScheme(s, runtimeObjs...) + e := &JindoFSxEngine{ + name: tt.fields.dataset.Name, + namespace: tt.fields.dataset.Namespace, + Client: mockClient, + } + + want := tt.fields.want + worker, err := e.buildWorkersAffinity(tt.fields.worker) + if err != nil { + t.Errorf("JindoFSxEngine.buildWorkersAffinity() = %v", err) + } + + if !reflect.DeepEqual(worker.Spec.Template.Spec.Affinity, want) { + t.Errorf("Test case %s JindoFSxEngine.buildWorkersAffinity() = %v, want %v", tt.name, worker.Spec.Template.Spec.Affinity, tt.fields.want) + } + }) + } +} From 8be78893ff45eb243362f15fbc57126825bb8aab Mon Sep 17 00:00:00 2001 From: Weiwei Date: Mon, 25 Apr 2022 20:16:53 +0800 Subject: [PATCH 02/10] add configs in juicefsruntime (#1804) Signed-off-by: zwwhdls --- api/v1alpha1/juicefsruntime_types.go | 3 + api/v1alpha1/openapi_generated.go | 15 ++++ api/v1alpha1/zz_generated.deepcopy.go | 9 ++ .../crds/data.fluid.io_alluxioruntimes.yaml | 31 ++++--- .../fluid/crds/data.fluid.io_databackups.yaml | 3 +- .../fluid/crds/data.fluid.io_dataloads.yaml | 3 +- .../fluid/crds/data.fluid.io_datasets.yaml | 3 +- .../crds/data.fluid.io_goosefsruntimes.yaml | 31 ++++--- .../crds/data.fluid.io_jindoruntimes.yaml | 15 ++-- .../crds/data.fluid.io_juicefsruntimes.yaml | 88 ++++++++++--------- .../bases/data.fluid.io_alluxioruntimes.yaml | 31 ++++--- .../crd/bases/data.fluid.io_databackups.yaml | 3 +- config/crd/bases/data.fluid.io_dataloads.yaml | 3 +- 
config/crd/bases/data.fluid.io_datasets.yaml | 3 +- .../bases/data.fluid.io_goosefsruntimes.yaml | 31 ++++--- .../bases/data.fluid.io_jindoruntimes.yaml | 15 ++-- .../bases/data.fluid.io_juicefsruntimes.yaml | 88 ++++++++++--------- config/rbac/role.yaml | 1 - config/webhook/manifests.yaml | 1 - pkg/ddc/juicefs/transform_fuse.go | 12 ++- pkg/ddc/juicefs/transform_fuse_test.go | 8 +- 21 files changed, 219 insertions(+), 178 deletions(-) diff --git a/api/v1alpha1/juicefsruntime_types.go b/api/v1alpha1/juicefsruntime_types.go index 042a771acc1..6649d8fada4 100644 --- a/api/v1alpha1/juicefsruntime_types.go +++ b/api/v1alpha1/juicefsruntime_types.go @@ -50,6 +50,9 @@ type JuiceFSRuntimeSpec struct { // Tiered storage used by JuiceFS TieredStore TieredStore `json:"tieredstore,omitempty"` + // Configs of JuiceFS + Configs *[]string `json:"configs,omitempty"` + // The replicas of the worker, need to be specified Replicas int32 `json:"replicas,omitempty"` diff --git a/api/v1alpha1/openapi_generated.go b/api/v1alpha1/openapi_generated.go index 0ae047fd6aa..cd9e706ab57 100644 --- a/api/v1alpha1/openapi_generated.go +++ b/api/v1alpha1/openapi_generated.go @@ -2989,6 +2989,21 @@ func schema_fluid_cloudnative_fluid_api_v1alpha1_JuiceFSRuntimeSpec(ref common.R Ref: ref("github.com/fluid-cloudnative/fluid/api/v1alpha1.TieredStore"), }, }, + "configs": { + SchemaProps: spec.SchemaProps{ + Description: "Configs of JuiceFS", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, "replicas": { SchemaProps: spec.SchemaProps{ Description: "The replicas of the worker, need to be specified", diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index dcff0c8b3a9..1f6f863f7d9 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -1341,6 +1341,15 @@ func (in 
*JuiceFSRuntimeSpec) DeepCopyInto(out *JuiceFSRuntimeSpec) { in.JobWorker.DeepCopyInto(&out.JobWorker) in.Fuse.DeepCopyInto(&out.Fuse) in.TieredStore.DeepCopyInto(&out.TieredStore) + if in.Configs != nil { + in, out := &in.Configs, &out.Configs + *out = new([]string) + if **in != nil { + in, out := *in, *out + *out = make([]string, len(*in)) + copy(*out, *in) + } + } if in.RunAs != nil { in, out := &in.RunAs, &out.RunAs *out = new(User) diff --git a/charts/fluid/fluid/crds/data.fluid.io_alluxioruntimes.yaml b/charts/fluid/fluid/crds/data.fluid.io_alluxioruntimes.yaml index d36776efd0e..939d8c0cec9 100644 --- a/charts/fluid/fluid/crds/data.fluid.io_alluxioruntimes.yaml +++ b/charts/fluid/fluid/crds/data.fluid.io_alluxioruntimes.yaml @@ -1,10 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.7.0 + controller-gen.kubebuilder.io/version: v0.8.0 creationTimestamp: null name: alluxioruntimes.data.fluid.io spec: @@ -153,7 +152,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -165,7 +164,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object @@ -254,7 +253,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -266,7 +265,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object @@ -313,7 +312,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -325,7 +324,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object @@ -386,7 +385,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -398,7 +397,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object @@ -459,7 +458,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -471,7 +470,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. 
If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object @@ -537,7 +536,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -549,7 +548,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object @@ -701,7 +700,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -713,7 +712,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object diff --git a/charts/fluid/fluid/crds/data.fluid.io_databackups.yaml b/charts/fluid/fluid/crds/data.fluid.io_databackups.yaml index a6df249afbd..1733aaf266c 100644 --- a/charts/fluid/fluid/crds/data.fluid.io_databackups.yaml +++ b/charts/fluid/fluid/crds/data.fluid.io_databackups.yaml @@ -1,10 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.7.0 + controller-gen.kubebuilder.io/version: v0.8.0 creationTimestamp: null name: databackups.data.fluid.io spec: diff --git a/charts/fluid/fluid/crds/data.fluid.io_dataloads.yaml b/charts/fluid/fluid/crds/data.fluid.io_dataloads.yaml index 29e20e0e8df..a0dd18869dc 100644 --- a/charts/fluid/fluid/crds/data.fluid.io_dataloads.yaml +++ b/charts/fluid/fluid/crds/data.fluid.io_dataloads.yaml @@ -1,10 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.7.0 + controller-gen.kubebuilder.io/version: v0.8.0 creationTimestamp: null name: dataloads.data.fluid.io spec: diff --git a/charts/fluid/fluid/crds/data.fluid.io_datasets.yaml b/charts/fluid/fluid/crds/data.fluid.io_datasets.yaml index e2d40e6fd40..1cc9f8afa3e 100644 --- a/charts/fluid/fluid/crds/data.fluid.io_datasets.yaml +++ 
b/charts/fluid/fluid/crds/data.fluid.io_datasets.yaml @@ -1,10 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.7.0 + controller-gen.kubebuilder.io/version: v0.8.0 creationTimestamp: null name: datasets.data.fluid.io spec: diff --git a/charts/fluid/fluid/crds/data.fluid.io_goosefsruntimes.yaml b/charts/fluid/fluid/crds/data.fluid.io_goosefsruntimes.yaml index 4bd907f636b..fc3768ca047 100644 --- a/charts/fluid/fluid/crds/data.fluid.io_goosefsruntimes.yaml +++ b/charts/fluid/fluid/crds/data.fluid.io_goosefsruntimes.yaml @@ -1,10 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.7.0 + controller-gen.kubebuilder.io/version: v0.8.0 creationTimestamp: null name: goosefsruntimes.data.fluid.io spec: @@ -146,7 +145,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -158,7 +157,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object @@ -255,7 +254,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -267,7 +266,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object @@ -329,7 +328,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -341,7 +340,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object @@ -410,7 +409,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -422,7 +421,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object @@ -491,7 +490,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -503,7 +502,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. 
If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object @@ -577,7 +576,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -589,7 +588,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object @@ -751,7 +750,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -763,7 +762,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object diff --git a/charts/fluid/fluid/crds/data.fluid.io_jindoruntimes.yaml b/charts/fluid/fluid/crds/data.fluid.io_jindoruntimes.yaml index 6e40518a1e5..f6954f96037 100644 --- a/charts/fluid/fluid/crds/data.fluid.io_jindoruntimes.yaml +++ b/charts/fluid/fluid/crds/data.fluid.io_jindoruntimes.yaml @@ -1,10 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.7.0 + controller-gen.kubebuilder.io/version: v0.8.0 creationTimestamp: null name: jindoruntimes.data.fluid.io spec: @@ -143,7 +142,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -155,7 +154,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object tolerations: @@ -284,7 +283,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -296,7 +295,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object tolerations: @@ -493,7 +492,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -505,7 +504,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. 
If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object tolerations: diff --git a/charts/fluid/fluid/crds/data.fluid.io_juicefsruntimes.yaml b/charts/fluid/fluid/crds/data.fluid.io_juicefsruntimes.yaml index 6e0c99c86f3..097e60e3cfd 100644 --- a/charts/fluid/fluid/crds/data.fluid.io_juicefsruntimes.yaml +++ b/charts/fluid/fluid/crds/data.fluid.io_juicefsruntimes.yaml @@ -1,10 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.7.0 + controller-gen.kubebuilder.io/version: v0.8.0 creationTimestamp: null name: juicefsruntimes.data.fluid.io spec: @@ -66,6 +65,11 @@ spec: spec: description: JuiceFSRuntimeSpec defines the desired state of JuiceFSRuntime properties: + configs: + description: Configs of JuiceFS + items: + type: string + type: array disablePrometheus: description: Disable monitoring for JuiceFS Runtime Prometheus is enabled by default @@ -93,13 +97,14 @@ spec: type: string value: description: 'Variable references $(VAR_NAME) are expanded - using the previous defined environment variables in the - container and any service environment variables. If a - variable cannot be resolved, the reference in the input - string will be unchanged. The $(VAR_NAME) syntax can be - escaped with a double $$, ie: $$(VAR_NAME). Escaped references - will never be expanded, regardless of whether the variable - exists or not. Defaults to "".' + using the previously defined environment variables in + the container and any service environment variables. If + a variable cannot be resolved, the reference in the input + string will be unchanged. 
Double $$ are reduced to a single + $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults to "".' type: string valueFrom: description: Source for the environment variable's value. @@ -222,7 +227,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -234,7 +239,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object @@ -273,7 +278,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -285,7 +290,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object @@ -308,13 +313,14 @@ spec: type: string value: description: 'Variable references $(VAR_NAME) are expanded - using the previous defined environment variables in the - container and any service environment variables. If a - variable cannot be resolved, the reference in the input - string will be unchanged. The $(VAR_NAME) syntax can be - escaped with a double $$, ie: $$(VAR_NAME). Escaped references - will never be expanded, regardless of whether the variable - exists or not. Defaults to "".' + using the previously defined environment variables in + the container and any service environment variables. If + a variable cannot be resolved, the reference in the input + string will be unchanged. Double $$ are reduced to a single + $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults to "".' type: string valueFrom: description: Source for the environment variable's value. @@ -464,7 +470,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -476,7 +482,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object @@ -514,13 +520,14 @@ spec: type: string value: description: 'Variable references $(VAR_NAME) are expanded - using the previous defined environment variables in the - container and any service environment variables. If a - variable cannot be resolved, the reference in the input - string will be unchanged. The $(VAR_NAME) syntax can be - escaped with a double $$, ie: $$(VAR_NAME). Escaped references - will never be expanded, regardless of whether the variable - exists or not. Defaults to "".' + using the previously defined environment variables in + the container and any service environment variables. If + a variable cannot be resolved, the reference in the input + string will be unchanged. Double $$ are reduced to a single + $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults to "".' type: string valueFrom: description: Source for the environment variable's value. 
@@ -670,7 +677,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -682,7 +689,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object @@ -789,13 +796,14 @@ spec: type: string value: description: 'Variable references $(VAR_NAME) are expanded - using the previous defined environment variables in the - container and any service environment variables. If a - variable cannot be resolved, the reference in the input - string will be unchanged. The $(VAR_NAME) syntax can be - escaped with a double $$, ie: $$(VAR_NAME). Escaped references - will never be expanded, regardless of whether the variable - exists or not. Defaults to "".' + using the previously defined environment variables in + the container and any service environment variables. If + a variable cannot be resolved, the reference in the input + string will be unchanged. Double $$ are reduced to a single + $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless + of whether the variable exists or not. 
Defaults to "".' type: string valueFrom: description: Source for the environment variable's value. @@ -945,7 +953,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -957,7 +965,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object diff --git a/config/crd/bases/data.fluid.io_alluxioruntimes.yaml b/config/crd/bases/data.fluid.io_alluxioruntimes.yaml index d36776efd0e..939d8c0cec9 100644 --- a/config/crd/bases/data.fluid.io_alluxioruntimes.yaml +++ b/config/crd/bases/data.fluid.io_alluxioruntimes.yaml @@ -1,10 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.7.0 + controller-gen.kubebuilder.io/version: v0.8.0 creationTimestamp: null name: alluxioruntimes.data.fluid.io spec: @@ -153,7 +152,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -165,7 +164,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object @@ -254,7 +253,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -266,7 +265,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object @@ -313,7 +312,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -325,7 +324,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object @@ -386,7 +385,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -398,7 +397,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object @@ -459,7 +458,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -471,7 +470,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object @@ -537,7 +536,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -549,7 +548,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. 
If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object @@ -701,7 +700,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -713,7 +712,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object diff --git a/config/crd/bases/data.fluid.io_databackups.yaml b/config/crd/bases/data.fluid.io_databackups.yaml index a6df249afbd..1733aaf266c 100644 --- a/config/crd/bases/data.fluid.io_databackups.yaml +++ b/config/crd/bases/data.fluid.io_databackups.yaml @@ -1,10 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.7.0 + controller-gen.kubebuilder.io/version: v0.8.0 creationTimestamp: null name: databackups.data.fluid.io spec: diff --git a/config/crd/bases/data.fluid.io_dataloads.yaml b/config/crd/bases/data.fluid.io_dataloads.yaml index 29e20e0e8df..a0dd18869dc 100644 --- a/config/crd/bases/data.fluid.io_dataloads.yaml +++ b/config/crd/bases/data.fluid.io_dataloads.yaml @@ -1,10 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.7.0 + controller-gen.kubebuilder.io/version: v0.8.0 creationTimestamp: null name: dataloads.data.fluid.io spec: diff --git a/config/crd/bases/data.fluid.io_datasets.yaml b/config/crd/bases/data.fluid.io_datasets.yaml index e2d40e6fd40..1cc9f8afa3e 100644 --- a/config/crd/bases/data.fluid.io_datasets.yaml +++ b/config/crd/bases/data.fluid.io_datasets.yaml @@ -1,10 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.7.0 + controller-gen.kubebuilder.io/version: v0.8.0 creationTimestamp: null name: datasets.data.fluid.io spec: diff --git a/config/crd/bases/data.fluid.io_goosefsruntimes.yaml b/config/crd/bases/data.fluid.io_goosefsruntimes.yaml index 4bd907f636b..fc3768ca047 100644 --- a/config/crd/bases/data.fluid.io_goosefsruntimes.yaml +++ b/config/crd/bases/data.fluid.io_goosefsruntimes.yaml @@ -1,10 +1,9 @@ - --- apiVersion: 
apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.7.0 + controller-gen.kubebuilder.io/version: v0.8.0 creationTimestamp: null name: goosefsruntimes.data.fluid.io spec: @@ -146,7 +145,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -158,7 +157,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object @@ -255,7 +254,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -267,7 +266,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. 
If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object @@ -329,7 +328,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -341,7 +340,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object @@ -410,7 +409,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -422,7 +421,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object @@ -491,7 +490,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -503,7 +502,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object @@ -577,7 +576,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -589,7 +588,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object @@ -751,7 +750,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -763,7 +762,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object diff --git a/config/crd/bases/data.fluid.io_jindoruntimes.yaml b/config/crd/bases/data.fluid.io_jindoruntimes.yaml index 6e40518a1e5..f6954f96037 100644 --- a/config/crd/bases/data.fluid.io_jindoruntimes.yaml +++ b/config/crd/bases/data.fluid.io_jindoruntimes.yaml @@ -1,10 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.7.0 + controller-gen.kubebuilder.io/version: v0.8.0 creationTimestamp: null name: jindoruntimes.data.fluid.io spec: @@ -143,7 +142,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -155,7 +154,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object tolerations: @@ -284,7 +283,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -296,7 +295,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object tolerations: @@ -493,7 +492,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -505,7 +504,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object tolerations: diff --git a/config/crd/bases/data.fluid.io_juicefsruntimes.yaml b/config/crd/bases/data.fluid.io_juicefsruntimes.yaml index 6e0c99c86f3..097e60e3cfd 100644 --- a/config/crd/bases/data.fluid.io_juicefsruntimes.yaml +++ b/config/crd/bases/data.fluid.io_juicefsruntimes.yaml @@ -1,10 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.7.0 + controller-gen.kubebuilder.io/version: v0.8.0 creationTimestamp: null name: juicefsruntimes.data.fluid.io spec: @@ -66,6 +65,11 @@ spec: spec: description: JuiceFSRuntimeSpec defines the desired state of JuiceFSRuntime properties: + configs: + description: Configs of JuiceFS + items: + type: string + type: array disablePrometheus: description: Disable monitoring for JuiceFS Runtime Prometheus is enabled by default @@ -93,13 +97,14 @@ spec: type: string value: description: 'Variable references $(VAR_NAME) are expanded - using the previous defined environment variables in the - container and any service environment variables. If a - variable cannot be resolved, the reference in the input - string will be unchanged. The $(VAR_NAME) syntax can be - escaped with a double $$, ie: $$(VAR_NAME). Escaped references - will never be expanded, regardless of whether the variable - exists or not. Defaults to "".' + using the previously defined environment variables in + the container and any service environment variables. If + a variable cannot be resolved, the reference in the input + string will be unchanged. Double $$ are reduced to a single + $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults to "".' 
type: string valueFrom: description: Source for the environment variable's value. @@ -222,7 +227,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -234,7 +239,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object @@ -273,7 +278,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -285,7 +290,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object @@ -308,13 +313,14 @@ spec: type: string value: description: 'Variable references $(VAR_NAME) are expanded - using the previous defined environment variables in the - container and any service environment variables. If a - variable cannot be resolved, the reference in the input - string will be unchanged. The $(VAR_NAME) syntax can be - escaped with a double $$, ie: $$(VAR_NAME). Escaped references - will never be expanded, regardless of whether the variable - exists or not. Defaults to "".' + using the previously defined environment variables in + the container and any service environment variables. If + a variable cannot be resolved, the reference in the input + string will be unchanged. Double $$ are reduced to a single + $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults to "".' type: string valueFrom: description: Source for the environment variable's value. @@ -464,7 +470,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -476,7 +482,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. 
If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object @@ -514,13 +520,14 @@ spec: type: string value: description: 'Variable references $(VAR_NAME) are expanded - using the previous defined environment variables in the - container and any service environment variables. If a - variable cannot be resolved, the reference in the input - string will be unchanged. The $(VAR_NAME) syntax can be - escaped with a double $$, ie: $$(VAR_NAME). Escaped references - will never be expanded, regardless of whether the variable - exists or not. Defaults to "".' + using the previously defined environment variables in + the container and any service environment variables. If + a variable cannot be resolved, the reference in the input + string will be unchanged. Double $$ are reduced to a single + $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults to "".' type: string valueFrom: description: Source for the environment variable's value. @@ -670,7 +677,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -682,7 +689,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object @@ -789,13 +796,14 @@ spec: type: string value: description: 'Variable references $(VAR_NAME) are expanded - using the previous defined environment variables in the - container and any service environment variables. If a - variable cannot be resolved, the reference in the input - string will be unchanged. The $(VAR_NAME) syntax can be - escaped with a double $$, ie: $$(VAR_NAME). Escaped references - will never be expanded, regardless of whether the variable - exists or not. Defaults to "".' + using the previously defined environment variables in + the container and any service environment variables. If + a variable cannot be resolved, the reference in the input + string will be unchanged. Double $$ are reduced to a single + $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults to "".' type: string valueFrom: description: Source for the environment variable's value. @@ -945,7 +953,7 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute - resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -957,7 +965,7 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object type: object diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 33417e95ec3..aa9071e3e40 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -1,4 +1,3 @@ - --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml index 34468a05599..d14d44b240f 100644 --- a/config/webhook/manifests.yaml +++ b/config/webhook/manifests.yaml @@ -1,4 +1,3 @@ - --- apiVersion: admissionregistration.k8s.io/v1 kind: MutatingWebhookConfiguration diff --git a/pkg/ddc/juicefs/transform_fuse.go b/pkg/ddc/juicefs/transform_fuse.go index d96b2c0f943..33729e047f6 100644 --- a/pkg/ddc/juicefs/transform_fuse.go +++ b/pkg/ddc/juicefs/transform_fuse.go @@ -52,7 +52,7 @@ func (j *JuiceFSEngine) transformFuse(runtime *datav1alpha1.JuiceFSRuntime, data if err != nil { return err } - j.genFormatCmd(value) + j.genFormatCmd(value, runtime.Spec.Configs) err = j.genMount(value, option) if err != nil { return err @@ -196,8 +196,16 @@ func (j *JuiceFSEngine) genMount(value *JuiceFS, options []string) (err error) { return nil } -func (j *JuiceFSEngine) genFormatCmd(value *JuiceFS) { +func (j *JuiceFSEngine) genFormatCmd(value *JuiceFS, config *[]string) { 
args := make([]string, 0) + if config != nil { + for _, option := range *config { + o := strings.TrimSpace(option) + if o != "" { + args = append(args, fmt.Sprintf("--%s", o)) + } + } + } if value.Edition == "community" { // ce if value.Fuse.AccessKeySecret != "" { diff --git a/pkg/ddc/juicefs/transform_fuse_test.go b/pkg/ddc/juicefs/transform_fuse_test.go index ed1fc8000e5..f3a467be638 100644 --- a/pkg/ddc/juicefs/transform_fuse_test.go +++ b/pkg/ddc/juicefs/transform_fuse_test.go @@ -731,8 +731,12 @@ func TestJuiceFSEngine_genFormatCmd(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - j := &JuiceFSEngine{} - j.genFormatCmd(tt.args.value) + j := &JuiceFSEngine{ + runtime: &datav1alpha1.JuiceFSRuntime{ + Spec: datav1alpha1.JuiceFSRuntimeSpec{}, + }, + } + j.genFormatCmd(tt.args.value, j.runtime.Spec.Configs) if tt.args.value.Fuse.FormatCmd != tt.wantFormatCmd { t.Errorf("genMount() value = %v", tt.args.value) } From ce674d6d89bf433dc749148e70ef213cd326a2cf Mon Sep 17 00:00:00 2001 From: frankleaf <62129564+frankleaf@users.noreply.github.com> Date: Tue, 26 Apr 2022 09:54:02 +0800 Subject: [PATCH 03/10] fix dataload (#1806) Signed-off-by: frankleaf --- pkg/ddc/jindo/const.go | 2 +- pkg/ddc/jindofsx/const.go | 2 ++ pkg/ddc/jindofsx/load_data.go | 4 ++-- pkg/ddc/jindofsx/utils.go | 2 +- 4 files changed, 6 insertions(+), 4 deletions(-) diff --git a/pkg/ddc/jindo/const.go b/pkg/ddc/jindo/const.go index 6681c8488ed..7763adad034 100644 --- a/pkg/ddc/jindo/const.go +++ b/pkg/ddc/jindo/const.go @@ -50,5 +50,5 @@ const ( JINDO_FUSE_MONNTPATH = "/jfs/jindofs-fuse" - DEFAULT_JINDO_RUNTIME_IMAGE = "registry.cn-shanghai.aliyuncs.com/jindofs/smartdata:4.3.0" + DEFAULT_JINDO_RUNTIME_IMAGE = "registry.cn-shanghai.aliyuncs.com/jindofs/smartdata:3.8.0" ) diff --git a/pkg/ddc/jindofsx/const.go b/pkg/ddc/jindofsx/const.go index a8a2feb4080..b79115f893f 100644 --- a/pkg/ddc/jindofsx/const.go +++ b/pkg/ddc/jindofsx/const.go @@ -55,4 +55,6 @@ const ( 
JINDO_FUSE_MONNTPATH = "/jfs/jindofs-fuse" DEFAULT_JINDOFSX_RUNTIME_IMAGE = "registry.cn-shanghai.aliyuncs.com/jindofs/smartdata:4.3.0" + + ENGINE_TYPE = "jindofsx" ) diff --git a/pkg/ddc/jindofsx/load_data.go b/pkg/ddc/jindofsx/load_data.go index fbfb6d44f99..93b43e7e550 100644 --- a/pkg/ddc/jindofsx/load_data.go +++ b/pkg/ddc/jindofsx/load_data.go @@ -25,7 +25,7 @@ import ( datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1" "github.com/fluid-cloudnative/fluid/pkg/common" cdataload "github.com/fluid-cloudnative/fluid/pkg/dataload" - "github.com/fluid-cloudnative/fluid/pkg/ddc/jindo/operations" + "github.com/fluid-cloudnative/fluid/pkg/ddc/jindofsx/operations" cruntime "github.com/fluid-cloudnative/fluid/pkg/runtime" "github.com/fluid-cloudnative/fluid/pkg/utils" "github.com/fluid-cloudnative/fluid/pkg/utils/docker" @@ -56,7 +56,7 @@ func (e *JindoFSxEngine) CreateDataLoadJob(ctx cruntime.ReconcileRequestContext, log.Error(err, "failed to generate dataload chart's value file") return err } - chartName := utils.GetChartsDirectory() + "/" + cdataload.DATALOAD_CHART + "/" + common.JindoRuntime + chartName := utils.GetChartsDirectory() + "/" + cdataload.DATALOAD_CHART + "/" + ENGINE_TYPE err = helm.InstallRelease(releaseName, targetDataload.Namespace, valueFileName, chartName) if err != nil { log.Error(err, "failed to install dataload chart") diff --git a/pkg/ddc/jindofsx/utils.go b/pkg/ddc/jindofsx/utils.go index 9929a8d6e11..812e2195e37 100644 --- a/pkg/ddc/jindofsx/utils.go +++ b/pkg/ddc/jindofsx/utils.go @@ -23,7 +23,7 @@ import ( datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1" "github.com/fluid-cloudnative/fluid/pkg/common" - "github.com/fluid-cloudnative/fluid/pkg/ddc/jindo/operations" + "github.com/fluid-cloudnative/fluid/pkg/ddc/jindofsx/operations" "github.com/fluid-cloudnative/fluid/pkg/utils" appsv1 "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/types" From 97f74b5f254443202b094851e12072ce31320804 Mon Sep 17 00:00:00 2001 From: 
cheyang Date: Tue, 26 Apr 2022 10:00:21 +0800 Subject: [PATCH 04/10] Refactor jindoruntime, To #40605168 (#1807) Signed-off-by: cheyang --- pkg/controllers/v1alpha1/jindo/implement.go | 11 ---- .../v1alpha1/jindo/jindoruntime_controller.go | 3 +- pkg/ddc/jindo/load_data.go | 5 +- pkg/utils/jindo/jindo.go | 50 +++++++++++++++++++ 4 files changed, 55 insertions(+), 14 deletions(-) create mode 100644 pkg/utils/jindo/jindo.go diff --git a/pkg/controllers/v1alpha1/jindo/implement.go b/pkg/controllers/v1alpha1/jindo/implement.go index d403d9aa173..1fa5d40e7d3 100644 --- a/pkg/controllers/v1alpha1/jindo/implement.go +++ b/pkg/controllers/v1alpha1/jindo/implement.go @@ -18,14 +18,12 @@ package jindo import ( datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1" cruntime "github.com/fluid-cloudnative/fluid/pkg/runtime" - "os" "github.com/fluid-cloudnative/fluid/pkg/ddc" "github.com/fluid-cloudnative/fluid/pkg/ddc/base" ) const ( - engineType = "JINDO_ENGINE_TYPE" runtimeResourceFinalizerName = "jindo-runtime-controller-finalizer" ) @@ -67,12 +65,3 @@ func (r *RuntimeReconciler) RemoveEngine(ctx cruntime.ReconcileRequestContext) { id := ddc.GenerateEngineID(ctx.NamespacedName) delete(r.engines, id) } - -func (r *RuntimeReconciler) GetRuntimeType() string { - engine := "jindo" - if env := os.Getenv(engineType); env == "jindofsx" { - engine = env - } - r.Log.V(1).Info("Put Engine to engine map with engine type " + engine) - return engine -} diff --git a/pkg/controllers/v1alpha1/jindo/jindoruntime_controller.go b/pkg/controllers/v1alpha1/jindo/jindoruntime_controller.go index 76cb3380a40..039c812eef4 100644 --- a/pkg/controllers/v1alpha1/jindo/jindoruntime_controller.go +++ b/pkg/controllers/v1alpha1/jindo/jindoruntime_controller.go @@ -36,6 +36,7 @@ import ( "github.com/fluid-cloudnative/fluid/pkg/ddc/base" cruntime "github.com/fluid-cloudnative/fluid/pkg/runtime" "github.com/fluid-cloudnative/fluid/pkg/utils" + jindoutils 
"github.com/fluid-cloudnative/fluid/pkg/utils/jindo" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -78,7 +79,7 @@ func (r *RuntimeReconciler) Reconcile(context context.Context, req ctrl.Request) NamespacedName: req.NamespacedName, Recorder: r.Recorder, Category: common.AccelerateCategory, - RuntimeType: r.GetRuntimeType(), + RuntimeType: jindoutils.GetRuntimeType(), Client: r.Client, FinalizerName: runtimeResourceFinalizerName, } diff --git a/pkg/ddc/jindo/load_data.go b/pkg/ddc/jindo/load_data.go index 7705382e9de..3d43f1aeea8 100644 --- a/pkg/ddc/jindo/load_data.go +++ b/pkg/ddc/jindo/load_data.go @@ -30,6 +30,7 @@ import ( "github.com/fluid-cloudnative/fluid/pkg/utils" "github.com/fluid-cloudnative/fluid/pkg/utils/docker" "github.com/fluid-cloudnative/fluid/pkg/utils/helm" + jindoutils "github.com/fluid-cloudnative/fluid/pkg/utils/jindo" "gopkg.in/yaml.v2" v1 "k8s.io/api/core/v1" ) @@ -79,7 +80,7 @@ func (e *JindoEngine) generateDataLoadValueFile(r cruntime.ReconcileRequestConte imageName, imageTag := docker.GetWorkerImage(r.Client, dataload.Spec.Dataset.Name, "jindo", dataload.Spec.Dataset.Namespace) if len(imageName) == 0 { - defaultImageInfo := strings.Split(DEFAULT_JINDO_RUNTIME_IMAGE, ":") + defaultImageInfo := strings.Split(jindoutils.GetRuntimeImage(), ":") if len(defaultImageInfo) < 1 { panic("invalid default dataload image!") } else { @@ -88,7 +89,7 @@ func (e *JindoEngine) generateDataLoadValueFile(r cruntime.ReconcileRequestConte } if len(imageTag) == 0 { - defaultImageInfo := strings.Split(DEFAULT_JINDO_RUNTIME_IMAGE, ":") + defaultImageInfo := strings.Split(jindoutils.GetRuntimeImage(), ":") if len(defaultImageInfo) < 2 { panic("invalid default dataload image!") } else { diff --git a/pkg/utils/jindo/jindo.go b/pkg/utils/jindo/jindo.go new file mode 100644 index 00000000000..4a981e63b7b --- /dev/null +++ b/pkg/utils/jindo/jindo.go @@ -0,0 +1,50 @@ +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file 
except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package jindo + +import "os" + +const ( + engineTypeFromEnv = "JINDO_ENGINE_TYPE" + + jindoEngine = "jindo" + + jindofsxEngine = "jindofsx" + + defaultJindofsxRuntimeImage = "registry.cn-shanghai.aliyuncs.com/jindofs/smartdata:4.3.0" + + defaultJindofsRuntimeImage = "registry.cn-shanghai.aliyuncs.com/jindofs/smartdata:3.8.0" +) + +// GetRuntimeType gets the runtime type for Jindo +func GetRuntimeType() (engine string) { + engine = jindoEngine + if env := os.Getenv(engineTypeFromEnv); env == jindofsxEngine { + engine = env + } + return +} + +// GetRuntimeImage gets the runtime of Jindo +func GetRuntimeImage() (image string) { + if GetRuntimeType() == jindofsxEngine { + image = defaultJindofsxRuntimeImage + } else { + image = defaultJindofsRuntimeImage + } + + return +} From e675bcb95de6a96c4a873bb2c049b768f5aa1ce2 Mon Sep 17 00:00:00 2001 From: cheyang Date: Tue, 26 Apr 2022 11:01:07 +0800 Subject: [PATCH 05/10] Refactor jindoruntime, To #40605168 (#1808) Signed-off-by: cheyang --- charts/fluid/fluid/Chart.yaml | 2 +- charts/fluid/fluid/values.yaml | 22 +++++++++++----------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/charts/fluid/fluid/Chart.yaml b/charts/fluid/fluid/Chart.yaml index 124296e7629..7ccd83859bb 100644 --- a/charts/fluid/fluid/Chart.yaml +++ b/charts/fluid/fluid/Chart.yaml @@ -18,7 +18,7 @@ version: 0.8.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
-appVersion: 0.8.0-8f58616 +appVersion: 0.8.0-97f74b5 home: https://github.com/fluid-cloudnative/fluid keywords: - category:data diff --git a/charts/fluid/fluid/values.yaml b/charts/fluid/fluid/values.yaml index 17490201e30..2b006d8e16f 100644 --- a/charts/fluid/fluid/values.yaml +++ b/charts/fluid/fluid/values.yaml @@ -6,7 +6,7 @@ workdir: /tmp dataset: controller: - image: fluidcloudnative/dataset-controller:v0.8.0-8f58616 + image: fluidcloudnative/dataset-controller:v0.8.0-97f74b5 csi: featureGates: "FuseRecovery=false" @@ -15,7 +15,7 @@ csi: registrar: image: registry.aliyuncs.com/acs/csi-node-driver-registrar:v1.2.0 plugins: - image: fluidcloudnative/fluid-csi:v0.8.0-8f58616 + image: fluidcloudnative/fluid-csi:v0.8.0-97f74b5 kubelet: rootDir: /var/lib/kubelet @@ -28,9 +28,9 @@ runtime: portRange: 20000-26000 enabled: true init: - image: fluidcloudnative/init-users:v0.8.0-8f58616 + image: fluidcloudnative/init-users:v0.8.0-97f74b5 controller: - image: fluidcloudnative/alluxioruntime-controller:v0.8.0-8f58616 + image: fluidcloudnative/alluxioruntime-controller:v0.8.0-97f74b5 runtime: image: registry.aliyuncs.com/alluxio/alluxio:release-2.7.2-SNAPSHOT-3714f2b fuse: @@ -44,19 +44,19 @@ runtime: fuse: image: registry.cn-shanghai.aliyuncs.com/jindofs/jindo-fuse:3.8.0 controller: - image: fluidcloudnative/jindoruntime-controller:v0.8.0-8f58616 + image: fluidcloudnative/jindoruntime-controller:v0.8.0-97f74b5 init: portCheck: enabled: false - image: fluidcloudnative/init-users:v0.8.0-8f58616 + image: fluidcloudnative/init-users:v0.8.0-97f74b5 goosefs: runtimeWorkers: 3 portRange: 26000-32000 enabled: false init: - image: fluidcloudnative/init-users:v0.8.0-8f58616 + image: fluidcloudnative/init-users:v0.8.0-97f74b5 controller: - image: fluidcloudnative/goosefsruntime-controller:v0.8.0-8f58616 + image: fluidcloudnative/goosefsruntime-controller:v0.8.0-97f74b5 runtime: image: ccr.ccs.tencentyun.com/qcloud/goosefs:v1.2.0 fuse: @@ -64,16 +64,16 @@ runtime: juicefs: enabled: 
false controller: - image: fluidcloudnative/juicefsruntime-controller:v0.8.0-8f58616 + image: fluidcloudnative/juicefsruntime-controller:v0.8.0-97f74b5 fuse: image: registry.cn-hangzhou.aliyuncs.com/juicefs/juicefs-fuse:v1.0.0-beta2 webhook: enabled: true - image: fluidcloudnative/fluid-webhook:v0.8.0-8f58616 + image: fluidcloudnative/fluid-webhook:v0.8.0-97f74b5 replicas: 1 fluidapp: enabled: true controller: - image: fluidcloudnative/application-controller:v0.8.0-8f58616 + image: fluidcloudnative/application-controller:v0.8.0-97f74b5 From 79d0b306f92a91bb076aa35511708acefad14358 Mon Sep 17 00:00:00 2001 From: Weiwei Date: Wed, 27 Apr 2022 10:25:27 +0800 Subject: [PATCH 06/10] separate worker option and fuse option (#1810) Signed-off-by: zwwhdls --- api/v1alpha1/juicefsruntime_types.go | 3 + api/v1alpha1/openapi_generated.go | 16 + api/v1alpha1/zz_generated.deepcopy.go | 7 + .../crds/data.fluid.io_juicefsruntimes.yaml | 15 + charts/juicefs/templates/fuse/daemonset.yaml | 20 +- .../templates/worker/statefuleset.yaml | 24 +- charts/juicefs/values.yaml | 12 +- .../bases/data.fluid.io_juicefsruntimes.yaml | 15 + pkg/ddc/juicefs/transform_fuse.go | 169 +++++----- pkg/ddc/juicefs/transform_fuse_test.go | 293 +++++++++--------- pkg/ddc/juicefs/type.go | 42 ++- 11 files changed, 359 insertions(+), 257 deletions(-) diff --git a/api/v1alpha1/juicefsruntime_types.go b/api/v1alpha1/juicefsruntime_types.go index 6649d8fada4..728d8920b1f 100644 --- a/api/v1alpha1/juicefsruntime_types.go +++ b/api/v1alpha1/juicefsruntime_types.go @@ -82,6 +82,9 @@ type JuiceFSCompTemplateSpec struct { // +optional Resources corev1.ResourceRequirements `json:"resources,omitempty"` + // Options + Options map[string]string `json:"options,omitempty"` + // Environment variables that will be used by JuiceFS component. 
Env []corev1.EnvVar `json:"env,omitempty"` diff --git a/api/v1alpha1/openapi_generated.go b/api/v1alpha1/openapi_generated.go index cd9e706ab57..7b9b735fbbb 100644 --- a/api/v1alpha1/openapi_generated.go +++ b/api/v1alpha1/openapi_generated.go @@ -2706,6 +2706,22 @@ func schema_fluid_cloudnative_fluid_api_v1alpha1_JuiceFSCompTemplateSpec(ref com Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), }, }, + "options": { + SchemaProps: spec.SchemaProps{ + Description: "Options", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, "env": { SchemaProps: spec.SchemaProps{ Description: "Environment variables that will be used by JuiceFS component.", diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 1f6f863f7d9..7e390754c2b 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -1216,6 +1216,13 @@ func (in *JuiceFSCompTemplateSpec) DeepCopyInto(out *JuiceFSCompTemplateSpec) { copy(*out, *in) } in.Resources.DeepCopyInto(&out.Resources) + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } if in.Env != nil { in, out := &in.Env, &out.Env *out = make([]v1.EnvVar, len(*in)) diff --git a/charts/fluid/fluid/crds/data.fluid.io_juicefsruntimes.yaml b/charts/fluid/fluid/crds/data.fluid.io_juicefsruntimes.yaml index 097e60e3cfd..71b63b8fda2 100644 --- a/charts/fluid/fluid/crds/data.fluid.io_juicefsruntimes.yaml +++ b/charts/fluid/fluid/crds/data.fluid.io_juicefsruntimes.yaml @@ -416,6 +416,11 @@ spec: type: string description: NodeSelector is a selector type: object + options: + additionalProperties: + type: string + description: Options + type: object ports: description: Ports used by JuiceFS items: @@ -623,6 
+628,11 @@ spec: type: string description: NodeSelector is a selector type: object + options: + additionalProperties: + type: string + description: Options + type: object ports: description: Ports used by JuiceFS items: @@ -899,6 +909,11 @@ spec: type: string description: NodeSelector is a selector type: object + options: + additionalProperties: + type: string + description: Options + type: object ports: description: Ports used by JuiceFS items: diff --git a/charts/juicefs/templates/fuse/daemonset.yaml b/charts/juicefs/templates/fuse/daemonset.yaml index e947cc47d1b..e891102438c 100644 --- a/charts/juicefs/templates/fuse/daemonset.yaml +++ b/charts/juicefs/templates/fuse/daemonset.yaml @@ -64,32 +64,32 @@ spec: {{- if .Values.fuse.envs }} {{ toYaml .Values.fuse.envs | trim | indent 10 }} {{- end }} - {{- if .Values.fuse.metaurlSecret }} + {{- if .Values.configs.metaurlSecret }} - name: METAURL valueFrom: secretKeyRef: - name: {{ .Values.fuse.metaurlSecret }} + name: {{ .Values.configs.metaurlSecret }} key: metaurl {{- end }} - {{- if .Values.fuse.accesskeySecret }} + {{- if .Values.configs.accesskeySecret }} - name: ACCESS_KEY valueFrom: secretKeyRef: - name: {{ .Values.fuse.accesskeySecret }} + name: {{ .Values.configs.accesskeySecret }} key: access-key {{- end }} - {{- if .Values.fuse.secretkeySecret }} + {{- if .Values.configs.secretkeySecret }} - name: SECRET_KEY valueFrom: secretKeyRef: - name: {{ .Values.fuse.secretkeySecret }} + name: {{ .Values.configs.secretkeySecret }} key: secret-key {{- end }} - {{- if .Values.fuse.tokenSecret }} + {{- if .Values.configs.tokenSecret }} - name: TOKEN valueFrom: secretKeyRef: - name: {{ .Values.fuse.tokenSecret }} + name: {{ .Values.configs.tokenSecret }} key: token {{- end }} readinessProbe: @@ -154,8 +154,8 @@ data: script.sh: | #!/bin/bash - {{- if .Values.fuse.formatCmd }} - {{ .Values.fuse.formatCmd }} + {{- if .Values.configs.formatCmd }} + {{ .Values.configs.formatCmd }} {{- end }} {{ .Values.fuse.command }} diff 
--git a/charts/juicefs/templates/worker/statefuleset.yaml b/charts/juicefs/templates/worker/statefuleset.yaml index 4bb1a0709c0..1760309f762 100644 --- a/charts/juicefs/templates/worker/statefuleset.yaml +++ b/charts/juicefs/templates/worker/statefuleset.yaml @@ -71,32 +71,32 @@ spec: {{- if .Values.worker.envs }} {{ toYaml .Values.worker.envs | trim | indent 10 }} {{- end }} - {{- if .Values.fuse.metaurlSecret }} + {{- if .Values.configs.metaurlSecret }} - name: METAURL valueFrom: secretKeyRef: - name: {{ .Values.fuse.metaurlSecret }} + name: {{ .Values.configs.metaurlSecret }} key: metaurl {{- end }} - {{- if .Values.fuse.accesskeySecret }} + {{- if .Values.configs.accesskeySecret }} - name: ACCESS_KEY valueFrom: secretKeyRef: - name: {{ .Values.fuse.accesskeySecret }} + name: {{ .Values.configs.accesskeySecret }} key: access-key {{- end }} - {{- if .Values.fuse.secretkeySecret }} + {{- if .Values.configs.secretkeySecret }} - name: SECRET_KEY valueFrom: secretKeyRef: - name: {{ .Values.fuse.secretkeySecret }} + name: {{ .Values.configs.secretkeySecret }} key: secret-key {{- end }} - {{- if .Values.fuse.tokenSecret }} + {{- if .Values.configs.tokenSecret }} - name: TOKEN valueFrom: secretKeyRef: - name: {{ .Values.fuse.tokenSecret }} + name: {{ .Values.configs.tokenSecret }} key: token {{- end }} livenessProbe: @@ -104,7 +104,7 @@ spec: command: - sh - -c - - 'if [ x$({{ .Values.fuse.statCmd }}) = x1 ]; then exit 0; else exit 1; fi ' + - 'if [ x$({{ .Values.worker.statCmd }}) = x1 ]; then exit 0; else exit 1; fi ' failureThreshold: 3 initialDelaySeconds: 1 periodSeconds: 1 @@ -113,7 +113,7 @@ spec: lifecycle: preStop: exec: - command: ["sh", "-c", "umount {{ .Values.fuse.mountPath }}"] + command: ["sh", "-c", "umount {{ .Values.worker.mountPath }}"] volumeMounts: - mountPath: /root/script name: script @@ -148,8 +148,8 @@ data: script.sh: | #!/bin/bash - {{- if .Values.fuse.formatCmd }} - {{ .Values.fuse.formatCmd }} + {{- if .Values.configs.formatCmd }} + {{ 
.Values.configs.formatCmd }} {{- end }} {{ .Values.worker.command }} diff --git a/charts/juicefs/values.yaml b/charts/juicefs/values.yaml index a9cbea5a943..cbd395e57bd 100644 --- a/charts/juicefs/values.yaml +++ b/charts/juicefs/values.yaml @@ -31,6 +31,8 @@ worker: ports: [] envs: [] command: "" + mountPath: /mnt/jfs + statCmd: "stat -c %i /mnt/jfs" resources: requests: # cpu: "0.5" @@ -40,10 +42,7 @@ worker: # memory: "4G" replicaCount: 0 - -## FUSE ## - -fuse: +configs: name: "" accesskeySecret: "" secretkeySecret: "" @@ -52,6 +51,10 @@ fuse: storage: "" tokenSecret: "" formatCmd : "" + +## FUSE ## + +fuse: subPath: "" criticalPod: false enabled: true @@ -64,7 +67,6 @@ fuse: # Mount path in the host mountPath: /mnt/jfs cacheDir: "" - metaUrl : "" hostMountPath: /mnt/jfs command: "/usr/local/bin/juicefs mount /mnt/jfs" statCmd: "stat -c %i /mnt/jfs" diff --git a/config/crd/bases/data.fluid.io_juicefsruntimes.yaml b/config/crd/bases/data.fluid.io_juicefsruntimes.yaml index 097e60e3cfd..71b63b8fda2 100644 --- a/config/crd/bases/data.fluid.io_juicefsruntimes.yaml +++ b/config/crd/bases/data.fluid.io_juicefsruntimes.yaml @@ -416,6 +416,11 @@ spec: type: string description: NodeSelector is a selector type: object + options: + additionalProperties: + type: string + description: Options + type: object ports: description: Ports used by JuiceFS items: @@ -623,6 +628,11 @@ spec: type: string description: NodeSelector is a selector type: object + options: + additionalProperties: + type: string + description: Options + type: object ports: description: Ports used by JuiceFS items: @@ -899,6 +909,11 @@ spec: type: string description: NodeSelector is a selector type: object + options: + additionalProperties: + type: string + description: Options + type: object ports: description: Ports used by JuiceFS items: diff --git a/pkg/ddc/juicefs/transform_fuse.go b/pkg/ddc/juicefs/transform_fuse.go index 33729e047f6..688faf89e50 100644 --- a/pkg/ddc/juicefs/transform_fuse.go +++ 
b/pkg/ddc/juicefs/transform_fuse.go @@ -21,7 +21,6 @@ import ( "fmt" datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1" "github.com/fluid-cloudnative/fluid/pkg/common" - "github.com/fluid-cloudnative/fluid/pkg/utils" "github.com/fluid-cloudnative/fluid/pkg/utils/kubeclient" "strings" ) @@ -34,7 +33,7 @@ func (j *JuiceFSEngine) transformFuse(runtime *datav1alpha1.JuiceFSRuntime, data } mount := dataset.Spec.Mounts[0] - value.Fuse.Name = mount.Name + value.Configs.Name = mount.Name image := runtime.Spec.Fuse.Image tag := runtime.Spec.Fuse.ImageTag @@ -53,7 +52,7 @@ func (j *JuiceFSEngine) transformFuse(runtime *datav1alpha1.JuiceFSRuntime, data return err } j.genFormatCmd(value, runtime.Spec.Configs) - err = j.genMount(value, option) + err = j.genMount(value, runtime, option) if err != nil { return err } @@ -73,30 +72,23 @@ func (j *JuiceFSEngine) transformFuse(runtime *datav1alpha1.JuiceFSRuntime, data return } -func (j *JuiceFSEngine) genValue(mount datav1alpha1.Mount, tiredStoreLevel *datav1alpha1.Level, value *JuiceFS) ([]string, error) { - value.Fuse.Name = mount.Name - opts := make(map[string]string) +func (j *JuiceFSEngine) genValue(mount datav1alpha1.Mount, tiredStoreLevel *datav1alpha1.Level, value *JuiceFS) (map[string]string, error) { + value.Configs.Name = mount.Name + options := make(map[string]string) source := "" value.Edition = "enterprise" for k, v := range mount.Options { switch k { case JuiceStorage: - value.Fuse.Storage = v + value.Configs.Storage = v continue case JuiceBucket: - value.Fuse.Bucket = v + value.Configs.Bucket = v continue default: - opts[k] = v + options[k] = v } } - options := []string{} - for k, v := range opts { - if v != "" { - k = fmt.Sprintf("%s=%s", k, v) - } - options = append(options, k) - } for _, encryptOption := range mount.EncryptOptions { key := encryptOption.Name secretKeyRef := encryptOption.ValueFrom.SecretKeyRef @@ -112,18 +104,18 @@ func (j *JuiceFSEngine) genValue(mount datav1alpha1.Mount, 
tiredStoreLevel *data switch key { case JuiceMetaUrl: source = "${METAURL}" - value.Fuse.MetaUrlSecret = secretKeyRef.Name + value.Configs.MetaUrlSecret = secretKeyRef.Name _, ok := secret.Data[secretKeyRef.Key] if !ok { return nil, fmt.Errorf("can't get metaurl from secret %s", secret.Name) } value.Edition = "community" case JuiceAccessKey: - value.Fuse.AccessKeySecret = secretKeyRef.Name + value.Configs.AccessKeySecret = secretKeyRef.Name case JuiceSecretKey: - value.Fuse.SecretKeySecret = secretKeyRef.Name + value.Configs.SecretKeySecret = secretKeyRef.Name case JuiceToken: - value.Fuse.TokenSecret = secretKeyRef.Name + value.Configs.TokenSecret = secretKeyRef.Name } } @@ -137,9 +129,12 @@ func (j *JuiceFSEngine) genValue(mount datav1alpha1.Mount, tiredStoreLevel *data return nil, err } value.Fuse.MountPath = j.getMountPoint() + value.Worker.MountPath = j.getMountPoint() value.Fuse.HostMountPath = j.getHostMountPoint() - value.Fuse.SubPath = subPath - options = append(options, fmt.Sprintf("subdir=%s", subPath)) + if subPath != "/" { + value.Fuse.SubPath = subPath + options["subdir"] = subPath + } var cacheDir = DefaultCacheDir if tiredStoreLevel != nil { @@ -148,13 +143,13 @@ func (j *JuiceFSEngine) genValue(mount datav1alpha1.Mount, tiredStoreLevel *data cacheDir = "memory" } if tiredStoreLevel.Quota != nil { - options = append(options, fmt.Sprintf("cache-size=%s", tiredStoreLevel.Quota.String())) + options["cache-size"] = tiredStoreLevel.Quota.String() } if tiredStoreLevel.Low != "" { - options = append(options, fmt.Sprintf("free-space-ratio=%s", tiredStoreLevel.Low)) + options["free-space-ratio"] = tiredStoreLevel.Low } } - options = append(options, fmt.Sprintf("cache-dir=%s", cacheDir)) + options["cache-dir"] = cacheDir if cacheDir != "memory" { value.Fuse.CacheDir = cacheDir } @@ -162,40 +157,68 @@ func (j *JuiceFSEngine) genValue(mount datav1alpha1.Mount, tiredStoreLevel *data return options, nil } -func (j *JuiceFSEngine) genMount(value *JuiceFS, options 
[]string) (err error) { +func (j *JuiceFSEngine) genMount(value *JuiceFS, runtime *datav1alpha1.JuiceFSRuntime, optionMap map[string]string) (err error) { var mountArgs, mountArgsWorker []string - if options == nil { - options = []string{} + workerOptionMap := make(map[string]string) + if optionMap == nil { + optionMap = map[string]string{} + } + // gen worker option + for k, v := range optionMap { + workerOptionMap[k] = v + } + if runtime != nil { + // if runtime.worker option is set, take it + for k, v := range runtime.Spec.Worker.Options { + workerOptionMap[k] = v + } } if value.Edition == "community" { - if !utils.ContainsSubString(options, "metrics") { - options = append(options, "metrics=0.0.0.0:9567") + if _, ok := optionMap["metrics"]; !ok { + optionMap["metrics"] = "0.0.0.0:9567" } - mountArgs = []string{common.JuiceFSCeMountPath, value.Source, value.Fuse.MountPath, "-o", strings.Join(options, ",")} - mountArgsWorker = []string{common.JuiceFSCeMountPath, value.Source, value.Fuse.MountPath, "-o", strings.Join(options, ",")} - } else { - if !utils.ContainsString(options, "foreground") { - options = append(options, "foreground") + if _, ok := workerOptionMap["metrics"]; !ok { + workerOptionMap["metrics"] = "0.0.0.0:9567" } - fuseOption := make([]string, len(options)) - copy(fuseOption, options) - if !utils.ContainsSubString(options, "cache-group") { - // start independent cache cluster, refer to [juicefs cache sharing](https://juicefs.com/docs/cloud/cache/#client_cache_sharing) - // fuse and worker use the same cache-group, fuse use no-sharing - options = append(options, fmt.Sprintf("cache-group=%s", value.FullnameOverride)) - fuseOption = append(fuseOption, fmt.Sprintf("cache-group=%s", value.FullnameOverride)) - fuseOption = append(fuseOption, "no-sharing") + mountArgs = []string{common.JuiceFSCeMountPath, value.Source, value.Fuse.MountPath, "-o", strings.Join(genOption(optionMap), ",")} + mountArgsWorker = []string{common.JuiceFSCeMountPath, value.Source, 
value.Worker.MountPath, "-o", strings.Join(genOption(workerOptionMap), ",")} + } else { + optionMap["foreground"] = "" + workerOptionMap["foreground"] = "" + + // start independent cache cluster, refer to [juicefs cache sharing](https://juicefs.com/docs/cloud/cache/#client_cache_sharing) + // fuse and worker use the same cache-group, fuse use no-sharing + cacheGroup := value.FullnameOverride + if _, ok := optionMap["cache-group"]; ok { + cacheGroup = optionMap["cache-group"] } - mountArgs = []string{common.JuiceFSMountPath, value.Source, value.Fuse.MountPath, "-o", strings.Join(fuseOption, ",")} - mountArgsWorker = []string{common.JuiceFSMountPath, value.Source, value.Fuse.MountPath, "-o", strings.Join(options, ",")} + optionMap["cache-group"] = cacheGroup + workerOptionMap["cache-group"] = cacheGroup + optionMap["no-sharing"] = "" + delete(workerOptionMap, "no-sharing") + + mountArgs = []string{common.JuiceFSMountPath, value.Source, value.Fuse.MountPath, "-o", strings.Join(genOption(optionMap), ",")} + mountArgsWorker = []string{common.JuiceFSMountPath, value.Source, value.Worker.MountPath, "-o", strings.Join(genOption(workerOptionMap), ",")} } value.Worker.Command = strings.Join(mountArgsWorker, " ") value.Fuse.Command = strings.Join(mountArgs, " ") value.Fuse.StatCmd = "stat -c %i " + value.Fuse.MountPath + value.Worker.StatCmd = "stat -c %i " + value.Worker.MountPath return nil } +func genOption(optionMap map[string]string) []string { + options := []string{} + for k, v := range optionMap { + if v != "" { + k = fmt.Sprintf("%s=%s", k, v) + } + options = append(options, k) + } + return options +} + func (j *JuiceFSEngine) genFormatCmd(value *JuiceFS, config *[]string) { args := make([]string, 0) if config != nil { @@ -208,42 +231,42 @@ func (j *JuiceFSEngine) genFormatCmd(value *JuiceFS, config *[]string) { } if value.Edition == "community" { // ce - if value.Fuse.AccessKeySecret != "" { + if value.Configs.AccessKeySecret != "" { args = append(args, 
"--access-key=${ACCESS_KEY}") } - if value.Fuse.SecretKeySecret != "" { + if value.Configs.SecretKeySecret != "" { args = append(args, "--secret-key=${SECRET_KEY}") } - if value.Fuse.Storage == "" || value.Fuse.Bucket == "" { + if value.Configs.Storage == "" || value.Configs.Bucket == "" { args = append(args, "--no-update") } - if value.Fuse.Storage != "" { - args = append(args, fmt.Sprintf("--storage=%s", value.Fuse.Storage)) + if value.Configs.Storage != "" { + args = append(args, fmt.Sprintf("--storage=%s", value.Configs.Storage)) } - if value.Fuse.Bucket != "" { - args = append(args, fmt.Sprintf("--bucket=%s", value.Fuse.Bucket)) + if value.Configs.Bucket != "" { + args = append(args, fmt.Sprintf("--bucket=%s", value.Configs.Bucket)) } - args = append(args, value.Source, value.Fuse.Name) + args = append(args, value.Source, value.Configs.Name) cmd := append([]string{common.JuiceCeCliPath, "format"}, args...) - value.Fuse.FormatCmd = strings.Join(cmd, " ") - } else { - // ee - if value.Fuse.TokenSecret == "" { - // skip juicefs auth - return - } - args = append(args, "--token=${TOKEN}") - if value.Fuse.AccessKeySecret != "" { - args = append(args, "--accesskey=${ACCESS_KEY}") - } - if value.Fuse.SecretKeySecret != "" { - args = append(args, "--secretkey=${SECRET_KEY}") - } - if value.Fuse.Bucket != "" { - args = append(args, fmt.Sprintf("--bucket=%s", value.Fuse.Bucket)) - } - args = append(args, value.Source) - cmd := append([]string{common.JuiceCliPath, "auth"}, args...) 
- value.Fuse.FormatCmd = strings.Join(cmd, " ") + value.Configs.FormatCmd = strings.Join(cmd, " ") + return + } + // ee + if value.Configs.TokenSecret == "" { + // skip juicefs auth + return + } + args = append(args, "--token=${TOKEN}") + if value.Configs.AccessKeySecret != "" { + args = append(args, "--accesskey=${ACCESS_KEY}") + } + if value.Configs.SecretKeySecret != "" { + args = append(args, "--secretkey=${SECRET_KEY}") + } + if value.Configs.Bucket != "" { + args = append(args, fmt.Sprintf("--bucket=%s", value.Configs.Bucket)) } + args = append(args, value.Source) + cmd := append([]string{common.JuiceCliPath, "auth"}, args...) + value.Configs.FormatCmd = strings.Join(cmd, " ") } diff --git a/pkg/ddc/juicefs/transform_fuse_test.go b/pkg/ddc/juicefs/transform_fuse_test.go index f3a467be638..bdf34d844d6 100644 --- a/pkg/ddc/juicefs/transform_fuse_test.go +++ b/pkg/ddc/juicefs/transform_fuse_test.go @@ -20,6 +20,7 @@ import ( "encoding/base64" "github.com/fluid-cloudnative/fluid/pkg/common" "github.com/go-logr/logr" + "reflect" "testing" "k8s.io/apimachinery/pkg/api/resource" @@ -348,11 +349,12 @@ func TestJuiceFSEngine_genValue(t *testing.T) { value *JuiceFS } tests := []struct { - name string - fields fields - args args - wantErr bool - wantValue *JuiceFS + name string + fields fields + args args + wantErr bool + wantValue *JuiceFS + wantOptions map[string]string }{ { name: "test", @@ -388,21 +390,8 @@ func TestJuiceFSEngine_genValue(t *testing.T) { }, }, wantErr: false, - wantValue: &JuiceFS{ - FullnameOverride: "test", - Fuse: Fuse{ - SubPath: "/", - TokenSecret: "test-enterprise", - MountPath: "/juicefs/fluid/test/juicefs-fuse", - CacheDir: "/dev", - HostMountPath: "/juicefs/fluid/test", - //Command: "/sbin/mount.juicefs test /juicefs/fluid/test/juicefs-fuse -o subdir=/,cache-dir=/dev,foreground,cache-group=test,no-sharing", - //StatCmd: "stat -c %i /juicefs/fluid/test/juicefs-fuse", - //FormatCmd: "/usr/bin/juicefs auth --token=${TOKEN} test", - }, - 
Worker: Worker{ - //Command: "/sbin/mount.juicefs test /juicefs/fluid/test/juicefs-fuse -o subdir=/,cache-dir=/dev,foreground,cache-group=test", - }, + wantOptions: map[string]string{ + "cache-dir": "/dev", }, }, { @@ -414,7 +403,7 @@ func TestJuiceFSEngine_genValue(t *testing.T) { }, args: args{ mount: datav1alpha1.Mount{ - MountPoint: "juicefs:///", + MountPoint: "juicefs:///test", Options: map[string]string{}, Name: "test-community", EncryptOptions: []datav1alpha1.EncryptOption{{ @@ -446,29 +435,20 @@ func TestJuiceFSEngine_genValue(t *testing.T) { }, }, wantErr: false, - wantValue: &JuiceFS{ - Fuse: Fuse{ - SubPath: "/", - TokenSecret: "test-enterprise", - MountPath: "/juicefs/fluid/test/juicefs-fuse", - CacheDir: "/dev", - HostMountPath: "/juicefs/fluid/test", - }, - Worker: Worker{}, + wantOptions: map[string]string{ + "subdir": "/test", + "cache-dir": "/dev", }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if _, err := engine.genValue(tt.args.mount, tt.args.tiredStoreLevel, tt.args.value); (err != nil) != tt.wantErr { - t.Errorf("genMount() error = %v, wantErr %v", err, tt.wantErr) + opt, err := engine.genValue(tt.args.mount, tt.args.tiredStoreLevel, tt.args.value) + if (err != nil) != tt.wantErr { + t.Errorf("genValue() error = %v, wantErr %v", err, tt.wantErr) } - if tt.wantValue != nil { - if tt.wantValue.Fuse.Command != tt.args.value.Fuse.Command && - tt.wantValue.Fuse.FormatCmd != tt.args.value.Fuse.FormatCmd && - tt.wantValue.Worker.Command != tt.args.value.Worker.Command { - t.Errorf("genMount() got = %v, want = %v", tt.args.value, tt.wantValue) - } + if len(opt) != len(tt.wantOptions) { + t.Errorf("genValue() got = %v, wantOptions %v", opt, tt.wantOptions) } }) } @@ -482,7 +462,8 @@ func TestJuiceFSEngine_genMount(t *testing.T) { } type args struct { value *JuiceFS - options []string + options map[string]string + runtime *datav1alpha1.JuiceFSRuntime } tests := []struct { name string @@ -491,7 +472,8 @@ func 
TestJuiceFSEngine_genMount(t *testing.T) { wantErr bool wantWorkerCommand string wantFuseCommand string - wantStatCmd string + wantFuseStatCmd string + wantWorkerStatCmd string }{ { name: "test-community", @@ -505,27 +487,33 @@ func TestJuiceFSEngine_genMount(t *testing.T) { FullnameOverride: "test-community", Edition: "community", Source: "redis://127.0.0.1:6379", - Fuse: Fuse{ - SubPath: "/", + Configs: Configs{ Name: "test-community", AccessKeySecret: "test", SecretKeySecret: "test", Bucket: "http://127.0.0.1:9000/minio/test", MetaUrlSecret: "test", Storage: "minio", - MountPath: "/test", - CacheDir: "/cache", - HostMountPath: "/test", + }, + Fuse: Fuse{ + SubPath: "/", + MountPath: "/test", + CacheDir: "/cache", + HostMountPath: "/test", + }, + Worker: Worker{ + MountPath: "/test-worker", }, }, }, wantErr: false, - wantWorkerCommand: "/bin/mount.juicefs redis://127.0.0.1:6379 /test -o metrics=0.0.0.0:9567", + wantWorkerCommand: "/bin/mount.juicefs redis://127.0.0.1:6379 /test-worker -o metrics=0.0.0.0:9567", wantFuseCommand: "/bin/mount.juicefs redis://127.0.0.1:6379 /test -o metrics=0.0.0.0:9567", - wantStatCmd: "stat -c %i /test", + wantFuseStatCmd: "stat -c %i /test", + wantWorkerStatCmd: "stat -c %i /test-worker", }, { - name: "test-enterprise", + name: "test-community-options", fields: fields{ name: "test", namespace: "fluid", @@ -533,68 +521,40 @@ func TestJuiceFSEngine_genMount(t *testing.T) { }, args: args{ value: &JuiceFS{ - FullnameOverride: "test-enterprise", - Edition: "enterprise", - Source: "test-enterprise", - Fuse: Fuse{ - SubPath: "/", - Name: "test-enterprise", + FullnameOverride: "test-community", + Edition: "community", + Source: "redis://127.0.0.1:6379", + Configs: Configs{ + Name: "test-community", AccessKeySecret: "test", SecretKeySecret: "test", Bucket: "http://127.0.0.1:9000/minio/test", - TokenSecret: "test", - MountPath: "/test", - CacheDir: "/cache", - HostMountPath: "/test", + MetaUrlSecret: "test", + Storage: "minio", + }, + Fuse: 
Fuse{ + SubPath: "/", + MountPath: "/test", + CacheDir: "/cache", + HostMountPath: "/test", + }, + Worker: Worker{ + MountPath: "/test-worker", }, }, + options: map[string]string{"verbose": ""}, + runtime: &datav1alpha1.JuiceFSRuntime{Spec: datav1alpha1.JuiceFSRuntimeSpec{Worker: datav1alpha1.JuiceFSCompTemplateSpec{ + Options: map[string]string{"metrics": "127.0.0.1:9567"}, + }}}, }, wantErr: false, - wantWorkerCommand: "/sbin/mount.juicefs test-enterprise /test -o foreground,cache-group=test-enterprise", - wantFuseCommand: "/sbin/mount.juicefs test-enterprise /test -o foreground,cache-group=test-enterprise,no-sharing", - wantStatCmd: "stat -c %i /test", + wantWorkerCommand: "/bin/mount.juicefs redis://127.0.0.1:6379 /test-worker -o verbose,metrics=127.0.0.1:9567", + wantFuseCommand: "/bin/mount.juicefs redis://127.0.0.1:6379 /test -o verbose,metrics=0.0.0.0:9567", + wantFuseStatCmd: "stat -c %i /test", + wantWorkerStatCmd: "stat -c %i /test-worker", }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - j := &JuiceFSEngine{ - name: tt.fields.name, - namespace: tt.fields.namespace, - Log: tt.fields.Log, - } - if err := j.genMount(tt.args.value, tt.args.options); (err != nil) != tt.wantErr { - t.Errorf("genMount() error = %v, wantErr %v", err, tt.wantErr) - } - if tt.args.value.Fuse.Command != tt.wantFuseCommand || - tt.args.value.Fuse.StatCmd != tt.wantStatCmd || - tt.args.value.Worker.Command != tt.wantWorkerCommand { - t.Errorf("genMount() value = %v", tt.args.value) - } - }) - } -} - -func TestJuiceFSEngine_genFormat(t *testing.T) { - type fields struct { - name string - namespace string - Log logr.Logger - } - type args struct { - value *JuiceFS - options []string - } - tests := []struct { - name string - fields fields - args args - wantErr bool - wantWorkerCommand string - wantFuseCommand string - wantStatCmd string - }{ { - name: "test-community", + name: "test-enterprise", fields: fields{ name: "test", namespace: "fluid", @@ -602,30 
+562,35 @@ func TestJuiceFSEngine_genFormat(t *testing.T) { }, args: args{ value: &JuiceFS{ - FullnameOverride: "test-community", - Edition: "community", - Source: "redis://127.0.0.1:6379", - Fuse: Fuse{ - SubPath: "/", - Name: "test-community", + FullnameOverride: "test-enterprise", + Edition: "enterprise", + Source: "test-enterprise", + Configs: Configs{ + Name: "test-enterprise", AccessKeySecret: "test", SecretKeySecret: "test", Bucket: "http://127.0.0.1:9000/minio/test", - MetaUrlSecret: "test", - Storage: "minio", - MountPath: "/test", - CacheDir: "/cache", - HostMountPath: "/test", + TokenSecret: "test", + }, + Fuse: Fuse{ + SubPath: "/", + MountPath: "/test", + CacheDir: "/cache", + HostMountPath: "/test", + }, + Worker: Worker{ + MountPath: "/test", }, }, }, wantErr: false, - wantWorkerCommand: "/bin/mount.juicefs redis://127.0.0.1:6379 /test -o metrics=0.0.0.0:9567", - wantFuseCommand: "/bin/mount.juicefs redis://127.0.0.1:6379 /test -o metrics=0.0.0.0:9567", - wantStatCmd: "stat -c %i /test", + wantWorkerCommand: "/sbin/mount.juicefs test-enterprise /test -o foreground,cache-group=test-enterprise", + wantFuseCommand: "/sbin/mount.juicefs test-enterprise /test -o foreground,cache-group=test-enterprise,no-sharing", + wantFuseStatCmd: "stat -c %i /test", + wantWorkerStatCmd: "stat -c %i /test", }, { - name: "test-enterprise", + name: "test-enterprise-options", fields: fields{ name: "test", namespace: "fluid", @@ -636,23 +601,33 @@ func TestJuiceFSEngine_genFormat(t *testing.T) { FullnameOverride: "test-enterprise", Edition: "enterprise", Source: "test-enterprise", - Fuse: Fuse{ - SubPath: "/", + Configs: Configs{ Name: "test-enterprise", AccessKeySecret: "test", SecretKeySecret: "test", Bucket: "http://127.0.0.1:9000/minio/test", TokenSecret: "test", - MountPath: "/test", - CacheDir: "/cache", - HostMountPath: "/test", + }, + Fuse: Fuse{ + SubPath: "/", + MountPath: "/test", + CacheDir: "/cache", + HostMountPath: "/test", + }, + Worker: Worker{ + MountPath: 
"/test", }, }, + options: map[string]string{"cache-group": "test", "verbose": ""}, + runtime: &datav1alpha1.JuiceFSRuntime{Spec: datav1alpha1.JuiceFSRuntimeSpec{Worker: datav1alpha1.JuiceFSCompTemplateSpec{ + Options: map[string]string{"no-sharing": ""}, + }}}, }, wantErr: false, - wantWorkerCommand: "/sbin/mount.juicefs test-enterprise /test -o foreground,cache-group=test-enterprise", - wantFuseCommand: "/sbin/mount.juicefs test-enterprise /test -o foreground,cache-group=test-enterprise,no-sharing", - wantStatCmd: "stat -c %i /test", + wantFuseCommand: "/sbin/mount.juicefs test-enterprise /test -o verbose,foreground,cache-group=test,no-sharing", + wantWorkerCommand: "/sbin/mount.juicefs test-enterprise /test -o verbose,foreground,cache-group=test", + wantFuseStatCmd: "stat -c %i /test", + wantWorkerStatCmd: "stat -c %i /test", }, } for _, tt := range tests { @@ -662,12 +637,13 @@ func TestJuiceFSEngine_genFormat(t *testing.T) { namespace: tt.fields.namespace, Log: tt.fields.Log, } - if err := j.genMount(tt.args.value, tt.args.options); (err != nil) != tt.wantErr { - t.Errorf("genMount() error = %v, wantErr %v", err, tt.wantErr) + if err := j.genMount(tt.args.value, tt.args.runtime, tt.args.options); (err != nil) != tt.wantErr { + t.Errorf("genMount() error = %v\nwantErr %v", err, tt.wantErr) } - if tt.args.value.Fuse.Command != tt.wantFuseCommand || - tt.args.value.Fuse.StatCmd != tt.wantStatCmd || - tt.args.value.Worker.Command != tt.wantWorkerCommand { + if len(tt.args.value.Fuse.Command) != len(tt.wantFuseCommand) || + tt.args.value.Fuse.StatCmd != tt.wantFuseStatCmd || + tt.args.value.Worker.StatCmd != tt.wantWorkerStatCmd || + len(tt.args.value.Worker.Command) != len(tt.wantWorkerCommand) { t.Errorf("genMount() value = %v", tt.args.value) } }) @@ -690,17 +666,19 @@ func TestJuiceFSEngine_genFormatCmd(t *testing.T) { FullnameOverride: "test-community", Edition: "community", Source: "redis://127.0.0.1:6379", - Fuse: Fuse{ - SubPath: "/", + Configs: Configs{ 
Name: "test-community", AccessKeySecret: "test", SecretKeySecret: "test", Bucket: "http://127.0.0.1:9000/minio/test", MetaUrlSecret: "test", Storage: "minio", - MountPath: "/test", - CacheDir: "/cache", - HostMountPath: "/test", + }, + Fuse: Fuse{ + SubPath: "/", + MountPath: "/test", + CacheDir: "/cache", + HostMountPath: "/test", }, }, }, @@ -713,16 +691,18 @@ func TestJuiceFSEngine_genFormatCmd(t *testing.T) { FullnameOverride: "test-enterprise", Edition: "enterprise", Source: "test-enterprise", - Fuse: Fuse{ - SubPath: "/", + Configs: Configs{ Name: "test-enterprise", AccessKeySecret: "test", SecretKeySecret: "test", Bucket: "http://127.0.0.1:9000/minio/test", TokenSecret: "test", - MountPath: "/test", - CacheDir: "/cache", - HostMountPath: "/test", + }, + Fuse: Fuse{ + SubPath: "/", + MountPath: "/test", + CacheDir: "/cache", + HostMountPath: "/test", }, }, }, @@ -737,9 +717,42 @@ func TestJuiceFSEngine_genFormatCmd(t *testing.T) { }, } j.genFormatCmd(tt.args.value, j.runtime.Spec.Configs) - if tt.args.value.Fuse.FormatCmd != tt.wantFormatCmd { + if tt.args.value.Configs.FormatCmd != tt.wantFormatCmd { t.Errorf("genMount() value = %v", tt.args.value) } }) } } + +func Test_genOption(t *testing.T) { + type args struct { + optionMap map[string]string + } + tests := []struct { + name string + args args + want []string + }{ + { + name: "test", + args: args{ + optionMap: map[string]string{"a": "b", "c": ""}, + }, + want: []string{"a=b", "c"}, + }, + { + name: "test-empty", + args: args{ + optionMap: nil, + }, + want: []string{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := genOption(tt.args.optionMap); !reflect.DeepEqual(got, tt.want) { + t.Errorf("genOption() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/pkg/ddc/juicefs/type.go b/pkg/ddc/juicefs/type.go index e27a5dd1e84..ee5ff2884d6 100644 --- a/pkg/ddc/juicefs/type.go +++ b/pkg/ddc/juicefs/type.go @@ -31,47 +31,55 @@ type JuiceFS struct { common.UserInfo 
`yaml:",inline"` NodeSelector map[string]string `yaml:"nodeSelector,omitempty"` + Configs Configs `yaml:"configs,omitempty"` Fuse Fuse `yaml:"fuse,omitempty"` Worker Worker `yaml:"worker,omitempty"` TieredStore TieredStore `yaml:"tieredstore,omitempty"` PlacementMode string `yaml:"placement,omitempty"` } +type Configs struct { + Name string `yaml:"name"` + AccessKeySecret string `yaml:"accesskeySecret,omitempty"` + SecretKeySecret string `yaml:"secretkeySecret,omitempty"` + Bucket string `yaml:"bucket,omitempty"` + MetaUrlSecret string `yaml:"metaurlSecret,omitempty"` + TokenSecret string `yaml:"tokenSecret,omitempty"` + Storage string `yaml:"storage,omitempty"` + FormatCmd string `yaml:"formatCmd,omitempty"` +} + type Worker struct { Image string `yaml:"image,omitempty"` NodeSelector map[string]string `yaml:"nodeSelector,omitempty"` ImageTag string `yaml:"imageTag,omitempty"` ImagePullPolicy string `yaml:"imagePullPolicy,omitempty"` Resources common.Resources `yaml:"resources,omitempty"` - CacheDir string `yaml:"cacheDir,omitempty"` - Command string `yaml:"command,omitempty"` Envs []corev1.EnvVar `yaml:"envs,omitempty"` Ports []corev1.ContainerPort `yaml:"ports,omitempty"` + + MountPath string `yaml:"mountPath,omitempty"` + CacheDir string `yaml:"cacheDir,omitempty"` + StatCmd string `yaml:"statCmd,omitempty"` + Command string `yaml:"command,omitempty"` } type Fuse struct { - SubPath string `yaml:"subPath,omitempty"` - Name string `yaml:"name"` - AccessKeySecret string `yaml:"accesskeySecret,omitempty"` - SecretKeySecret string `yaml:"secretkeySecret,omitempty"` - Bucket string `yaml:"bucket,omitempty"` - MetaUrlSecret string `yaml:"metaurlSecret,omitempty"` - TokenSecret string `yaml:"tokenSecret,omitempty"` - Storage string `yaml:"storage,omitempty"` + Enabled bool `yaml:"enabled,omitempty"` Image string `yaml:"image,omitempty"` NodeSelector map[string]string `yaml:"nodeSelector,omitempty"` Envs []corev1.EnvVar `yaml:"envs,omitempty"` ImageTag string 
`yaml:"imageTag,omitempty"` ImagePullPolicy string `yaml:"imagePullPolicy,omitempty"` - MountPath string `yaml:"mountPath,omitempty"` - CacheDir string `yaml:"cacheDir,omitempty"` - HostMountPath string `yaml:"hostMountPath,omitempty"` - Command string `yaml:"command,omitempty"` - StatCmd string `yaml:"statCmd,omitempty"` - FormatCmd string `yaml:"formatCmd,omitempty"` - Enabled bool `yaml:"enabled,omitempty"` Resources common.Resources `yaml:"resources,omitempty"` CriticalPod bool `yaml:"criticalPod,omitempty"` + + SubPath string `yaml:"subPath,omitempty"` + MountPath string `yaml:"mountPath,omitempty"` + CacheDir string `yaml:"cacheDir,omitempty"` + HostMountPath string `yaml:"hostMountPath,omitempty"` + Command string `yaml:"command,omitempty"` + StatCmd string `yaml:"statCmd,omitempty"` } type TieredStore struct { From 9346ca1957491595cfc3ff07c5a8f90c7f7086d1 Mon Sep 17 00:00:00 2001 From: cheyang Date: Wed, 27 Apr 2022 12:23:11 +0800 Subject: [PATCH 07/10] Update API for Juicefs, To #40605168 (#1811) Signed-off-by: cheyang --- charts/fluid/fluid/Chart.yaml | 2 +- charts/fluid/fluid/values.yaml | 22 +++++++++++----------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/charts/fluid/fluid/Chart.yaml b/charts/fluid/fluid/Chart.yaml index 7ccd83859bb..65c64668a2d 100644 --- a/charts/fluid/fluid/Chart.yaml +++ b/charts/fluid/fluid/Chart.yaml @@ -18,7 +18,7 @@ version: 0.8.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
-appVersion: 0.8.0-97f74b5 +appVersion: 0.8.0-79d0b30 home: https://github.com/fluid-cloudnative/fluid keywords: - category:data diff --git a/charts/fluid/fluid/values.yaml b/charts/fluid/fluid/values.yaml index 2b006d8e16f..9a67ec42561 100644 --- a/charts/fluid/fluid/values.yaml +++ b/charts/fluid/fluid/values.yaml @@ -6,7 +6,7 @@ workdir: /tmp dataset: controller: - image: fluidcloudnative/dataset-controller:v0.8.0-97f74b5 + image: fluidcloudnative/dataset-controller:v0.8.0-79d0b30 csi: featureGates: "FuseRecovery=false" @@ -15,7 +15,7 @@ csi: registrar: image: registry.aliyuncs.com/acs/csi-node-driver-registrar:v1.2.0 plugins: - image: fluidcloudnative/fluid-csi:v0.8.0-97f74b5 + image: fluidcloudnative/fluid-csi:v0.8.0-79d0b30 kubelet: rootDir: /var/lib/kubelet @@ -28,9 +28,9 @@ runtime: portRange: 20000-26000 enabled: true init: - image: fluidcloudnative/init-users:v0.8.0-97f74b5 + image: fluidcloudnative/init-users:v0.8.0-79d0b30 controller: - image: fluidcloudnative/alluxioruntime-controller:v0.8.0-97f74b5 + image: fluidcloudnative/alluxioruntime-controller:v0.8.0-79d0b30 runtime: image: registry.aliyuncs.com/alluxio/alluxio:release-2.7.2-SNAPSHOT-3714f2b fuse: @@ -44,19 +44,19 @@ runtime: fuse: image: registry.cn-shanghai.aliyuncs.com/jindofs/jindo-fuse:3.8.0 controller: - image: fluidcloudnative/jindoruntime-controller:v0.8.0-97f74b5 + image: fluidcloudnative/jindoruntime-controller:v0.8.0-79d0b30 init: portCheck: enabled: false - image: fluidcloudnative/init-users:v0.8.0-97f74b5 + image: fluidcloudnative/init-users:v0.8.0-79d0b30 goosefs: runtimeWorkers: 3 portRange: 26000-32000 enabled: false init: - image: fluidcloudnative/init-users:v0.8.0-97f74b5 + image: fluidcloudnative/init-users:v0.8.0-79d0b30 controller: - image: fluidcloudnative/goosefsruntime-controller:v0.8.0-97f74b5 + image: fluidcloudnative/goosefsruntime-controller:v0.8.0-79d0b30 runtime: image: ccr.ccs.tencentyun.com/qcloud/goosefs:v1.2.0 fuse: @@ -64,16 +64,16 @@ runtime: juicefs: enabled: 
false controller: - image: fluidcloudnative/juicefsruntime-controller:v0.8.0-97f74b5 + image: fluidcloudnative/juicefsruntime-controller:v0.8.0-79d0b30 fuse: image: registry.cn-hangzhou.aliyuncs.com/juicefs/juicefs-fuse:v1.0.0-beta2 webhook: enabled: true - image: fluidcloudnative/fluid-webhook:v0.8.0-97f74b5 + image: fluidcloudnative/fluid-webhook:v0.8.0-79d0b30 replicas: 1 fluidapp: enabled: true controller: - image: fluidcloudnative/application-controller:v0.8.0-97f74b5 + image: fluidcloudnative/application-controller:v0.8.0-79d0b30 From 67496ba0e3e0ad507e99f5fc9ccf34d192742ab2 Mon Sep 17 00:00:00 2001 From: frankleaf <62129564+frankleaf@users.noreply.github.com> Date: Thu, 28 Apr 2022 10:25:31 +0800 Subject: [PATCH 08/10] Add jindofsx fuseOnly deploy mode (#1809) * Add jindofsx fuseonly Signed-off-by: frankleaf * Add jindofsx fuseonly Signed-off-by: frankleaf * fix test Signed-off-by: frankleaf * fix test Signed-off-by: frankleaf * fix ut Signed-off-by: frankleaf * fix ut Signed-off-by: frankleaf --- api/v1alpha1/jindoruntime_types.go | 8 +++ api/v1alpha1/openapi_generated.go | 14 ++++++ .../crds/data.fluid.io_jindoruntimes.yaml | 9 ++++ .../bases/data.fluid.io_jindoruntimes.yaml | 9 ++++ pkg/ddc/jindofsx/const.go | 2 + pkg/ddc/jindofsx/health_check.go | 49 +++++++++++-------- pkg/ddc/jindofsx/master.go | 10 ++++ pkg/ddc/jindofsx/master_test.go | 15 ++++++ pkg/ddc/jindofsx/status.go | 6 +++ pkg/ddc/jindofsx/transform.go | 29 ++++++++--- pkg/ddc/jindofsx/types.go | 2 + pkg/ddc/jindofsx/ufs.go | 6 +++ pkg/ddc/jindofsx/worker.go | 11 +++++ pkg/ddc/juicefs/transform_fuse_test.go | 16 ++++-- 14 files changed, 154 insertions(+), 32 deletions(-) diff --git a/api/v1alpha1/jindoruntime_types.go b/api/v1alpha1/jindoruntime_types.go index 539985d8cfd..64f98637c55 100644 --- a/api/v1alpha1/jindoruntime_types.go +++ b/api/v1alpha1/jindoruntime_types.go @@ -62,6 +62,10 @@ type JindoCompTemplateSpec struct { // Any label already existed will be overriden // +optional Labels 
map[string]string `json:"labels,omitempty"` + + // If disable JindoFS master or worker + // +optional + Disabled bool `json:"disabled,omitempty"` } // JindoFuseSpec is a description of the Jindo Fuse @@ -120,6 +124,10 @@ type JindoFuseSpec struct { // Defaults to OnRuntimeDeleted // +optional CleanPolicy FuseCleanPolicy `json:"cleanPolicy,omitempty"` + + // If disable JindoFS fuse + // +optional + Disabled bool `json:"disabled,omitempty"` } // JindoRuntimeSpec defines the desired state of JindoRuntime diff --git a/api/v1alpha1/openapi_generated.go b/api/v1alpha1/openapi_generated.go index 7b9b735fbbb..bff51c4bd5d 100644 --- a/api/v1alpha1/openapi_generated.go +++ b/api/v1alpha1/openapi_generated.go @@ -2281,6 +2281,13 @@ func schema_fluid_cloudnative_fluid_api_v1alpha1_JindoCompTemplateSpec(ref commo }, }, }, + "disabled": { + SchemaProps: spec.SchemaProps{ + Description: "If disable JindoFS master or worker", + Type: []string{"boolean"}, + Format: "", + }, + }, }, }, }, @@ -2431,6 +2438,13 @@ func schema_fluid_cloudnative_fluid_api_v1alpha1_JindoFuseSpec(ref common.Refere Format: "", }, }, + "disabled": { + SchemaProps: spec.SchemaProps{ + Description: "If disable JindoFS fuse", + Type: []string{"boolean"}, + Format: "", + }, + }, }, }, }, diff --git a/charts/fluid/fluid/crds/data.fluid.io_jindoruntimes.yaml b/charts/fluid/fluid/crds/data.fluid.io_jindoruntimes.yaml index f6954f96037..364d3ff54ca 100644 --- a/charts/fluid/fluid/crds/data.fluid.io_jindoruntimes.yaml +++ b/charts/fluid/fluid/crds/data.fluid.io_jindoruntimes.yaml @@ -91,6 +91,9 @@ spec: needed OnRuntimeDeleted cleans fuse pod only when the cache runtime is deleted Defaults to OnRuntimeDeleted' type: string + disabled: + description: If disable JindoFS fuse + type: boolean env: additionalProperties: type: string @@ -234,6 +237,9 @@ spec: master: description: The component spec of Jindo master properties: + disabled: + description: If disable JindoFS master or worker + type: boolean env: 
additionalProperties: type: string @@ -443,6 +449,9 @@ spec: worker: description: The component spec of Jindo worker properties: + disabled: + description: If disable JindoFS master or worker + type: boolean env: additionalProperties: type: string diff --git a/config/crd/bases/data.fluid.io_jindoruntimes.yaml b/config/crd/bases/data.fluid.io_jindoruntimes.yaml index f6954f96037..364d3ff54ca 100644 --- a/config/crd/bases/data.fluid.io_jindoruntimes.yaml +++ b/config/crd/bases/data.fluid.io_jindoruntimes.yaml @@ -91,6 +91,9 @@ spec: needed OnRuntimeDeleted cleans fuse pod only when the cache runtime is deleted Defaults to OnRuntimeDeleted' type: string + disabled: + description: If disable JindoFS fuse + type: boolean env: additionalProperties: type: string @@ -234,6 +237,9 @@ spec: master: description: The component spec of Jindo master properties: + disabled: + description: If disable JindoFS master or worker + type: boolean env: additionalProperties: type: string @@ -443,6 +449,9 @@ spec: worker: description: The component spec of Jindo worker properties: + disabled: + description: If disable JindoFS master or worker + type: boolean env: additionalProperties: type: string diff --git a/pkg/ddc/jindofsx/const.go b/pkg/ddc/jindofsx/const.go index b79115f893f..33d54b7f7c9 100644 --- a/pkg/ddc/jindofsx/const.go +++ b/pkg/ddc/jindofsx/const.go @@ -57,4 +57,6 @@ const ( DEFAULT_JINDOFSX_RUNTIME_IMAGE = "registry.cn-shanghai.aliyuncs.com/jindofs/smartdata:4.3.0" ENGINE_TYPE = "jindofsx" + + FuseOnly = "fuseOnly" ) diff --git a/pkg/ddc/jindofsx/health_check.go b/pkg/ddc/jindofsx/health_check.go index 5f5715681d1..bb91a3121cb 100644 --- a/pkg/ddc/jindofsx/health_check.go +++ b/pkg/ddc/jindofsx/health_check.go @@ -28,37 +28,44 @@ import ( ) func (e *JindoFSxEngine) CheckRuntimeHealthy() (err error) { + // 1. 
Check the healthy of the master - err = e.checkMasterHealthy() - if err != nil { - e.Log.Error(err, "The master is not healthy") - updateErr := e.UpdateDatasetStatus(data.FailedDatasetPhase) - if updateErr != nil { - e.Log.Error(updateErr, "Failed to update dataset") + if !e.runtime.Spec.Master.Disabled { + err = e.checkMasterHealthy() + if err != nil { + e.Log.Error(err, "The master is not healthy") + updateErr := e.UpdateDatasetStatus(data.FailedDatasetPhase) + if updateErr != nil { + e.Log.Error(updateErr, "Failed to update dataset") + } + return } - return } // 2. Check the healthy of the workers - err = e.checkWorkersHealthy() - if err != nil { - e.Log.Error(err, "The worker is not healthy") - updateErr := e.UpdateDatasetStatus(data.FailedDatasetPhase) - if updateErr != nil { - e.Log.Error(updateErr, "Failed to update dataset") + if !e.runtime.Spec.Worker.Disabled { + err = e.checkWorkersHealthy() + if err != nil { + e.Log.Error(err, "The worker is not healthy") + updateErr := e.UpdateDatasetStatus(data.FailedDatasetPhase) + if updateErr != nil { + e.Log.Error(updateErr, "Failed to update dataset") + } + return } - return } // 3. Check the healthy of the fuse - err = e.checkFuseHealthy() - if err != nil { - e.Log.Error(err, "The fuse is not healthy") - updateErr := e.UpdateDatasetStatus(data.FailedDatasetPhase) - if updateErr != nil { - e.Log.Error(updateErr, "Failed to update dataset") + if !e.runtime.Spec.Fuse.Disabled { + err = e.checkFuseHealthy() + if err != nil { + e.Log.Error(err, "The fuse is not healthy") + updateErr := e.UpdateDatasetStatus(data.FailedDatasetPhase) + if updateErr != nil { + e.Log.Error(updateErr, "Failed to update dataset") + } + return } - return } // 4. 
Update the dataset as Bounded diff --git a/pkg/ddc/jindofsx/master.go b/pkg/ddc/jindofsx/master.go index 5f7aa4dd244..f3911dac0b1 100644 --- a/pkg/ddc/jindofsx/master.go +++ b/pkg/ddc/jindofsx/master.go @@ -14,6 +14,11 @@ import ( ) func (e *JindoFSxEngine) CheckMasterReady() (ready bool, err error) { + if e.runtime.Spec.Master.Disabled { + ready = true + err = nil + return + } masterName := e.getMasterName() // 1. Check the status runtime, err := e.getRuntime() @@ -116,6 +121,11 @@ func (e *JindoFSxEngine) SetupMaster() (err error) { e.Log.V(1).Info("The master has been set.", "replicas", master.Status.ReadyReplicas) } + if e.runtime.Spec.Master.Disabled { + err = nil + return + } + // 2. Update the status of the runtime err = retry.RetryOnConflict(retry.DefaultBackoff, func() error { runtime, err := e.getRuntime() diff --git a/pkg/ddc/jindofsx/master_test.go b/pkg/ddc/jindofsx/master_test.go index cbe27205230..c3064d2ceef 100644 --- a/pkg/ddc/jindofsx/master_test.go +++ b/pkg/ddc/jindofsx/master_test.go @@ -96,18 +96,33 @@ func TestCheckMasterReady(t *testing.T) { namespace: "fluid", Client: client, Log: fake.NullLogger(), + runtime: &datav1alpha1.JindoRuntime{Spec: datav1alpha1.JindoRuntimeSpec{ + Master: datav1alpha1.JindoCompTemplateSpec{ + Disabled: false, + }}, + }, }, { name: "hbase", namespace: "fluid", Client: client, Log: fake.NullLogger(), + runtime: &datav1alpha1.JindoRuntime{Spec: datav1alpha1.JindoRuntimeSpec{ + Master: datav1alpha1.JindoCompTemplateSpec{ + Disabled: false, + }}, + }, }, { name: "hadoop", namespace: "fluid", Client: client, Log: fake.NullLogger(), + runtime: &datav1alpha1.JindoRuntime{Spec: datav1alpha1.JindoRuntimeSpec{ + Master: datav1alpha1.JindoCompTemplateSpec{ + Disabled: false, + }}, + }, }, } diff --git a/pkg/ddc/jindofsx/status.go b/pkg/ddc/jindofsx/status.go index cc2eea19f57..5dbbb1690a4 100644 --- a/pkg/ddc/jindofsx/status.go +++ b/pkg/ddc/jindofsx/status.go @@ -33,6 +33,12 @@ import ( // CheckAndUpdateRuntimeStatus 
checks the related runtime status and updates it. func (e *JindoFSxEngine) CheckAndUpdateRuntimeStatus() (ready bool, err error) { + if e.runtime.Spec.Master.Disabled && e.runtime.Spec.Worker.Disabled { + ready = true + err = nil + return + } + defer utils.TimeTrack(time.Now(), "JindoFSxEngine.CheckAndUpdateRuntimeStatus", "name", e.name, "namespace", e.namespace) var ( masterReady, workerReady bool diff --git a/pkg/ddc/jindofsx/transform.go b/pkg/ddc/jindofsx/transform.go index 7f97dfdaf1b..490e4236772 100644 --- a/pkg/ddc/jindofsx/transform.go +++ b/pkg/ddc/jindofsx/transform.go @@ -39,18 +39,16 @@ func (e *JindoFSxEngine) transform(runtime *datav1alpha1.JindoRuntime) (value *J return } - if len(runtime.Spec.TieredStore.Levels) == 0 { - err = fmt.Errorf("the TieredStore is null") - return - } - dataset, err := utils.GetDataset(e.Client, e.name, e.namespace) if err != nil { return } var cachePaths []string // /mnt/disk1/bigboot or /mnt/disk1/bigboot,/mnt/disk2/bigboot - stroagePath := runtime.Spec.TieredStore.Levels[0].Path + var stroagePath = "/dev/shm/" + if len(runtime.Spec.TieredStore.Levels) > 0 { + stroagePath = runtime.Spec.TieredStore.Levels[0].Path + } originPath := strings.Split(stroagePath, ",") for _, value := range originPath { cachePaths = append(cachePaths, strings.TrimRight(value, "/")+"/"+ @@ -60,11 +58,13 @@ func (e *JindoFSxEngine) transform(runtime *datav1alpha1.JindoRuntime) (value *J dataPath := strings.Join(cachePaths, ",") var userSetQuota []string // 1Gi or 1Gi,2Gi,3Gi - if runtime.Spec.TieredStore.Levels[0].Quota != nil { + if len(runtime.Spec.TieredStore.Levels) == 0 { + userSetQuota = append(userSetQuota, "1Gi") + } else if runtime.Spec.TieredStore.Levels[0].Quota != nil { userSetQuota = append(userSetQuota, utils.TransformQuantityToJindoUnit(runtime.Spec.TieredStore.Levels[0].Quota)) } - if runtime.Spec.TieredStore.Levels[0].QuotaList != "" { + if len(runtime.Spec.TieredStore.Levels) != 0 && runtime.Spec.TieredStore.Levels[0].QuotaList 
!= "" { quotaList := runtime.Spec.TieredStore.Levels[0].QuotaList quotas := strings.Split(quotaList, ",") if len(quotas) != len(originPath) { @@ -137,6 +137,7 @@ func (e *JindoFSxEngine) transform(runtime *datav1alpha1.JindoRuntime) (value *J e.transformTolerations(dataset, runtime, value) e.transformResources(runtime, value) e.transformLogConfig(runtime, value) + e.transformDeployMode(runtime, value) value.Master.DnsServer = dnsServer value.Master.NameSpace = e.namespace value.Fuse.MountPath = JINDO_FUSE_MONNTPATH @@ -459,6 +460,9 @@ func (e *JindoFSxEngine) transformFuseArg(runtime *datav1alpha1.JindoRuntime, da if len(runtime.Spec.Fuse.Args) > 0 { fuseArgs = runtime.Spec.Fuse.Args } + if runtime.Spec.Master.Disabled && runtime.Spec.Worker.Disabled { + fuseArgs = append(fuseArgs, "-ouri="+dataset.Spec.Mounts[0].MountPoint) + } return fuseArgs } @@ -659,3 +663,12 @@ func (e *JindoFSxEngine) transformPlacementMode(dataset *datav1alpha1.Dataset, v value.PlacementMode = string(datav1alpha1.ExclusiveMode) } } + +func (e *JindoFSxEngine) transformDeployMode(runtime *datav1alpha1.JindoRuntime, value *Jindo) { + // to set fuseOnly + if runtime.Spec.Master.Disabled && runtime.Spec.Worker.Disabled { + value.Master.ReplicaCount = 0 + value.Worker.ReplicaCount = 0 + value.Fuse.Mode = FuseOnly + } +} diff --git a/pkg/ddc/jindofsx/types.go b/pkg/ddc/jindofsx/types.go index 8d6ab7c2b3c..cab5655e3ae 100644 --- a/pkg/ddc/jindofsx/types.go +++ b/pkg/ddc/jindofsx/types.go @@ -70,6 +70,7 @@ type Master struct { } type Worker struct { + ReplicaCount int `yaml:"replicaCount"` Resources Resources `yaml:"resources,omitempty"` NodeSelector map[string]string `yaml:"nodeSelector,omitempty"` WorkerProperties map[string]string `yaml:"properties"` @@ -96,6 +97,7 @@ type Fuse struct { CriticalPod bool `yaml:"criticalPod,omitempty"` Resources Resources `yaml:"resources,omitempty"` MountPath string `yaml:"mountPath,omitempty"` + Mode string `yaml:"mode,omitempty"` } type Mounts struct { diff 
--git a/pkg/ddc/jindofsx/ufs.go b/pkg/ddc/jindofsx/ufs.go index 18f6c5d419e..bc7ded9a7a8 100644 --- a/pkg/ddc/jindofsx/ufs.go +++ b/pkg/ddc/jindofsx/ufs.go @@ -29,6 +29,12 @@ func (e *JindoFSxEngine) ShouldCheckUFS() (should bool, err error) { // PrepareUFS do all the UFS preparations func (e *JindoFSxEngine) PrepareUFS() (err error) { + + if e.runtime.Spec.Master.Disabled { + err = nil + return + } + // 1. Mount UFS (Synchronous Operation) shouldMountUfs, err := e.shouldMountUFS() if err != nil { diff --git a/pkg/ddc/jindofsx/worker.go b/pkg/ddc/jindofsx/worker.go index db7cea5b8b7..f59605712d0 100644 --- a/pkg/ddc/jindofsx/worker.go +++ b/pkg/ddc/jindofsx/worker.go @@ -21,6 +21,11 @@ import ( // calls for a status update and finally returns error if anything unexpected happens. func (e *JindoFSxEngine) SetupWorkers() (err error) { + if e.runtime.Spec.Worker.Disabled { + err = nil + return + } + err = retry.RetryOnConflict(retry.DefaultBackoff, func() error { workers, err := ctrl.GetWorkersAsStatefulset(e.Client, types.NamespacedName{Namespace: e.namespace, Name: e.getWorkerName()}) @@ -72,6 +77,12 @@ func (e *JindoFSxEngine) ShouldSetupWorkers() (should bool, err error) { // CheckWorkersReady checks if the workers are ready func (e *JindoFSxEngine) CheckWorkersReady() (ready bool, err error) { + if e.runtime.Spec.Worker.Disabled { + ready = true + err = nil + return + } + workers, err := ctrl.GetWorkersAsStatefulset(e.Client, types.NamespacedName{Namespace: e.namespace, Name: e.getWorkerName()}) if err != nil { diff --git a/pkg/ddc/juicefs/transform_fuse_test.go b/pkg/ddc/juicefs/transform_fuse_test.go index bdf34d844d6..9cf6168d9a3 100644 --- a/pkg/ddc/juicefs/transform_fuse_test.go +++ b/pkg/ddc/juicefs/transform_fuse_test.go @@ -18,9 +18,10 @@ package juicefs import ( "encoding/base64" + "fmt" "github.com/fluid-cloudnative/fluid/pkg/common" "github.com/go-logr/logr" - "reflect" + "sort" "testing" "k8s.io/apimachinery/pkg/api/resource" @@ -750,8 +751,17 @@ func 
Test_genOption(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if got := genOption(tt.args.optionMap); !reflect.DeepEqual(got, tt.want) { - t.Errorf("genOption() = %v, want %v", got, tt.want) + got := genOption(tt.args.optionMap) + var keys []int + for k := range got { + keys = append(keys, k) + } + sort.Ints(keys) + fmt.Println(keys) + for _, k := range keys { + if got[k] != tt.want[k] { + t.Errorf("genOption() = %v, want %v", got, tt.want) + } } }) } From 2073b38ec614dd83b2b22af9a16d421aeba809ab Mon Sep 17 00:00:00 2001 From: Weiwei Date: Thu, 28 Apr 2022 17:37:39 +0800 Subject: [PATCH 09/10] add application controller doc (#1812) * add application controller doc Signed-off-by: zwwhdls * fix chinese in en doc Signed-off-by: zwwhdls * update title Signed-off-by: zwwhdls --- docs/en/samples/application_controller.md | 108 ++++++++++++++++++++++ docs/zh/samples/application_controller.md | 102 ++++++++++++++++++++ 2 files changed, 210 insertions(+) create mode 100644 docs/en/samples/application_controller.md create mode 100644 docs/zh/samples/application_controller.md diff --git a/docs/en/samples/application_controller.md b/docs/en/samples/application_controller.md new file mode 100644 index 00000000000..9953eb7383b --- /dev/null +++ b/docs/en/samples/application_controller.md @@ -0,0 +1,108 @@ +# Demo - How to ensure the completion of Fluid's serverless tasks + +## Background + +In the serverless scenario, Workload such as Job, when the user container of the Pod completes the task and exits, the +Fuse Sidecar can also actively exit. +This enables the Job Controller to correctly determine the completion status of the Pod. However, the fuse container +itself does not have an exit mechanism, and the Fluid Application Controller will detect the pods with the fluid label +in the cluster. +After the user container exits, the fuse container is exited normally to reach the state where the job is completed. 
+ +## Installation + +You can download the latest Fluid installation package +from [Fluid Releases](https://github.com/fluid-cloudnative/fluid/releases). +Refer to the [Installation Documentation](../userguide/install.md) to complete the installation. And check that the +components of Fluid are running normally (here takes JuiceFSRuntime as an example): + +```shell +$ kubectl -n fluid-system get po +NAME READY STATUS RESTARTS AGE +dataset-controller-86768b56fb-4pdts 1/1 Running 0 36s +fluid-webhook-f77465869-zh8rv 1/1 Running 0 62s +fluidapp-controller-597dbd77dd-jgsbp 1/1 Running 0 81s +juicefsruntime-controller-65d54bb48f-vnzpj 1/1 Running 0 99s +``` + +Typically, you will see a Pod named `dataset-controller`, a Pod named `juicefsruntime-controller`, a Pod +named `fluid-webhook` and a Pod named `fluidapp-controller`. + +## Demo + +**Enable webhook for namespace** + +Fluid webhook provides the ability to inject FUSE sidecars for pods in serverless scenarios. To enable this function, you need to set label `fluid.io/enable-injection=true` in the corresponding namespace. The operation is as follows: + +```shell +$ kubectl patch ns default -p '{"metadata": {"labels": {"fluid.io/enable-injection": "true"}}}' +namespace/default patched +$ kubectl get ns default --show-labels +NAME STATUS AGE LABELS +default Active 4d12h fluid.io/enable-injection=true,kubernetes.io/metadata.name=default +``` + +**Create dataset and runtime** + +Create corresponding Runtime resources and Datasets with the same name for different types of runtimes. Take JuiceFSRuntime as an example here. 
For details, please refer to [Documentation](juicefs_runtime.md), as follows: + +```shell +$ kubectl get juicefsruntime +NAME WORKER PHASE FUSE PHASE AGE +jfsdemo Ready Ready 2m58s +$ kubectl get dataset +NAME UFS TOTAL SIZE CACHED CACHE CAPACITY CACHED PERCENTAGE PHASE AGE +jfsdemo [Calculating] N/A N/A Bound 2m55s +``` + +**Create Job** + +To use Fluid in a serverless scenario, you need to add the `serverless.fluid.io/inject: "true"` label to the application pod. as follows: + +```yaml +$ cat<sample.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: demo-app +spec: + template: + metadata: + labels: + serverless.fluid.io/inject: "true" + spec: + containers: + - name: demo + image: busybox + args: + - -c + - echo $(date -u) >> /data/out.txt + command: + - /bin/sh + volumeMounts: + - mountPath: /data + name: demo + restartPolicy: Never + volumes: + - name: demo + persistentVolumeClaim: + claimName: jfsdemo + backoffLimit: 4 +EOF +$ kubectl create -f sample.yaml +job.batch/demo-app created +``` + +**Check if the Pod is completed** + +```shell +$ kubectl get job +NAME COMPLETIONS DURATION AGE +demo-app 1/1 14s 46s +$ kubectl get po +NAME READY STATUS RESTARTS AGE +demo-app-wdfr8 0/2 Completed 0 25s +jfsdemo-worker-0 1/1 Running 0 14m +``` + +It can be seen that the job has been completed, and its pod has two containers, both of which have been completed. 
diff --git a/docs/zh/samples/application_controller.md b/docs/zh/samples/application_controller.md new file mode 100644 index 00000000000..6eea178095c --- /dev/null +++ b/docs/zh/samples/application_controller.md @@ -0,0 +1,102 @@ +# 示例 - 如何保障 Fluid 的 Serverless 任务顺利完成 + +## 背景介绍 + +在 Serverless 场景中, Job 等 Workload,当 Pod 的 user container 完成任务并退出后,需要 Fuse Sidecar 也可以主动退出, +从而使 Job Controller 能够正确判断 Pod 所处的完成状态。然而,fuse container 自身并没有退出机制,Fluid Application Controller 会检测集群中带 fluid label 的 Pod, +在 user container 退出后,将 fuse container 正常退出,以达到 Job 完成的状态。 + +## 安装 + +您可以从 [Fluid Releases](https://github.com/fluid-cloudnative/fluid/releases) 下载最新的 Fluid 安装包。 +再参考 [安装文档](../userguide/install.md) 完成安装。并检查 Fluid 各组件正常运行(这里以 JuiceFSRuntime 为例): + +```shell +$ kubectl -n fluid-system get po +NAME READY STATUS RESTARTS AGE +dataset-controller-86768b56fb-4pdts 1/1 Running 0 36s +fluid-webhook-f77465869-zh8rv 1/1 Running 0 62s +fluidapp-controller-597dbd77dd-jgsbp 1/1 Running 0 81s +juicefsruntime-controller-65d54bb48f-vnzpj 1/1 Running 0 99s +``` + +通常来说,你会看到一个名为 `dataset-controller` 的 Pod、一个名为 `juicefsruntime-controller` 的 Pod、一个名为 `fluid-webhook` 的 Pod 和一个名为 `fluidapp-controller` 的 Pod。 + +## 运行示例 + +**为 namespace 开启 webhook** + +Fluid webhook 提供了在 Serverless 场景中为 pod 注入 FUSE Sidecar 的功能,为了开启该功能,需要将对应的 namespace 打上 `fluid.io/enable-injection=true` 的标签。操作如下: + +```shell +$ kubectl patch ns default -p '{"metadata": {"labels": {"fluid.io/enable-injection": "true"}}}' +namespace/default patched +$ kubectl get ns default --show-labels +NAME STATUS AGE LABELS +default Active 4d12h fluid.io/enable-injection=true,kubernetes.io/metadata.name=default +``` + +**创建 dataset 和 runtime** + +针对不同类型的 runtime 创建相应的 Runtime 资源,以及同名的 Dataset。这里以 JuiceFSRuntime 为例,具体可参考 [文档](juicefs_runtime.md),如下: + +```shell +$ kubectl get juicefsruntime +NAME WORKER PHASE FUSE PHASE AGE +jfsdemo Ready Ready 2m58s +$ kubectl get dataset +NAME UFS TOTAL SIZE CACHED CACHE CAPACITY CACHED PERCENTAGE PHASE AGE 
+jfsdemo [Calculating] N/A N/A Bound 2m55s +``` + +**创建 Job 资源对象** + +在 Serverless 场景使用 Fluid,需要在应用 Pod 中添加 `serverless.fluid.io/inject: "true"` label。如下: + +```yaml +$ cat <<EOF > sample.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: demo-app +spec: + template: + metadata: + labels: + serverless.fluid.io/inject: "true" + spec: + containers: + - name: demo + image: busybox + args: + - -c + - echo $(date -u) >> /data/out.txt + command: + - /bin/sh + volumeMounts: + - mountPath: /data + name: demo + restartPolicy: Never + volumes: + - name: demo + persistentVolumeClaim: + claimName: jfsdemo + backoffLimit: 4 +EOF +$ kubectl create -f sample.yaml +job.batch/demo-app created +``` + +**查看 job 是否完成** + +```shell +$ kubectl get job +NAME COMPLETIONS DURATION AGE +demo-app 1/1 14s 46s +$ kubectl get po +NAME READY STATUS RESTARTS AGE +demo-app-wdfr8 0/2 Completed 0 25s +jfsdemo-worker-0 1/1 Running 0 14m +``` + +可以看到,job 已经完成,其 pod 有两个 container,均已完成。 From 3218390b48703ea21962761b0ca0e41bcb94c63b Mon Sep 17 00:00:00 2001 From: cheyang Date: Thu, 28 Apr 2022 18:18:33 +0800 Subject: [PATCH 10/10] Update docker image for jindofsx (#1813) * Add doc for serverless job, To #40605168 Signed-off-by: cheyang * Add doc for serverless job, To #40605168 Signed-off-by: cheyang --- charts/fluid/fluid/Chart.yaml | 2 +- charts/fluid/fluid/values.yaml | 22 +++++++++++----------- docs/en/TOC.md | 2 ++ docs/zh/TOC.md | 4 +++- 4 files changed, 17 insertions(+), 13 deletions(-) diff --git a/charts/fluid/fluid/Chart.yaml b/charts/fluid/fluid/Chart.yaml index 65c64668a2d..47dc2a150f9 100644 --- a/charts/fluid/fluid/Chart.yaml +++ b/charts/fluid/fluid/Chart.yaml @@ -18,7 +18,7 @@ version: 0.8.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application.
-appVersion: 0.8.0-79d0b30 +appVersion: 0.8.0-2073b38 home: https://github.com/fluid-cloudnative/fluid keywords: - category:data diff --git a/charts/fluid/fluid/values.yaml b/charts/fluid/fluid/values.yaml index 9a67ec42561..4eac2a18495 100644 --- a/charts/fluid/fluid/values.yaml +++ b/charts/fluid/fluid/values.yaml @@ -6,7 +6,7 @@ workdir: /tmp dataset: controller: - image: fluidcloudnative/dataset-controller:v0.8.0-79d0b30 + image: fluidcloudnative/dataset-controller:v0.8.0-2073b38 csi: featureGates: "FuseRecovery=false" @@ -15,7 +15,7 @@ csi: registrar: image: registry.aliyuncs.com/acs/csi-node-driver-registrar:v1.2.0 plugins: - image: fluidcloudnative/fluid-csi:v0.8.0-79d0b30 + image: fluidcloudnative/fluid-csi:v0.8.0-2073b38 kubelet: rootDir: /var/lib/kubelet @@ -28,9 +28,9 @@ runtime: portRange: 20000-26000 enabled: true init: - image: fluidcloudnative/init-users:v0.8.0-79d0b30 + image: fluidcloudnative/init-users:v0.8.0-2073b38 controller: - image: fluidcloudnative/alluxioruntime-controller:v0.8.0-79d0b30 + image: fluidcloudnative/alluxioruntime-controller:v0.8.0-2073b38 runtime: image: registry.aliyuncs.com/alluxio/alluxio:release-2.7.2-SNAPSHOT-3714f2b fuse: @@ -44,19 +44,19 @@ runtime: fuse: image: registry.cn-shanghai.aliyuncs.com/jindofs/jindo-fuse:3.8.0 controller: - image: fluidcloudnative/jindoruntime-controller:v0.8.0-79d0b30 + image: fluidcloudnative/jindoruntime-controller:v0.8.0-2073b38 init: portCheck: enabled: false - image: fluidcloudnative/init-users:v0.8.0-79d0b30 + image: fluidcloudnative/init-users:v0.8.0-2073b38 goosefs: runtimeWorkers: 3 portRange: 26000-32000 enabled: false init: - image: fluidcloudnative/init-users:v0.8.0-79d0b30 + image: fluidcloudnative/init-users:v0.8.0-2073b38 controller: - image: fluidcloudnative/goosefsruntime-controller:v0.8.0-79d0b30 + image: fluidcloudnative/goosefsruntime-controller:v0.8.0-2073b38 runtime: image: ccr.ccs.tencentyun.com/qcloud/goosefs:v1.2.0 fuse: @@ -64,16 +64,16 @@ runtime: juicefs: enabled: 
false controller: - image: fluidcloudnative/juicefsruntime-controller:v0.8.0-79d0b30 + image: fluidcloudnative/juicefsruntime-controller:v0.8.0-2073b38 fuse: image: registry.cn-hangzhou.aliyuncs.com/juicefs/juicefs-fuse:v1.0.0-beta2 webhook: enabled: true - image: fluidcloudnative/fluid-webhook:v0.8.0-79d0b30 + image: fluidcloudnative/fluid-webhook:v0.8.0-2073b38 replicas: 1 fluidapp: enabled: true controller: - image: fluidcloudnative/application-controller:v0.8.0-79d0b30 + image: fluidcloudnative/application-controller:v0.8.0-2073b38 diff --git a/docs/en/TOC.md b/docs/en/TOC.md index 832051527d7..6f1542d433c 100644 --- a/docs/en/TOC.md +++ b/docs/en/TOC.md @@ -27,6 +27,8 @@ - [Machine Learning](samples/machinelearning.md) + Advanced - [Alluxio Tieredstore Configuration](samples/tieredstore_config.md) + + Serverless + - [How to ensure the completion of serverless tasks](samples/application_controller.md) - [How to enable FUSE auto-recovery](samples/fuse_recover.md) + Troubleshooting - [Collecting logs](userguide/troubleshooting.md) diff --git a/docs/zh/TOC.md b/docs/zh/TOC.md index 0e8e521e1dd..09edb5b7865 100644 --- a/docs/zh/TOC.md +++ b/docs/zh/TOC.md @@ -29,8 +29,10 @@ + 进阶使用 - [AlluxioRuntime分层存储配置](samples/tieredstore_config.md) - [通过Webhook机制优化Pod调度](operation/pod_schedule_global.md) - - [如何在Knative环境运行](samples/knative.md) - [如何开启 FUSE 自动恢复能力](samples/fuse_recover.md) + + 无服务器场景 + - [如何在Knative环境运行](samples/knative.md) + - [如何保障 Serverless 任务顺利完成](samples/application_controller.md) + 工作负载 - [机器学习](samples/machinelearning.md) + 更多Runtime实现